Commit eb6975f088

protty <45520026+kprotty@users.noreply.github.com>
2021-05-31 18:11:30
std.sync.atomic: extended atomic helper functions (#8866)
- deprecate `std.Thread.spinLoopHint` and move it to `std.atomic.spinLoopHint`
- add an `Atomic(T)` generic wrapper type which replaces `atomic.Bool` and `atomic.Int`
- in `Atomic(T)`, selectively expose member functions depending on `T`, and include bitwise atomic methods when `T` is an integer
- add `fence()` and `compilerFence()` to `std.atomic`
1 parent 57cf9f7
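
A minimal usage sketch of the new `Atomic(T)` API (illustrative only; `bumpAndWait` is a hypothetical helper, not part of this change):

    const std = @import("std");
    const Atomic = std.atomic.Atomic;

    var counter = Atomic(u32).init(0);

    fn bumpAndWait() void {
        // wrapping atomic increment; returns the previous value
        _ = counter.fetchAdd(1, .Monotonic);

        // spin until other threads catch up
        while (counter.load(.Acquire) < 4) {
            std.atomic.spinLoopHint(); // replaces the deprecated std.Thread.spinLoopHint
        }
    }
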
lib/std/atomic/Atomic.zig
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2015-2021 Zig Contributors
+// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
+// The MIT license requires this copyright notice to be included in all copies
+// and substantial portions of the software.
+
+const std = @import("../std.zig");
+
+const testing = std.testing;
+const target = std.Target.current;
+const Ordering = std.atomic.Ordering;
+
+pub fn Atomic(comptime T: type) type {
+    return extern struct {
+        value: T,
+
+        const Self = @This();
+
+        pub fn init(value: T) Self {
+            return .{ .value = value };
+        }
+
+        /// Non-atomically load from the atomic value without synchronization.
+        /// Care must be taken to avoid data-races when interacting with other atomic operations.
+        pub fn loadUnchecked(self: Self) T {
+            return self.value;
+        }
+
+        /// Non-atomically store to the atomic value without synchronization.
+        /// Care must be taken to avoid data-races when interacting with other atomic operations.
+        pub fn storeUnchecked(self: *Self, value: T) void {
+            self.value = value;
+        }
+
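+        /// Atomically loads the value with the given ordering.
+        /// Release and AcqRel orderings are rejected at compile time, as they only apply to stores.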
+        pub fn load(self: *const Self, comptime ordering: Ordering) T {
+            return switch (ordering) {
+                .AcqRel => @compileError(@tagName(ordering) ++ " implies " ++ @tagName(Ordering.Release) ++ " which is only allowed on atomic stores"),
+                .Release => @compileError(@tagName(ordering) ++ " is only allowed on atomic stores"),
+                else => @atomicLoad(T, &self.value, ordering),
+            };
+        }
+
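+        /// Atomically stores the value with the given ordering.
+        /// Acquire and AcqRel orderings are rejected at compile time, as they only apply to loads.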
+        pub fn store(self: *Self, value: T, comptime ordering: Ordering) void {
+            return switch (ordering) {
+                .AcqRel => @compileError(@tagName(ordering) ++ " implies " ++ @tagName(Ordering.Acquire) ++ " which is only allowed on atomic loads"),
+                .Acquire => @compileError(@tagName(ordering) ++ " is only allowed on atomic loads"),
+                else => @atomicStore(T, &self.value, value, ordering),
+            };
+        }
+
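+        /// Atomically replaces the value and returns the previous value.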
+        pub fn swap(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+            return self.rmw(.Xchg, value, ordering);
+        }
+
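+        /// Strong compare-and-swap: atomically replaces the value with `exchange` if it equals `compare`.
+        /// Returns null on success, or the current value on failure.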
+        pub fn compareAndSwap(
+            self: *Self,
+            compare: T,
+            exchange: T,
+            comptime success: Ordering,
+            comptime failure: Ordering,
+        ) callconv(.Inline) ?T {
+            return self.cmpxchg(true, compare, exchange, success, failure);
+        }
+
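+        /// Weak compare-and-swap: like `compareAndSwap`, but may fail spuriously even when
+        /// the value equals `compare`, which can compile to better code on some targets.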
+        pub fn tryCompareAndSwap(
+            self: *Self,
+            compare: T,
+            exchange: T,
+            comptime success: Ordering,
+            comptime failure: Ordering,
+        ) callconv(.Inline) ?T {
+            return self.cmpxchg(false, compare, exchange, success, failure);
+        }
+
+        fn cmpxchg(
+            self: *Self,
+            comptime is_strong: bool,
+            compare: T,
+            exchange: T,
+            comptime success: Ordering,
+            comptime failure: Ordering,
+        ) callconv(.Inline) ?T {
+            if (success == .Unordered or failure == .Unordered) {
+                @compileError(@tagName(Ordering.Unordered) ++ " is only allowed on atomic loads and stores");
+            }
+
+            comptime var success_is_stronger = switch (failure) {
+                .SeqCst => success == .SeqCst,
+                .AcqRel => @compileError(@tagName(failure) ++ " implies " ++ @tagName(Ordering.Release) ++ " which is only allowed on success"),
+                .Acquire => success == .SeqCst or success == .AcqRel or success == .Acquire,
+                .Release => @compileError(@tagName(failure) ++ " is only allowed on success"),
+                .Monotonic => true,
+                .Unordered => unreachable,
+            };
+
+            if (!success_is_stronger) {
+                @compileError(@tagName(success) ++ " must be stronger than " ++ @tagName(failure));
+            }
+
+            return switch (is_strong) {
+                true => @cmpxchgStrong(T, &self.value, compare, exchange, success, failure),
+                false => @cmpxchgWeak(T, &self.value, compare, exchange, success, failure),
+            };
+        }
+
+        fn rmw(
+            self: *Self,
+            comptime op: std.builtin.AtomicRmwOp,
+            value: T,
+            comptime ordering: Ordering,
+        ) callconv(.Inline) T {
+            return @atomicRmw(T, &self.value, op, value, ordering);
+        }
+
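+        /// Returns `functions` when `condition` holds, else an empty struct;
+        /// used with `usingnamespace` below to expose methods only for supported types.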
+        fn exportWhen(comptime condition: bool, comptime functions: type) type {
+            return if (condition) functions else struct {};
+        }
+
+        pub usingnamespace exportWhen(std.meta.trait.isNumber(T), struct {
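+            /// Atomically adds `value` and returns the previous value.
+            /// Integer addition wraps on overflow.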
+            pub fn fetchAdd(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+                return self.rmw(.Add, value, ordering);
+            }
+
+            pub fn fetchSub(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+                return self.rmw(.Sub, value, ordering);
+            }
+
+            pub fn fetchMin(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+                return self.rmw(.Min, value, ordering);
+            }
+
+            pub fn fetchMax(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+                return self.rmw(.Max, value, ordering);
+            }
+        });
+
+        pub usingnamespace exportWhen(std.meta.trait.isIntegral(T), struct {
+            pub fn fetchAnd(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+                return self.rmw(.And, value, ordering);
+            }
+
+            pub fn fetchNand(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+                return self.rmw(.Nand, value, ordering);
+            }
+
+            pub fn fetchOr(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+                return self.rmw(.Or, value, ordering);
+            }
+
+            pub fn fetchXor(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+                return self.rmw(.Xor, value, ordering);
+            }
+
+            const Bit = std.math.Log2Int(T);
+            const BitRmwOp = enum {
+                Set,
+                Reset,
+                Toggle,
+            };
+
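+            /// Atomically sets the bit at index `bit` and returns its previous state.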
+            pub fn bitSet(self: *Self, bit: Bit, comptime ordering: Ordering) callconv(.Inline) u1 {
+                return bitRmw(self, .Set, bit, ordering);
+            }
+
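+            /// Atomically clears the bit at index `bit` and returns its previous state.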
+            pub fn bitReset(self: *Self, bit: Bit, comptime ordering: Ordering) callconv(.Inline) u1 {
+                return bitRmw(self, .Reset, bit, ordering);
+            }
+
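+            /// Atomically flips the bit at index `bit` and returns its previous state.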
+            pub fn bitToggle(self: *Self, bit: Bit, comptime ordering: Ordering) callconv(.Inline) u1 {
+                return bitRmw(self, .Toggle, bit, ordering);
+            }
+
+            fn bitRmw(
+                self: *Self,
+                comptime op: BitRmwOp,
+                bit: Bit,
+                comptime ordering: Ordering,
+            ) callconv(.Inline) u1 {
+                // x86 supports dedicated bitwise instructions
+                if (comptime target.cpu.arch.isX86() and @sizeOf(T) >= 2 and @sizeOf(T) <= 8) {
+                    const instruction = switch (op) {
+                        .Set => "lock bts",
+                        .Reset => "lock btr",
+                        .Toggle => "lock btc",
+                    };
+
+                    const suffix = switch (@sizeOf(T)) {
+                        2 => "w",
+                        4 => "l",
+                        8 => "q",
+                        else => @compileError("Invalid atomic type " ++ @typeName(T)),
+                    };
+
+                    const old_bit = asm volatile (instruction ++ suffix ++ " %[bit], %[ptr]"
+                        : [result] "={@ccc}" (-> u8) // LLVM doesn't support u1 flag register return values
+                        : [ptr] "*p" (&self.value),
+                          [bit] "X" (@as(T, bit))
+                        : "cc", "memory"
+                    );
+
+                    return @intCast(u1, old_bit);
+                }
+
+                const mask = @as(T, 1) << bit;
+                const value = switch (op) {
+                    .Set => self.fetchOr(mask, ordering),
+                    .Reset => self.fetchAnd(~mask, ordering),
+                    .Toggle => self.fetchXor(mask, ordering),
+                };
+
+                return @boolToInt(value & mask != 0);
+            }
+        });
+    };
+}
+
+fn atomicIntTypes() []const type {
+    comptime var bytes = 1;
+    comptime var types: []const type = &[_]type{};
+    inline while (bytes <= @sizeOf(usize)) : (bytes *= 2) {
+        types = types ++ &[_]type{std.meta.Int(.unsigned, bytes * 8)};
+    }
+    return types;
+}
+
+test "Atomic.loadUnchecked" {
+    inline for (atomicIntTypes()) |Int| {
+        var x = Atomic(Int).init(5);
+        try testing.expectEqual(x.loadUnchecked(), 5);
+    }
+}
+
+test "Atomic.storeUnchecked" {
+    inline for (atomicIntTypes()) |Int| {
+        var x = Atomic(Int).init(5);
+        x.storeUnchecked(10);
+        try testing.expectEqual(x.loadUnchecked(), 10);
+    }
+}
+
+test "Atomic.load" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (.{ .Unordered, .Monotonic, .Acquire, .SeqCst }) |ordering| {
+            var x = Atomic(Int).init(5);
+            try testing.expectEqual(x.load(ordering), 5);
+        }
+    }
+}
+
+test "Atomic.store" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (.{ .Unordered, .Monotonic, .Release, .SeqCst }) |ordering| {
+            var x = Atomic(Int).init(5);
+            x.store(10, ordering);
+            try testing.expectEqual(x.load(.SeqCst), 10);
+        }
+    }
+}
+
+const atomic_rmw_orderings = [_]Ordering{
+    .Monotonic,
+    .Acquire,
+    .Release,
+    .AcqRel,
+    .SeqCst,
+};
+
+test "Atomic.swap" {
+    inline for (atomic_rmw_orderings) |ordering| {
+        var x = Atomic(usize).init(5);
+        try testing.expectEqual(x.swap(10, ordering), 5);
+        try testing.expectEqual(x.load(.SeqCst), 10);
+
+        var y = Atomic(enum(usize) { a, b, c }).init(.c);
+        try testing.expectEqual(y.swap(.a, ordering), .c);
+        try testing.expectEqual(y.load(.SeqCst), .a);
+
+        var z = Atomic(f32).init(5.0);
+        try testing.expectEqual(z.swap(10.0, ordering), 5.0);
+        try testing.expectEqual(z.load(.SeqCst), 10.0);
+
+        var a = Atomic(bool).init(false);
+        try testing.expectEqual(a.swap(true, ordering), false);
+        try testing.expectEqual(a.load(.SeqCst), true);
+
+        var b = Atomic(?*u8).init(null);
+        try testing.expectEqual(b.swap(@intToPtr(?*u8, @alignOf(u8)), ordering), null);
+        try testing.expectEqual(b.load(.SeqCst), @intToPtr(?*u8, @alignOf(u8)));
+    }
+}
+
+const atomic_cmpxchg_orderings = [_][2]Ordering{
+    .{ .Monotonic, .Monotonic },
+    .{ .Acquire, .Monotonic },
+    .{ .Acquire, .Acquire },
+    .{ .Release, .Monotonic },
+    // Although accepted by LLVM, an Acquire failure ordering alongside a Release success
+    // ordering would imply AcqRel on success, so the pair is excluded here:
+    // .{ .Release, .Acquire },
+    .{ .AcqRel, .Monotonic },
+    .{ .AcqRel, .Acquire },
+    .{ .SeqCst, .Monotonic },
+    .{ .SeqCst, .Acquire },
+    .{ .SeqCst, .SeqCst },
+};
+
+test "Atomic.compareAndSwap" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_cmpxchg_orderings) |ordering| {
+            var x = Atomic(Int).init(0);
+            try testing.expectEqual(x.compareAndSwap(1, 0, ordering[0], ordering[1]), 0);
+            try testing.expectEqual(x.load(.SeqCst), 0);
+            try testing.expectEqual(x.compareAndSwap(0, 1, ordering[0], ordering[1]), null);
+            try testing.expectEqual(x.load(.SeqCst), 1);
+            try testing.expectEqual(x.compareAndSwap(1, 0, ordering[0], ordering[1]), null);
+            try testing.expectEqual(x.load(.SeqCst), 0);
+        }
+    }
+}
+
+test "Atomic.tryCompareAndSwap" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_cmpxchg_orderings) |ordering| {
+            var x = Atomic(Int).init(0);
+
+            try testing.expectEqual(x.tryCompareAndSwap(1, 0, ordering[0], ordering[1]), 0);
+            try testing.expectEqual(x.load(.SeqCst), 0);
+
+            while (x.tryCompareAndSwap(0, 1, ordering[0], ordering[1])) |_| {}
+            try testing.expectEqual(x.load(.SeqCst), 1);
+
+            while (x.tryCompareAndSwap(1, 0, ordering[0], ordering[1])) |_| {}
+            try testing.expectEqual(x.load(.SeqCst), 0);
+        }
+    }
+}
+
+test "Atomic.fetchAdd" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_rmw_orderings) |ordering| {
+            var x = Atomic(Int).init(5);
+            try testing.expectEqual(x.fetchAdd(5, ordering), 5);
+            try testing.expectEqual(x.load(.SeqCst), 10);
+            try testing.expectEqual(x.fetchAdd(std.math.maxInt(Int), ordering), 10);
+            try testing.expectEqual(x.load(.SeqCst), 9);
+        }
+    }
+}
+
+test "Atomic.fetchSub" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_rmw_orderings) |ordering| {
+            var x = Atomic(Int).init(5);
+            try testing.expectEqual(x.fetchSub(5, ordering), 5);
+            try testing.expectEqual(x.load(.SeqCst), 0);
+            try testing.expectEqual(x.fetchSub(1, ordering), 0);
+            try testing.expectEqual(x.load(.SeqCst), std.math.maxInt(Int));
+        }
+    }
+}
+
+test "Atomic.fetchMin" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_rmw_orderings) |ordering| {
+            var x = Atomic(Int).init(5);
+            try testing.expectEqual(x.fetchMin(0, ordering), 5);
+            try testing.expectEqual(x.load(.SeqCst), 0);
+            try testing.expectEqual(x.fetchMin(10, ordering), 0);
+            try testing.expectEqual(x.load(.SeqCst), 0);
+        }
+    }
+}
+
+test "Atomic.fetchMax" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_rmw_orderings) |ordering| {
+            var x = Atomic(Int).init(5);
+            try testing.expectEqual(x.fetchMax(10, ordering), 5);
+            try testing.expectEqual(x.load(.SeqCst), 10);
+            try testing.expectEqual(x.fetchMax(5, ordering), 10);
+            try testing.expectEqual(x.load(.SeqCst), 10);
+        }
+    }
+}
+
+test "Atomic.fetchAnd" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_rmw_orderings) |ordering| {
+            var x = Atomic(Int).init(0b11);
+            try testing.expectEqual(x.fetchAnd(0b10, ordering), 0b11);
+            try testing.expectEqual(x.load(.SeqCst), 0b10);
+            try testing.expectEqual(x.fetchAnd(0b00, ordering), 0b10);
+            try testing.expectEqual(x.load(.SeqCst), 0b00);
+        }
+    }
+}
+
+test "Atomic.fetchNand" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_rmw_orderings) |ordering| {
+            var x = Atomic(Int).init(0b11);
+            try testing.expectEqual(x.fetchNand(0b10, ordering), 0b11);
+            try testing.expectEqual(x.load(.SeqCst), ~@as(Int, 0b10));
+            try testing.expectEqual(x.fetchNand(0b00, ordering), ~@as(Int, 0b10));
+            try testing.expectEqual(x.load(.SeqCst), ~@as(Int, 0b00));
+        }
+    }
+}
+
+test "Atomic.fetchOr" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_rmw_orderings) |ordering| {
+            var x = Atomic(Int).init(0b11);
+            try testing.expectEqual(x.fetchOr(0b100, ordering), 0b11);
+            try testing.expectEqual(x.load(.SeqCst), 0b111);
+            try testing.expectEqual(x.fetchOr(0b010, ordering), 0b111);
+            try testing.expectEqual(x.load(.SeqCst), 0b111);
+        }
+    }
+}
+
+test "Atomic.fetchXor" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_rmw_orderings) |ordering| {
+            var x = Atomic(Int).init(0b11);
+            try testing.expectEqual(x.fetchXor(0b10, ordering), 0b11);
+            try testing.expectEqual(x.load(.SeqCst), 0b01);
+            try testing.expectEqual(x.fetchXor(0b01, ordering), 0b01);
+            try testing.expectEqual(x.load(.SeqCst), 0b00);
+        }
+    }
+}
+
+test "Atomic.bitSet" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_rmw_orderings) |ordering| {
+            var x = Atomic(Int).init(0);
+            const bit_array = @as([std.meta.bitCount(Int)]void, undefined);
+
+            for (bit_array) |_, bit_index| {
+                const bit = @intCast(std.math.Log2Int(Int), bit_index);
+                const mask = @as(Int, 1) << bit;
+
+                // setting the bit should change the bit
+                try testing.expect(x.load(.SeqCst) & mask == 0);
+                try testing.expectEqual(x.bitSet(bit, ordering), 0);
+                try testing.expect(x.load(.SeqCst) & mask != 0);
+
+                // setting it again shouldn't change the bit
+                try testing.expectEqual(x.bitSet(bit, ordering), 1);
+                try testing.expect(x.load(.SeqCst) & mask != 0);
+
+                // all the previous bits should not have changed (still be set)
+                for (bit_array[0..bit_index]) |_, prev_bit_index| {
+                    const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
+                    const prev_mask = @as(Int, 1) << prev_bit;
+                    try testing.expect(x.load(.SeqCst) & prev_mask != 0);
+                }
+            }
+        }
+    }
+}
+
+test "Atomic.bitReset" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_rmw_orderings) |ordering| {
+            var x = Atomic(Int).init(0);
+            const bit_array = @as([std.meta.bitCount(Int)]void, undefined);
+
+            for (bit_array) |_, bit_index| {
+                const bit = @intCast(std.math.Log2Int(Int), bit_index);
+                const mask = @as(Int, 1) << bit;
+                x.storeUnchecked(x.loadUnchecked() | mask);
+
+                // unsetting the bit should change the bit
+                try testing.expect(x.load(.SeqCst) & mask != 0);
+                try testing.expectEqual(x.bitReset(bit, ordering), 1);
+                try testing.expect(x.load(.SeqCst) & mask == 0);
+
+                // unsetting it again shouldn't change the bit
+                try testing.expectEqual(x.bitReset(bit, ordering), 0);
+                try testing.expect(x.load(.SeqCst) & mask == 0);
+
+                // all the previous bits should not have changed (still be reset)
+                for (bit_array[0..bit_index]) |_, prev_bit_index| {
+                    const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
+                    const prev_mask = @as(Int, 1) << prev_bit;
+                    try testing.expect(x.load(.SeqCst) & prev_mask == 0);
+                }
+            }
+        }
+    }
+}
+
+test "Atomic.bitToggle" {
+    inline for (atomicIntTypes()) |Int| {
+        inline for (atomic_rmw_orderings) |ordering| {
+            var x = Atomic(Int).init(0);
+            const bit_array = @as([std.meta.bitCount(Int)]void, undefined);
+
+            for (bit_array) |_, bit_index| {
+                const bit = @intCast(std.math.Log2Int(Int), bit_index);
+                const mask = @as(Int, 1) << bit;
+
+                // toggling the bit should change the bit
+                try testing.expect(x.load(.SeqCst) & mask == 0);
+                try testing.expectEqual(x.bitToggle(bit, ordering), 0);
+                try testing.expect(x.load(.SeqCst) & mask != 0);
+
+                // toggling it again *should* change the bit
+                try testing.expectEqual(x.bitToggle(bit, ordering), 1);
+                try testing.expect(x.load(.SeqCst) & mask == 0);
+
+                // all the previous bits should not have changed (still be toggled back)
+                for (bit_array[0..bit_index]) |_, prev_bit_index| {
+                    const prev_bit = @intCast(std.math.Log2Int(Int), prev_bit_index);
+                    const prev_mask = @as(Int, 1) << prev_bit;
+                    try testing.expect(x.load(.SeqCst) & prev_mask == 0);
+                }
+            }
+        }
+    }
+}
lib/std/atomic/bool.zig
@@ -1,55 +0,0 @@
-// SPDX-License-Identifier: MIT
-// Copyright (c) 2015-2021 Zig Contributors
-// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
-// The MIT license requires this copyright notice to be included in all copies
-// and substantial portions of the software.
-
-const std = @import("std");
-const builtin = std.builtin;
-const testing = std.testing;
-
-/// Thread-safe, lock-free boolean
-pub const Bool = extern struct {
-    unprotected_value: bool,
-
-    pub const Self = @This();
-
-    pub fn init(init_val: bool) Self {
-        return Self{ .unprotected_value = init_val };
-    }
-
-    // xchg is only valid rmw operation for a bool
-    /// Atomically modifies memory and then returns the previous value.
-    pub fn xchg(self: *Self, operand: bool, comptime ordering: std.builtin.AtomicOrder) bool {
-        switch (ordering) {
-            .Monotonic, .Acquire, .Release, .AcqRel, .SeqCst => {},
-            else => @compileError("Invalid ordering '" ++ @tagName(ordering) ++ "' for a RMW operation"),
-        }
-        return @atomicRmw(bool, &self.unprotected_value, .Xchg, operand, ordering);
-    }
-
-    pub fn load(self: *const Self, comptime ordering: std.builtin.AtomicOrder) bool {
-        switch (ordering) {
-            .Unordered, .Monotonic, .Acquire, .SeqCst => {},
-            else => @compileError("Invalid ordering '" ++ @tagName(ordering) ++ "' for a load operation"),
-        }
-        return @atomicLoad(bool, &self.unprotected_value, ordering);
-    }
-
-    pub fn store(self: *Self, value: bool, comptime ordering: std.builtin.AtomicOrder) void {
-        switch (ordering) {
-            .Unordered, .Monotonic, .Release, .SeqCst => {},
-            else => @compileError("Invalid ordering '" ++ @tagName(ordering) ++ "' for a store operation"),
-        }
-        @atomicStore(bool, &self.unprotected_value, value, ordering);
-    }
-};
-
-test "std.atomic.Bool" {
-    var a = Bool.init(false);
-    try testing.expectEqual(false, a.xchg(false, .SeqCst));
-    try testing.expectEqual(false, a.load(.SeqCst));
-    a.store(true, .SeqCst);
-    try testing.expectEqual(true, a.xchg(false, .SeqCst));
-    try testing.expectEqual(false, a.load(.SeqCst));
-}
lib/std/atomic/int.zig
@@ -1,92 +0,0 @@
-// SPDX-License-Identifier: MIT
-// Copyright (c) 2015-2021 Zig Contributors
-// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
-// The MIT license requires this copyright notice to be included in all copies
-// and substantial portions of the software.
-
-const std = @import("std");
-const builtin = std.builtin;
-const testing = std.testing;
-
-/// Thread-safe, lock-free integer
-pub fn Int(comptime T: type) type {
-    if (!std.meta.trait.isIntegral(T))
-        @compileError("Expected integral type, got '" ++ @typeName(T) ++ "'");
-
-    return extern struct {
-        unprotected_value: T,
-
-        pub const Self = @This();
-
-        pub fn init(init_val: T) Self {
-            return Self{ .unprotected_value = init_val };
-        }
-
-        /// Read, Modify, Write
-        pub fn rmw(self: *Self, comptime op: builtin.AtomicRmwOp, operand: T, comptime ordering: builtin.AtomicOrder) T {
-            switch (ordering) {
-                .Monotonic, .Acquire, .Release, .AcqRel, .SeqCst => {},
-                else => @compileError("Invalid ordering '" ++ @tagName(ordering) ++ "' for a RMW operation"),
-            }
-            return @atomicRmw(T, &self.unprotected_value, op, operand, ordering);
-        }
-
-        pub fn load(self: *const Self, comptime ordering: builtin.AtomicOrder) T {
-            switch (ordering) {
-                .Unordered, .Monotonic, .Acquire, .SeqCst => {},
-                else => @compileError("Invalid ordering '" ++ @tagName(ordering) ++ "' for a load operation"),
-            }
-            return @atomicLoad(T, &self.unprotected_value, ordering);
-        }
-
-        pub fn store(self: *Self, value: T, comptime ordering: builtin.AtomicOrder) void {
-            switch (ordering) {
-                .Unordered, .Monotonic, .Release, .SeqCst => {},
-                else => @compileError("Invalid ordering '" ++ @tagName(ordering) ++ "' for a store operation"),
-            }
-            @atomicStore(T, &self.unprotected_value, value, ordering);
-        }
-
-        /// Twos complement wraparound increment
-        /// Returns previous value
-        pub fn incr(self: *Self) T {
-            return self.rmw(.Add, 1, .SeqCst);
-        }
-
-        /// Twos complement wraparound decrement
-        /// Returns previous value
-        pub fn decr(self: *Self) T {
-            return self.rmw(.Sub, 1, .SeqCst);
-        }
-
-        pub fn get(self: *const Self) T {
-            return self.load(.SeqCst);
-        }
-
-        pub fn set(self: *Self, new_value: T) void {
-            self.store(new_value, .SeqCst);
-        }
-
-        pub fn xchg(self: *Self, new_value: T) T {
-            return self.rmw(.Xchg, new_value, .SeqCst);
-        }
-
-        /// Twos complement wraparound add
-        /// Returns previous value
-        pub fn fetchAdd(self: *Self, op: T) T {
-            return self.rmw(.Add, op, .SeqCst);
-        }
-    };
-}
-
-test "std.atomic.Int" {
-    var a = Int(u8).init(0);
-    try testing.expectEqual(@as(u8, 0), a.incr());
-    try testing.expectEqual(@as(u8, 1), a.load(.SeqCst));
-    a.store(42, .SeqCst);
-    try testing.expectEqual(@as(u8, 42), a.decr());
-    try testing.expectEqual(@as(u8, 41), a.xchg(100));
-    try testing.expectEqual(@as(u8, 100), a.fetchAdd(5));
-    try testing.expectEqual(@as(u8, 105), a.get());
-    a.set(200);
-}
lib/std/Thread/Condition.zig
@@ -115,7 +115,7 @@ pub const AtomicCondition = struct {
                             else => unreachable,
                         }
                     },
-                    else => spinLoopHint(),
+                    else => std.atomic.spinLoopHint(),
                 }
             }
         }
lib/std/Thread/Mutex.zig
@@ -126,7 +126,7 @@ pub const AtomicMutex = struct {
 
             var iter = std.math.min(32, spin + 1);
             while (iter > 0) : (iter -= 1)
-                std.Thread.spinLoopHint();
+                std.atomic.spinLoopHint();
         }
 
         new_state = .waiting;
@@ -149,7 +149,7 @@ pub const AtomicMutex = struct {
                         else => unreachable,
                     }
                 },
-                else => std.Thread.spinLoopHint(),
+                else => std.atomic.spinLoopHint(),
             }
         }
     }
lib/std/Thread/StaticResetEvent.zig
@@ -182,7 +182,7 @@ pub const AtomicEvent = struct {
                 timer = time.Timer.start() catch return error.TimedOut;
 
             while (@atomicLoad(u32, waiters, .Acquire) != WAKE) {
-                std.os.sched_yield() catch std.Thread.spinLoopHint();
+                std.os.sched_yield() catch std.atomic.spinLoopHint();
                 if (timeout) |timeout_ns| {
                     if (timer.read() >= timeout_ns)
                         return error.TimedOut;
@@ -293,7 +293,7 @@ pub const AtomicEvent = struct {
                         return @intToPtr(?windows.HANDLE, handle);
                     },
                     LOADING => {
-                        std.os.sched_yield() catch std.Thread.spinLoopHint();
+                        std.os.sched_yield() catch std.atomic.spinLoopHint();
                         handle = @atomicLoad(usize, &event_handle, .Monotonic);
                     },
                     else => {
lib/std/atomic.zig
@@ -3,14 +3,82 @@
 // This file is part of [zig](https://ziglang.org/), which is MIT licensed.
 // The MIT license requires this copyright notice to be included in all copies
 // and substantial portions of the software.
+
+const std = @import("std.zig");
+const target = std.Target.current;
+
+pub const Ordering = std.builtin.AtomicOrder;
+
 pub const Stack = @import("atomic/stack.zig").Stack;
 pub const Queue = @import("atomic/queue.zig").Queue;
-pub const Bool = @import("atomic/bool.zig").Bool;
-pub const Int = @import("atomic/int.zig").Int;
+pub const Atomic = @import("atomic/Atomic.zig").Atomic;
 
 test "std.atomic" {
     _ = @import("atomic/stack.zig");
     _ = @import("atomic/queue.zig");
-    _ = @import("atomic/bool.zig");
-    _ = @import("atomic/int.zig");
+    _ = @import("atomic/Atomic.zig");
+}
+
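+/// Emits a runtime memory barrier with the given ordering.
+/// Unordered and Monotonic are rejected, as they only apply to operations on a given memory location.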
+pub fn fence(comptime ordering: Ordering) callconv(.Inline) void {
+    switch (ordering) {
+        .Acquire, .Release, .AcqRel, .SeqCst => {
+            @fence(ordering);
+        },
+        else => @compileError(@tagName(ordering) ++ " only applies to a given memory location"),
+    }
+}
+
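+/// Prevents the compiler from reordering memory accesses across this point,
+/// without emitting a hardware memory barrier.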
+pub fn compilerFence(comptime ordering: Ordering) callconv(.Inline) void {
+    switch (ordering) {
+        .Acquire, .Release, .AcqRel, .SeqCst => asm volatile ("" ::: "memory"),
+        else => @compileError(@tagName(ordering) ++ " only applies to a given memory location"),
+    }
+}
+
+test "fence/compilerFence" {
+    inline for (.{ .Acquire, .Release, .AcqRel, .SeqCst }) |ordering| {
+        compilerFence(ordering);
+        fence(ordering);
+    }
+}
+
+/// Signals to the processor that the caller is inside a busy-wait spin-loop.
+pub fn spinLoopHint() callconv(.Inline) void {
+    const hint_instruction = switch (target.cpu.arch) {
+        // No-op instruction that hints at saving pipelining/power resources (or sharing them with a sibling hardware thread).
+        // https://software.intel.com/content/www/us/en/develop/articles/benefitting-power-and-performance-sleep-loops.html
+        .i386, .x86_64 => "pause",
+
+        // No-op instruction that serves as a hardware-thread resource yield hint.
+        // https://stackoverflow.com/a/7588941
+        .powerpc64, .powerpc64le => "or 27, 27, 27",
+
+        // `isb` appears more reliable for releasing execution resources than `yield` on common aarch64 CPUs.
+        // https://bugs.java.com/bugdatabase/view_bug.do?bug_id=8258604
+        // https://bugs.mysql.com/bug.php?id=100664
+        .aarch64, .aarch64_be, .aarch64_32 => "isb",
+
+        // `yield` was introduced in v6k but is also available on v6m.
+        // https://www.keil.com/support/man/docs/armasm/armasm_dom1361289926796.htm
+        .arm, .armeb, .thumb, .thumbeb => blk: {
+            const can_yield = comptime std.Target.arm.featureSetHasAny(target.cpu.features, .{ .has_v6k, .has_v6m });
+            const instruction = if (can_yield) "yield" else "";
+            break :blk instruction;
+        },
+
+        else => "",
+    };
+
+    // Memory barrier to prevent the compiler from optimizing away the spin-loop
+    // even if no hint_instruction was provided.
+    asm volatile (hint_instruction ::: "memory");
+}
+
+test "spinLoopHint" {
+    var i: usize = 10;
+    while (i > 0) : (i -= 1) {
+        spinLoopHint();
+    }
 }
lib/std/json.zig
@@ -2111,7 +2111,10 @@ test "parse into struct with duplicate field" {
     const ballast = try testing.allocator.alloc(u64, 1);
     defer testing.allocator.free(ballast);
 
-    const options_first = ParseOptions{ .allocator = testing.allocator, .duplicate_field_behavior = .UseFirst };
+    const options_first = ParseOptions{
+        .allocator = testing.allocator,
+        .duplicate_field_behavior = .UseFirst,
+    };
 
     const options_last = ParseOptions{
         .allocator = testing.allocator,
lib/std/os.zig
@@ -5534,7 +5534,7 @@ pub const CopyFileRangeError = error{
 
 var has_copy_file_range_syscall = init: {
     const kernel_has_syscall = std.Target.current.os.isAtLeast(.linux, .{ .major = 4, .minor = 5 }) orelse true;
-    break :init std.atomic.Bool.init(kernel_has_syscall);
+    break :init std.atomic.Atomic(bool).init(kernel_has_syscall);
 };
 
 /// Transfer data between file descriptors at specified offsets.
lib/std/packed_int_array.zig
@@ -379,9 +379,9 @@ test "PackedIntArray" {
 }
 
 test "PackedIntIo" {
-    const bytes = [_]u8 { 0b01101_000, 0b01011_110, 0b00011_101 };
-    try testing.expectEqual(@as(u15,  0x2bcd), PackedIntIo(u15, .Little).get(&bytes, 0, 3));
-    try testing.expectEqual(@as(u16,  0xabcd), PackedIntIo(u16, .Little).get(&bytes, 0, 3));
+    const bytes = [_]u8{ 0b01101_000, 0b01011_110, 0b00011_101 };
+    try testing.expectEqual(@as(u15, 0x2bcd), PackedIntIo(u15, .Little).get(&bytes, 0, 3));
+    try testing.expectEqual(@as(u16, 0xabcd), PackedIntIo(u16, .Little).get(&bytes, 0, 3));
     try testing.expectEqual(@as(u17, 0x1abcd), PackedIntIo(u17, .Little).get(&bytes, 0, 3));
     try testing.expectEqual(@as(u18, 0x3abcd), PackedIntIo(u18, .Little).get(&bytes, 0, 3));
 }
lib/std/target.zig
@@ -767,6 +767,13 @@ pub const Target = struct {
             spirv32,
             spirv64,
 
+            pub fn isX86(arch: Arch) bool {
+                return switch (arch) {
+                    .i386, .x86_64 => true,
+                    else => false,
+                };
+            }
+
             pub fn isARM(arch: Arch) bool {
                 return switch (arch) {
                     .arm, .armeb => true,
lib/std/Thread.zig
@@ -67,33 +67,7 @@ else switch (std.Target.current.os.tag) {
     else => struct {},
 };
 
-/// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
-pub inline fn spinLoopHint() void {
-    switch (std.Target.current.cpu.arch) {
-        .i386, .x86_64 => {
-            asm volatile ("pause" ::: "memory");
-        },
-        .arm, .armeb, .thumb, .thumbeb => {
-            // `yield` was introduced in v6k but are also available on v6m.
-            const can_yield = comptime std.Target.arm.featureSetHasAny(std.Target.current.cpu.features, .{ .has_v6k, .has_v6m });
-            if (can_yield) asm volatile ("yield" ::: "memory")
-            // Fallback.
-            else asm volatile ("" ::: "memory");
-        },
-        .aarch64, .aarch64_be, .aarch64_32 => {
-            asm volatile ("isb" ::: "memory");
-        },
-        .powerpc64, .powerpc64le => {
-            // No-op that serves as `yield` hint.
-            asm volatile ("or 27, 27, 27" ::: "memory");
-        },
-        else => {
-            // Do nothing but prevent the compiler from optimizing away the
-            // spinning loop.
-            asm volatile ("" ::: "memory");
-        },
-    }
-}
+pub const spinLoopHint = @compileError("deprecated: use std.atomic.spinLoopHint");
 
 /// Returns the ID of the calling thread.
 /// Makes a syscall every time the function is called.
@@ -597,8 +571,13 @@ pub fn getCurrentThreadId() u64 {
     }
 }
 
-test {
+test "std.Thread" {
     if (!builtin.single_threaded) {
-        std.testing.refAllDecls(@This());
+        _ = AutoResetEvent;
+        _ = ResetEvent;
+        _ = StaticResetEvent;
+        _ = Mutex;
+        _ = Semaphore;
+        _ = Condition;
     }
 }
src/BuiltinFn.zig
@@ -400,7 +400,7 @@ pub const list = list: {
             "@fence",
             .{
                 .tag = .fence,
-                .param_count = 0,
+                .param_count = 1,
             },
         },
         .{
CMakeLists.txt
@@ -337,8 +337,7 @@ set(ZIG_STAGE2_SOURCES
     "${CMAKE_SOURCE_DIR}/lib/std/array_list.zig"
     "${CMAKE_SOURCE_DIR}/lib/std/ascii.zig"
     "${CMAKE_SOURCE_DIR}/lib/std/atomic.zig"
-    "${CMAKE_SOURCE_DIR}/lib/std/atomic/bool.zig"
-    "${CMAKE_SOURCE_DIR}/lib/std/atomic/int.zig"
+    "${CMAKE_SOURCE_DIR}/lib/std/atomic/Atomic.zig"
     "${CMAKE_SOURCE_DIR}/lib/std/atomic/queue.zig"
     "${CMAKE_SOURCE_DIR}/lib/std/atomic/stack.zig"
     "${CMAKE_SOURCE_DIR}/lib/std/base64.zig"