Commit c2b1c88953

Andrew Kelley <andrew@ziglang.org>
2020-12-24 04:35:53
std: fix compile errors introduced in previous commit
Parent: 177377b
lib/std/fs/test.zig
@@ -758,7 +758,8 @@ test "open file with exclusive lock twice, make sure it waits" {
         }
     };
 
-    var evt = std.ResetEvent.init();
+    var evt: std.ResetEvent = undefined;
+    try evt.init();
     defer evt.deinit();
 
     const t = try std.Thread.spawn(S.C{ .dir = &tmp.dir, .evt = &evt }, S.checkFn);
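
This hunk suggests std.ResetEvent now initializes in place and can fail, rather than being returned by value from init(). A minimal sketch of the new pattern on its own, under that assumption; the set() call is included only to show the event being used afterwards, and the surrounding test fixture is omitted:

    var evt: std.ResetEvent = undefined;
    try evt.init();     // fallible, initializes the event in place
    defer evt.deinit(); // cleanup is still explicit
    evt.set();          // wake any thread blocked in evt.wait()
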
lib/std/auto_reset_event.zig
@@ -7,14 +7,15 @@ const std = @import("std.zig");
 const builtin = @import("builtin");
 const testing = std.testing;
 const assert = std.debug.assert;
+const StaticResetEvent = std.StaticResetEvent;
 
-/// Similar to std.ResetEvent but on `set()` it also (atomically) does `reset()`.
-/// Unlike std.ResetEvent, `wait()` can only be called by one thread (MPSC-like).
+/// Similar to `StaticResetEvent` but on `set()` it also (atomically) does `reset()`.
+/// Unlike StaticResetEvent, `wait()` can only be called by one thread (MPSC-like).
 pub const AutoResetEvent = struct {
     /// AutoResetEvent has 3 possible states:
     /// - UNSET: the AutoResetEvent is currently unset
     /// - SET: the AutoResetEvent was notified before a wait() was called
-    /// - <std.ResetEvent pointer>: there is an active waiter waiting for a notification.
+    /// - <StaticResetEvent pointer>: there is an active waiter waiting for a notification.
     ///
     /// When attempting to wait:
     ///  if the event is unset, it registers a ResetEvent pointer to be notified when the event is set
@@ -25,20 +26,20 @@ pub const AutoResetEvent = struct {
     ///  if there's a waiting ResetEvent, then we unset the event and notify the ResetEvent
     ///
     /// This ensures that the event is automatically reset after a wait() has been issued
-    /// and avoids the race condition when using std.ResetEvent in the following scenario:
-    ///  thread 1                | thread 2
-    ///  std.ResetEvent.wait()   |
-    ///                          | std.ResetEvent.set()
-    ///                          | std.ResetEvent.set()
-    ///  std.ResetEvent.reset()  |
-    ///  std.ResetEvent.wait()   | (missed the second .set() notification above)
+    /// and avoids the race condition when using StaticResetEvent in the following scenario:
+    ///  thread 1                  | thread 2
+    ///  StaticResetEvent.wait()   |
+    ///                            | StaticResetEvent.set()
+    ///                            | StaticResetEvent.set()
+    ///  StaticResetEvent.reset()  |
+    ///  StaticResetEvent.wait()   | (missed the second .set() notification above)
     state: usize = UNSET,
 
     const UNSET = 0;
     const SET = 1;
 
-    /// the minimum alignment for the `*std.ResetEvent` created by wait*()
-    const event_align = std.math.max(@alignOf(std.ResetEvent), 2);
+    /// the minimum alignment for the `*StaticResetEvent` created by wait*()
+    const event_align = std.math.max(@alignOf(StaticResetEvent), 2);
 
     pub fn wait(self: *AutoResetEvent) void {
         self.waitFor(null) catch unreachable;
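
The doc comment above describes the auto-reset semantics and the missed-wakeup race they avoid. A hedged, self-contained sketch of how a caller relies on that behaviour; the helper name is illustrative, std.AutoResetEvent is assumed to be the re-export of this struct, Thread.spawn follows the (context, function) signature visible in the fs/test.zig hunk above, and t.wait() is assumed to be this era's thread join call:

    const std = @import("std");

    fn notifier(event: *std.AutoResetEvent) void {
        // a second set() arriving before the next wait() would simply coalesce
        event.set();
    }

    test "AutoResetEvent usage sketch" {
        var event = std.AutoResetEvent{}; // default-init: state starts at UNSET
        const t = try std.Thread.spawn(&event, notifier);
        event.wait(); // returns once set() fires, and resets atomically
        t.wait();     // join the helper thread
    }
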
@@ -49,12 +50,9 @@ pub const AutoResetEvent = struct {
     }
 
     fn waitFor(self: *AutoResetEvent, timeout: ?u64) error{TimedOut}!void {
-        // lazily initialized std.ResetEvent
-        var reset_event: std.ResetEvent align(event_align) = undefined;
+        // lazily initialized StaticResetEvent
+        var reset_event: StaticResetEvent align(event_align) = undefined;
         var has_reset_event = false;
-        defer if (has_reset_event) {
-            reset_event.deinit();
-        };
 
         var state = @atomicLoad(usize, &self.state, .SeqCst);
         while (true) {
@@ -72,7 +70,7 @@ pub const AutoResetEvent = struct {
             // lazily initialize the ResetEvent if it hasn't been already
             if (!has_reset_event) {
                 has_reset_event = true;
-                reset_event = std.ResetEvent.init();
+                reset_event = .{};
             }
 
             // Since the AutoResetEvent currently isn't set,
@@ -97,9 +95,10 @@ pub const AutoResetEvent = struct {
             };
 
             // wait with a timeout and return if signalled via set()
-            if (reset_event.timedWait(timeout_ns)) |_| {
-                return;
-            } else |timed_out| {}
+            switch (reset_event.timedWait(timeout_ns)) {
+                .event_set => return,
+                .timed_out => {},
+            }
 
             // If we timed out, we need to transition the AutoResetEvent back to UNSET.
             // If we don't, then when we return, a set() thread could observe a pointer to an invalid ResetEvent.
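
The replacement above turns timedWait from an error-union call into a plain two-tag result. A side-by-side sketch of the two call shapes, reconstructed from the removed and added lines (timeout_ns as in the surrounding code):

    // before: TimedOut surfaced as an error that had to be caught and discarded
    if (reset_event.timedWait(timeout_ns)) |_| {
        return;
    } else |_| {}

    // after: an ordinary switch over the result, no error set involved
    switch (reset_event.timedWait(timeout_ns)) {
        .event_set => return,
        .timed_out => {}, // fall through and unregister this waiter
    }
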
@@ -164,7 +163,7 @@ pub const AutoResetEvent = struct {
                 continue;
             }
 
-            const reset_event = @intToPtr(*align(event_align) std.ResetEvent, state);
+            const reset_event = @intToPtr(*align(event_align) StaticResetEvent, state);
             reset_event.set();
             return;
         }
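
The set() path above recovers a waiter's StaticResetEvent straight from the state word, which only works because event_align keeps waiter addresses from colliding with the UNSET/SET sentinels. A small self-contained sketch of that invariant; the test name is illustrative and event_align mirrors the constant defined above:

    const std = @import("std");
    const event_align = std.math.max(@alignOf(std.StaticResetEvent), 2);

    test "waiter pointers never collide with the UNSET/SET sentinels" {
        var reset_event: std.StaticResetEvent align(event_align) = .{};
        const state = @ptrToInt(&reset_event);
        // an alignment of at least 2 makes the address even and nonzero,
        // so it can never be read back as UNSET (0) or SET (1)
        std.debug.assert(state != 0 and state != 1);
    }
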
lib/std/mutex.zig
@@ -10,7 +10,7 @@ const assert = std.debug.assert;
 const windows = os.windows;
 const testing = std.testing;
 const SpinLock = std.SpinLock;
-const ResetEvent = std.ResetEvent;
+const StaticResetEvent = std.StaticResetEvent;
 
 /// Lock may be held only once. If the same thread tries to acquire
 /// the same mutex twice, it deadlocks.  This type supports static
@@ -54,7 +54,7 @@ else if (builtin.link_libc or builtin.os.tag == .linux)
 
         const Node = struct {
             next: ?*Node,
-            event: ResetEvent,
+            event: StaticResetEvent,
         };
 
         pub fn tryAcquire(self: *Mutex) ?Held {
@@ -90,11 +90,12 @@ else if (builtin.link_libc or builtin.os.tag == .linux)
                     state = @atomicLoad(usize, &self.state, .Monotonic);
                 }
 
-                // create the ResetEvent node on the stack
+                // create the StaticResetEvent node on the stack
                 // (faster than threadlocal on platforms like OSX)
-                var node: Node = undefined;
-                node.event = ResetEvent.init();
-                defer node.event.deinit();
+                var node: Node = .{
+                    .next = undefined,
+                    .event = .{},
+                };
 
                 // we've spun too long, try and add our node to the LIFO queue.
                 // if the mutex becomes available in the process, try and grab it instead.
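
Because StaticResetEvent has a valid zero state, the wait node can now be built with a struct literal and never needs deinit. A hedged, self-contained illustration using a local stand-in for the Node shown above:

    const std = @import("std");

    // local stand-in mirroring the Node above, for illustration only
    const Node = struct {
        next: ?*Node,
        event: std.StaticResetEvent,
    };

    test "queue node needs no runtime init for its event" {
        var node: Node = .{
            .next = null, // the real code leaves this undefined until the LIFO push
            .event = .{}, // plain zero-state value, nothing to deinit
        };
        node.event.set(); // would wake a waiter parked in node.event.wait()
    }
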
@@ -284,7 +285,7 @@ const WindowsMutex = struct {
     fn acquireSlow(self: *WindowsMutex) Held {
         // try to use NT keyed events for blocking, falling back to spinlock if unavailable
         @setCold(true);
-        const handle = ResetEvent.Impl.Futex.getEventHandle() orelse return self.acquireSpinning();
+        const handle = StaticResetEvent.Impl.Futex.getEventHandle() orelse return self.acquireSpinning();
         const key = @ptrCast(*const c_void, &self.state.waiters);
 
         while (true) : (SpinLock.loopHint(1)) {
@@ -312,7 +313,7 @@ const WindowsMutex = struct {
         pub fn release(self: Held) void {
             // unlock without a rmw/cmpxchg instruction
             @atomicStore(u8, @ptrCast(*u8, &self.mutex.state.locked), 0, .Release);
-            const handle = ResetEvent.Impl.Futex.getEventHandle() orelse return;
+            const handle = StaticResetEvent.Impl.Futex.getEventHandle() orelse return;
             const key = @ptrCast(*const c_void, &self.mutex.state.waiters);
 
             while (true) : (SpinLock.loopHint(1)) {
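
For context, the caller-facing locking API is untouched by these hunks; only the parking primitive behind it changes. A hedged sketch, where acquire() is assumed from the tryAcquire/acquireSlow/Held.release names above and the empty-struct initialization from the static-initialization note in the doc comment:

    const std = @import("std");

    test "mutex caller-facing sketch" {
        var mutex = std.Mutex{};      // statically initializable
        const held = mutex.acquire(); // may park on a StaticResetEvent internally
        defer held.release();
        // ... critical section ...
    }
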
lib/std/StaticResetEvent.zig
@@ -105,7 +105,7 @@ pub const DebugEvent = struct {
         }
     }
 
-    fn timedWait(ev: *DebugEvent, timeout: u64) TimedWaitResult {
+    pub fn timedWait(ev: *DebugEvent, timeout: u64) TimedWaitResult {
         switch (ev.state) {
             .unset => return .timed_out,
             .set => return .event_set,
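
Making DebugEvent.timedWait public matches the new call site in auto_reset_event.zig above. A hedged sketch of consuming the result through the outer std.StaticResetEvent wrapper, with the two tags taken from the switch body in this hunk; the 1-second timeout is illustrative:

    const std = @import("std");

    test "StaticResetEvent.timedWait sketch" {
        var ev: std.StaticResetEvent = .{};
        ev.set(); // set first so the wait below returns immediately
        switch (ev.timedWait(1000000000)) { // timeout in nanoseconds
            .event_set => {}, // observed the set()
            .timed_out => {}, // would mean the deadline expired first
        }
    }
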