//! A lock that supports one writer or many readers.
//! This API is for kernel threads, not evented I/O.
//! No runtime initialization is required: the default value is a valid,
//! unlocked lock, and the core operations cannot fail.
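//!
//! A minimal usage sketch (the `shared_counter` variable and the two helper
//! functions below are illustrative, not part of this file):
//!
//! ```
//! const std = @import("std");
//!
//! var rwl: std.Thread.RwLock = .{};
//! // Hypothetical data guarded by the lock.
//! var shared_counter: usize = 0;
//!
//! // Any number of threads may hold the shared lock at the same time.
//! fn readCounter() usize {
//!     rwl.lockShared();
//!     defer rwl.unlockShared();
//!     return shared_counter;
//! }
//!
//! // Only one thread may hold the exclusive lock, and never alongside readers.
//! fn bumpCounter() void {
//!     rwl.lock();
//!     defer rwl.unlock();
//!     shared_counter += 1;
//! }
//! ```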

impl: Impl = .{},

const RwLock = @This();
const std = @import("../std.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const testing = std.testing;

pub const Impl = if (builtin.single_threaded)
    SingleThreadedRwLock
else if (std.Thread.use_pthreads)
    PthreadRwLock
else
    DefaultRwLock;

/// Attempts to obtain exclusive lock ownership.
/// Returns `true` if the lock is obtained, `false` otherwise.
pub fn tryLock(rwl: *RwLock) bool {
    return rwl.impl.tryLock();
}

/// Blocks until exclusive lock ownership is acquired.
pub fn lock(rwl: *RwLock) void {
    return rwl.impl.lock();
}

/// Releases a held exclusive lock.
/// Asserts the lock is held exclusively.
pub fn unlock(rwl: *RwLock) void {
    return rwl.impl.unlock();
}

/// Attempts to obtain shared lock ownership.
/// Returns `true` if the lock is obtained, `false` otherwise.
pub fn tryLockShared(rwl: *RwLock) bool {
    return rwl.impl.tryLockShared();
}

/// Obtains shared lock ownership.
/// Blocks if another thread has exclusive ownership.
/// May block if another thread is attempting to get exclusive ownership.
pub fn lockShared(rwl: *RwLock) void {
    return rwl.impl.lockShared();
}

/// Releases a held shared lock.
pub fn unlockShared(rwl: *RwLock) void {
    return rwl.impl.unlockShared();
}

/// Single-threaded applications use this for deadlock checks in
/// debug mode; in release modes the checks compile to no-ops.
pub const SingleThreadedRwLock = struct {
    state: enum { unlocked, locked_exclusive, locked_shared } = .unlocked,
    shared_count: usize = 0,

    /// Attempts to obtain exclusive lock ownership.
    /// Returns `true` if the lock is obtained, `false` otherwise.
    pub fn tryLock(rwl: *SingleThreadedRwLock) bool {
        switch (rwl.state) {
            .unlocked => {
                assert(rwl.shared_count == 0);
                rwl.state = .locked_exclusive;
                return true;
            },
            .locked_exclusive, .locked_shared => return false,
        }
    }

    /// Blocks until exclusive lock ownership is acquired.
    pub fn lock(rwl: *SingleThreadedRwLock) void {
        assert(rwl.state == .unlocked); // deadlock detected
        assert(rwl.shared_count == 0); // corrupted state detected
        rwl.state = .locked_exclusive;
    }

    /// Releases a held exclusive lock.
    /// Asserts the lock is held exclusively.
    pub fn unlock(rwl: *SingleThreadedRwLock) void {
        assert(rwl.state == .locked_exclusive);
        assert(rwl.shared_count == 0); // corrupted state detected
        rwl.state = .unlocked;
    }

    /// Attempts to obtain shared lock ownership.
    /// Returns `true` if the lock is obtained, `false` otherwise.
    pub fn tryLockShared(rwl: *SingleThreadedRwLock) bool {
        switch (rwl.state) {
            .unlocked => {
                rwl.state = .locked_shared;
                assert(rwl.shared_count == 0);
                rwl.shared_count = 1;
                return true;
            },
            .locked_shared => {
                rwl.shared_count += 1;
                return true;
            },
            .locked_exclusive => return false,
        }
    }

    /// Blocks until shared lock ownership is acquired.
    pub fn lockShared(rwl: *SingleThreadedRwLock) void {
        switch (rwl.state) {
            .unlocked => {
                rwl.state = .locked_shared;
                assert(rwl.shared_count == 0);
                rwl.shared_count = 1;
            },
            .locked_shared => {
                rwl.shared_count += 1;
            },
            .locked_exclusive => unreachable, // deadlock detected
        }
    }

    /// Releases a held shared lock.
    pub fn unlockShared(rwl: *SingleThreadedRwLock) void {
        switch (rwl.state) {
            .unlocked => unreachable, // too many calls to `unlockShared`
            .locked_exclusive => unreachable, // exclusively held lock
            .locked_shared => {
                rwl.shared_count -= 1;
                if (rwl.shared_count == 0) {
                    rwl.state = .unlocked;
                }
            },
        }
    }
};

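/// Thin wrapper around the POSIX `pthread_rwlock_t`; selected when
/// `std.Thread.use_pthreads` is true.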
pub const PthreadRwLock = struct {
    rwlock: std.c.pthread_rwlock_t = .{},

    pub fn tryLock(rwl: *PthreadRwLock) bool {
        return std.c.pthread_rwlock_trywrlock(&rwl.rwlock) == .SUCCESS;
    }

    pub fn lock(rwl: *PthreadRwLock) void {
        const rc = std.c.pthread_rwlock_wrlock(&rwl.rwlock);
        assert(rc == .SUCCESS);
    }

    pub fn unlock(rwl: *PthreadRwLock) void {
        const rc = std.c.pthread_rwlock_unlock(&rwl.rwlock);
        assert(rc == .SUCCESS);
    }

    pub fn tryLockShared(rwl: *PthreadRwLock) bool {
        return std.c.pthread_rwlock_tryrdlock(&rwl.rwlock) == .SUCCESS;
    }

    pub fn lockShared(rwl: *PthreadRwLock) void {
        const rc = std.c.pthread_rwlock_rdlock(&rwl.rwlock);
        assert(rc == .SUCCESS);
    }

    pub fn unlockShared(rwl: *PthreadRwLock) void {
        const rc = std.c.pthread_rwlock_unlock(&rwl.rwlock);
        assert(rc == .SUCCESS);
    }
};

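/// Fallback implementation built from a `Mutex`, a `Semaphore`, and a single
/// packed atomic state word (layout described below); a pending writer diverts
/// incoming readers onto the mutex so that it eventually gains exclusive access.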
pub const DefaultRwLock = struct {
    state: usize = 0,
    mutex: std.Thread.Mutex = .{},
    semaphore: std.Thread.Semaphore = .{},

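    // Layout of `state`: bit 0 is the IS_WRITING flag, the next @bitSizeOf(Count)
    // bits count pending writers, and the @bitSizeOf(Count) bits above those
    // count active readers.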
    const IS_WRITING: usize = 1;
    const WRITER: usize = 1 << 1;
    const READER: usize = 1 << (1 + @bitSizeOf(Count));
    const WRITER_MASK: usize = std.math.maxInt(Count) << @ctz(WRITER);
    const READER_MASK: usize = std.math.maxInt(Count) << @ctz(READER);
    const Count = std.meta.Int(.unsigned, @divFloor(@bitSizeOf(usize) - 1, 2));

    pub fn tryLock(rwl: *DefaultRwLock) bool {
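        // Acquiring the mutex keeps out other writers and the readers' slow
        // path; the exclusive lock is taken only if no readers are active.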
        if (rwl.mutex.tryLock()) {
            const state = @atomicLoad(usize, &rwl.state, .seq_cst);
            if (state & READER_MASK == 0) {
                _ = @atomicRmw(usize, &rwl.state, .Or, IS_WRITING, .seq_cst);
                return true;
            }

            rwl.mutex.unlock();
        }

        return false;
    }

    pub fn lock(rwl: *DefaultRwLock) void {
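        // Announce a pending writer so incoming readers take the slow path,
        // then acquire the mutex to serialize with other writers and with
        // readers queued on it.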
        _ = @atomicRmw(usize, &rwl.state, .Add, WRITER, .seq_cst);
        rwl.mutex.lock();

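        // Trade the pending-writer ticket for the IS_WRITING flag. If readers
        // still hold the lock, wait for the last of them to post the semaphore.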
        const state = @atomicRmw(usize, &rwl.state, .Add, IS_WRITING -% WRITER, .seq_cst);
        if (state & READER_MASK != 0)
            rwl.semaphore.wait();
    }

    pub fn unlock(rwl: *DefaultRwLock) void {
        _ = @atomicRmw(usize, &rwl.state, .And, ~IS_WRITING, .seq_cst);
        rwl.mutex.unlock();
    }

    pub fn tryLockShared(rwl: *DefaultRwLock) bool {
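        // Fast path: register a reader with a single CAS when no writer holds
        // or is waiting for the lock.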
        const state = @atomicLoad(usize, &rwl.state, .seq_cst);
        if (state & (IS_WRITING | WRITER_MASK) == 0) {
            _ = @cmpxchgStrong(
                usize,
                &rwl.state,
                state,
                state + READER,
                .seq_cst,
                .seq_cst,
            ) orelse return true;
        }

        if (rwl.mutex.tryLock()) {
            _ = @atomicRmw(usize, &rwl.state, .Add, READER, .seq_cst);
            rwl.mutex.unlock();
            return true;
        }

        return false;
    }

    pub fn lockShared(rwl: *DefaultRwLock) void {
        var state = @atomicLoad(usize, &rwl.state, .seq_cst);
        while (state & (IS_WRITING | WRITER_MASK) == 0) {
            state = @cmpxchgWeak(
                usize,
                &rwl.state,
                state,
                state + READER,
                .seq_cst,
                .seq_cst,
            ) orelse return;
        }

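        // A writer holds or is waiting for the lock: queue on the mutex behind
        // it, then register as a reader.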
        rwl.mutex.lock();
        _ = @atomicRmw(usize, &rwl.state, .Add, READER, .seq_cst);
        rwl.mutex.unlock();
    }

    pub fn unlockShared(rwl: *DefaultRwLock) void {
        const state = @atomicRmw(usize, &rwl.state, .Sub, READER, .seq_cst);

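        // If this was the last reader and a writer is parked in `lock`, wake it.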
        if ((state & READER_MASK == READER) and (state & IS_WRITING != 0))
            rwl.semaphore.post();
    }
};

test "DefaultRwLock - internal state" {
    var rwl = DefaultRwLock{};

    // The following failed prior to the fix for Issue #13163,
    // where the WRITER flag was subtracted by the lock method.

    rwl.lock();
    rwl.unlock();
    try testing.expectEqual(rwl, DefaultRwLock{});
}

test "smoke test" {
    var rwl = RwLock{};

    rwl.lock();
    try testing.expect(!rwl.tryLock());
    try testing.expect(!rwl.tryLockShared());
    rwl.unlock();

    try testing.expect(rwl.tryLock());
    try testing.expect(!rwl.tryLock());
    try testing.expect(!rwl.tryLockShared());
    rwl.unlock();

    rwl.lockShared();
    try testing.expect(!rwl.tryLock());
    try testing.expect(rwl.tryLockShared());
    rwl.unlockShared();
    rwl.unlockShared();

    try testing.expect(rwl.tryLockShared());
    try testing.expect(!rwl.tryLock());
    try testing.expect(rwl.tryLockShared());
    rwl.unlockShared();
    rwl.unlockShared();

    rwl.lock();
    rwl.unlock();
}

test "concurrent access" {
    if (builtin.single_threaded)
        return;

    const num_writers: usize = 2;
    const num_readers: usize = 4;
    const num_writes: usize = 1000;
    const num_reads: usize = 2000;

    const Runner = struct {
        const Runner = @This();

        rwl: RwLock,
        writes: usize,
        reads: std.atomic.Value(usize),

        val_a: usize,
        val_b: usize,

        fn reader(run: *Runner, thread_idx: usize) !void {
            var prng = std.Random.DefaultPrng.init(thread_idx);
            const rnd = prng.random();
            while (true) {
                run.rwl.lockShared();
                defer run.rwl.unlockShared();

                try testing.expect(run.writes <= num_writes);
                if (run.reads.fetchAdd(1, .monotonic) >= num_reads) break;

                // We use `volatile` accesses so that we can make sure the memory is accessed either
                // side of a yield, maximising chances of a race.
                const a_ptr: *const volatile usize = &run.val_a;
                const b_ptr: *const volatile usize = &run.val_b;

                const old_a = a_ptr.*;
                if (rnd.boolean()) try std.Thread.yield();
                const old_b = b_ptr.*;
                try testing.expect(old_a == old_b);
            }
        }

        fn writer(run: *Runner, thread_idx: usize) !void {
            var prng = std.Random.DefaultPrng.init(thread_idx);
            const rnd = prng.random();
            while (true) {
                run.rwl.lock();
                defer run.rwl.unlock();

                try testing.expect(run.writes <= num_writes);
                if (run.writes == num_writes) break;

                // We use `volatile` accesses so that we can make sure the memory is accessed either
                // side of a yield, maximising chances of a race.
                const a_ptr: *volatile usize = &run.val_a;
                const b_ptr: *volatile usize = &run.val_b;

                const new_val = rnd.int(usize);

                const old_a = a_ptr.*;
                a_ptr.* = new_val;
                if (rnd.boolean()) try std.Thread.yield();
                const old_b = b_ptr.*;
                b_ptr.* = new_val;
                try testing.expect(old_a == old_b);

                run.writes += 1;
            }
        }
    };

    var run: Runner = .{
        .rwl = .{},
        .writes = 0,
        .reads = .init(0),
        .val_a = 0,
        .val_b = 0,
    };
    var write_threads: [num_writers]std.Thread = undefined;
    var read_threads: [num_readers]std.Thread = undefined;

    for (&write_threads, 0..) |*t, i| t.* = try .spawn(.{}, Runner.writer, .{ &run, i });
    for (&read_threads, num_writers..) |*t, i| t.* = try .spawn(.{}, Runner.reader, .{ &run, i });

    for (write_threads) |t| t.join();
    for (read_threads) |t| t.join();

    try testing.expect(run.writes == num_writes);
    try testing.expect(run.reads.raw >= num_reads);
}