/*
   Copyright (c) 2011, 2014 mingw-w64 project
   Copyright (c) 2015 Intel Corporation

   Permission is hereby granted, free of charge, to any person obtaining a
   copy of this software and associated documentation files (the "Software"),
   to deal in the Software without restriction, including without limitation
   the rights to use, copy, modify, merge, publish, distribute, sublicense,
   and/or sell copies of the Software, and to permit persons to whom the
   Software is furnished to do so, subject to the following conditions:

   The above copyright notice and this permission notice shall be included in
   all copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
   AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
   DEALINGS IN THE SOFTWARE.
*/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <malloc.h>
#include <stdbool.h>
#include <stdio.h>

#define WIN32_LEAN_AND_MEAN
#include <windows.h>

#define WINPTHREAD_MUTEX_DECL WINPTHREAD_API

/* public header files */
#include "pthread.h"
/* internal header files */
#include "misc.h"

typedef enum {
  Unlocked,       /* Not locked. */
  Locked,         /* Locked but without waiters. */
  Waiting,        /* Locked, may have waiters. */
} mutex_state_t;

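/* Locking follows the classic three-state protocol (compare Drepper's
   "Futexes Are Tricky"): a lock attempt atomically exchanges the state to
   Locked and owns the mutex if the old state was Unlocked; otherwise it
   exchanges the state to Waiting and sleeps on the event.  Unlock exchanges
   the state to Unlocked and signals the event only if the old state was
   Waiting.  Because Waiting only means "may have waiters", a wakeup can be
   spurious; waiters therefore re-check the state in a loop. */
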
typedef enum {
  Normal,
  Errorcheck,
  Recursive,
} mutex_type_t;

/* The heap-allocated part of a mutex. */
typedef struct {
  mutex_state_t state;
  mutex_type_t type;
  HANDLE event;         /* Auto-reset event, or NULL if not yet allocated. */
  unsigned rec_lock;    /* For recursive mutexes, the number of times the
                           mutex has been locked in excess by the same thread. */
  volatile DWORD owner; /* For recursive and error-checking mutexes, the
                           ID of the owning thread if the mutex is locked. */
} mutex_impl_t;

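/* The pthread_mutex_t word itself holds either one of the static
   initialiser values (small negative integers, see is_static_initializer
   below) or a pointer to a heap-allocated mutex_impl_t. */
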
/* Whether a mutex is still a static initializer (not a pointer to
   a mutex_impl_t). */
static bool
is_static_initializer(pthread_mutex_t m)
{
  /* Treat 0 as a static initializer as well (for normal mutexes),
     to tolerate sloppy code in libgomp. (We should rather fix that code!) */
  intptr_t v = (intptr_t)m;
  return v >= -3 && v <= 0;
/* Should be simple:
  return (uintptr_t)m >= (uintptr_t)-3; */
}

/* Create and return the implementation part of a mutex from a static
   initialiser. Return NULL on out-of-memory error. */
static WINPTHREADS_ATTRIBUTE((noinline)) mutex_impl_t *
mutex_impl_init(pthread_mutex_t *m, mutex_impl_t *mi)
{
  mutex_impl_t *new_mi = malloc(sizeof(mutex_impl_t));
  if (new_mi == NULL)
    return NULL;
  new_mi->state = Unlocked;
  new_mi->type = (mi == (void *)PTHREAD_RECURSIVE_MUTEX_INITIALIZER ? Recursive
                  : mi == (void *)PTHREAD_ERRORCHECK_MUTEX_INITIALIZER ? Errorcheck
                  : Normal);
  new_mi->event = NULL;
  new_mi->rec_lock = 0;
  new_mi->owner = (DWORD)-1;
  if (InterlockedCompareExchangePointer((PVOID volatile *)m, new_mi, mi) == mi) {
    return new_mi;
  } else {
    /* Someone created the struct before us. */
    free(new_mi);
    return (mutex_impl_t *)*m;
  }
}

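/* The allocation race above is benign: if several threads lock a statically
   initialised mutex concurrently, each allocates a candidate mutex_impl_t,
   but the interlocked compare-exchange lets exactly one pointer win; the
   losers free their copy and adopt the published one. */
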
/* Return the implementation part of a mutex, creating it if necessary.
   Return NULL on out-of-memory error. */
static inline mutex_impl_t *
mutex_impl(pthread_mutex_t *m)
{
  mutex_impl_t *mi = (mutex_impl_t *)*m;
  if (is_static_initializer((pthread_mutex_t)mi)) {
    return mutex_impl_init(m, mi);
  } else {
    /* mi cannot be null here; avoid a test in the fast path. */
    if (mi == NULL)
      UNREACHABLE();
    return mi;
  }
}

/* Lock a mutex. Give up after 'timeout' ms (with ETIMEDOUT),
   or never if timeout=INFINITE. */
static inline int
pthread_mutex_lock_intern (pthread_mutex_t *m, DWORD timeout)
{
  mutex_impl_t *mi = mutex_impl(m);
  if (mi == NULL)
    return ENOMEM;
  mutex_state_t old_state = InterlockedExchange((long *)&mi->state, Locked);
  if (unlikely(old_state != Unlocked)) {
    /* The mutex is already locked. */

    if (mi->type != Normal) {
      /* Recursive or Errorcheck */
      if (mi->owner == GetCurrentThreadId()) {
        /* FIXME: A recursive mutex should not need two atomic ops when locking
           recursively.  We could rewrite by doing compare-and-swap instead of
           test-and-set the first time, but it would lead to more code
           duplication and add a conditional branch to the critical path. */
        InterlockedCompareExchange((long *)&mi->state, old_state, Locked);
        if (mi->type == Recursive) {
          mi->rec_lock++;
          return 0;
        } else {
          /* type == Errorcheck */
          return EDEADLK;
        }
      }
    }

    /* Make sure there is an event object on which to wait. */
    if (mi->event == NULL) {
      /* Make an auto-reset event object. */
      HANDLE ev = CreateEvent(NULL, false, false, NULL);
      if (ev == NULL) {
        switch (GetLastError()) {
        case ERROR_ACCESS_DENIED:
          return EPERM;
        default:
          return ENOMEM;    /* Probably accurate enough. */
        }
      }
      if (InterlockedCompareExchangePointer(&mi->event, ev, NULL) != NULL) {
        /* Someone created the event before us. */
        CloseHandle(ev);
      }
    }

    /* At this point, mi->event is non-NULL. */

    while (InterlockedExchange((long *)&mi->state, Waiting) != Unlocked) {
      /* For timed locking attempts, it is possible (although unlikely)
         that we are woken up but someone else grabs the lock before us,
         and we have to go back to sleep again.  In that case, the total
         wait may be longer than expected. */

      unsigned r = _pthread_wait_for_single_object(mi->event, timeout);
      switch (r) {
      case WAIT_TIMEOUT:
        return ETIMEDOUT;
      case WAIT_OBJECT_0:
        break;
      default:
        return EINVAL;
      }
    }
  }

  if (mi->type != Normal)
    mi->owner = GetCurrentThreadId();

  return 0;
}

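/* No explicit fences are needed around the state changes above: the
   Interlocked* functions act as full memory barriers on Windows, so data
   protected by the mutex is correctly ordered across lock and unlock. */
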
int
pthread_mutex_lock (pthread_mutex_t *m)
{
  return pthread_mutex_lock_intern (m, INFINITE);
}

/* Internal version which always uses `struct _timespec64`. */
static int __pthread_mutex_timedlock(pthread_mutex_t *m, const struct _timespec64 *ts)
{
  unsigned long long patience;
  if (ts != NULL) {
    unsigned long long end = _pthread_time_in_ms_from_timespec(ts);
    unsigned long long now = _pthread_time_in_ms();
    patience = end > now ? end - now : 0;
    if (patience > 0xffffffff)
      patience = INFINITE;
  } else {
    patience = INFINITE;
  }
  return pthread_mutex_lock_intern(m, patience);
}

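/* The conversion above turns the absolute _timespec64 deadline into a
   relative wait in milliseconds.  Relative waits that do not fit in a DWORD
   (about 49.7 days) are treated as unbounded, since INFINITE is 0xFFFFFFFF. */
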
int pthread_mutex_timedlock64(pthread_mutex_t *m, const struct _timespec64 *ts)
{
  return __pthread_mutex_timedlock (m, ts);
}

int pthread_mutex_timedlock32(pthread_mutex_t *m, const struct _timespec32 *ts)
{
  struct _timespec64 ts64 = {.tv_sec = ts->tv_sec, .tv_nsec = ts->tv_nsec};
  return __pthread_mutex_timedlock (m, &ts64);
}

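/* Usage sketch (illustrative only, not part of the library): given a
   pthread_mutex_t m, a caller bounds the wait with an absolute
   CLOCK_REALTIME deadline, e.g.

     struct timespec abstime;
     clock_gettime (CLOCK_REALTIME, &abstime);
     abstime.tv_sec += 1;   // give up after roughly one second
     if (pthread_mutex_timedlock (&m, &abstime) == ETIMEDOUT)
       ;  // handle contention

   The public pthread_mutex_timedlock resolves to one of the two wrappers
   above, presumably depending on whether a 32- or 64-bit time_t is in use. */
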
int pthread_mutex_unlock(pthread_mutex_t *m)
{
  /* Here m might be an initialiser of an error-checking or recursive mutex,
     in which case the behaviour is well-defined, so we can't skip this
     check. */
  mutex_impl_t *mi = mutex_impl(m);
  if (mi == NULL)
    return ENOMEM;

  if (unlikely(mi->type != Normal)) {
    if (mi->state == Unlocked)
      return EINVAL;
    if (mi->owner != GetCurrentThreadId())
      return EPERM;
    if (mi->rec_lock > 0) {
      mi->rec_lock--;
      return 0;
    }
    mi->owner = (DWORD)-1;
  }
  if (unlikely(InterlockedExchange((long *)&mi->state, Unlocked) == Waiting)) {
    if (!SetEvent(mi->event))
      return EPERM;
  }
  return 0;
}

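/* Waking one waiter is enough here: mi->event is an auto-reset event, so a
   single SetEvent releases at most one waiting thread.  If no thread is
   blocked (Waiting only means "may have waiters"), the event simply stays
   set and the next waiter's wait returns immediately. */
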
int pthread_mutex_trylock(pthread_mutex_t *m)
{
  mutex_impl_t *mi = mutex_impl(m);
  if (mi == NULL)
    return ENOMEM;

  if (InterlockedCompareExchange((long *)&mi->state, Locked, Unlocked) == Unlocked) {
    if (mi->type != Normal)
      mi->owner = GetCurrentThreadId();
    return 0;
  } else {
    if (mi->type == Recursive && mi->owner == GetCurrentThreadId()) {
      mi->rec_lock++;
      return 0;
    }
    return EBUSY;
  }
}

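/* trylock needs only the single compare-exchange above: it never blocks,
   so it never has to create or wait on the event object. */
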
int
pthread_mutex_init (pthread_mutex_t *m, const pthread_mutexattr_t *a)
{
  pthread_mutex_t init = PTHREAD_MUTEX_INITIALIZER;
  if (a != NULL) {
    int pshared;
    if (pthread_mutexattr_getpshared(a, &pshared) == 0
        && pshared == PTHREAD_PROCESS_SHARED)
      return ENOSYS;

    int type;
    if (pthread_mutexattr_gettype(a, &type) == 0) {
      switch (type) {
      case PTHREAD_MUTEX_ERRORCHECK:
        init = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER;
        break;
      case PTHREAD_MUTEX_RECURSIVE:
        init = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
        break;
      default:
        init = PTHREAD_MUTEX_INITIALIZER;
        break;
      }
    }
  }
  *m = init;
  return 0;
}

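/* pthread_mutex_init merely stores the static initialiser that matches the
   requested type; the mutex_impl_t is allocated lazily on first use, which
   keeps initialisation cheap and unable to fail with ENOMEM. */
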
int pthread_mutex_destroy (pthread_mutex_t *m)
{
  mutex_impl_t *mi = (mutex_impl_t *)*m;
  if (!is_static_initializer((pthread_mutex_t)mi)) {
    if (mi->event != NULL)
      CloseHandle(mi->event);
    free(mi);
    /* Sabotage attempts to re-use the mutex before initialising it again. */
    *m = (pthread_mutex_t)NULL;
  }

  return 0;
}

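/* POSIX leaves destroying a locked mutex undefined, so no attempt is made
   here to detect that case. */
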
int pthread_mutexattr_init(pthread_mutexattr_t *a)
{
  *a = PTHREAD_MUTEX_NORMAL | (PTHREAD_PROCESS_PRIVATE << 3);
  return 0;
}

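/* Attribute words pack their fields into bits, as the accessors below show:
   bits 0-1 hold the type, bit 2 the process-shared flag, bits 3-4 the
   protocol, and the remaining high bits the priority ceiling scaled by
   PTHREAD_PRIO_MULT.  (The initialiser above can shift
   PTHREAD_PROCESS_PRIVATE by 3 only because that constant is 0.) */
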
int pthread_mutexattr_destroy(pthread_mutexattr_t *a)
{
  if (!a)
    return EINVAL;

  return 0;
}

int pthread_mutexattr_gettype(const pthread_mutexattr_t *a, int *type)
{
  if (!a || !type)
    return EINVAL;

  *type = *a & 3;

  return 0;
}

int pthread_mutexattr_settype(pthread_mutexattr_t *a, int type)
{
  if (!a || (type != PTHREAD_MUTEX_NORMAL && type != PTHREAD_MUTEX_RECURSIVE
             && type != PTHREAD_MUTEX_ERRORCHECK))
    return EINVAL;
  *a &= ~3;
  *a |= type;

  return 0;
}

int pthread_mutexattr_getpshared(const pthread_mutexattr_t *a, int *type)
{
  if (!a || !type)
    return EINVAL;
  *type = (*a & 4 ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE);

  return 0;
}

int pthread_mutexattr_setpshared(pthread_mutexattr_t *a, int type)
{
  int r = 0;
  if (!a || (type != PTHREAD_PROCESS_SHARED
             && type != PTHREAD_PROCESS_PRIVATE))
    return EINVAL;
  if (type == PTHREAD_PROCESS_SHARED)
    {
      type = PTHREAD_PROCESS_PRIVATE;
      r = ENOSYS;
    }
  type = (type == PTHREAD_PROCESS_SHARED ? 4 : 0);

  *a &= ~4;
  *a |= type;

  return r;
}

int pthread_mutexattr_getprotocol(const pthread_mutexattr_t *a, int *type)
{
  *type = *a & (8 + 16);

  return 0;
}

int pthread_mutexattr_setprotocol(pthread_mutexattr_t *a, int type)
{
  if ((type & (8 + 16)) != 8 + 16)
    return EINVAL;

  *a &= ~(8 + 16);
  *a |= type;

  return 0;
}

int pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *a, int *prio)
{
  *prio = *a / PTHREAD_PRIO_MULT;
  return 0;
}

int pthread_mutexattr_setprioceiling(pthread_mutexattr_t *a, int prio)
{
  *a &= (PTHREAD_PRIO_MULT - 1);
  *a += prio * PTHREAD_PRIO_MULT;

  return 0;
}