1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to
5 * deal in the Software without restriction, including without limitation the
6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 * sell copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19 * IN THE SOFTWARE.
20 */
21
22 #include "uv.h"
23 #include "internal.h"
24
25 #include <pthread.h>
26 #include <assert.h>
27 #include <errno.h>
28
29 #include <sys/time.h>
30 #include <sys/resource.h> /* getrlimit() */
31 #include <unistd.h> /* getpagesize() */
32
33 #include <limits.h>
34
35 #ifdef __MVS__
36 #include <sys/ipc.h>
37 #include <sys/sem.h>
38 #endif
39
40 #if defined(__GLIBC__) && !defined(__UCLIBC__)
41 #include <gnu/libc-version.h> /* gnu_get_libc_version() */
42 #endif
43
44 #if defined(__linux__)
45 # include <sched.h>
46 # define uv__cpu_set_t cpu_set_t
47 #elif defined(__FreeBSD__)
48 # include <sys/param.h>
49 # include <sys/cpuset.h>
50 # include <pthread_np.h>
51 # define uv__cpu_set_t cpuset_t
52 #endif
53
54
55 #undef NANOSEC
56 #define NANOSEC ((uint64_t) 1e9)
57
/* Musl's PTHREAD_STACK_MIN is 2 KB on all architectures, which is
 * too small to safely receive signals on.
 *
 * Musl's PTHREAD_STACK_MIN + MINSIGSTKSZ == 8192 on arm64 (which has
 * the largest MINSIGSTKSZ of the architectures that musl supports) so
 * let's use that as a lower bound.
 *
 * We use a hardcoded value because PTHREAD_STACK_MIN + MINSIGSTKSZ
 * is between 28 and 133 KB when compiling against glibc, depending
 * on the architecture.
 */
static size_t uv__min_stack_size(void) {
  static const size_t lower_bound = 8192;

#ifdef PTHREAD_STACK_MIN  /* Not defined on NetBSD. */
  /* Never go below what the platform itself requires. */
  if ((size_t) PTHREAD_STACK_MIN > lower_bound)
    return PTHREAD_STACK_MIN;
#endif  /* PTHREAD_STACK_MIN */

  return lower_bound;
}
79
80
/* On Linux, threads created by musl have a much smaller stack than threads
 * created by glibc (80 vs. 2048 or 4096 kB.) Follow glibc for consistency.
 */
static size_t uv__default_stack_size(void) {
#if defined(__linux__)
# if defined(__PPC__) || defined(__ppc__) || defined(__powerpc__)
  return 4 << 20;  /* glibc default. */
# else
  return 2 << 20;  /* glibc default. */
# endif
#else
  /* Elsewhere, defer to the platform's own default stack size. */
  return 0;
#endif
}
93
94
/* On MacOS, threads other than the main thread are created with a reduced
 * stack size by default. Adjust to RLIMIT_STACK aligned to the page size.
 */
size_t uv__thread_stack_size(void) {
#if defined(__APPLE__) || defined(__linux__)
  struct rlimit lim;

  /* getrlimit() can fail on some aarch64 systems due to a glibc bug where
   * the system call wrapper invokes the wrong system call. Don't treat
   * that as fatal, just use the default stack size instead.
   */
  if (getrlimit(RLIMIT_STACK, &lim) == 0 && lim.rlim_cur != RLIM_INFINITY) {
    /* pthread_attr_setstacksize() expects page-aligned values;
     * round the soft limit down to a page boundary. */
    lim.rlim_cur -= lim.rlim_cur % (rlim_t) getpagesize();

    if (lim.rlim_cur >= (rlim_t) uv__min_stack_size())
      return lim.rlim_cur;
  }
#endif

  return uv__default_stack_size();
}
121
122
/* Create a thread with the default options (no explicit stack size). */
int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
  uv_thread_options_t options;

  options.flags = UV_THREAD_NO_FLAGS;
  return uv_thread_create_ex(tid, &options, entry, arg);
}
128
/* Create a thread running entry(arg). When params carries
 * UV_THREAD_HAS_STACK_SIZE, the requested stack size is rounded up to a
 * page boundary and clamped to the platform's minimum; otherwise the
 * stack size derived from RLIMIT_STACK (or the platform default) is used.
 * Returns 0 or a negative libuv error code. */
int uv_thread_create_ex(uv_thread_t* tid,
                        const uv_thread_options_t* params,
                        void (*entry)(void *arg),
                        void *arg) {
  pthread_attr_t attr_storage;
  pthread_attr_t* attr;
  size_t pagesize;
  size_t stack_size;
  size_t min_stack_size;
  int err;

  /* Used to squelch a -Wcast-function-type warning. */
  union {
    void (*in)(void*);
    void* (*out)(void*);
  } f;

  stack_size = 0;
  if (params->flags & UV_THREAD_HAS_STACK_SIZE)
    stack_size = params->stack_size;

  if (stack_size == 0) {
    stack_size = uv__thread_stack_size();
  } else {
    /* Round up to the nearest page boundary. */
    pagesize = (size_t) getpagesize();
    stack_size = (stack_size + pagesize - 1) & ~(pagesize - 1);

    min_stack_size = uv__min_stack_size();
    if (stack_size < min_stack_size)
      stack_size = min_stack_size;
  }

  attr = NULL;
  if (stack_size > 0) {
    attr = &attr_storage;

    if (pthread_attr_init(attr) != 0 ||
        pthread_attr_setstacksize(attr, stack_size) != 0)
      abort();
  }

  f.in = entry;
  err = pthread_create(tid, attr, f.out, arg);

  if (attr != NULL)
    pthread_attr_destroy(attr);

  return UV__ERR(err);
}
179
180 #if UV__CPU_AFFINITY_SUPPORTED
181
/* Apply the CPU affinity mask in cpumask (one byte per CPU, nonzero =
 * allowed) to thread tid. When oldmask is non-NULL the previous mask is
 * stored there first. mask_size must be at least uv_cpumask_size() bytes.
 * Returns 0 or a negative libuv error code. */
int uv_thread_setaffinity(uv_thread_t* tid,
                          char* cpumask,
                          char* oldmask,
                          size_t mask_size) {
  uv__cpu_set_t cpuset;
  int cpumasksize;
  int r;
  int i;

  cpumasksize = uv_cpumask_size();
  if (cpumasksize < 0)
    return cpumasksize;
  if (mask_size < (size_t) cpumasksize)
    return UV_EINVAL;

  /* Capture the current affinity before changing it, if requested. */
  if (oldmask != NULL) {
    r = uv_thread_getaffinity(tid, oldmask, mask_size);
    if (r < 0)
      return r;
  }

  CPU_ZERO(&cpuset);
  for (i = 0; i < cpumasksize; i++) {
    if (cpumask[i])
      CPU_SET(i, &cpuset);
  }

#if defined(__ANDROID__)
  /* Android lacks pthread_setaffinity_np(); go through the tid instead. */
  r = 0;
  if (sched_setaffinity(pthread_gettid_np(*tid), sizeof(cpuset), &cpuset))
    r = errno;
#else
  r = pthread_setaffinity_np(*tid, sizeof(cpuset), &cpuset);
#endif

  return UV__ERR(r);
}
219
220
/* Fetch thread tid's CPU affinity into cpumask (one byte per CPU, 1 =
 * allowed, 0 = not). mask_size must be at least uv_cpumask_size() bytes.
 * Returns 0 or a negative libuv error code. */
int uv_thread_getaffinity(uv_thread_t* tid,
                          char* cpumask,
                          size_t mask_size) {
  uv__cpu_set_t cpuset;
  int cpumasksize;
  int r;
  int i;

  cpumasksize = uv_cpumask_size();
  if (cpumasksize < 0)
    return cpumasksize;
  if (mask_size < (size_t) cpumasksize)
    return UV_EINVAL;

  CPU_ZERO(&cpuset);

#if defined(__ANDROID__)
  /* Android lacks pthread_getaffinity_np(); go through the tid instead. */
  r = 0;
  if (sched_getaffinity(pthread_gettid_np(*tid), sizeof(cpuset), &cpuset))
    r = errno;
#else
  r = pthread_getaffinity_np(*tid, sizeof(cpuset), &cpuset);
#endif

  if (r)
    return UV__ERR(r);

  for (i = 0; i < cpumasksize; i++)
    cpumask[i] = !!CPU_ISSET(i, &cpuset);

  return 0;
}
251 #else
/* CPU affinity is not supported on this platform; always fails. */
int uv_thread_setaffinity(uv_thread_t* tid,
                          char* cpumask,
                          char* oldmask,
                          size_t mask_size) {
  return UV_ENOTSUP;
}
258
259
/* CPU affinity is not supported on this platform; always fails. */
int uv_thread_getaffinity(uv_thread_t* tid,
                          char* cpumask,
                          size_t mask_size) {
  return UV_ENOTSUP;
}
265 #endif /* defined(__linux__) || defined(UV_BSD_H) */
266
/* Return the CPU number the calling thread is currently running on, or
 * a negative libuv error code (UV_ENOTSUP when affinity is unsupported). */
int uv_thread_getcpu(void) {
#if UV__CPU_AFFINITY_SUPPORTED
  int cpu;

  cpu = sched_getcpu();
  if (cpu < 0)
    return UV__ERR(errno);

  return cpu;
#else
  return UV_ENOTSUP;
#endif
}
280
/* Return the handle of the calling thread. */
uv_thread_t uv_thread_self(void) {
  return pthread_self();
}
284
/* Wait for thread tid to terminate. Returns 0 or a negative error code. */
int uv_thread_join(uv_thread_t *tid) {
  return UV__ERR(pthread_join(*tid, NULL));
}
288
289
/* Return nonzero when t1 and t2 refer to the same thread. */
int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) {
  return pthread_equal(*t1, *t2);
}
293
294
/* Initialize a mutex. Debug builds use PTHREAD_MUTEX_ERRORCHECK (where
 * available) so misuse like relocking or unlocking an unowned mutex is
 * reported instead of silently deadlocking. Returns 0 or a negative
 * libuv error code. */
int uv_mutex_init(uv_mutex_t* mutex) {
#if defined(NDEBUG) || !defined(PTHREAD_MUTEX_ERRORCHECK)
  return UV__ERR(pthread_mutex_init(mutex, NULL));
#else
  pthread_mutexattr_t attr;
  int err;

  if (pthread_mutexattr_init(&attr) != 0 ||
      pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK) != 0)
    abort();

  err = pthread_mutex_init(mutex, &attr);

  if (pthread_mutexattr_destroy(&attr) != 0)
    abort();

  return UV__ERR(err);
#endif
}
316
317
/* Initialize a recursive mutex (the owning thread may relock it).
 * Returns 0 or a negative libuv error code. */
int uv_mutex_init_recursive(uv_mutex_t* mutex) {
  pthread_mutexattr_t attr;
  int err;

  if (pthread_mutexattr_init(&attr) != 0 ||
      pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) != 0)
    abort();

  err = pthread_mutex_init(mutex, &attr);

  if (pthread_mutexattr_destroy(&attr) != 0)
    abort();

  return UV__ERR(err);
}
335
336
/* Destroy the mutex; aborts if pthread reports an error. */
void uv_mutex_destroy(uv_mutex_t* mutex) {
  if (pthread_mutex_destroy(mutex))
    abort();
}
341
342
/* Lock the mutex, blocking until it is acquired; aborts on error. */
void uv_mutex_lock(uv_mutex_t* mutex) {
  if (pthread_mutex_lock(mutex))
    abort();
}
347
348
/* Try to lock the mutex without blocking. Returns 0 on success or
 * UV_EBUSY when it is already held; aborts on any other error. */
int uv_mutex_trylock(uv_mutex_t* mutex) {
  int err;

  err = pthread_mutex_trylock(mutex);
  if (err == 0)
    return 0;

  /* Only contention (EBUSY/EAGAIN) is tolerated; anything else is fatal. */
  if (err != EBUSY && err != EAGAIN)
    abort();

  return UV_EBUSY;
}
361
362
/* Unlock the mutex; aborts on error. */
void uv_mutex_unlock(uv_mutex_t* mutex) {
  if (pthread_mutex_unlock(mutex))
    abort();
}
367
368
/* Initialize a read-write lock. Returns 0 or a negative error code. */
int uv_rwlock_init(uv_rwlock_t* rwlock) {
  return UV__ERR(pthread_rwlock_init(rwlock, NULL));
}
372
373
/* Destroy the read-write lock; aborts on error. */
void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_destroy(rwlock))
    abort();
}
378
379
/* Acquire the lock for reading, blocking if necessary; aborts on error. */
void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_rdlock(rwlock))
    abort();
}
384
385
/* Try to acquire the lock for reading without blocking. Returns 0 on
 * success or UV_EBUSY when unavailable; aborts on any other error. */
int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
  int err;

  err = pthread_rwlock_tryrdlock(rwlock);
  if (err == 0)
    return 0;

  /* Only contention (EBUSY/EAGAIN) is tolerated; anything else is fatal. */
  if (err != EBUSY && err != EAGAIN)
    abort();

  return UV_EBUSY;
}
398
399
/* Release a read hold on the lock; aborts on error. */
void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_unlock(rwlock))
    abort();
}
404
405
/* Acquire the lock for writing, blocking if necessary; aborts on error. */
void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_wrlock(rwlock))
    abort();
}
410
411
/* Try to acquire the lock for writing without blocking. Returns 0 on
 * success or UV_EBUSY when unavailable; aborts on any other error. */
int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
  int err;

  err = pthread_rwlock_trywrlock(rwlock);
  if (err == 0)
    return 0;

  /* Only contention (EBUSY/EAGAIN) is tolerated; anything else is fatal. */
  if (err != EBUSY && err != EAGAIN)
    abort();

  return UV_EBUSY;
}
424
425
/* Release a write hold on the lock; aborts on error. */
void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_unlock(rwlock))
    abort();
}
430
431
/* Run callback exactly once across all threads using guard; aborts on error. */
void uv_once(uv_once_t* guard, void (*callback)(void)) {
  if (pthread_once(guard, callback))
    abort();
}
436
437 #if defined(__APPLE__) && defined(__MACH__)
438
/* Initialize a semaphore with the given starting value using Mach
 * semaphores. Returns 0, UV_EINVAL, or UV_ENOMEM; any other kernel
 * error is fatal. */
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
  kern_return_t err;

  err = semaphore_create(mach_task_self(), sem, SYNC_POLICY_FIFO, value);
  switch (err) {
  case KERN_SUCCESS:
    return 0;
  case KERN_INVALID_ARGUMENT:
    return UV_EINVAL;
  case KERN_RESOURCE_SHORTAGE:
    return UV_ENOMEM;
  default:
    abort();
    return UV_EINVAL;  /* Satisfy the compiler. */
  }
}
453
454
/* Destroy the Mach semaphore; aborts on error. */
void uv_sem_destroy(uv_sem_t* sem) {
  if (semaphore_destroy(mach_task_self(), *sem))
    abort();
}
459
460
/* Increment the semaphore, waking one waiter if any; aborts on error. */
void uv_sem_post(uv_sem_t* sem) {
  if (semaphore_signal(*sem))
    abort();
}
465
466
/* Decrement the semaphore, blocking until it is positive; aborts on
 * any error other than an interrupted wait (which is retried). */
void uv_sem_wait(uv_sem_t* sem) {
  int r;

  for (;;) {
    r = semaphore_wait(*sem);
    if (r != KERN_ABORTED)
      break;  /* KERN_ABORTED == interrupted; retry. */
  }

  if (r != KERN_SUCCESS)
    abort();
}
477
478
/* Try to decrement the semaphore without blocking, implemented as a
 * timed wait with a zero timeout. Returns 0 on success or UV_EAGAIN
 * when the semaphore is not positive; any other error is fatal. */
int uv_sem_trywait(uv_sem_t* sem) {
  mach_timespec_t zero;
  kern_return_t err;

  zero.tv_sec = 0;
  zero.tv_nsec = 0;

  err = semaphore_timedwait(*sem, zero);
  switch (err) {
  case KERN_SUCCESS:
    return 0;
  case KERN_OPERATION_TIMED_OUT:
    return UV_EAGAIN;
  default:
    abort();
    return UV_EINVAL;  /* Satisfy the compiler. */
  }
}
495
496 #else /* !(defined(__APPLE__) && defined(__MACH__)) */
497
498 #if defined(__GLIBC__) && !defined(__UCLIBC__)
499
500 /* Hack around https://sourceware.org/bugzilla/show_bug.cgi?id=12674
501 * by providing a custom implementation for glibc < 2.21 in terms of other
502 * concurrency primitives.
503 * Refs: https://github.com/nodejs/node/issues/19903 */
504
505 /* To preserve ABI compatibility, we treat the uv_sem_t as storage for
506 * a pointer to the actual struct we're using underneath. */
507
static uv_once_t glibc_version_check_once = UV_ONCE_INIT;
static int platform_needs_custom_semaphore = 0;

/* Set platform_needs_custom_semaphore when running against glibc < 2.21,
 * whose sem_wait/sem_trywait are affected by the bug referenced above.
 * Runs once, via glibc_version_check_once. */
static void glibc_version_check(void) {
  const char* version = gnu_get_libc_version();
  platform_needs_custom_semaphore =
      version[0] == '2' && version[1] == '.' &&
      atoi(version + 2) < 21;
}
517
518 #elif defined(__MVS__)
519
520 #define platform_needs_custom_semaphore 1
521
522 #else /* !defined(__GLIBC__) && !defined(__MVS__) */
523
524 #define platform_needs_custom_semaphore 0
525
526 #endif
527
/* Fallback semaphore built from a mutex and a condition variable. */
typedef struct uv_semaphore_s {
  uv_mutex_t mutex;    /* Guards |value|. */
  uv_cond_t cond;      /* Signalled when |value| becomes positive. */
  unsigned int value;  /* Current semaphore count. */
} uv_semaphore_t;
533
534 #if (defined(__GLIBC__) && !defined(__UCLIBC__)) || \
535 platform_needs_custom_semaphore
536 STATIC_ASSERT(sizeof(uv_sem_t) >= sizeof(uv_semaphore_t*));
537 #endif
538
/* Allocate and initialize a custom semaphore with the given starting
 * value and stash its pointer in *sem_ (the uv_sem_t is used as opaque
 * pointer storage; see the ABI note above). Returns 0, UV_ENOMEM, or
 * the error from the mutex/cond initializers. */
static int uv__custom_sem_init(uv_sem_t* sem_, unsigned int value) {
  uv_semaphore_t* sem;
  int err;

  sem = uv__malloc(sizeof(*sem));
  if (sem == NULL)
    return UV_ENOMEM;

  err = uv_mutex_init(&sem->mutex);
  if (err != 0)
    goto fail_free;

  err = uv_cond_init(&sem->cond);
  if (err != 0)
    goto fail_mutex;

  sem->value = value;
  *(uv_semaphore_t**) sem_ = sem;
  return 0;

fail_mutex:
  uv_mutex_destroy(&sem->mutex);
fail_free:
  uv__free(sem);
  return err;
}
562
563
/* Tear down and free the custom semaphore stored in *sem_. */
static void uv__custom_sem_destroy(uv_sem_t* sem_) {
  uv_semaphore_t* sem = *(uv_semaphore_t**) sem_;

  uv_cond_destroy(&sem->cond);
  uv_mutex_destroy(&sem->mutex);
  uv__free(sem);
}
572
573
/* Increment the custom semaphore, waking one waiter when the count
 * transitions from zero to one. */
static void uv__custom_sem_post(uv_sem_t* sem_) {
  uv_semaphore_t* sem = *(uv_semaphore_t**) sem_;

  uv_mutex_lock(&sem->mutex);
  if (++sem->value == 1)
    uv_cond_signal(&sem->cond);  /* Release one to replace us. */
  uv_mutex_unlock(&sem->mutex);
}
584
585
/* Block until the custom semaphore's count is positive, then take one. */
static void uv__custom_sem_wait(uv_sem_t* sem_) {
  uv_semaphore_t* sem = *(uv_semaphore_t**) sem_;

  uv_mutex_lock(&sem->mutex);
  for (;;) {
    if (sem->value != 0)
      break;
    uv_cond_wait(&sem->cond, &sem->mutex);  /* Re-check after wakeup. */
  }
  sem->value--;
  uv_mutex_unlock(&sem->mutex);
}
596
597
/* Try to take the custom semaphore without blocking. Returns 0 when a
 * count was taken, UV_EAGAIN when the count is zero or the internal
 * mutex is contended. */
static int uv__custom_sem_trywait(uv_sem_t* sem_) {
  uv_semaphore_t* sem = *(uv_semaphore_t**) sem_;
  int rc;

  /* Never block, not even on the internal mutex. */
  if (uv_mutex_trylock(&sem->mutex) != 0)
    return UV_EAGAIN;

  rc = UV_EAGAIN;
  if (sem->value != 0) {
    sem->value--;
    rc = 0;
  }
  uv_mutex_unlock(&sem->mutex);

  return rc;
}
615
/* Initialize a native POSIX semaphore. Returns 0 or a negative error code. */
static int uv__sem_init(uv_sem_t* sem, unsigned int value) {
  if (sem_init(sem, 0, value))
    return UV__ERR(errno);
  return 0;
}
621
622
/* Destroy a native POSIX semaphore; aborts on error. */
static void uv__sem_destroy(uv_sem_t* sem) {
  if (sem_destroy(sem))
    abort();
}
627
628
/* Increment a native POSIX semaphore; aborts on error. */
static void uv__sem_post(uv_sem_t* sem) {
  if (sem_post(sem))
    abort();
}
633
634
/* Decrement a native POSIX semaphore, blocking until it is positive.
 * Interrupted waits (EINTR) are retried; any other failure aborts. */
static void uv__sem_wait(uv_sem_t* sem) {
  int r;

  for (;;) {
    r = sem_wait(sem);
    if (r == 0)
      return;
    if (r == -1 && errno == EINTR)
      continue;  /* Interrupted by a signal; retry. */
    abort();
  }
}
645
646
/* Try to decrement a native POSIX semaphore without blocking. Returns 0
 * on success or UV_EAGAIN when the count is zero; interrupted calls
 * (EINTR) are retried and any other failure aborts. */
static int uv__sem_trywait(uv_sem_t* sem) {
  int r;

  do
    r = sem_trywait(sem);
  while (r == -1 && errno == EINTR);

  if (r == 0)
    return 0;

  if (errno == EAGAIN)
    return UV_EAGAIN;

  abort();
}
662
/* Initialize a semaphore, choosing between the native implementation and
 * the custom mutex+cond fallback (glibc < 2.21 or z/OS; see above). */
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
#if defined(__GLIBC__) && !defined(__UCLIBC__)
  /* Decide once, at first use, whether the fallback is needed. */
  uv_once(&glibc_version_check_once, glibc_version_check);
#endif

  if (platform_needs_custom_semaphore)
    return uv__custom_sem_init(sem, value);
  else
    return uv__sem_init(sem, value);
}
673
674
/* Destroy the semaphore via whichever implementation initialized it. */
void uv_sem_destroy(uv_sem_t* sem) {
  if (platform_needs_custom_semaphore)
    uv__custom_sem_destroy(sem);
  else
    uv__sem_destroy(sem);
}
681
682
/* Increment the semaphore via whichever implementation initialized it. */
void uv_sem_post(uv_sem_t* sem) {
  if (platform_needs_custom_semaphore)
    uv__custom_sem_post(sem);
  else
    uv__sem_post(sem);
}
689
690
/* Decrement the semaphore (blocking) via the active implementation. */
void uv_sem_wait(uv_sem_t* sem) {
  if (platform_needs_custom_semaphore)
    uv__custom_sem_wait(sem);
  else
    uv__sem_wait(sem);
}
697
698
/* Non-blocking decrement via the active implementation; 0 or UV_EAGAIN. */
int uv_sem_trywait(uv_sem_t* sem) {
  if (platform_needs_custom_semaphore)
    return uv__custom_sem_trywait(sem);
  else
    return uv__sem_trywait(sem);
}
705
706 #endif /* defined(__APPLE__) && defined(__MACH__) */
707
708
709 #if defined(__APPLE__) && defined(__MACH__) || defined(__MVS__)
710
/* Apple/z/OS: initialize with the default clock attributes. No
 * CLOCK_MONOTONIC setup here — uv_cond_timedwait() compensates on Apple
 * by using a relative wait (pthread_cond_timedwait_relative_np). */
int uv_cond_init(uv_cond_t* cond) {
  return UV__ERR(pthread_cond_init(cond, NULL));
}
714
715 #else /* !(defined(__APPLE__) && defined(__MACH__)) */
716
/* Initialize the condition variable bound to CLOCK_MONOTONIC so that
 * uv_cond_timedwait() deadlines are unaffected by wall-clock changes.
 * Returns 0 or a negative libuv error code. */
int uv_cond_init(uv_cond_t* cond) {
  pthread_condattr_t attr;
  int err;

  err = pthread_condattr_init(&attr);
  if (err)
    return UV__ERR(err);

  err = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
  if (err)
    goto error2;

  err = pthread_cond_init(cond, &attr);
  if (err)
    goto error2;

  err = pthread_condattr_destroy(&attr);
  if (err)
    goto error;  /* Cond was initialized; destroy it too before failing. */

  return 0;

error:
  pthread_cond_destroy(cond);
error2:
  pthread_condattr_destroy(&attr);
  return UV__ERR(err);
}
745
746 #endif /* defined(__APPLE__) && defined(__MACH__) */
747
/* Destroy the condition variable; aborts on error. On Apple platforms a
 * short timed wait is performed first to drain pending signals. */
void uv_cond_destroy(uv_cond_t* cond) {
#if defined(__APPLE__) && defined(__MACH__)
  /* It has been reported that destroying condition variables that have been
   * signalled but not waited on can sometimes result in application crashes.
   * See https://codereview.chromium.org/1323293005.
   */
  pthread_mutex_t mutex;
  struct timespec ts;
  int err;

  if (pthread_mutex_init(&mutex, NULL))
    abort();

  if (pthread_mutex_lock(&mutex))
    abort();

  /* 1 ns relative timeout: effectively a non-blocking drain of the cond. */
  ts.tv_sec = 0;
  ts.tv_nsec = 1;

  err = pthread_cond_timedwait_relative_np(cond, &mutex, &ts);
  if (err != 0 && err != ETIMEDOUT)
    abort();

  if (pthread_mutex_unlock(&mutex))
    abort();

  if (pthread_mutex_destroy(&mutex))
    abort();
#endif /* defined(__APPLE__) && defined(__MACH__) */

  if (pthread_cond_destroy(cond))
    abort();
}
781
/* Wake one waiter on the condition variable; aborts on error. */
void uv_cond_signal(uv_cond_t* cond) {
  if (pthread_cond_signal(cond))
    abort();
}
786
/* Wake all waiters on the condition variable; aborts on error. */
void uv_cond_broadcast(uv_cond_t* cond) {
  if (pthread_cond_broadcast(cond))
    abort();
}
791
792 #if defined(__APPLE__) && defined(__MACH__)
793
/* Wait on the condition variable with mutex held (Apple variant with a
 * workaround for a spurious EINVAL); aborts on any real error. */
void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
  int r;

  errno = 0;  /* Cleared so the EINVAL/EBUSY check below is meaningful. */
  r = pthread_cond_wait(cond, mutex);

  /* Workaround for a bug in OS X at least up to 13.6
   * See https://github.com/libuv/libuv/issues/4165
   */
  if (r == EINVAL)
    if (errno == EBUSY)
      return;

  if (r)
    abort();
}
810
811 #else /* !(defined(__APPLE__) && defined(__MACH__)) */
812
/* Wait on the condition variable with mutex held; aborts on error. */
void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
  if (pthread_cond_wait(cond, mutex))
    abort();
}
817
818 #endif
819
/* Wait on the condition variable with mutex held, for at most |timeout|
 * nanoseconds (relative). Returns 0 when signalled, UV_ETIMEDOUT on
 * timeout; aborts on any other error. On Apple the relative-wait variant
 * is used directly; elsewhere the timeout is converted to an absolute
 * deadline (monotonic clock, or gettimeofday() on z/OS). */
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
  int r;
  struct timespec ts;
#if defined(__MVS__)
  struct timeval tv;
#endif

#if defined(__APPLE__) && defined(__MACH__)
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
  r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
#else
#if defined(__MVS__)
  if (gettimeofday(&tv, NULL))
    abort();
  /* Integer arithmetic throughout: the previous `tv.tv_usec * 1e3` routed
   * the deadline through double, which cannot represent nanosecond counts
   * above 2**53 exactly and so rounded the deadline by up to ~100 ns. */
  timeout += (uint64_t) tv.tv_sec * NANOSEC + (uint64_t) tv.tv_usec * 1000;
#else
  timeout += uv__hrtime(UV_CLOCK_PRECISE);
#endif
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
  r = pthread_cond_timedwait(cond, mutex, &ts);
#endif


  if (r == 0)
    return 0;

  if (r == ETIMEDOUT)
    return UV_ETIMEDOUT;

  abort();
#ifndef __SUNPRO_C
  return UV_EINVAL;  /* Satisfy the compiler. */
#endif
}
856
857
/* Create a thread-local storage key (no destructor). Returns 0 or error. */
int uv_key_create(uv_key_t* key) {
  return UV__ERR(pthread_key_create(key, NULL));
}
861
862
/* Delete a thread-local storage key; aborts on error. */
void uv_key_delete(uv_key_t* key) {
  if (pthread_key_delete(*key))
    abort();
}
867
868
/* Return the calling thread's value for the key (NULL if never set). */
void* uv_key_get(uv_key_t* key) {
  return pthread_getspecific(*key);
}
872
873
/* Set the calling thread's value for the key; aborts on error. */
void uv_key_set(uv_key_t* key, void* value) {
  if (pthread_setspecific(*key, value))
    abort();
}
878