/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"

#include <pthread.h>
#ifdef __OpenBSD__
#include <pthread_np.h>
#endif
#include <assert.h>
#include <errno.h>

#include <sys/time.h>
#include <sys/resource.h>  /* getrlimit() */
#include <unistd.h>  /* getpagesize() */

#include <limits.h>

#ifdef __MVS__
#include <sys/ipc.h>
#include <sys/sem.h>
#endif

#if defined(__GLIBC__) && !defined(__UCLIBC__)
#include <gnu/libc-version.h>  /* gnu_get_libc_version() */
#endif

#if defined(__linux__)
# include <sched.h>
# define uv__cpu_set_t cpu_set_t
#elif defined(__FreeBSD__)
# include <sys/param.h>
# include <sys/cpuset.h>
# include <pthread_np.h>
# define uv__cpu_set_t cpuset_t
#endif


#undef NANOSEC
#define NANOSEC ((uint64_t) 1e9)
/* Musl's PTHREAD_STACK_MIN is 2 KB on all architectures, which is
 * too small to safely receive signals on.
 *
 * Musl's PTHREAD_STACK_MIN + MINSIGSTKSZ == 8192 on arm64 (which has
 * the largest MINSIGSTKSZ of the architectures that musl supports) so
 * let's use that as a lower bound.
 *
 * We use a hardcoded value because PTHREAD_STACK_MIN + MINSIGSTKSZ
 * is between 28 and 133 KB when compiling against glibc, depending
 * on the architecture.
 */
static size_t uv__min_stack_size(void) {
  static const size_t min = 8192;

#ifdef PTHREAD_STACK_MIN  /* Not defined on NetBSD. */
  if (min < (size_t) PTHREAD_STACK_MIN)
    return PTHREAD_STACK_MIN;
#endif  /* PTHREAD_STACK_MIN */

  return min;
}


/* On Linux, threads created by musl have a much smaller stack than threads
 * created by glibc (80 vs. 2048 or 4096 kB).  Follow glibc for consistency.
 */
static size_t uv__default_stack_size(void) {
#if !defined(__linux__)
  return 0;
#elif defined(__PPC__) || defined(__ppc__) || defined(__powerpc__)
  return 4 << 20;  /* glibc default. */
#else
  return 2 << 20;  /* glibc default. */
#endif
}


/* On macOS, threads other than the main thread are created with a reduced
 * stack size by default.  Adjust to RLIMIT_STACK aligned to the page size.
 */
size_t uv__thread_stack_size(void) {
#if defined(__APPLE__) || defined(__linux__)
  struct rlimit lim;

  /* getrlimit() can fail on some aarch64 systems due to a glibc bug where
   * the system call wrapper invokes the wrong system call. Don't treat
   * that as fatal, just use the default stack size instead.
   */
  if (getrlimit(RLIMIT_STACK, &lim))
    return uv__default_stack_size();

  if (lim.rlim_cur == RLIM_INFINITY)
    return uv__default_stack_size();

  /* pthread_attr_setstacksize() expects page-aligned values. */
  lim.rlim_cur -= lim.rlim_cur % (rlim_t) getpagesize();

  if (lim.rlim_cur >= (rlim_t) uv__min_stack_size())
    return lim.rlim_cur;
#endif

  return uv__default_stack_size();
}


int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
  uv_thread_options_t params;
  params.flags = UV_THREAD_NO_FLAGS;
  return uv_thread_create_ex(tid, &params, entry, arg);
}
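
/* Usage sketch (a minimal example; `worker` is a hypothetical caller-supplied
 * entry point and error handling is elided):
 *
 *   static void worker(void* arg) {
 *     // runs on the new thread
 *   }
 *
 *   uv_thread_t tid;
 *   if (uv_thread_create(&tid, worker, NULL) == 0)
 *     uv_thread_join(&tid);
 */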


int uv_thread_detach(uv_thread_t *tid) {
  return UV__ERR(pthread_detach(*tid));
}


int uv_thread_create_ex(uv_thread_t* tid,
                        const uv_thread_options_t* params,
                        void (*entry)(void *arg),
                        void *arg) {
  int err;
  pthread_attr_t* attr;
  pthread_attr_t attr_storage;
  size_t pagesize;
  size_t stack_size;
  size_t min_stack_size;

  /* Used to squelch a -Wcast-function-type warning. */
  union {
    void (*in)(void*);
    void* (*out)(void*);
  } f;

  stack_size =
      params->flags & UV_THREAD_HAS_STACK_SIZE ? params->stack_size : 0;

  attr = NULL;
  if (stack_size == 0) {
    stack_size = uv__thread_stack_size();
  } else {
    pagesize = (size_t)getpagesize();
    /* Round up to the nearest page boundary. */
    stack_size = (stack_size + pagesize - 1) & ~(pagesize - 1);
    min_stack_size = uv__min_stack_size();
    if (stack_size < min_stack_size)
      stack_size = min_stack_size;
  }

  if (stack_size > 0) {
    attr = &attr_storage;

    if (pthread_attr_init(attr))
      abort();

    if (pthread_attr_setstacksize(attr, stack_size))
      abort();
  }

  f.in = entry;
  err = pthread_create(tid, attr, f.out, arg);

  if (attr != NULL)
    pthread_attr_destroy(attr);

  return UV__ERR(err);
}
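
/* Usage sketch with an explicit stack size (a hypothetical 1 MiB request;
 * `tid` and `worker` are as in the previous example; the size is rounded
 * up to a page boundary and clamped to the platform minimum as shown
 * above):
 *
 *   uv_thread_options_t options;
 *   options.flags = UV_THREAD_HAS_STACK_SIZE;
 *   options.stack_size = 1 << 20;
 *   uv_thread_create_ex(&tid, &options, worker, NULL);
 */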

#if UV__CPU_AFFINITY_SUPPORTED

int uv_thread_setaffinity(uv_thread_t* tid,
                          char* cpumask,
                          char* oldmask,
                          size_t mask_size) {
  int i;
  int r;
  uv__cpu_set_t cpuset;
  int cpumasksize;

  cpumasksize = uv_cpumask_size();
  if (cpumasksize < 0)
    return cpumasksize;
  if (mask_size < (size_t)cpumasksize)
    return UV_EINVAL;

  if (oldmask != NULL) {
    r = uv_thread_getaffinity(tid, oldmask, mask_size);
    if (r < 0)
      return r;
  }

  CPU_ZERO(&cpuset);
  for (i = 0; i < cpumasksize; i++)
    if (cpumask[i])
      CPU_SET(i, &cpuset);

#if defined(__ANDROID__)
  if (sched_setaffinity(pthread_gettid_np(*tid), sizeof(cpuset), &cpuset))
    r = errno;
  else
    r = 0;
#else
  r = pthread_setaffinity_np(*tid, sizeof(cpuset), &cpuset);
#endif

  return UV__ERR(r);
}


int uv_thread_getaffinity(uv_thread_t* tid,
                          char* cpumask,
                          size_t mask_size) {
  int r;
  int i;
  uv__cpu_set_t cpuset;
  int cpumasksize;

  cpumasksize = uv_cpumask_size();
  if (cpumasksize < 0)
    return cpumasksize;
  if (mask_size < (size_t)cpumasksize)
    return UV_EINVAL;

  CPU_ZERO(&cpuset);
#if defined(__ANDROID__)
  if (sched_getaffinity(pthread_gettid_np(*tid), sizeof(cpuset), &cpuset))
    r = errno;
  else
    r = 0;
#else
  r = pthread_getaffinity_np(*tid, sizeof(cpuset), &cpuset);
#endif
  if (r)
    return UV__ERR(r);
  for (i = 0; i < cpumasksize; i++)
    cpumask[i] = !!CPU_ISSET(i, &cpuset);

  return 0;
}
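
/* Usage sketch: pin the calling thread to CPU 0. The mask is one byte per
 * CPU (nonzero means eligible) and must be at least uv_cpumask_size()
 * bytes long. Error handling is elided:
 *
 *   int ncpus = uv_cpumask_size();
 *   char* mask = calloc(ncpus, 1);
 *   uv_thread_t self = uv_thread_self();
 *   mask[0] = 1;
 *   uv_thread_setaffinity(&self, mask, NULL, ncpus);
 *   free(mask);
 */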
#else
int uv_thread_setaffinity(uv_thread_t* tid,
                          char* cpumask,
                          char* oldmask,
                          size_t mask_size) {
  return UV_ENOTSUP;
}


int uv_thread_getaffinity(uv_thread_t* tid,
                          char* cpumask,
                          size_t mask_size) {
  return UV_ENOTSUP;
}
#endif /* UV__CPU_AFFINITY_SUPPORTED */

int uv_thread_getcpu(void) {
#if UV__CPU_AFFINITY_SUPPORTED
  int cpu;

  cpu = sched_getcpu();
  if (cpu < 0)
    return UV__ERR(errno);

  return cpu;
#else
  return UV_ENOTSUP;
#endif
}

uv_thread_t uv_thread_self(void) {
  return pthread_self();
}

int uv_thread_join(uv_thread_t *tid) {
  return UV__ERR(pthread_join(*tid, NULL));
}


int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) {
  return pthread_equal(*t1, *t2);
}

int uv_thread_setname(const char* name) {
  if (name == NULL)
    return UV_EINVAL;
  return uv__thread_setname(name);
}

int uv_thread_getname(uv_thread_t* tid, char* name, size_t size) {
  if (name == NULL || size == 0)
    return UV_EINVAL;

  return uv__thread_getname(tid, name, size);
}
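
/* Usage sketch: name the calling thread, then read the name back. Names
 * longer than the platform limit are truncated:
 *
 *   char buf[16];
 *   uv_thread_t self = uv_thread_self();
 *   uv_thread_setname("worker");
 *   uv_thread_getname(&self, buf, sizeof(buf));
 */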

int uv_mutex_init(uv_mutex_t* mutex) {
#if defined(NDEBUG) || !defined(PTHREAD_MUTEX_ERRORCHECK)
  return UV__ERR(pthread_mutex_init(mutex, NULL));
#else
  pthread_mutexattr_t attr;
  int err;

  if (pthread_mutexattr_init(&attr))
    abort();

  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK))
    abort();

  err = pthread_mutex_init(mutex, &attr);

  if (pthread_mutexattr_destroy(&attr))
    abort();

  return UV__ERR(err);
#endif
}


int uv_mutex_init_recursive(uv_mutex_t* mutex) {
  pthread_mutexattr_t attr;
  int err;

  if (pthread_mutexattr_init(&attr))
    abort();

  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE))
    abort();

  err = pthread_mutex_init(mutex, &attr);

  if (pthread_mutexattr_destroy(&attr))
    abort();

  return UV__ERR(err);
}


void uv_mutex_destroy(uv_mutex_t* mutex) {
  if (pthread_mutex_destroy(mutex))
    abort();
}


void uv_mutex_lock(uv_mutex_t* mutex) {
  if (pthread_mutex_lock(mutex))
    abort();
}


int uv_mutex_trylock(uv_mutex_t* mutex) {
  int err;

  err = pthread_mutex_trylock(mutex);
  if (err) {
    if (err != EBUSY && err != EAGAIN)
      abort();
    return UV_EBUSY;
  }

  return 0;
}


void uv_mutex_unlock(uv_mutex_t* mutex) {
  if (pthread_mutex_unlock(mutex))
    abort();
}
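
/* Usage sketch: lock/unlock failures abort the process, so callers only
 * need to check uv_mutex_init() and uv_mutex_trylock():
 *
 *   uv_mutex_t m;
 *   if (uv_mutex_init(&m))
 *     return;  // propagate the error
 *   uv_mutex_lock(&m);
 *   // ... critical section ...
 *   uv_mutex_unlock(&m);
 *   uv_mutex_destroy(&m);
 */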


int uv_rwlock_init(uv_rwlock_t* rwlock) {
  return UV__ERR(pthread_rwlock_init(rwlock, NULL));
}


void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_destroy(rwlock))
    abort();
}


void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_rdlock(rwlock))
    abort();
}


int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
  int err;

  err = pthread_rwlock_tryrdlock(rwlock);
  if (err) {
    if (err != EBUSY && err != EAGAIN)
      abort();
    return UV_EBUSY;
  }

  return 0;
}


void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_unlock(rwlock))
    abort();
}


void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_wrlock(rwlock))
    abort();
}


int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
  int err;

  err = pthread_rwlock_trywrlock(rwlock);
  if (err) {
    if (err != EBUSY && err != EAGAIN)
      abort();
    return UV_EBUSY;
  }

  return 0;
}


void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_unlock(rwlock))
    abort();
}
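
/* Usage sketch: many concurrent readers, one exclusive writer:
 *
 *   uv_rwlock_t lock;
 *   uv_rwlock_init(&lock);
 *   uv_rwlock_rdlock(&lock);    // shared read access
 *   uv_rwlock_rdunlock(&lock);
 *   uv_rwlock_wrlock(&lock);    // exclusive write access
 *   uv_rwlock_wrunlock(&lock);
 *   uv_rwlock_destroy(&lock);
 */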


void uv_once(uv_once_t* guard, void (*callback)(void)) {
  if (pthread_once(guard, callback))
    abort();
}
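
/* Usage sketch: one-time lazy initialization, safe to call from any
 * thread concurrently (`init_tables` is a hypothetical initializer):
 *
 *   static uv_once_t once = UV_ONCE_INIT;
 *   uv_once(&once, init_tables);  // init_tables() runs exactly once
 */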

#if defined(__APPLE__) && defined(__MACH__)

int uv_sem_init(uv_sem_t* sem, unsigned int value) {
  kern_return_t err;

  err = semaphore_create(mach_task_self(), sem, SYNC_POLICY_FIFO, value);
  if (err == KERN_SUCCESS)
    return 0;
  if (err == KERN_INVALID_ARGUMENT)
    return UV_EINVAL;
  if (err == KERN_RESOURCE_SHORTAGE)
    return UV_ENOMEM;

  abort();
  return UV_EINVAL;  /* Satisfy the compiler. */
}


void uv_sem_destroy(uv_sem_t* sem) {
  if (semaphore_destroy(mach_task_self(), *sem))
    abort();
}


void uv_sem_post(uv_sem_t* sem) {
  if (semaphore_signal(*sem))
    abort();
}


void uv_sem_wait(uv_sem_t* sem) {
  int r;

  do
    r = semaphore_wait(*sem);
  while (r == KERN_ABORTED);

  if (r != KERN_SUCCESS)
    abort();
}


int uv_sem_trywait(uv_sem_t* sem) {
  mach_timespec_t interval;
  kern_return_t err;

  interval.tv_sec = 0;
  interval.tv_nsec = 0;

  err = semaphore_timedwait(*sem, interval);
  if (err == KERN_SUCCESS)
    return 0;
  if (err == KERN_OPERATION_TIMED_OUT)
    return UV_EAGAIN;

  abort();
  return UV_EINVAL;  /* Satisfy the compiler. */
}

#else /* !(defined(__APPLE__) && defined(__MACH__)) */

#if defined(__GLIBC__) && !defined(__UCLIBC__)

/* Hack around https://sourceware.org/bugzilla/show_bug.cgi?id=12674
 * by providing a custom implementation for glibc < 2.21 in terms of other
 * concurrency primitives.
 * Refs: https://github.com/nodejs/node/issues/19903 */

/* To preserve ABI compatibility, we treat the uv_sem_t as storage for
 * a pointer to the actual struct we're using underneath. */

static uv_once_t glibc_version_check_once = UV_ONCE_INIT;
static int platform_needs_custom_semaphore = 0;

static void glibc_version_check(void) {
  const char* version = gnu_get_libc_version();
  platform_needs_custom_semaphore =
      version[0] == '2' && version[1] == '.' &&
      atoi(version + 2) < 21;
}

#elif defined(__MVS__)

#define platform_needs_custom_semaphore 1

#else /* !defined(__GLIBC__) && !defined(__MVS__) */

#define platform_needs_custom_semaphore 0

#endif

typedef struct uv_semaphore_s {
  uv_mutex_t mutex;
  uv_cond_t cond;
  unsigned int value;
} uv_semaphore_t;

#if (defined(__GLIBC__) && !defined(__UCLIBC__)) || \
    platform_needs_custom_semaphore
STATIC_ASSERT(sizeof(uv_sem_t) >= sizeof(uv_semaphore_t*));
#endif

static int uv__custom_sem_init(uv_sem_t* sem_, unsigned int value) {
  int err;
  uv_semaphore_t* sem;

  sem = uv__malloc(sizeof(*sem));
  if (sem == NULL)
    return UV_ENOMEM;

  if ((err = uv_mutex_init(&sem->mutex)) != 0) {
    uv__free(sem);
    return err;
  }

  if ((err = uv_cond_init(&sem->cond)) != 0) {
    uv_mutex_destroy(&sem->mutex);
    uv__free(sem);
    return err;
  }

  sem->value = value;
  *(uv_semaphore_t**)sem_ = sem;
  return 0;
}


static void uv__custom_sem_destroy(uv_sem_t* sem_) {
  uv_semaphore_t* sem;

  sem = *(uv_semaphore_t**)sem_;
  uv_cond_destroy(&sem->cond);
  uv_mutex_destroy(&sem->mutex);
  uv__free(sem);
}


static void uv__custom_sem_post(uv_sem_t* sem_) {
  uv_semaphore_t* sem;

  sem = *(uv_semaphore_t**)sem_;
  uv_mutex_lock(&sem->mutex);
  sem->value++;
  if (sem->value == 1)
    uv_cond_signal(&sem->cond); /* Release one to replace us. */
  uv_mutex_unlock(&sem->mutex);
}


static void uv__custom_sem_wait(uv_sem_t* sem_) {
  uv_semaphore_t* sem;

  sem = *(uv_semaphore_t**)sem_;
  uv_mutex_lock(&sem->mutex);
  while (sem->value == 0)
    uv_cond_wait(&sem->cond, &sem->mutex);
  sem->value--;
  uv_mutex_unlock(&sem->mutex);
}


static int uv__custom_sem_trywait(uv_sem_t* sem_) {
  uv_semaphore_t* sem;

  sem = *(uv_semaphore_t**)sem_;
  if (uv_mutex_trylock(&sem->mutex) != 0)
    return UV_EAGAIN;

  if (sem->value == 0) {
    uv_mutex_unlock(&sem->mutex);
    return UV_EAGAIN;
  }

  sem->value--;
  uv_mutex_unlock(&sem->mutex);

  return 0;
}

static int uv__sem_init(uv_sem_t* sem, unsigned int value) {
  if (sem_init(sem, 0, value))
    return UV__ERR(errno);
  return 0;
}


static void uv__sem_destroy(uv_sem_t* sem) {
  if (sem_destroy(sem))
    abort();
}


static void uv__sem_post(uv_sem_t* sem) {
  if (sem_post(sem))
    abort();
}


static void uv__sem_wait(uv_sem_t* sem) {
  int r;

  do
    r = sem_wait(sem);
  while (r == -1 && errno == EINTR);

  if (r)
    abort();
}


static int uv__sem_trywait(uv_sem_t* sem) {
  int r;

  do
    r = sem_trywait(sem);
  while (r == -1 && errno == EINTR);

  if (r) {
    if (errno == EAGAIN)
      return UV_EAGAIN;
    abort();
  }

  return 0;
}

int uv_sem_init(uv_sem_t* sem, unsigned int value) {
#if defined(__GLIBC__) && !defined(__UCLIBC__)
  uv_once(&glibc_version_check_once, glibc_version_check);
#endif

  if (platform_needs_custom_semaphore)
    return uv__custom_sem_init(sem, value);
  else
    return uv__sem_init(sem, value);
}


void uv_sem_destroy(uv_sem_t* sem) {
  if (platform_needs_custom_semaphore)
    uv__custom_sem_destroy(sem);
  else
    uv__sem_destroy(sem);
}


void uv_sem_post(uv_sem_t* sem) {
  if (platform_needs_custom_semaphore)
    uv__custom_sem_post(sem);
  else
    uv__sem_post(sem);
}


void uv_sem_wait(uv_sem_t* sem) {
  if (platform_needs_custom_semaphore)
    uv__custom_sem_wait(sem);
  else
    uv__sem_wait(sem);
}


int uv_sem_trywait(uv_sem_t* sem) {
  if (platform_needs_custom_semaphore)
    return uv__custom_sem_trywait(sem);
  else
    return uv__sem_trywait(sem);
}

#endif /* defined(__APPLE__) && defined(__MACH__) */
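
/* Usage sketch: a counting semaphore used as a simple producer/consumer
 * handoff; the same calls work for both the native and the custom
 * implementation above:
 *
 *   uv_sem_t sem;
 *   uv_sem_init(&sem, 0);   // initial count of zero
 *   // producer thread: uv_sem_post(&sem);
 *   // consumer thread: uv_sem_wait(&sem);  blocks until posted
 *   uv_sem_destroy(&sem);
 */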


#if (defined(__APPLE__) && defined(__MACH__)) || defined(__MVS__)

int uv_cond_init(uv_cond_t* cond) {
  return UV__ERR(pthread_cond_init(cond, NULL));
}

#else /* !((defined(__APPLE__) && defined(__MACH__)) || defined(__MVS__)) */

int uv_cond_init(uv_cond_t* cond) {
  pthread_condattr_t attr;
  int err;

  err = pthread_condattr_init(&attr);
  if (err)
    return UV__ERR(err);

  err = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
  if (err)
    goto error2;

  err = pthread_cond_init(cond, &attr);
  if (err)
    goto error2;

  err = pthread_condattr_destroy(&attr);
  if (err)
    goto error;

  return 0;

error:
  pthread_cond_destroy(cond);
error2:
  pthread_condattr_destroy(&attr);
  return UV__ERR(err);
}

#endif /* (defined(__APPLE__) && defined(__MACH__)) || defined(__MVS__) */

void uv_cond_destroy(uv_cond_t* cond) {
#if defined(__APPLE__) && defined(__MACH__)
  /* It has been reported that destroying condition variables that have been
   * signalled but not waited on can sometimes result in application crashes.
   * See https://codereview.chromium.org/1323293005.
   */
  pthread_mutex_t mutex;
  struct timespec ts;
  int err;

  if (pthread_mutex_init(&mutex, NULL))
    abort();

  if (pthread_mutex_lock(&mutex))
    abort();

  ts.tv_sec = 0;
  ts.tv_nsec = 1;

  err = pthread_cond_timedwait_relative_np(cond, &mutex, &ts);
  if (err != 0 && err != ETIMEDOUT)
    abort();

  if (pthread_mutex_unlock(&mutex))
    abort();

  if (pthread_mutex_destroy(&mutex))
    abort();
#endif /* defined(__APPLE__) && defined(__MACH__) */

  if (pthread_cond_destroy(cond))
    abort();
}

void uv_cond_signal(uv_cond_t* cond) {
  if (pthread_cond_signal(cond))
    abort();
}

void uv_cond_broadcast(uv_cond_t* cond) {
  if (pthread_cond_broadcast(cond))
    abort();
}

#if defined(__APPLE__) && defined(__MACH__)

void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
  int r;

  errno = 0;
  r = pthread_cond_wait(cond, mutex);

  /* Workaround for a bug in OS X at least up to 13.6.
   * See https://github.com/libuv/libuv/issues/4165
   */
  if (r == EINVAL)
    if (errno == EBUSY)
      return;

  if (r)
    abort();
}

#else /* !(defined(__APPLE__) && defined(__MACH__)) */

void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
  if (pthread_cond_wait(cond, mutex))
    abort();
}

#endif

int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
  int r;
  struct timespec ts;
#if defined(__MVS__)
  struct timeval tv;
#endif

#if defined(__APPLE__) && defined(__MACH__)
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
  r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
#else
#if defined(__MVS__)
  if (gettimeofday(&tv, NULL))
    abort();
  /* Use integer arithmetic; going through a double can lose precision. */
  timeout += tv.tv_sec * NANOSEC + (uint64_t) tv.tv_usec * 1000;
#else
  timeout += uv__hrtime(UV_CLOCK_PRECISE);
#endif
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
  r = pthread_cond_timedwait(cond, mutex, &ts);
#endif


  if (r == 0)
    return 0;

  if (r == ETIMEDOUT)
    return UV_ETIMEDOUT;

  abort();
#ifndef __SUNPRO_C
  return UV_EINVAL;  /* Satisfy the compiler. */
#endif
}
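
/* Usage sketch: `timeout` is relative, in nanoseconds. `m`, `cond` and
 * `ready` stand in for hypothetical caller state:
 *
 *   uv_mutex_lock(&m);
 *   while (!ready)
 *     if (uv_cond_timedwait(&cond, &m, 50 * 1000 * 1000) == UV_ETIMEDOUT)
 *       break;  // gave up after 50 ms
 *   uv_mutex_unlock(&m);
 */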


int uv_key_create(uv_key_t* key) {
  return UV__ERR(pthread_key_create(key, NULL));
}


void uv_key_delete(uv_key_t* key) {
  if (pthread_key_delete(*key))
    abort();
}


void* uv_key_get(uv_key_t* key) {
  return pthread_getspecific(*key);
}


void uv_key_set(uv_key_t* key, void* value) {
  if (pthread_setspecific(*key, value))
    abort();
}
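
/* Usage sketch: thread-local storage; each thread sees its own value
 * (`some_ptr` is a hypothetical per-thread pointer):
 *
 *   uv_key_t key;
 *   uv_key_create(&key);
 *   uv_key_set(&key, some_ptr);   // visible only to the calling thread
 *   void* p = uv_key_get(&key);
 *   uv_key_delete(&key);
 */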

#if defined(_AIX) || defined(__MVS__) || defined(__PASE__)
int uv__thread_setname(const char* name) {
  return UV_ENOSYS;
}
#elif defined(__APPLE__)
int uv__thread_setname(const char* name) {
  char namebuf[UV_PTHREAD_MAX_NAMELEN_NP];
  strncpy(namebuf, name, sizeof(namebuf) - 1);
  namebuf[sizeof(namebuf) - 1] = '\0';
  int err = pthread_setname_np(namebuf);
  if (err)
    return UV__ERR(err);  /* pthread functions return the error directly. */
  return 0;
}
#elif defined(__NetBSD__)
int uv__thread_setname(const char* name) {
  char namebuf[UV_PTHREAD_MAX_NAMELEN_NP];
  strncpy(namebuf, name, sizeof(namebuf) - 1);
  namebuf[sizeof(namebuf) - 1] = '\0';
  return UV__ERR(pthread_setname_np(pthread_self(), "%s", namebuf));
}
#elif defined(__OpenBSD__)
int uv__thread_setname(const char* name) {
  char namebuf[UV_PTHREAD_MAX_NAMELEN_NP];
  strncpy(namebuf, name, sizeof(namebuf) - 1);
  namebuf[sizeof(namebuf) - 1] = '\0';
  pthread_set_name_np(pthread_self(), namebuf);
  return 0;
}
#else
int uv__thread_setname(const char* name) {
  char namebuf[UV_PTHREAD_MAX_NAMELEN_NP];
  strncpy(namebuf, name, sizeof(namebuf) - 1);
  namebuf[sizeof(namebuf) - 1] = '\0';
  return UV__ERR(pthread_setname_np(pthread_self(), namebuf));
}
#endif

#if (defined(__ANDROID_API__) && __ANDROID_API__ < 26) || \
    defined(_AIX) || \
    defined(__MVS__) || \
    defined(__PASE__)
int uv__thread_getname(uv_thread_t* tid, char* name, size_t size) {
  return UV_ENOSYS;
}
#elif defined(__OpenBSD__)
int uv__thread_getname(uv_thread_t* tid, char* name, size_t size) {
  char thread_name[UV_PTHREAD_MAX_NAMELEN_NP];
  pthread_get_name_np(*tid, thread_name, sizeof(thread_name));
  strncpy(name, thread_name, size - 1);
  name[size - 1] = '\0';
  return 0;
}
#elif defined(__APPLE__)
int uv__thread_getname(uv_thread_t* tid, char* name, size_t size) {
  char thread_name[UV_PTHREAD_MAX_NAMELEN_NP];
  int err = pthread_getname_np(*tid, thread_name, sizeof(thread_name));
  if (err != 0)
    return UV__ERR(err);  /* pthread functions return the error directly. */

  strncpy(name, thread_name, size - 1);
  name[size - 1] = '\0';
  return 0;
}
#else
int uv__thread_getname(uv_thread_t* tid, char* name, size_t size) {
  int r;
  char thread_name[UV_PTHREAD_MAX_NAMELEN_NP];
  r = pthread_getname_np(*tid, thread_name, sizeof(thread_name));
  if (r != 0)
    return UV__ERR(r);

  strncpy(name, thread_name, size - 1);
  name[size - 1] = '\0';
  return 0;
}
#endif
976