1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to
5 * deal in the Software without restriction, including without limitation the
6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 * sell copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19 * IN THE SOFTWARE.
20 */
21
22 #include <assert.h>
23 #include <limits.h>
24 #include <stdlib.h>
25
26 #if defined(__MINGW64_VERSION_MAJOR)
27 /* MemoryBarrier expands to __mm_mfence in some cases (x86+sse2), which may
28 * require this header in some versions of mingw64. */
29 #include <intrin.h>
30 #endif
31
32 #include "uv.h"
33 #include "internal.h"
34
35 typedef void (*uv__once_cb)(void);
36
37 typedef struct {
38 uv__once_cb callback;
39 } uv__once_data_t;
40
uv__once_inner(INIT_ONCE * once,void * param,void ** context)41 static BOOL WINAPI uv__once_inner(INIT_ONCE *once, void* param, void** context) {
42 uv__once_data_t* data = param;
43
44 data->callback();
45
46 return TRUE;
47 }
48
/* Run `callback` exactly once per `guard`, using the Win32 one-time
 * initialization primitive for the required synchronization.  Concurrent
 * callers block until the first caller's callback has returned. */
void uv_once(uv_once_t* guard, uv__once_cb callback) {
  uv__once_data_t data;

  data.callback = callback;
  InitOnceExecuteOnce(&guard->init_once, uv__once_inner, (void*) &data, NULL);
}
53
54
/* Verify that uv_thread_t can be stored in a TLS slot. */
STATIC_ASSERT(sizeof(uv_thread_t) <= sizeof(void*));

/* TLS slot holding the calling thread's uv_thread_t handle.  Created lazily
 * through uv_once() the first time uv__thread_start() or uv_thread_self()
 * needs it. */
static uv_key_t uv__current_thread_key;
static uv_once_t uv__current_thread_init_guard = UV_ONCE_INIT;


/* One-time initializer for uv__current_thread_key.  Aborts on failure:
 * callers have no way to surface an error and cannot work without the key. */
static void uv__init_current_thread_key(void) {
  if (uv_key_create(&uv__current_thread_key))
    abort();
}
66
67
/* Heap-allocated bundle handed from uv_thread_create_ex() to the new thread.
 * The new thread copies it onto its own stack and frees the heap copy. */
struct thread_ctx {
  void (*entry)(void* arg);  /* User entry point. */
  void* arg;                 /* User argument. */
  uv_thread_t self;          /* This thread's handle, filled in by the creator. */
};


/* Thread entry trampoline: stores the thread's own handle in TLS so that
 * uv_thread_self() works, then runs the user-supplied entry point. */
static UINT __stdcall uv__thread_start(void* arg) {
  struct thread_ctx *ctx_p;
  struct thread_ctx ctx;

  /* Copy the context to the local stack and release the heap allocation made
   * by uv_thread_create_ex(); the pointer must not be used after this. */
  ctx_p = arg;
  ctx = *ctx_p;
  uv__free(ctx_p);

  /* ctx.self was filled in by the creator before it resumed this (initially
   * suspended) thread, so it is safe to publish it here. */
  uv_once(&uv__current_thread_init_guard, uv__init_current_thread_key);
  uv_key_set(&uv__current_thread_key, ctx.self);

  ctx.entry(ctx.arg);

  return 0;
}
90
91
/* Create a thread running entry(arg) with default options.  Thin wrapper
 * over uv_thread_create_ex(); returns 0 or a UV_E* error code. */
int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
  uv_thread_options_t params;
  params.flags = UV_THREAD_NO_FLAGS;
  return uv_thread_create_ex(tid, &params, entry, arg);
}
97
/* Create a thread running entry(arg).
 *
 * If `params` carries UV_THREAD_HAS_STACK_SIZE the requested stack size is
 * rounded up to a whole number of pages; a size that does not fit in the
 * `unsigned` parameter _beginthreadex accepts is rejected with UV_EINVAL.
 * The thread is created suspended so its own handle can be stored in the
 * context (for uv_thread_self()) before it starts running.
 *
 * Returns 0 on success or a UV_E* error code. */
int uv_thread_create_ex(uv_thread_t* tid,
                        const uv_thread_options_t* params,
                        void (*entry)(void *arg),
                        void *arg) {
  struct thread_ctx* ctx;
  int err;
  HANDLE thread;
  SYSTEM_INFO sysinfo;
  size_t stack_size;
  size_t pagesize;

  stack_size =
      params->flags & UV_THREAD_HAS_STACK_SIZE ? params->stack_size : 0;

  if (stack_size != 0) {
    GetNativeSystemInfo(&sysinfo);
    pagesize = (size_t)sysinfo.dwPageSize;
    /* Round up to the nearest page boundary. */
    stack_size = (stack_size + pagesize - 1) &~ (pagesize - 1);

    /* _beginthreadex takes the stack size as `unsigned`; reject values that
     * would be silently truncated by the cast below. */
    if ((unsigned)stack_size != stack_size)
      return UV_EINVAL;
  }

  ctx = uv__malloc(sizeof(*ctx));
  if (ctx == NULL)
    return UV_ENOMEM;

  ctx->entry = entry;
  ctx->arg = arg;

  /* Create the thread in suspended state so we have a chance to pass
   * its own creation handle to it */
  thread = (HANDLE) _beginthreadex(NULL,
                                   (unsigned)stack_size,
                                   uv__thread_start,
                                   ctx,
                                   CREATE_SUSPENDED,
                                   NULL);
  if (thread == NULL) {
    /* _beginthreadex reports failure through errno. */
    err = errno;
    uv__free(ctx);
  } else {
    err = 0;
    *tid = thread;
    ctx->self = thread;  /* ctx is freed by uv__thread_start(). */
    ResumeThread(thread);
  }

  /* Map the CRT errno values _beginthreadex can produce onto UV_E* codes. */
  switch (err) {
    case 0:
      return 0;
    case EACCES:
      return UV_EACCES;
    case EAGAIN:
      return UV_EAGAIN;
    case EINVAL:
      return UV_EINVAL;
  }

  return UV_EIO;
}
160
/* Bind the thread to the CPUs whose bytes are non-zero in `cpumask` (one
 * byte per CPU).  If `oldmask` is non-NULL the previous affinity is written
 * there in the same one-byte-per-CPU format.  `mask_size` must be at least
 * uv_cpumask_size() bytes.
 *
 * Returns 0 on success, UV_EINVAL for a short mask or for a requested CPU
 * that is outside the process affinity mask, or a translated system error. */
int uv_thread_setaffinity(uv_thread_t* tid,
                          char* cpumask,
                          char* oldmask,
                          size_t mask_size) {
  int i;
  HANDLE hproc;
  DWORD_PTR procmask;
  DWORD_PTR sysmask;
  DWORD_PTR threadmask;
  DWORD_PTR oldthreadmask;
  int cpumasksize;

  cpumasksize = uv_cpumask_size();
  assert(cpumasksize > 0);
  if (mask_size < (size_t)cpumasksize)
    return UV_EINVAL;

  hproc = GetCurrentProcess();
  if (!GetProcessAffinityMask(hproc, &procmask, &sysmask))
    return uv_translate_sys_error(GetLastError());

  /* Build the DWORD_PTR bitmask.  The shifted constant must be DWORD_PTR
   * wide: a plain `1 << i` is an int shift, which is undefined behavior for
   * i >= 31 and is reachable on 64-bit builds where the mask covers up to
   * 8 * sizeof(DWORD_PTR) CPUs. */
  threadmask = 0;
  for (i = 0; i < cpumasksize; i++) {
    if (cpumask[i]) {
      if (procmask & ((DWORD_PTR)1 << i))
        threadmask |= (DWORD_PTR)1 << i;
      else
        return UV_EINVAL;
    }
  }

  oldthreadmask = SetThreadAffinityMask(*tid, threadmask);
  if (oldthreadmask == 0)
    return uv_translate_sys_error(GetLastError());

  if (oldmask != NULL) {
    for (i = 0; i < cpumasksize; i++)
      oldmask[i] = (oldthreadmask >> i) & 1;
  }

  return 0;
}
203
/* Report the thread's current affinity in `cpumask` (one byte per CPU).
 * `mask_size` must be at least uv_cpumask_size() bytes.
 *
 * Windows has no "query affinity" call, so the value is obtained by setting
 * the affinity to the full process mask and immediately restoring the old
 * mask that SetThreadAffinityMask returned.  NOTE(review): this briefly
 * widens the thread's affinity and is not atomic with respect to concurrent
 * affinity changes on the same thread. */
int uv_thread_getaffinity(uv_thread_t* tid,
                          char* cpumask,
                          size_t mask_size) {
  int i;
  HANDLE hproc;
  DWORD_PTR procmask;
  DWORD_PTR sysmask;
  DWORD_PTR threadmask;
  int cpumasksize;

  cpumasksize = uv_cpumask_size();
  assert(cpumasksize > 0);
  if (mask_size < (size_t)cpumasksize)
    return UV_EINVAL;

  hproc = GetCurrentProcess();
  if (!GetProcessAffinityMask(hproc, &procmask, &sysmask))
    return uv_translate_sys_error(GetLastError());

  /* First call reads (and temporarily replaces) the mask; second restores it. */
  threadmask = SetThreadAffinityMask(*tid, procmask);
  if (threadmask == 0 || SetThreadAffinityMask(*tid, threadmask) == 0)
    return uv_translate_sys_error(GetLastError());

  /* Expand the bitmask into one byte per CPU. */
  for (i = 0; i < cpumasksize; i++)
    cpumask[i] = (threadmask >> i) & 1;

  return 0;
}
232
uv_thread_getcpu(void)233 int uv_thread_getcpu(void) {
234 return GetCurrentProcessorNumber();
235 }
236
/* Return the calling thread's uv_thread_t handle.
 *
 * For threads started through uv_thread_create() this is the value stored in
 * TLS by uv__thread_start().  For foreign threads (e.g. the main thread) a
 * handle is duplicated on first use, cached in TLS and returned from then on.
 * NOTE(review): the duplicated handle is never closed; it lives for the rest
 * of the process. */
uv_thread_t uv_thread_self(void) {
  uv_thread_t key;
  uv_once(&uv__current_thread_init_guard, uv__init_current_thread_key);
  key = uv_key_get(&uv__current_thread_key);
  if (key == NULL) {
    /* If the thread wasn't started by uv_thread_create (such as the main
     * thread), we assign an id to it now. */
    if (!DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
                         GetCurrentProcess(), &key, 0,
                         FALSE, DUPLICATE_SAME_ACCESS)) {
      /* Cannot continue without a usable per-thread handle. */
      uv_fatal_error(GetLastError(), "DuplicateHandle");
    }
    uv_key_set(&uv__current_thread_key, key);
  }
  return key;
}
253
254
/* Wait for the thread to exit, then release its handle and clear *tid.
 * (WAIT_OBJECT_0 is 0, so any non-zero wait result is a failure here.) */
int uv_thread_join(uv_thread_t *tid) {
  if (WaitForSingleObject(*tid, INFINITE) != WAIT_OBJECT_0)
    return uv_translate_sys_error(GetLastError());

  CloseHandle(*tid);
  *tid = 0;
  MemoryBarrier(); /* For feature parity with pthread_join(). */
  return 0;
}
265
266
/* Two uv_thread_t values are equal when they hold the same handle value. */
int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) {
  return (*t1 == *t2) ? 1 : 0;
}
270
271
/* uv_mutex_t is a CRITICAL_SECTION; initialization cannot fail, so this
 * always returns 0. */
int uv_mutex_init(uv_mutex_t* mutex) {
  InitializeCriticalSection(mutex);
  return 0;
}


/* CRITICAL_SECTIONs are recursive by design, so this is the same as
 * uv_mutex_init(). */
int uv_mutex_init_recursive(uv_mutex_t* mutex) {
  return uv_mutex_init(mutex);
}


void uv_mutex_destroy(uv_mutex_t* mutex) {
  DeleteCriticalSection(mutex);
}


void uv_mutex_lock(uv_mutex_t* mutex) {
  EnterCriticalSection(mutex);
}


/* Returns 0 when the lock was acquired, UV_EBUSY when it is already held. */
int uv_mutex_trylock(uv_mutex_t* mutex) {
  if (TryEnterCriticalSection(mutex))
    return 0;
  else
    return UV_EBUSY;
}


void uv_mutex_unlock(uv_mutex_t* mutex) {
  LeaveCriticalSection(mutex);
}
304
/* Ensure that the ABI for this type remains stable in v1.x */
#ifdef _WIN64
STATIC_ASSERT(sizeof(uv_rwlock_t) == 80);
#else
STATIC_ASSERT(sizeof(uv_rwlock_t) == 48);
#endif

/* The struct is zeroed first so the unused ABI padding is deterministic;
 * only the embedded SRWLOCK is actually used.  Cannot fail. */
int uv_rwlock_init(uv_rwlock_t* rwlock) {
  memset(rwlock, 0, sizeof(*rwlock));
  InitializeSRWLock(&rwlock->read_write_lock_);

  return 0;
}
318
319
void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
  /* SRWLock does not need explicit destruction so long as there are no waiting threads
    See: https://docs.microsoft.com/windows/win32/api/synchapi/nf-synchapi-initializesrwlock#remarks */
}


/* Acquire the lock in shared (reader) mode; blocks while a writer holds it. */
void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
  AcquireSRWLockShared(&rwlock->read_write_lock_);
}


/* Non-blocking shared acquire: 0 on success, UV_EBUSY if unavailable. */
int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
  if (!TryAcquireSRWLockShared(&rwlock->read_write_lock_))
    return UV_EBUSY;

  return 0;
}


void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
  ReleaseSRWLockShared(&rwlock->read_write_lock_);
}


/* Acquire the lock in exclusive (writer) mode. */
void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
  AcquireSRWLockExclusive(&rwlock->read_write_lock_);
}


/* Non-blocking exclusive acquire: 0 on success, UV_EBUSY if unavailable. */
int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
  if (!TryAcquireSRWLockExclusive(&rwlock->read_write_lock_))
    return UV_EBUSY;

  return 0;
}


void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
  ReleaseSRWLockExclusive(&rwlock->read_write_lock_);
}
360
361
/* uv_sem_t wraps a Win32 semaphore HANDLE.  `value` is the initial count;
 * the maximum count is INT_MAX.  Returns 0 or a translated system error. */
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
  *sem = CreateSemaphore(NULL, value, INT_MAX, NULL);
  if (*sem == NULL)
    return uv_translate_sys_error(GetLastError());
  else
    return 0;
}


/* Aborts on failure: a close error indicates handle corruption or misuse,
 * which there is no way to report from this void API. */
void uv_sem_destroy(uv_sem_t* sem) {
  if (!CloseHandle(*sem))
    abort();
}


/* Increment the semaphore count by one; aborts on failure. */
void uv_sem_post(uv_sem_t* sem) {
  if (!ReleaseSemaphore(*sem, 1, NULL))
    abort();
}


/* Block until the semaphore can be decremented; aborts on failure. */
void uv_sem_wait(uv_sem_t* sem) {
  if (WaitForSingleObject(*sem, INFINITE) != WAIT_OBJECT_0)
    abort();
}


/* Non-blocking decrement: 0 on success, UV_EAGAIN when the count is zero.
 * Any other wait result is treated as fatal. */
int uv_sem_trywait(uv_sem_t* sem) {
  DWORD r = WaitForSingleObject(*sem, 0);

  if (r == WAIT_OBJECT_0)
    return 0;

  if (r == WAIT_TIMEOUT)
    return UV_EAGAIN;

  abort();
  return -1; /* Satisfy the compiler. */
}
401
402
/* uv_cond_t wraps a Win32 CONDITION_VARIABLE; initialization cannot fail. */
int uv_cond_init(uv_cond_t* cond) {
  InitializeConditionVariable(&cond->cond_var);
  return 0;
}


void uv_cond_destroy(uv_cond_t* cond) {
  /* nothing to do */
  (void) &cond;
}


/* Wake one waiter, if any. */
void uv_cond_signal(uv_cond_t* cond) {
  WakeConditionVariable(&cond->cond_var);
}


/* Wake all current waiters. */
void uv_cond_broadcast(uv_cond_t* cond) {
  WakeAllConditionVariable(&cond->cond_var);
}


/* Atomically release `mutex` and wait on `cond`; re-acquires the mutex
 * before returning.  Aborts on failure (an infinite wait cannot time out). */
void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
  if (!SleepConditionVariableCS(&cond->cond_var, mutex, INFINITE))
    abort();
}


/* Like uv_cond_wait() but with a timeout given in nanoseconds; the division
 * by 1e6 converts it to the milliseconds the Win32 API expects (so sub-ms
 * precision is lost).  Returns 0 on wake-up, UV_ETIMEDOUT on timeout; any
 * other failure is fatal. */
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
  if (SleepConditionVariableCS(&cond->cond_var, mutex, (DWORD)(timeout / 1e6)))
    return 0;
  if (GetLastError() != ERROR_TIMEOUT)
    abort();
  return UV_ETIMEDOUT;
}
438
439
/* Allocate a TLS slot.  Returns 0 on success or UV_ENOMEM when the process
 * has exhausted its TLS indexes. */
int uv_key_create(uv_key_t* key) {
  key->tls_index = TlsAlloc();
  if (key->tls_index == TLS_OUT_OF_INDEXES)
    return UV_ENOMEM;
  return 0;
}


/* Release the TLS slot; aborts on failure (e.g. an already-freed index).
 * The index is poisoned afterwards to catch use-after-delete. */
void uv_key_delete(uv_key_t* key) {
  if (TlsFree(key->tls_index) == FALSE)
    abort();
  key->tls_index = TLS_OUT_OF_INDEXES;
}


/* Read the calling thread's value for `key` (NULL if never set).
 * TlsGetValue returns NULL both for a stored NULL and on error; the
 * GetLastError check distinguishes the two, aborting on a real failure. */
void* uv_key_get(uv_key_t* key) {
  void* value;

  value = TlsGetValue(key->tls_index);
  if (value == NULL)
    if (GetLastError() != ERROR_SUCCESS)
      abort();

  return value;
}


/* Store `value` in the calling thread's slot for `key`; aborts on failure. */
void uv_key_set(uv_key_t* key, void* value) {
  if (TlsSetValue(key->tls_index, value) == FALSE)
    abort();
}
471