/*
   +----------------------------------------------------------------------+
   | Thread Safe Resource Manager                                         |
   +----------------------------------------------------------------------+
   | Copyright (c) 1999-2011, Andi Gutmans, Sascha Schumann, Zeev Suraski |
   | This source file is subject to the TSRM license, that is bundled     |
   | with this package in the file LICENSE                                |
   +----------------------------------------------------------------------+
   | Authors: Zeev Suraski <zeev@php.net>                                 |
   +----------------------------------------------------------------------+
*/

#include "TSRM.h"

#ifdef ZTS

#include <stdio.h>
#include <stdarg.h>

#if ZEND_DEBUG
# include <assert.h>
# define TSRM_ASSERT(c) assert(c)
#else
# define TSRM_ASSERT(c)
#endif

typedef struct _tsrm_tls_entry tsrm_tls_entry;

/* TSRMLS_CACHE_DEFINE; is already done in Zend; this file is always compiled statically. */
TSRMLS_CACHE_EXTERN();

struct _tsrm_tls_entry {
	void **storage;
	int count;
	THREAD_T thread_id;
	tsrm_tls_entry *next;
};


typedef struct {
	size_t size;
	ts_allocate_ctor ctor;
	ts_allocate_dtor dtor;
	size_t fast_offset;
	int done;
} tsrm_resource_type;


/* The memory manager table */
static tsrm_tls_entry **tsrm_tls_table=NULL;
static int tsrm_tls_table_size;
static ts_rsrc_id id_count;

/* The resource sizes table */
static tsrm_resource_type *resource_types_table=NULL;
static int resource_types_table_size;

/* Reserved space for fast globals access */
static size_t tsrm_reserved_pos = 0;
static size_t tsrm_reserved_size = 0;

static MUTEX_T tsmm_mutex;     /* thread-safe memory manager mutex */
static MUTEX_T tsrm_env_mutex; /* tsrm environ mutex */

/* New thread handlers */
static tsrm_thread_begin_func_t tsrm_new_thread_begin_handler = NULL;
static tsrm_thread_end_func_t tsrm_new_thread_end_handler = NULL;
static tsrm_shutdown_func_t tsrm_shutdown_handler = NULL;

/* Debug support */
int tsrm_error(int level, const char *format, ...);

/* Debug error reporting level and output file */
static int tsrm_error_level;
static FILE *tsrm_error_file;

#ifdef TSRM_DEBUG
#define TSRM_ERROR(args) tsrm_error args
#define TSRM_SAFE_RETURN_RSRC(array, offset, range) \
	{ \
		int unshuffled_offset = TSRM_UNSHUFFLE_RSRC_ID(offset); \
		\
		if (offset==0) { \
			return &array; \
		} else if ((unshuffled_offset)>=0 && (unshuffled_offset)<(range)) { \
			TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Successfully fetched resource id %d for thread id %ld - 0x%0.8X", \
						unshuffled_offset, (long) thread_resources->thread_id, array[unshuffled_offset])); \
			return array[unshuffled_offset]; \
		} else { \
			TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Resource id %d is out of range (%d..%d)", \
						unshuffled_offset, TSRM_SHUFFLE_RSRC_ID(0), TSRM_SHUFFLE_RSRC_ID(thread_resources->count-1))); \
			return NULL; \
		} \
	}
#else
#define TSRM_ERROR(args)
#define TSRM_SAFE_RETURN_RSRC(array, offset, range) \
	if (offset==0) { \
		return &array; \
	} else { \
		return array[TSRM_UNSHUFFLE_RSRC_ID(offset)]; \
	}
#endif
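
/* Usage sketch (illustrative only, not part of the original file): with a
 * shuffled resource id, TSRM_SAFE_RETURN_RSRC returns the pointer stored in
 * the thread's storage array; with an id of 0 it returns the address of the
 * storage array itself. A caller therefore reaches its per-thread globals
 * roughly like this ("my_globals" and "my_rsrc_id" are hypothetical names):
 *
 *   my_globals *g = (my_globals *) ts_resource_ex(my_rsrc_id, NULL);
 */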

#ifdef TSRM_WIN32
static DWORD tls_key;
# define tsrm_tls_set(what) TlsSetValue(tls_key, (void*)(what))
# define tsrm_tls_get() TlsGetValue(tls_key)
#else
static pthread_key_t tls_key;
# define tsrm_tls_set(what) pthread_setspecific(tls_key, (void*)(what))
# define tsrm_tls_get() pthread_getspecific(tls_key)
#endif

TSRM_TLS bool in_main_thread = false;
TSRM_TLS bool is_thread_shutdown = false;

/* Startup TSRM (call once for the entire process) */
TSRM_API bool tsrm_startup(int expected_threads, int expected_resources, int debug_level, const char *debug_filename)
{/*{{{*/
#ifdef TSRM_WIN32
	tls_key = TlsAlloc();
#else
	pthread_key_create(&tls_key, 0);
#endif

	/* ensure singleton */
	in_main_thread = true;
	is_thread_shutdown = false;

	tsrm_error_file = stderr;
	tsrm_error_set(debug_level, debug_filename);
	tsrm_tls_table_size = expected_threads;

	tsrm_tls_table = (tsrm_tls_entry **) calloc(tsrm_tls_table_size, sizeof(tsrm_tls_entry *));
	if (!tsrm_tls_table) {
		TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Unable to allocate TLS table"));
		is_thread_shutdown = true;
		return 0;
	}
	id_count=0;

	resource_types_table_size = expected_resources;
	resource_types_table = (tsrm_resource_type *) calloc(resource_types_table_size, sizeof(tsrm_resource_type));
	if (!resource_types_table) {
		TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Unable to allocate resource types table"));
		is_thread_shutdown = true;
		free(tsrm_tls_table);
		return 0;
	}

	tsmm_mutex = tsrm_mutex_alloc();

	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Started up TSRM, %d expected threads, %d expected resources", expected_threads, expected_resources));

	tsrm_reserved_pos = 0;
	tsrm_reserved_size = 0;

	tsrm_env_mutex = tsrm_mutex_alloc();

	return 1;
}/*}}}*/
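
/* Usage sketch (illustrative only, not part of this file): a multithreaded
 * embedder pairs one tsrm_startup() with one tsrm_shutdown() per process,
 * both from the main thread. The expected counts only size internal tables;
 * worker threads later obtain their resources via ts_resource_ex() and
 * release them with ts_free_thread() before exiting.
 *
 *   if (!tsrm_startup(1, 1, 0, NULL)) {
 *       // handle allocation failure
 *   }
 *   ...                // spawn threads, run requests
 *   tsrm_shutdown();   // main thread, once, at process end
 */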

static void ts_free_resources(tsrm_tls_entry *thread_resources)
{
	/* Need to destroy in reverse order to respect dependencies. */
	for (int i = thread_resources->count - 1; i >= 0; i--) {
		if (!resource_types_table[i].done) {
			if (resource_types_table[i].dtor) {
				resource_types_table[i].dtor(thread_resources->storage[i]);
			}

			if (!resource_types_table[i].fast_offset) {
				free(thread_resources->storage[i]);
			}
		}
	}

	free(thread_resources->storage);
}

/* Shutdown TSRM (call once for the entire process) */
TSRM_API void tsrm_shutdown(void)
{/*{{{*/
	if (is_thread_shutdown) {
		/* shutdown must only occur once */
		return;
	}

	is_thread_shutdown = true;

	if (!in_main_thread) {
		/* only the main thread may shutdown tsrm */
		return;
	}

	for (int i=0; i<tsrm_tls_table_size; i++) {
		tsrm_tls_entry *p = tsrm_tls_table[i], *next_p;

		while (p) {
			next_p = p->next;
			if (resource_types_table) {
				/* This call will already free p->storage for us */
				ts_free_resources(p);
			} else {
				free(p->storage);
			}
			free(p);
			p = next_p;
		}
	}
	free(tsrm_tls_table);
	free(resource_types_table);
	tsrm_mutex_free(tsmm_mutex);
	tsrm_mutex_free(tsrm_env_mutex);
	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Shutdown TSRM"));
	if (tsrm_error_file!=stderr) {
		fclose(tsrm_error_file);
	}
#ifdef TSRM_WIN32
	TlsFree(tls_key);
#else
	pthread_setspecific(tls_key, 0);
	pthread_key_delete(tls_key);
#endif
	if (tsrm_shutdown_handler) {
		tsrm_shutdown_handler();
	}
	tsrm_new_thread_begin_handler = NULL;
	tsrm_new_thread_end_handler = NULL;
	tsrm_shutdown_handler = NULL;

	tsrm_reserved_pos = 0;
	tsrm_reserved_size = 0;
}/*}}}*/

/* {{{ */
/* environ lock api */
TSRM_API void tsrm_env_lock(void) {
	tsrm_mutex_lock(tsrm_env_mutex);
}

TSRM_API void tsrm_env_unlock(void) {
	tsrm_mutex_unlock(tsrm_env_mutex);
} /* }}} */
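
/* Usage sketch (illustrative only): code that reads or mutates the process
 * environment from multiple threads should bracket the access with the
 * environ lock, since getenv()/putenv() are not thread-safe:
 *
 *   tsrm_env_lock();
 *   const char *tz = getenv("TZ");
 *   // ... copy the value while the lock is held ...
 *   tsrm_env_unlock();
 */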

/* enlarge the arrays for the already active threads */
static void tsrm_update_active_threads(void)
{/*{{{*/
	for (int i=0; i<tsrm_tls_table_size; i++) {
		tsrm_tls_entry *p = tsrm_tls_table[i];

		while (p) {
			if (p->count < id_count) {
				int j;

				p->storage = (void *) realloc(p->storage, sizeof(void *)*id_count);
				for (j=p->count; j<id_count; j++) {
					if (resource_types_table[j].fast_offset) {
						p->storage[j] = (void *) (((char*)p) + resource_types_table[j].fast_offset);
					} else {
						p->storage[j] = (void *) malloc(resource_types_table[j].size);
					}
					if (resource_types_table[j].ctor) {
						resource_types_table[j].ctor(p->storage[j]);
					}
				}
				p->count = id_count;
			}
			p = p->next;
		}
	}
}/*}}}*/


/* allocates a new thread-safe-resource id */
TSRM_API ts_rsrc_id ts_allocate_id(ts_rsrc_id *rsrc_id, size_t size, ts_allocate_ctor ctor, ts_allocate_dtor dtor)
{/*{{{*/
	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Obtaining a new resource id, %d bytes", size));

	tsrm_mutex_lock(tsmm_mutex);

	/* obtain a resource id */
	*rsrc_id = TSRM_SHUFFLE_RSRC_ID(id_count++);
	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Obtained resource id %d", *rsrc_id));

	/* store the new resource type in the resource sizes table */
	if (resource_types_table_size < id_count) {
		tsrm_resource_type *_tmp;
		_tmp = (tsrm_resource_type *) realloc(resource_types_table, sizeof(tsrm_resource_type)*id_count);
		if (!_tmp) {
			TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Unable to allocate storage for resource"));
			*rsrc_id = 0;
			tsrm_mutex_unlock(tsmm_mutex);
			return 0;
		}
		resource_types_table = _tmp;
		resource_types_table_size = id_count;
	}
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].size = size;
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].ctor = ctor;
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].dtor = dtor;
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].fast_offset = 0;
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].done = 0;

	tsrm_update_active_threads();
	tsrm_mutex_unlock(tsmm_mutex);

	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Successfully allocated new resource id %d", *rsrc_id));
	return *rsrc_id;
}/*}}}*/
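
/* Usage sketch (illustrative only; "my_globals", "my_globals_id" and the
 * ctor/dtor names are hypothetical): a module typically allocates its id once
 * at module init and then fetches the per-thread block on demand:
 *
 *   static ts_rsrc_id my_globals_id;
 *
 *   void my_module_init(void)
 *   {
 *       ts_allocate_id(&my_globals_id, sizeof(my_globals),
 *                      my_globals_ctor, my_globals_dtor);
 *   }
 *
 *   my_globals *my_fetch(void)
 *   {
 *       return (my_globals *) ts_resource_ex(my_globals_id, NULL);
 *   }
 */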


/* Reserve space for fast thread-safe-resources */
TSRM_API void tsrm_reserve(size_t size)
{/*{{{*/
	tsrm_reserved_pos = 0;
	tsrm_reserved_size = TSRM_ALIGNED_SIZE(size);
}/*}}}*/


/* allocates a new fast thread-safe-resource id */
TSRM_API ts_rsrc_id ts_allocate_fast_id(ts_rsrc_id *rsrc_id, size_t *offset, size_t size, ts_allocate_ctor ctor, ts_allocate_dtor dtor)
{/*{{{*/
	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Obtaining a new fast resource id, %d bytes", size));

	tsrm_mutex_lock(tsmm_mutex);

	/* obtain a resource id */
	*rsrc_id = TSRM_SHUFFLE_RSRC_ID(id_count++);
	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Obtained resource id %d", *rsrc_id));

	size = TSRM_ALIGNED_SIZE(size);
	if (tsrm_reserved_size - tsrm_reserved_pos < size) {
		TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Unable to allocate space for fast resource"));
		*rsrc_id = 0;
		*offset = 0;
		tsrm_mutex_unlock(tsmm_mutex);
		return 0;
	}

	*offset = TSRM_ALIGNED_SIZE(sizeof(tsrm_tls_entry)) + tsrm_reserved_pos;
	tsrm_reserved_pos += size;

	/* store the new resource type in the resource sizes table */
	if (resource_types_table_size < id_count) {
		tsrm_resource_type *_tmp;
		_tmp = (tsrm_resource_type *) realloc(resource_types_table, sizeof(tsrm_resource_type)*id_count);
		if (!_tmp) {
			TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Unable to allocate storage for resource"));
			*rsrc_id = 0;
			tsrm_mutex_unlock(tsmm_mutex);
			return 0;
		}
		resource_types_table = _tmp;
		resource_types_table_size = id_count;
	}
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].size = size;
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].ctor = ctor;
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].dtor = dtor;
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].fast_offset = *offset;
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].done = 0;

	tsrm_update_active_threads();
	tsrm_mutex_unlock(tsmm_mutex);

	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Successfully allocated new resource id %d", *rsrc_id));
	return *rsrc_id;
}/*}}}*/
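
/* Usage sketch (illustrative only): fast resources live inside the
 * tsrm_tls_entry block itself, so the total space must be reserved via
 * tsrm_reserve() before any fast id is handed out and before the first
 * thread allocates its entry ("my_fast_globals", "my_fast_id" and
 * "my_fast_offset" are hypothetical names):
 *
 *   tsrm_reserve(sizeof(my_fast_globals));
 *   ts_allocate_fast_id(&my_fast_id, &my_fast_offset,
 *                       sizeof(my_fast_globals), NULL, NULL);
 *
 * The returned offset is relative to the start of the thread's
 * tsrm_tls_entry, which lets callers reach these globals by pointer
 * arithmetic from the cached entry instead of through the storage table.
 */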

static void set_thread_local_storage_resource_to(tsrm_tls_entry *thread_resource)
{
	tsrm_tls_set(thread_resource);
	TSRMLS_CACHE = thread_resource;
}

/* Must be called with tsmm_mutex held */
static void allocate_new_resource(tsrm_tls_entry **thread_resources_ptr, THREAD_T thread_id)
{/*{{{*/
	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Creating data structures for thread %x", thread_id));
	(*thread_resources_ptr) = (tsrm_tls_entry *) malloc(TSRM_ALIGNED_SIZE(sizeof(tsrm_tls_entry)) + tsrm_reserved_size);
	(*thread_resources_ptr)->storage = NULL;
	if (id_count > 0) {
		(*thread_resources_ptr)->storage = (void **) malloc(sizeof(void *)*id_count);
	}
	(*thread_resources_ptr)->count = id_count;
	(*thread_resources_ptr)->thread_id = thread_id;
	(*thread_resources_ptr)->next = NULL;

	/* Set thread local storage to this new thread resources structure */
	set_thread_local_storage_resource_to(*thread_resources_ptr);

	if (tsrm_new_thread_begin_handler) {
		tsrm_new_thread_begin_handler(thread_id);
	}
	for (int i=0; i<id_count; i++) {
		if (resource_types_table[i].done) {
			(*thread_resources_ptr)->storage[i] = NULL;
		} else {
			if (resource_types_table[i].fast_offset) {
				(*thread_resources_ptr)->storage[i] = (void *) (((char*)(*thread_resources_ptr)) + resource_types_table[i].fast_offset);
			} else {
				(*thread_resources_ptr)->storage[i] = (void *) malloc(resource_types_table[i].size);
			}
			if (resource_types_table[i].ctor) {
				resource_types_table[i].ctor((*thread_resources_ptr)->storage[i]);
			}
		}
	}

	if (tsrm_new_thread_end_handler) {
		tsrm_new_thread_end_handler(thread_id);
	}
}/*}}}*/

/* fetches the requested resource for the current thread */
TSRM_API void *ts_resource_ex(ts_rsrc_id id, THREAD_T *th_id)
{/*{{{*/
	THREAD_T thread_id;
	int hash_value;
	tsrm_tls_entry *thread_resources, **last_thread_resources;

	if (!th_id) {
		/* Fast path for looking up the resources for the current
		 * thread. It's used by just about every call to
		 * ts_resource_ex(). This avoids the need for a mutex lock
		 * and our hashtable lookup.
		 */
		thread_resources = tsrm_tls_get();

		if (thread_resources) {
			TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Fetching resource id %d for current thread %ld", id, (long) thread_resources->thread_id));
			/* Read a specific resource from the thread's resources.
			 * This is called outside of a mutex, so we have to be aware of
			 * external changes to the structure as we read it.
			 */
			TSRM_SAFE_RETURN_RSRC(thread_resources->storage, id, thread_resources->count);
		}
		thread_id = tsrm_thread_id();
	} else {
		thread_id = *th_id;
	}

	TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Fetching resource id %d for thread %ld", id, (long) thread_id));
	tsrm_mutex_lock(tsmm_mutex);

	hash_value = THREAD_HASH_OF(thread_id, tsrm_tls_table_size);
	thread_resources = tsrm_tls_table[hash_value];

	if (!thread_resources) {
		allocate_new_resource(&tsrm_tls_table[hash_value], thread_id);
		tsrm_mutex_unlock(tsmm_mutex);
		return ts_resource_ex(id, &thread_id);
	} else {
		last_thread_resources = &tsrm_tls_table[hash_value];
		while (thread_resources->thread_id != thread_id) {
			last_thread_resources = &thread_resources->next;
			if (thread_resources->next) {
				thread_resources = thread_resources->next;
			} else {
				allocate_new_resource(&thread_resources->next, thread_id);
				tsrm_mutex_unlock(tsmm_mutex);
				return ts_resource_ex(id, &thread_id);
			}
		}
	}

	/* It's possible that the current thread resources are requested, and that we get here.
	 * This means that the TSRM key pointer and cached pointer are NULL, but there is still
	 * a thread resource associated with this ID in the hashtable. This can occur if a thread
	 * goes away, but its resources are never cleaned up, and then that thread ID is reused.
	 * Since we don't always have a way to know when a thread goes away, we can't clean up
	 * the thread's resources before the new thread spawns.
	 * To solve this issue, we'll free up the old thread resources gracefully (gracefully
	 * because there might still be resources open, like a database connection, which need to
	 * be shut down cleanly). After freeing up, we'll create the new resources for this thread
	 * as if the stale resources never existed in the first place. From that point forward,
	 * it is as if that situation never occurred.
	 * The fact that this situation happens isn't that bad because a child process containing
	 * threads will eventually be respawned anyway by the SAPI, so the stale threads won't last
	 * forever. */
	TSRM_ASSERT(thread_resources->thread_id == thread_id);
	if (thread_id == tsrm_thread_id() && !tsrm_tls_get()) {
		tsrm_tls_entry *next = thread_resources->next;
		/* In case extensions don't use the pointer passed from the dtor, but incorrectly
		 * use the global pointer, we need to set up the global pointer temporarily here. */
		set_thread_local_storage_resource_to(thread_resources);
		/* Free up the old resources from the old thread instance */
		ts_free_resources(thread_resources);
		free(thread_resources);
		/* Allocate a new resource at the same point in the linked list, and relink the next pointer */
		allocate_new_resource(last_thread_resources, thread_id);
		thread_resources = *last_thread_resources;
		thread_resources->next = next;
		/* We don't have to tail-call ts_resource_ex; we can take the fast path to the return
		 * because we already have the correct pointer. */
	}

	tsrm_mutex_unlock(tsmm_mutex);

	/* Read a specific resource from the thread's resources.
	 * This is called outside of a mutex, so we have to be aware of
	 * external changes to the structure as we read it.
	 */
	TSRM_SAFE_RETURN_RSRC(thread_resources->storage, id, thread_resources->count);
}/*}}}*/


/* frees all resources allocated for the current thread */
void ts_free_thread(void)
{/*{{{*/
	tsrm_tls_entry *thread_resources;
	THREAD_T thread_id = tsrm_thread_id();
	int hash_value;
	tsrm_tls_entry *last=NULL;

	TSRM_ASSERT(!in_main_thread);

	tsrm_mutex_lock(tsmm_mutex);
	hash_value = THREAD_HASH_OF(thread_id, tsrm_tls_table_size);
	thread_resources = tsrm_tls_table[hash_value];

	while (thread_resources) {
		if (thread_resources->thread_id == thread_id) {
			ts_free_resources(thread_resources);
			if (last) {
				last->next = thread_resources->next;
			} else {
				tsrm_tls_table[hash_value] = thread_resources->next;
			}
			tsrm_tls_set(0);
			free(thread_resources);
			break;
		}
		if (thread_resources->next) {
			last = thread_resources;
		}
		thread_resources = thread_resources->next;
	}
	tsrm_mutex_unlock(tsmm_mutex);
}/*}}}*/
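
/* Usage sketch (illustrative only): a worker thread that was handed TSRM
 * resources should release them itself, from its own context, right before
 * it terminates; the main thread instead relies on tsrm_shutdown():
 *
 *   static void *worker(void *arg)   // hypothetical pthread entry point
 *   {
 *       // ... do work that touches per-thread globals ...
 *       ts_free_thread();            // run dtors, unlink this thread's entry
 *       return NULL;
 *   }
 */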

/* deallocates all occurrences of a given id */
void ts_free_id(ts_rsrc_id id)
{/*{{{*/
	int rsrc_id = TSRM_UNSHUFFLE_RSRC_ID(id);

	tsrm_mutex_lock(tsmm_mutex);

	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Freeing resource id %d", id));

	if (tsrm_tls_table) {
		for (int i=0; i<tsrm_tls_table_size; i++) {
			tsrm_tls_entry *p = tsrm_tls_table[i];

			while (p) {
				if (p->count > rsrc_id && p->storage[rsrc_id]) {
					if (resource_types_table) {
						if (resource_types_table[rsrc_id].dtor) {
							resource_types_table[rsrc_id].dtor(p->storage[rsrc_id]);
						}
						if (!resource_types_table[rsrc_id].fast_offset) {
							free(p->storage[rsrc_id]);
						}
					}
					p->storage[rsrc_id] = NULL;
				}
				p = p->next;
			}
		}
	}
	resource_types_table[rsrc_id].done = 1;

	tsrm_mutex_unlock(tsmm_mutex);

	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Successfully freed resource id %d", id));
}/*}}}*/

TSRM_API void ts_apply_for_id(ts_rsrc_id id, void (*cb)(void *))
{
	int rsrc_id = TSRM_UNSHUFFLE_RSRC_ID(id);

	tsrm_mutex_lock(tsmm_mutex);

	if (tsrm_tls_table && resource_types_table) {
		for (int i = 0; i < tsrm_tls_table_size; i++) {
			tsrm_tls_entry *p = tsrm_tls_table[i];

			while (p) {
				if (p->count > rsrc_id && p->storage[rsrc_id]) {
					cb(p->storage[rsrc_id]);
				}
				p = p->next;
			}
		}
	}

	tsrm_mutex_unlock(tsmm_mutex);
}
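
/* Usage sketch (illustrative only): ts_apply_for_id() runs a callback over
 * every thread's instance of one resource, which is handy for process-wide
 * invalidation ("my_globals", "my_globals_id" and "reset_flag" are
 * hypothetical names):
 *
 *   static void reset_flag(void *ptr)
 *   {
 *       ((my_globals *) ptr)->needs_refresh = 1;
 *   }
 *
 *   ts_apply_for_id(my_globals_id, reset_flag);
 */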

/*
 * Utility Functions
 */

/* Obtain the current thread id */
TSRM_API THREAD_T tsrm_thread_id(void)
{/*{{{*/
#ifdef TSRM_WIN32
	return GetCurrentThreadId();
#else
	return pthread_self();
#endif
}/*}}}*/


/* Allocate a mutex */
TSRM_API MUTEX_T tsrm_mutex_alloc(void)
{/*{{{*/
	MUTEX_T mutexp;
#ifdef TSRM_WIN32
	mutexp = malloc(sizeof(CRITICAL_SECTION));
	InitializeCriticalSection(mutexp);
#else
	mutexp = (pthread_mutex_t *) malloc(sizeof(pthread_mutex_t));
	pthread_mutex_init(mutexp, NULL);
#endif
#ifdef THR_DEBUG
	printf("Mutex created thread: %d\n", mythreadid());
#endif
	return mutexp;
}/*}}}*/


/* Free a mutex */
TSRM_API void tsrm_mutex_free(MUTEX_T mutexp)
{/*{{{*/
	if (mutexp) {
#ifdef TSRM_WIN32
		DeleteCriticalSection(mutexp);
		free(mutexp);
#else
		pthread_mutex_destroy(mutexp);
		free(mutexp);
#endif
	}
#ifdef THR_DEBUG
	printf("Mutex freed thread: %d\n", mythreadid());
#endif
}/*}}}*/


/*
  Lock a mutex.
  A return value of 0 indicates success
*/
TSRM_API int tsrm_mutex_lock(MUTEX_T mutexp)
{/*{{{*/
	TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Mutex locked thread: %ld", tsrm_thread_id()));
#ifdef TSRM_WIN32
	EnterCriticalSection(mutexp);
	return 0;
#else
	return pthread_mutex_lock(mutexp);
#endif
}/*}}}*/


/*
  Unlock a mutex.
  A return value of 0 indicates success
*/
TSRM_API int tsrm_mutex_unlock(MUTEX_T mutexp)
{/*{{{*/
	TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Mutex unlocked thread: %ld", tsrm_thread_id()));
#ifdef TSRM_WIN32
	LeaveCriticalSection(mutexp);
	return 0;
#else
	return pthread_mutex_unlock(mutexp);
#endif
}/*}}}*/
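
/* Usage sketch (illustrative only): TSRM mutexes are plain allocate/lock/
 * unlock/free handles and can protect embedder state shared across threads
 * ("counter_lock" and "shared_counter" are hypothetical names):
 *
 *   static MUTEX_T counter_lock;
 *   static int shared_counter;
 *
 *   counter_lock = tsrm_mutex_alloc();   // once, e.g. at module startup
 *
 *   tsrm_mutex_lock(counter_lock);
 *   shared_counter++;
 *   tsrm_mutex_unlock(counter_lock);
 *
 *   tsrm_mutex_free(counter_lock);       // once, at module shutdown
 */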

/*
  Changes the signal mask of the calling thread
*/
#ifdef HAVE_SIGPROCMASK
TSRM_API int tsrm_sigmask(int how, const sigset_t *set, sigset_t *oldset)
{/*{{{*/
	TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Changed sigmask in thread: %ld", tsrm_thread_id()));

	return pthread_sigmask(how, set, oldset);
}/*}}}*/
#endif


TSRM_API void *tsrm_set_new_thread_begin_handler(tsrm_thread_begin_func_t new_thread_begin_handler)
{/*{{{*/
	void *retval = (void *) tsrm_new_thread_begin_handler;

	tsrm_new_thread_begin_handler = new_thread_begin_handler;
	return retval;
}/*}}}*/


TSRM_API void *tsrm_set_new_thread_end_handler(tsrm_thread_end_func_t new_thread_end_handler)
{/*{{{*/
	void *retval = (void *) tsrm_new_thread_end_handler;

	tsrm_new_thread_end_handler = new_thread_end_handler;
	return retval;
}/*}}}*/


TSRM_API void *tsrm_set_shutdown_handler(tsrm_shutdown_func_t shutdown_handler)
{/*{{{*/
	void *retval = (void *) tsrm_shutdown_handler;

	tsrm_shutdown_handler = shutdown_handler;
	return retval;
}/*}}}*/


/*
 * Debug support
 */

#ifdef TSRM_DEBUG
int tsrm_error(int level, const char *format, ...)
{/*{{{*/
	if (level<=tsrm_error_level) {
		va_list args;
		int size;

		fprintf(tsrm_error_file, "TSRM: ");
		va_start(args, format);
		size = vfprintf(tsrm_error_file, format, args);
		va_end(args);
		fprintf(tsrm_error_file, "\n");
		fflush(tsrm_error_file);
		return size;
	} else {
		return 0;
	}
}/*}}}*/
#endif


void tsrm_error_set(int level, const char *debug_filename)
{/*{{{*/
	tsrm_error_level = level;

#ifdef TSRM_DEBUG
	if (tsrm_error_file!=stderr) { /* close files opened earlier */
		fclose(tsrm_error_file);
	}

	if (debug_filename) {
		tsrm_error_file = fopen(debug_filename, "w");
		if (!tsrm_error_file) {
			tsrm_error_file = stderr;
		}
	} else {
		tsrm_error_file = stderr;
	}
#endif
}/*}}}*/

TSRM_API void *tsrm_get_ls_cache(void)
{/*{{{*/
	return tsrm_tls_get();
}/*}}}*/

/* Returns offset of tsrm_ls_cache slot from Thread Control Block address */
TSRM_API size_t tsrm_get_ls_cache_tcb_offset(void)
{/*{{{*/
#if defined(__APPLE__) && defined(__x86_64__)
	// TODO: Implement support for fast JIT ZTS code ???
	return 0;
#elif defined(__x86_64__) && defined(__GNUC__) && !defined(__FreeBSD__) && \
	!defined(__NetBSD__) && !defined(__OpenBSD__) && !defined(__MUSL__) && \
	!defined(__HAIKU__)
	size_t ret;

	asm ("movq _tsrm_ls_cache@gottpoff(%%rip),%0"
		: "=r" (ret));
	return ret;
#elif defined(__i386__) && defined(__GNUC__) && !defined(__FreeBSD__) && \
	!defined(__NetBSD__) && !defined(__OpenBSD__) && !defined(__MUSL__) && \
	!defined(__HAIKU__)
	size_t ret;

	asm ("leal _tsrm_ls_cache@ntpoff,%0"
		: "=r" (ret));
	return ret;
#elif defined(__aarch64__)
	size_t ret;

# ifdef __APPLE__
	// Points to the struct TLVDescriptor for _tsrm_ls_cache on macOS.
	asm("adrp %0, #__tsrm_ls_cache@TLVPPAGE\n\t"
		"ldr %0, [%0, #__tsrm_ls_cache@TLVPPAGEOFF]"
		: "=r" (ret));
# elif defined(TSRM_TLS_MODEL_DEFAULT)
	/* Surplus Static TLS space isn't guaranteed. */
	ret = 0;
# elif defined(TSRM_TLS_MODEL_INITIAL_EXEC)
	asm("adrp %0, :gottprel:_tsrm_ls_cache\n\t"
		"ldr %0, [%0, #:gottprel_lo12:_tsrm_ls_cache]"
		: "=r" (ret));
# elif defined(TSRM_TLS_MODEL_LOCAL_EXEC)
	asm("mov %0, xzr\n\t"
		"add %0, %0, #:tprel_hi12:_tsrm_ls_cache, lsl #12\n\t"
		"add %0, %0, #:tprel_lo12_nc:_tsrm_ls_cache"
		: "=r" (ret));
# else
#  error "TSRM TLS model not set"
# endif
	return ret;
#else
	return 0;
#endif
}/*}}}*/

TSRM_API bool tsrm_is_main_thread(void)
{/*{{{*/
	return in_main_thread;
}/*}}}*/

TSRM_API bool tsrm_is_shutdown(void)
{/*{{{*/
	return is_thread_shutdown;
}/*}}}*/

TSRM_API const char *tsrm_api_name(void)
{/*{{{*/
#ifdef TSRM_WIN32
	return "Windows Threads";
#else
	return "POSIX Threads";
#endif
}/*}}}*/

TSRM_API bool tsrm_is_managed_thread(void)
{/*{{{*/
	return tsrm_tls_get() ? true : false;
}/*}}}*/

#endif /* ZTS */
