1 /*
2 +----------------------------------------------------------------------+
3 | Thread Safe Resource Manager |
4 +----------------------------------------------------------------------+
5 | Copyright (c) 1999-2011, Andi Gutmans, Sascha Schumann, Zeev Suraski |
6 | This source file is subject to the TSRM license, that is bundled |
7 | with this package in the file LICENSE |
8 +----------------------------------------------------------------------+
9 | Authors: Zeev Suraski <zeev@php.net> |
10 +----------------------------------------------------------------------+
11 */
12
13 #include "TSRM.h"
14
15 #ifdef ZTS
16
17 #include <stdio.h>
18 #include <stdarg.h>
19
20 #if ZEND_DEBUG
21 # include <assert.h>
22 # define TSRM_ASSERT(c) assert(c)
23 #else
24 # define TSRM_ASSERT(c)
25 #endif
26
27 typedef struct _tsrm_tls_entry tsrm_tls_entry;
28
29 /* TSRMLS_CACHE_DEFINE; is already done in Zend, this is being always compiled statically. */
30 TSRMLS_CACHE_EXTERN();
31
32 struct _tsrm_tls_entry {
33 void **storage;
34 int count;
35 THREAD_T thread_id;
36 tsrm_tls_entry *next;
37 };
38
39
40 typedef struct {
41 size_t size;
42 ts_allocate_ctor ctor;
43 ts_allocate_dtor dtor;
44 size_t fast_offset;
45 int done;
46 } tsrm_resource_type;
47
48
49 /* The memory manager table */
50 static tsrm_tls_entry **tsrm_tls_table=NULL;
51 static int tsrm_tls_table_size;
52 static ts_rsrc_id id_count;
53
54 /* The resource sizes table */
55 static tsrm_resource_type *resource_types_table=NULL;
56 static int resource_types_table_size;
57
58 /* Reserved space for fast globals access */
59 static size_t tsrm_reserved_pos = 0;
60 static size_t tsrm_reserved_size = 0;
61
62 static MUTEX_T tsmm_mutex; /* thread-safe memory manager mutex */
63 static MUTEX_T tsrm_env_mutex; /* tsrm environ mutex */
64
65 /* New thread handlers */
66 static tsrm_thread_begin_func_t tsrm_new_thread_begin_handler = NULL;
67 static tsrm_thread_end_func_t tsrm_new_thread_end_handler = NULL;
68 static tsrm_shutdown_func_t tsrm_shutdown_handler = NULL;
69
70 /* Debug support */
71 int tsrm_error(int level, const char *format, ...);
72
/* Debug error-reporting state */
74 static int tsrm_error_level;
75 static FILE *tsrm_error_file;
76
77 #ifdef TSRM_DEBUG
78 #define TSRM_ERROR(args) tsrm_error args
79 #define TSRM_SAFE_RETURN_RSRC(array, offset, range) \
80 { \
81 int unshuffled_offset = TSRM_UNSHUFFLE_RSRC_ID(offset); \
82 \
83 if (offset==0) { \
84 return &array; \
85 } else if ((unshuffled_offset)>=0 && (unshuffled_offset)<(range)) { \
86 TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Successfully fetched resource id %d for thread id %ld - 0x%0.8X", \
87 unshuffled_offset, (long) thread_resources->thread_id, array[unshuffled_offset])); \
88 return array[unshuffled_offset]; \
89 } else { \
90 TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Resource id %d is out of range (%d..%d)", \
91 unshuffled_offset, TSRM_SHUFFLE_RSRC_ID(0), TSRM_SHUFFLE_RSRC_ID(thread_resources->count-1))); \
92 return NULL; \
93 } \
94 }
95 #else
96 #define TSRM_ERROR(args)
97 #define TSRM_SAFE_RETURN_RSRC(array, offset, range) \
98 if (offset==0) { \
99 return &array; \
100 } else { \
101 return array[TSRM_UNSHUFFLE_RSRC_ID(offset)]; \
102 }
103 #endif
104
105 #ifdef TSRM_WIN32
106 static DWORD tls_key;
107 # define tsrm_tls_set(what) TlsSetValue(tls_key, (void*)(what))
108 # define tsrm_tls_get() TlsGetValue(tls_key)
109 #else
110 static pthread_key_t tls_key;
111 # define tsrm_tls_set(what) pthread_setspecific(tls_key, (void*)(what))
112 # define tsrm_tls_get() pthread_getspecific(tls_key)
113 #endif
114
115 TSRM_TLS uint8_t in_main_thread = 0;
116 TSRM_TLS uint8_t is_thread_shutdown = 0;
117
118 /* Startup TSRM (call once for the entire process) */
tsrm_startup(int expected_threads,int expected_resources,int debug_level,const char * debug_filename)119 TSRM_API int tsrm_startup(int expected_threads, int expected_resources, int debug_level, const char *debug_filename)
120 {/*{{{*/
121 #ifdef TSRM_WIN32
122 tls_key = TlsAlloc();
123 #else
124 pthread_key_create(&tls_key, 0);
125 #endif
126
127 /* ensure singleton */
128 in_main_thread = 1;
129 is_thread_shutdown = 0;
130
131 tsrm_error_file = stderr;
132 tsrm_error_set(debug_level, debug_filename);
133 tsrm_tls_table_size = expected_threads;
134
135 tsrm_tls_table = (tsrm_tls_entry **) calloc(tsrm_tls_table_size, sizeof(tsrm_tls_entry *));
136 if (!tsrm_tls_table) {
137 TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Unable to allocate TLS table"));
138 is_thread_shutdown = 1;
139 return 0;
140 }
141 id_count=0;
142
143 resource_types_table_size = expected_resources;
144 resource_types_table = (tsrm_resource_type *) calloc(resource_types_table_size, sizeof(tsrm_resource_type));
145 if (!resource_types_table) {
146 TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Unable to allocate resource types table"));
147 is_thread_shutdown = 1;
148 free(tsrm_tls_table);
149 return 0;
150 }
151
152 tsmm_mutex = tsrm_mutex_alloc();
153
154 TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Started up TSRM, %d expected threads, %d expected resources", expected_threads, expected_resources));
155
156 tsrm_reserved_pos = 0;
157 tsrm_reserved_size = 0;
158
159 tsrm_env_mutex = tsrm_mutex_alloc();
160
161 return 1;
162 }/*}}}*/
163
/* Run the destructors for, and release, every resource held by one thread
 * entry, then free the storage vector itself (the entry is NOT freed). */
static void ts_free_resources(tsrm_tls_entry *thread_resources)
{
	int i;

	/* Need to destroy in reverse order to respect dependencies. */
	for (i = thread_resources->count - 1; i >= 0; i--) {
		if (resource_types_table[i].done) {
			continue;
		}
		if (resource_types_table[i].dtor) {
			resource_types_table[i].dtor(thread_resources->storage[i]);
		}
		/* Fast resources live inside the entry's own allocation; only
		 * individually malloc'd slots are freed here. */
		if (!resource_types_table[i].fast_offset) {
			free(thread_resources->storage[i]);
		}
	}

	free(thread_resources->storage);
}
181
/* Shutdown TSRM (call once for the entire process).
 * Frees every per-thread entry in every hash bucket, the resource type
 * table, the TSRM mutexes and the native TLS key.  Repeated calls and
 * calls from non-main threads are ignored. */
TSRM_API void tsrm_shutdown(void)
{/*{{{*/
	int i;

	if (is_thread_shutdown) {
		/* shutdown must only occur once */
		return;
	}

	is_thread_shutdown = 1;

	if (!in_main_thread) {
		/* only the main thread may shutdown tsrm */
		return;
	}

	for (i=0; i<tsrm_tls_table_size; i++) {
		tsrm_tls_entry *p = tsrm_tls_table[i], *next_p;

		while (p) {
			next_p = p->next;
			if (resource_types_table) {
				/* This call will already free p->storage for us */
				ts_free_resources(p);
			} else {
				free(p->storage);
			}
			free(p);
			p = next_p;
		}
	}
	free(tsrm_tls_table);
	free(resource_types_table);
	tsrm_mutex_free(tsmm_mutex);
	tsrm_mutex_free(tsrm_env_mutex);
	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Shutdown TSRM"));
	/* Close a debug log opened by tsrm_error_set(); never close stderr. */
	if (tsrm_error_file!=stderr) {
		fclose(tsrm_error_file);
	}
#ifdef TSRM_WIN32
	TlsFree(tls_key);
#else
	pthread_setspecific(tls_key, 0);
	pthread_key_delete(tls_key);
#endif
	/* Give the embedder a final callback before the handlers are cleared. */
	if (tsrm_shutdown_handler) {
		tsrm_shutdown_handler();
	}
	tsrm_new_thread_begin_handler = NULL;
	tsrm_new_thread_end_handler = NULL;
	tsrm_shutdown_handler = NULL;

	tsrm_reserved_pos  = 0;
	tsrm_reserved_size = 0;
}/*}}}*/
238
239 /* {{{ */
240 /* environ lock api */
tsrm_env_lock(void)241 TSRM_API void tsrm_env_lock(void) {
242 tsrm_mutex_lock(tsrm_env_mutex);
243 }
244
/* Releases the environment lock taken by tsrm_env_lock(). */
TSRM_API void tsrm_env_unlock(void) {
	tsrm_mutex_unlock(tsrm_env_mutex);
} /* }}} */
248
249 /* enlarge the arrays for the already active threads */
tsrm_update_active_threads(void)250 static void tsrm_update_active_threads(void)
251 {/*{{{*/
252 int i;
253
254 for (i=0; i<tsrm_tls_table_size; i++) {
255 tsrm_tls_entry *p = tsrm_tls_table[i];
256
257 while (p) {
258 if (p->count < id_count) {
259 int j;
260
261 p->storage = (void *) realloc(p->storage, sizeof(void *)*id_count);
262 for (j=p->count; j<id_count; j++) {
263 if (resource_types_table[j].fast_offset) {
264 p->storage[j] = (void *) (((char*)p) + resource_types_table[j].fast_offset);
265 } else {
266 p->storage[j] = (void *) malloc(resource_types_table[j].size);
267 }
268 if (resource_types_table[j].ctor) {
269 resource_types_table[j].ctor(p->storage[j]);
270 }
271 }
272 p->count = id_count;
273 }
274 p = p->next;
275 }
276 }
277 }/*}}}*/
278
279
280 /* allocates a new thread-safe-resource id */
ts_allocate_id(ts_rsrc_id * rsrc_id,size_t size,ts_allocate_ctor ctor,ts_allocate_dtor dtor)281 TSRM_API ts_rsrc_id ts_allocate_id(ts_rsrc_id *rsrc_id, size_t size, ts_allocate_ctor ctor, ts_allocate_dtor dtor)
282 {/*{{{*/
283 TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Obtaining a new resource id, %d bytes", size));
284
285 tsrm_mutex_lock(tsmm_mutex);
286
287 /* obtain a resource id */
288 *rsrc_id = TSRM_SHUFFLE_RSRC_ID(id_count++);
289 TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Obtained resource id %d", *rsrc_id));
290
291 /* store the new resource type in the resource sizes table */
292 if (resource_types_table_size < id_count) {
293 tsrm_resource_type *_tmp;
294 _tmp = (tsrm_resource_type *) realloc(resource_types_table, sizeof(tsrm_resource_type)*id_count);
295 if (!_tmp) {
296 TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Unable to allocate storage for resource"));
297 *rsrc_id = 0;
298 tsrm_mutex_unlock(tsmm_mutex);
299 return 0;
300 }
301 resource_types_table = _tmp;
302 resource_types_table_size = id_count;
303 }
304 resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].size = size;
305 resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].ctor = ctor;
306 resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].dtor = dtor;
307 resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].fast_offset = 0;
308 resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].done = 0;
309
310 tsrm_update_active_threads();
311 tsrm_mutex_unlock(tsmm_mutex);
312
313 TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Successfully allocated new resource id %d", *rsrc_id));
314 return *rsrc_id;
315 }/*}}}*/
316
317
/* Reserve space for fast thread-safe-resources.
 * size: total bytes to set aside inside each tsrm_tls_entry allocation;
 * rounded up to TSRM's alignment.  Resets the consumption cursor, so it
 * must be called before any ts_allocate_fast_id(). */
TSRM_API void tsrm_reserve(size_t size)
{/*{{{*/
	tsrm_reserved_pos = 0;
	tsrm_reserved_size = TSRM_ALIGNED_SIZE(size);
}/*}}}*/
324
325
326 /* allocates a new fast thread-safe-resource id */
ts_allocate_fast_id(ts_rsrc_id * rsrc_id,size_t * offset,size_t size,ts_allocate_ctor ctor,ts_allocate_dtor dtor)327 TSRM_API ts_rsrc_id ts_allocate_fast_id(ts_rsrc_id *rsrc_id, size_t *offset, size_t size, ts_allocate_ctor ctor, ts_allocate_dtor dtor)
328 {/*{{{*/
329 TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Obtaining a new fast resource id, %d bytes", size));
330
331 tsrm_mutex_lock(tsmm_mutex);
332
333 /* obtain a resource id */
334 *rsrc_id = TSRM_SHUFFLE_RSRC_ID(id_count++);
335 TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Obtained resource id %d", *rsrc_id));
336
337 size = TSRM_ALIGNED_SIZE(size);
338 if (tsrm_reserved_size - tsrm_reserved_pos < size) {
339 TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Unable to allocate space for fast resource"));
340 *rsrc_id = 0;
341 *offset = 0;
342 tsrm_mutex_unlock(tsmm_mutex);
343 return 0;
344 }
345
346 *offset = TSRM_ALIGNED_SIZE(sizeof(tsrm_tls_entry)) + tsrm_reserved_pos;
347 tsrm_reserved_pos += size;
348
349 /* store the new resource type in the resource sizes table */
350 if (resource_types_table_size < id_count) {
351 tsrm_resource_type *_tmp;
352 _tmp = (tsrm_resource_type *) realloc(resource_types_table, sizeof(tsrm_resource_type)*id_count);
353 if (!_tmp) {
354 TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Unable to allocate storage for resource"));
355 *rsrc_id = 0;
356 tsrm_mutex_unlock(tsmm_mutex);
357 return 0;
358 }
359 resource_types_table = _tmp;
360 resource_types_table_size = id_count;
361 }
362 resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].size = size;
363 resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].ctor = ctor;
364 resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].dtor = dtor;
365 resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].fast_offset = *offset;
366 resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].done = 0;
367
368 tsrm_update_active_threads();
369 tsrm_mutex_unlock(tsmm_mutex);
370
371 TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Successfully allocated new resource id %d", *rsrc_id));
372 return *rsrc_id;
373 }/*}}}*/
374
/* Publish the given entry as the current thread's resource block, both in
 * native TLS and in the cached pointer used by TSRMLS_CACHE lookups. */
static void set_thread_local_storage_resource_to(tsrm_tls_entry *thread_resource)
{
	tsrm_tls_set(thread_resource);
	TSRMLS_CACHE = thread_resource;
}
380
/* Must be called with tsmm_mutex held */
/* Creates a fresh tsrm_tls_entry for thread_id, links it in at
 * *thread_resources_ptr, allocates (or embeds) storage for every resource
 * id known so far and runs each resource's ctor.
 * NOTE(review): the malloc results below are not checked; an OOM here
 * would crash — confirm whether callers can tolerate a failure path. */
static void allocate_new_resource(tsrm_tls_entry **thread_resources_ptr, THREAD_T thread_id)
{/*{{{*/
	int i;

	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Creating data structures for thread %x", thread_id));
	/* A single allocation holds the entry header plus the reserved area
	 * used by "fast" resources (see tsrm_reserve()). */
	(*thread_resources_ptr) = (tsrm_tls_entry *) malloc(TSRM_ALIGNED_SIZE(sizeof(tsrm_tls_entry)) + tsrm_reserved_size);
	(*thread_resources_ptr)->storage = NULL;
	if (id_count > 0) {
		(*thread_resources_ptr)->storage = (void **) malloc(sizeof(void *)*id_count);
	}
	(*thread_resources_ptr)->count = id_count;
	(*thread_resources_ptr)->thread_id = thread_id;
	(*thread_resources_ptr)->next = NULL;

	/* Set thread local storage to this new thread resources structure */
	/* Done before the ctors run so they can rely on the TLS pointer. */
	set_thread_local_storage_resource_to(*thread_resources_ptr);

	if (tsrm_new_thread_begin_handler) {
		tsrm_new_thread_begin_handler(thread_id);
	}
	for (i=0; i<id_count; i++) {
		if (resource_types_table[i].done) {
			/* Resource id was retired via ts_free_id(); leave the slot empty. */
			(*thread_resources_ptr)->storage[i] = NULL;
		} else {
			if (resource_types_table[i].fast_offset) {
				/* Fast resource: points into the entry's own allocation. */
				(*thread_resources_ptr)->storage[i] = (void *) (((char*)(*thread_resources_ptr)) + resource_types_table[i].fast_offset);
			} else {
				(*thread_resources_ptr)->storage[i] = (void *) malloc(resource_types_table[i].size);
			}
			if (resource_types_table[i].ctor) {
				resource_types_table[i].ctor((*thread_resources_ptr)->storage[i]);
			}
		}
	}

	if (tsrm_new_thread_end_handler) {
		tsrm_new_thread_end_handler(thread_id);
	}
}/*}}}*/
421
/* fetches the requested resource for the current thread */
/* id: shuffled resource id; id 0 yields the address of the thread's
 * storage vector itself (see TSRM_SAFE_RETURN_RSRC).
 * th_id: NULL to look up the calling thread (lock-free TLS fast path);
 * otherwise the target thread's id.  Missing entries are created on
 * demand under tsmm_mutex.  Returns the resource pointer, or NULL only
 * in the debug build when id is out of range. */
TSRM_API void *ts_resource_ex(ts_rsrc_id id, THREAD_T *th_id)
{/*{{{*/
	THREAD_T thread_id;
	int hash_value;
	tsrm_tls_entry *thread_resources, **last_thread_resources;

	if (!th_id) {
		/* Fast path for looking up the resources for the current
		 * thread. Its used by just about every call to
		 * ts_resource_ex(). This avoids the need for a mutex lock
		 * and our hashtable lookup.
		 */
		thread_resources = tsrm_tls_get();

		if (thread_resources) {
			TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Fetching resource id %d for current thread %d", id, (long) thread_resources->thread_id));
			/* Read a specific resource from the thread's resources.
			 * This is called outside of a mutex, so have to be aware about external
			 * changes to the structure as we read it.
			 */
			TSRM_SAFE_RETURN_RSRC(thread_resources->storage, id, thread_resources->count);
		}
		thread_id = tsrm_thread_id();
	} else {
		thread_id = *th_id;
	}

	TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Fetching resource id %d for thread %ld", id, (long) thread_id));
	tsrm_mutex_lock(tsmm_mutex);

	hash_value = THREAD_HASH_OF(thread_id, tsrm_tls_table_size);
	thread_resources = tsrm_tls_table[hash_value];

	if (!thread_resources) {
		/* Empty bucket: create the entry, then re-enter to take the normal
		 * lookup path with the mutex released. */
		allocate_new_resource(&tsrm_tls_table[hash_value], thread_id);
		tsrm_mutex_unlock(tsmm_mutex);
		return ts_resource_ex(id, &thread_id);
	} else {
		/* Walk the bucket's collision chain; last_thread_resources always
		 * points at the link that references the current node. */
		last_thread_resources = &tsrm_tls_table[hash_value];
		while (thread_resources->thread_id != thread_id) {
			last_thread_resources = &thread_resources->next;
			if (thread_resources->next) {
				thread_resources = thread_resources->next;
			} else {
				allocate_new_resource(&thread_resources->next, thread_id);
				tsrm_mutex_unlock(tsmm_mutex);
				return ts_resource_ex(id, &thread_id);
			}
		}
	}

	/* It's possible that the current thread resources are requested, and that we get here.
	 * This means that the TSRM key pointer and cached pointer are NULL, but there is still
	 * a thread resource associated with this ID in the hashtable. This can occur if a thread
	 * goes away, but its resources are never cleaned up, and then that thread ID is reused.
	 * Since we don't always have a way to know when a thread goes away, we can't clean up
	 * the thread's resources before the new thread spawns.
	 * To solve this issue, we'll free up the old thread resources gracefully (gracefully
	 * because there might still be resources open like database connection which need to
	 * be shut down cleanly). After freeing up, we'll create the new resources for this thread
	 * as if the stale resources never existed in the first place. From that point forward,
	 * it is as if that situation never occurred.
	 * The fact that this situation happens isn't that bad because a child process containing
	 * threads will eventually be respawned anyway by the SAPI, so the stale threads won't last
	 * forever. */
	TSRM_ASSERT(thread_resources->thread_id == thread_id);
	if (thread_id == tsrm_thread_id() && !tsrm_tls_get()) {
		tsrm_tls_entry *next = thread_resources->next;
		/* In case that extensions don't use the pointer passed from the dtor, but incorrectly
		 * use the global pointer, we need to setup the global pointer temporarily here. */
		set_thread_local_storage_resource_to(thread_resources);
		/* Free up the old resource from the old thread instance */
		ts_free_resources(thread_resources);
		free(thread_resources);
		/* Allocate a new resource at the same point in the linked list, and relink the next pointer */
		allocate_new_resource(last_thread_resources, thread_id);
		thread_resources = *last_thread_resources;
		thread_resources->next = next;
		/* We don't have to tail-call ts_resource_ex, we can take the fast path to the return
		 * because we already have the correct pointer. */
	}

	tsrm_mutex_unlock(tsmm_mutex);

	/* Read a specific resource from the thread's resources.
	 * This is called outside of a mutex, so have to be aware about external
	 * changes to the structure as we read it.
	 */
	TSRM_SAFE_RETURN_RSRC(thread_resources->storage, id, thread_resources->count);
}/*}}}*/
513
514
515 /* frees all resources allocated for the current thread */
ts_free_thread(void)516 void ts_free_thread(void)
517 {/*{{{*/
518 tsrm_tls_entry *thread_resources;
519 THREAD_T thread_id = tsrm_thread_id();
520 int hash_value;
521 tsrm_tls_entry *last=NULL;
522
523 TSRM_ASSERT(!in_main_thread);
524
525 tsrm_mutex_lock(tsmm_mutex);
526 hash_value = THREAD_HASH_OF(thread_id, tsrm_tls_table_size);
527 thread_resources = tsrm_tls_table[hash_value];
528
529 while (thread_resources) {
530 if (thread_resources->thread_id == thread_id) {
531 ts_free_resources(thread_resources);
532 if (last) {
533 last->next = thread_resources->next;
534 } else {
535 tsrm_tls_table[hash_value] = thread_resources->next;
536 }
537 tsrm_tls_set(0);
538 free(thread_resources);
539 break;
540 }
541 if (thread_resources->next) {
542 last = thread_resources;
543 }
544 thread_resources = thread_resources->next;
545 }
546 tsrm_mutex_unlock(tsmm_mutex);
547 }/*}}}*/
548
549 /* deallocates all occurrences of a given id */
ts_free_id(ts_rsrc_id id)550 void ts_free_id(ts_rsrc_id id)
551 {/*{{{*/
552 int i;
553 int j = TSRM_UNSHUFFLE_RSRC_ID(id);
554
555 tsrm_mutex_lock(tsmm_mutex);
556
557 TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Freeing resource id %d", id));
558
559 if (tsrm_tls_table) {
560 for (i=0; i<tsrm_tls_table_size; i++) {
561 tsrm_tls_entry *p = tsrm_tls_table[i];
562
563 while (p) {
564 if (p->count > j && p->storage[j]) {
565 if (resource_types_table) {
566 if (resource_types_table[j].dtor) {
567 resource_types_table[j].dtor(p->storage[j]);
568 }
569 if (!resource_types_table[j].fast_offset) {
570 free(p->storage[j]);
571 }
572 }
573 p->storage[j] = NULL;
574 }
575 p = p->next;
576 }
577 }
578 }
579 resource_types_table[j].done = 1;
580
581 tsrm_mutex_unlock(tsmm_mutex);
582
583 TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Successfully freed resource id %d", id));
584 }/*}}}*/
585
586
587 /*
588 * Utility Functions
589 */
590
591 /* Obtain the current thread id */
tsrm_thread_id(void)592 TSRM_API THREAD_T tsrm_thread_id(void)
593 {/*{{{*/
594 #ifdef TSRM_WIN32
595 return GetCurrentThreadId();
596 #else
597 return pthread_self();
598 #endif
599 }/*}}}*/
600
601
602 /* Allocate a mutex */
tsrm_mutex_alloc(void)603 TSRM_API MUTEX_T tsrm_mutex_alloc(void)
604 {/*{{{*/
605 MUTEX_T mutexp;
606 #ifdef TSRM_WIN32
607 mutexp = malloc(sizeof(CRITICAL_SECTION));
608 InitializeCriticalSection(mutexp);
609 #else
610 mutexp = (pthread_mutex_t *)malloc(sizeof(pthread_mutex_t));
611 pthread_mutex_init(mutexp,NULL);
612 #endif
613 #ifdef THR_DEBUG
614 printf("Mutex created thread: %d\n",mythreadid());
615 #endif
616 return( mutexp );
617 }/*}}}*/
618
619
620 /* Free a mutex */
tsrm_mutex_free(MUTEX_T mutexp)621 TSRM_API void tsrm_mutex_free(MUTEX_T mutexp)
622 {/*{{{*/
623 if (mutexp) {
624 #ifdef TSRM_WIN32
625 DeleteCriticalSection(mutexp);
626 free(mutexp);
627 #else
628 pthread_mutex_destroy(mutexp);
629 free(mutexp);
630 #endif
631 }
632 #ifdef THR_DEBUG
633 printf("Mutex freed thread: %d\n",mythreadid());
634 #endif
635 }/*}}}*/
636
637
638 /*
639 Lock a mutex.
640 A return value of 0 indicates success
641 */
tsrm_mutex_lock(MUTEX_T mutexp)642 TSRM_API int tsrm_mutex_lock(MUTEX_T mutexp)
643 {/*{{{*/
644 TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Mutex locked thread: %ld", tsrm_thread_id()));
645 #ifdef TSRM_WIN32
646 EnterCriticalSection(mutexp);
647 return 0;
648 #else
649 return pthread_mutex_lock(mutexp);
650 #endif
651 }/*}}}*/
652
653
654 /*
655 Unlock a mutex.
656 A return value of 0 indicates success
657 */
tsrm_mutex_unlock(MUTEX_T mutexp)658 TSRM_API int tsrm_mutex_unlock(MUTEX_T mutexp)
659 {/*{{{*/
660 TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Mutex unlocked thread: %ld", tsrm_thread_id()));
661 #ifdef TSRM_WIN32
662 LeaveCriticalSection(mutexp);
663 return 0;
664 #else
665 return pthread_mutex_unlock(mutexp);
666 #endif
667 }/*}}}*/
668
669 /*
670 Changes the signal mask of the calling thread
671 */
672 #ifdef HAVE_SIGPROCMASK
tsrm_sigmask(int how,const sigset_t * set,sigset_t * oldset)673 TSRM_API int tsrm_sigmask(int how, const sigset_t *set, sigset_t *oldset)
674 {/*{{{*/
675 TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Changed sigmask in thread: %ld", tsrm_thread_id()));
676
677 return pthread_sigmask(how, set, oldset);
678 }/*}}}*/
679 #endif
680
681
tsrm_set_new_thread_begin_handler(tsrm_thread_begin_func_t new_thread_begin_handler)682 TSRM_API void *tsrm_set_new_thread_begin_handler(tsrm_thread_begin_func_t new_thread_begin_handler)
683 {/*{{{*/
684 void *retval = (void *) tsrm_new_thread_begin_handler;
685
686 tsrm_new_thread_begin_handler = new_thread_begin_handler;
687 return retval;
688 }/*}}}*/
689
690
tsrm_set_new_thread_end_handler(tsrm_thread_end_func_t new_thread_end_handler)691 TSRM_API void *tsrm_set_new_thread_end_handler(tsrm_thread_end_func_t new_thread_end_handler)
692 {/*{{{*/
693 void *retval = (void *) tsrm_new_thread_end_handler;
694
695 tsrm_new_thread_end_handler = new_thread_end_handler;
696 return retval;
697 }/*}}}*/
698
699
tsrm_set_shutdown_handler(tsrm_shutdown_func_t shutdown_handler)700 TSRM_API void *tsrm_set_shutdown_handler(tsrm_shutdown_func_t shutdown_handler)
701 {/*{{{*/
702 void *retval = (void *) tsrm_shutdown_handler;
703
704 tsrm_shutdown_handler = shutdown_handler;
705 return retval;
706 }/*}}}*/
707
708
709 /*
710 * Debug support
711 */
712
#ifdef TSRM_DEBUG
/* Emit a printf-style diagnostic line ("TSRM: ...") to the TSRM error
 * stream when `level` is enabled by tsrm_error_set().  Returns the number
 * of characters produced by the formatted body, or 0 when filtered out. */
int tsrm_error(int level, const char *format, ...)
{/*{{{*/
	va_list args;
	int written;

	/* Filtered out: nothing to do. */
	if (level > tsrm_error_level) {
		return 0;
	}

	fprintf(tsrm_error_file, "TSRM: ");
	va_start(args, format);
	written = vfprintf(tsrm_error_file, format, args);
	va_end(args);
	fprintf(tsrm_error_file, "\n");
	fflush(tsrm_error_file);
	return written;
}/*}}}*/
#endif
732
733
tsrm_error_set(int level,const char * debug_filename)734 void tsrm_error_set(int level, const char *debug_filename)
735 {/*{{{*/
736 tsrm_error_level = level;
737
738 #ifdef TSRM_DEBUG
739 if (tsrm_error_file!=stderr) { /* close files opened earlier */
740 fclose(tsrm_error_file);
741 }
742
743 if (debug_filename) {
744 tsrm_error_file = fopen(debug_filename, "w");
745 if (!tsrm_error_file) {
746 tsrm_error_file = stderr;
747 }
748 } else {
749 tsrm_error_file = stderr;
750 }
751 #endif
752 }/*}}}*/
753
/* Returns the calling thread's cached tsrm_tls_entry pointer from native
 * TLS (NULL if the thread's resources have not been set up yet). */
TSRM_API void *tsrm_get_ls_cache(void)
{/*{{{*/
	return tsrm_tls_get();
}/*}}}*/
758
/* Returns offset of tsrm_ls_cache slot from Thread Control Block address */
/* Consumed by JIT code to emit direct TLS loads; returns 0 on platforms
 * where no suitable TLS addressing scheme is implemented here. */
TSRM_API size_t tsrm_get_ls_cache_tcb_offset(void)
{/*{{{*/
#if defined(__APPLE__) && defined(__x86_64__)
	// TODO: Implement support for fast JIT ZTS code ???
	return 0;
#elif defined(__x86_64__) && defined(__GNUC__) && !defined(__FreeBSD__) && \
	!defined(__OpenBSD__) && !defined(__MUSL__) && !defined(__HAIKU__)
	size_t ret;

	/* Thread-pointer-relative offset of _tsrm_ls_cache via the GOT
	 * (initial-exec TLS relocation). */
	asm ("movq _tsrm_ls_cache@gottpoff(%%rip),%0"
          : "=r" (ret));
	return ret;
#elif defined(__i386__) && defined(__GNUC__) && !defined(__FreeBSD__) && \
	!defined(__OpenBSD__) && !defined(__MUSL__) && !defined(__HAIKU__)
	size_t ret;

	/* Negative thread-pointer offset of _tsrm_ls_cache (initial-exec). */
	asm ("leal _tsrm_ls_cache@ntpoff,%0"
          : "=r" (ret));
	return ret;
#elif defined(__aarch64__)
	size_t ret;

# ifdef __APPLE__
	// Points to struct TLVDecriptor for _tsrm_ls_cache in macOS.
	asm("adrp %0, #__tsrm_ls_cache@TLVPPAGE\n\t"
	    "ldr %0, [%0, #__tsrm_ls_cache@TLVPPAGEOFF]"
          : "=r" (ret));
# else
	/* Compose the local-exec TLS offset from its hi12/lo12 relocations. */
	asm("mov %0, xzr\n\t"
	    "add %0, %0, #:tprel_hi12:_tsrm_ls_cache, lsl #12\n\t"
	    "add %0, %0, #:tprel_lo12_nc:_tsrm_ls_cache"
          : "=r" (ret));
# endif
	return ret;
#else
	return 0;
#endif
}/*}}}*/
798
/* Returns 1 when called from the thread that ran tsrm_startup(), else 0. */
TSRM_API uint8_t tsrm_is_main_thread(void)
{/*{{{*/
	return in_main_thread;
}/*}}}*/
803
/* Returns 1 once tsrm_shutdown() has run (or startup failed), else 0. */
TSRM_API uint8_t tsrm_is_shutdown(void)
{/*{{{*/
	return is_thread_shutdown;
}/*}}}*/
808
/* Returns a human-readable name of the threading API compiled in. */
TSRM_API const char *tsrm_api_name(void)
{/*{{{*/
#ifdef TSRM_WIN32
	return "Windows Threads";
#else
	return "POSIX Threads";
#endif
}/*}}}*/
817
818 #endif /* ZTS */
819