/*
   +----------------------------------------------------------------------+
   | Thread Safe Resource Manager                                         |
   +----------------------------------------------------------------------+
   | Copyright (c) 1999-2011, Andi Gutmans, Sascha Schumann, Zeev Suraski |
   | This source file is subject to the TSRM license, that is bundled     |
   | with this package in the file LICENSE                                |
   +----------------------------------------------------------------------+
   | Authors: Zeev Suraski <zeev@php.net>                                 |
   +----------------------------------------------------------------------+
*/

#include "TSRM.h"

#ifdef ZTS

#include <stdio.h>
#include <stdarg.h>

#if ZEND_DEBUG
# include <assert.h>
# define TSRM_ASSERT(c) assert(c)
#else
# define TSRM_ASSERT(c)
#endif

typedef struct _tsrm_tls_entry tsrm_tls_entry;

/* TSRMLS_CACHE_DEFINE; is already done in Zend, as this file is always compiled statically. */
TSRMLS_CACHE_EXTERN();

struct _tsrm_tls_entry {
	void **storage;
	int count;
	THREAD_T thread_id;
	tsrm_tls_entry *next;
};


typedef struct {
	size_t size;
	ts_allocate_ctor ctor;
	ts_allocate_dtor dtor;
	size_t fast_offset;
	int done;
} tsrm_resource_type;


/* The memory manager table */
static tsrm_tls_entry **tsrm_tls_table=NULL;
static int tsrm_tls_table_size;
static ts_rsrc_id id_count;

/* The resource sizes table */
static tsrm_resource_type *resource_types_table=NULL;
static int resource_types_table_size;

/* Reserved space for fast globals access */
static size_t tsrm_reserved_pos = 0;
static size_t tsrm_reserved_size = 0;

static MUTEX_T tsmm_mutex;     /* thread-safe memory manager mutex */
static MUTEX_T tsrm_env_mutex; /* tsrm environ mutex */

/* New thread handlers */
static tsrm_thread_begin_func_t tsrm_new_thread_begin_handler = NULL;
static tsrm_thread_end_func_t tsrm_new_thread_end_handler = NULL;
static tsrm_shutdown_func_t tsrm_shutdown_handler = NULL;

/* Debug support */
int tsrm_error(int level, const char *format, ...);

/* Debug error level and output stream */
static int tsrm_error_level;
static FILE *tsrm_error_file;

#ifdef TSRM_DEBUG
#define TSRM_ERROR(args) tsrm_error args
#define TSRM_SAFE_RETURN_RSRC(array, offset, range) \
	{ \
		int unshuffled_offset = TSRM_UNSHUFFLE_RSRC_ID(offset); \
		\
		if (offset==0) { \
			return &array; \
		} else if ((unshuffled_offset)>=0 && (unshuffled_offset)<(range)) { \
			TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Successfully fetched resource id %d for thread id %ld - 0x%0.8X", \
						unshuffled_offset, (long) thread_resources->thread_id, array[unshuffled_offset])); \
			return array[unshuffled_offset]; \
		} else { \
			TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Resource id %d is out of range (%d..%d)", \
						unshuffled_offset, TSRM_SHUFFLE_RSRC_ID(0), TSRM_SHUFFLE_RSRC_ID(thread_resources->count-1))); \
			return NULL; \
		} \
	}
#else
#define TSRM_ERROR(args)
#define TSRM_SAFE_RETURN_RSRC(array, offset, range) \
	if (offset==0) { \
		return &array; \
	} else { \
		return array[TSRM_UNSHUFFLE_RSRC_ID(offset)]; \
	}
#endif

#ifdef TSRM_WIN32
static DWORD tls_key;
# define tsrm_tls_set(what) TlsSetValue(tls_key, (void*)(what))
# define tsrm_tls_get() TlsGetValue(tls_key)
#else
static pthread_key_t tls_key;
# define tsrm_tls_set(what) pthread_setspecific(tls_key, (void*)(what))
# define tsrm_tls_get() pthread_getspecific(tls_key)
#endif

TSRM_TLS bool in_main_thread = false;
TSRM_TLS bool is_thread_shutdown = false;

/* Startup TSRM (call once for the entire process) */
TSRM_API bool tsrm_startup(int expected_threads, int expected_resources, int debug_level, const char *debug_filename)
{/*{{{*/
#ifdef TSRM_WIN32
	tls_key = TlsAlloc();
#else
	pthread_key_create(&tls_key, 0);
#endif

	/* ensure singleton */
	in_main_thread = true;
	is_thread_shutdown = false;

	tsrm_error_file = stderr;
	tsrm_error_set(debug_level, debug_filename);
	tsrm_tls_table_size = expected_threads;

	tsrm_tls_table = (tsrm_tls_entry **) calloc(tsrm_tls_table_size, sizeof(tsrm_tls_entry *));
	if (!tsrm_tls_table) {
		TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Unable to allocate TLS table"));
		is_thread_shutdown = true;
		return 0;
	}
	id_count=0;

	resource_types_table_size = expected_resources;
	resource_types_table = (tsrm_resource_type *) calloc(resource_types_table_size, sizeof(tsrm_resource_type));
	if (!resource_types_table) {
		TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Unable to allocate resource types table"));
		is_thread_shutdown = true;
		free(tsrm_tls_table);
		return 0;
	}

	tsmm_mutex = tsrm_mutex_alloc();

	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Started up TSRM, %d expected threads, %d expected resources", expected_threads, expected_resources));

	tsrm_reserved_pos = 0;
	tsrm_reserved_size = 0;

	tsrm_env_mutex = tsrm_mutex_alloc();

	return 1;
}/*}}}*/
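
/* Usage sketch (hypothetical embedder code, not part of TSRM itself): an
 * embedder pairs tsrm_startup() with tsrm_shutdown() once per process. The
 * thread and resource counts are only sizing hints for the initial tables
 * and may be exceeded later.
 *
 *     if (!tsrm_startup(1, 1, 0, NULL)) {
 *         // startup failed; abort process initialization
 *     }
 *     // ...spawn worker threads, serve requests...
 *     tsrm_shutdown();
 */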

static void ts_free_resources(tsrm_tls_entry *thread_resources)
{
	/* Need to destroy in reverse order to respect dependencies. */
	for (int i = thread_resources->count - 1; i >= 0; i--) {
		if (!resource_types_table[i].done) {
			if (resource_types_table[i].dtor) {
				resource_types_table[i].dtor(thread_resources->storage[i]);
			}

			if (!resource_types_table[i].fast_offset) {
				free(thread_resources->storage[i]);
			}
		}
	}

	free(thread_resources->storage);
}

/* Shutdown TSRM (call once for the entire process) */
TSRM_API void tsrm_shutdown(void)
{/*{{{*/
	if (is_thread_shutdown) {
		/* shutdown must only occur once */
		return;
	}

	is_thread_shutdown = true;

	if (!in_main_thread) {
		/* only the main thread may shutdown tsrm */
		return;
	}

	for (int i=0; i<tsrm_tls_table_size; i++) {
		tsrm_tls_entry *p = tsrm_tls_table[i], *next_p;

		while (p) {
			next_p = p->next;
			if (resource_types_table) {
				/* This call will already free p->storage for us */
				ts_free_resources(p);
			} else {
				free(p->storage);
			}
			free(p);
			p = next_p;
		}
	}
	free(tsrm_tls_table);
	free(resource_types_table);
	tsrm_mutex_free(tsmm_mutex);
	tsrm_mutex_free(tsrm_env_mutex);
	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Shutdown TSRM"));
	if (tsrm_error_file!=stderr) {
		fclose(tsrm_error_file);
	}
#ifdef TSRM_WIN32
	TlsFree(tls_key);
#else
	pthread_setspecific(tls_key, 0);
	pthread_key_delete(tls_key);
#endif
	if (tsrm_shutdown_handler) {
		tsrm_shutdown_handler();
	}
	tsrm_new_thread_begin_handler = NULL;
	tsrm_new_thread_end_handler = NULL;
	tsrm_shutdown_handler = NULL;

	tsrm_reserved_pos = 0;
	tsrm_reserved_size = 0;
}/*}}}*/

/* {{{ */
/* environ lock api */
TSRM_API void tsrm_env_lock(void) {
	tsrm_mutex_lock(tsrm_env_mutex);
}

TSRM_API void tsrm_env_unlock(void) {
	tsrm_mutex_unlock(tsrm_env_mutex);
} /* }}} */
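
/* Usage sketch (hypothetical caller code): the process environment is global
 * state, so callers are expected to bracket any access to it with the environ
 * lock; getenv() below stands in for whatever environment access is needed.
 *
 *     tsrm_env_lock();
 *     const char *tz = getenv("TZ");
 *     tsrm_env_unlock();
 */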

/* enlarge the arrays for the already active threads */
static void tsrm_update_active_threads(void)
{/*{{{*/
	for (int i=0; i<tsrm_tls_table_size; i++) {
		tsrm_tls_entry *p = tsrm_tls_table[i];

		while (p) {
			if (p->count < id_count) {
				int j;

				p->storage = (void *) realloc(p->storage, sizeof(void *)*id_count);
				for (j=p->count; j<id_count; j++) {
					if (resource_types_table[j].fast_offset) {
						p->storage[j] = (void *) (((char*)p) + resource_types_table[j].fast_offset);
					} else {
						p->storage[j] = (void *) malloc(resource_types_table[j].size);
					}
					if (resource_types_table[j].ctor) {
						resource_types_table[j].ctor(p->storage[j]);
					}
				}
				p->count = id_count;
			}
			p = p->next;
		}
	}
}/*}}}*/


/* allocates a new thread-safe-resource id */
TSRM_API ts_rsrc_id ts_allocate_id(ts_rsrc_id *rsrc_id, size_t size, ts_allocate_ctor ctor, ts_allocate_dtor dtor)
{/*{{{*/
	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Obtaining a new resource id, %zu bytes", size));

	tsrm_mutex_lock(tsmm_mutex);

	/* obtain a resource id */
	*rsrc_id = TSRM_SHUFFLE_RSRC_ID(id_count++);
	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Obtained resource id %d", *rsrc_id));

	/* store the new resource type in the resource sizes table */
	if (resource_types_table_size < id_count) {
		tsrm_resource_type *_tmp;
		_tmp = (tsrm_resource_type *) realloc(resource_types_table, sizeof(tsrm_resource_type)*id_count);
		if (!_tmp) {
			TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Unable to allocate storage for resource"));
			*rsrc_id = 0;
			tsrm_mutex_unlock(tsmm_mutex);
			return 0;
		}
		resource_types_table = _tmp;
		resource_types_table_size = id_count;
	}
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].size = size;
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].ctor = ctor;
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].dtor = dtor;
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].fast_offset = 0;
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].done = 0;

	tsrm_update_active_threads();
	tsrm_mutex_unlock(tsmm_mutex);

	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Successfully allocated new resource id %d", *rsrc_id));
	return *rsrc_id;
}/*}}}*/
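
/* Usage sketch, assuming a hypothetical extension with its own globals struct
 * and constructor: the extension allocates one resource id at module startup
 * and later uses that id to reach its per-thread copy of the globals.
 *
 *     static ts_rsrc_id my_globals_id;
 *
 *     static void my_globals_ctor(void *ptr)
 *     {
 *         memset(ptr, 0, sizeof(struct my_globals));
 *     }
 *
 *     ts_allocate_id(&my_globals_id, sizeof(struct my_globals),
 *                    my_globals_ctor, NULL);
 */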


/* Reserve space for fast thread-safe-resources */
TSRM_API void tsrm_reserve(size_t size)
{/*{{{*/
	tsrm_reserved_pos = 0;
	tsrm_reserved_size = TSRM_ALIGNED_SIZE(size);
}/*}}}*/


/* allocates a new fast thread-safe-resource id */
TSRM_API ts_rsrc_id ts_allocate_fast_id(ts_rsrc_id *rsrc_id, size_t *offset, size_t size, ts_allocate_ctor ctor, ts_allocate_dtor dtor)
{/*{{{*/
	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Obtaining a new fast resource id, %zu bytes", size));

	tsrm_mutex_lock(tsmm_mutex);

	/* obtain a resource id */
	*rsrc_id = TSRM_SHUFFLE_RSRC_ID(id_count++);
	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Obtained resource id %d", *rsrc_id));

	size = TSRM_ALIGNED_SIZE(size);
	if (tsrm_reserved_size - tsrm_reserved_pos < size) {
		TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Unable to allocate space for fast resource"));
		*rsrc_id = 0;
		*offset = 0;
		tsrm_mutex_unlock(tsmm_mutex);
		return 0;
	}

	*offset = TSRM_ALIGNED_SIZE(sizeof(tsrm_tls_entry)) + tsrm_reserved_pos;
	tsrm_reserved_pos += size;

	/* store the new resource type in the resource sizes table */
	if (resource_types_table_size < id_count) {
		tsrm_resource_type *_tmp;
		_tmp = (tsrm_resource_type *) realloc(resource_types_table, sizeof(tsrm_resource_type)*id_count);
		if (!_tmp) {
			TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Unable to allocate storage for resource"));
			*rsrc_id = 0;
			tsrm_mutex_unlock(tsmm_mutex);
			return 0;
		}
		resource_types_table = _tmp;
		resource_types_table_size = id_count;
	}
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].size = size;
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].ctor = ctor;
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].dtor = dtor;
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].fast_offset = *offset;
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].done = 0;

	tsrm_update_active_threads();
	tsrm_mutex_unlock(tsmm_mutex);

	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Successfully allocated new resource id %d", *rsrc_id));
	return *rsrc_id;
}/*}}}*/
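
/* Usage sketch (hypothetical names): fast resources live inside the
 * per-thread tsrm_tls_entry allocation rather than in a separate malloc
 * block, so their total size must be reserved via tsrm_reserve() before
 * thread entries are created and before ts_allocate_fast_id() is called.
 *
 *     size_t my_offset;
 *
 *     tsrm_reserve(sizeof(struct my_globals));
 *     ts_allocate_fast_id(&my_globals_id, &my_offset,
 *                         sizeof(struct my_globals), my_globals_ctor, NULL);
 */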

static void set_thread_local_storage_resource_to(tsrm_tls_entry *thread_resource)
{
	tsrm_tls_set(thread_resource);
	TSRMLS_CACHE = thread_resource;
}

/* Must be called with tsmm_mutex held */
static void allocate_new_resource(tsrm_tls_entry **thread_resources_ptr, THREAD_T thread_id)
{/*{{{*/
	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Creating data structures for thread %x", thread_id));
	(*thread_resources_ptr) = (tsrm_tls_entry *) malloc(TSRM_ALIGNED_SIZE(sizeof(tsrm_tls_entry)) + tsrm_reserved_size);
	(*thread_resources_ptr)->storage = NULL;
	if (id_count > 0) {
		(*thread_resources_ptr)->storage = (void **) malloc(sizeof(void *)*id_count);
	}
	(*thread_resources_ptr)->count = id_count;
	(*thread_resources_ptr)->thread_id = thread_id;
	(*thread_resources_ptr)->next = NULL;

	/* Set thread local storage to this new thread resources structure */
	set_thread_local_storage_resource_to(*thread_resources_ptr);

	if (tsrm_new_thread_begin_handler) {
		tsrm_new_thread_begin_handler(thread_id);
	}
	for (int i=0; i<id_count; i++) {
		if (resource_types_table[i].done) {
			(*thread_resources_ptr)->storage[i] = NULL;
		} else {
			if (resource_types_table[i].fast_offset) {
				(*thread_resources_ptr)->storage[i] = (void *) (((char*)(*thread_resources_ptr)) + resource_types_table[i].fast_offset);
			} else {
				(*thread_resources_ptr)->storage[i] = (void *) malloc(resource_types_table[i].size);
			}
			if (resource_types_table[i].ctor) {
				resource_types_table[i].ctor((*thread_resources_ptr)->storage[i]);
			}
		}
	}

	if (tsrm_new_thread_end_handler) {
		tsrm_new_thread_end_handler(thread_id);
	}
}/*}}}*/

/* fetches the requested resource for the current thread */
TSRM_API void *ts_resource_ex(ts_rsrc_id id, THREAD_T *th_id)
{/*{{{*/
	THREAD_T thread_id;
	int hash_value;
	tsrm_tls_entry *thread_resources, **last_thread_resources;

	if (!th_id) {
		/* Fast path for looking up the resources for the current
		 * thread. It's used by just about every call to
		 * ts_resource_ex(). This avoids the need for a mutex lock
		 * and our hashtable lookup.
		 */
		thread_resources = tsrm_tls_get();

		if (thread_resources) {
			TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Fetching resource id %d for current thread %ld", id, (long) thread_resources->thread_id));
			/* Read a specific resource from the thread's resources.
			 * This is called outside of a mutex, so we have to be aware of external
			 * changes to the structure as we read it.
			 */
			TSRM_SAFE_RETURN_RSRC(thread_resources->storage, id, thread_resources->count);
		}
		thread_id = tsrm_thread_id();
	} else {
		thread_id = *th_id;
	}

	TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Fetching resource id %d for thread %ld", id, (long) thread_id));
	tsrm_mutex_lock(tsmm_mutex);

	hash_value = THREAD_HASH_OF(thread_id, tsrm_tls_table_size);
	thread_resources = tsrm_tls_table[hash_value];

	if (!thread_resources) {
		allocate_new_resource(&tsrm_tls_table[hash_value], thread_id);
		tsrm_mutex_unlock(tsmm_mutex);
		return ts_resource_ex(id, &thread_id);
	} else {
		last_thread_resources = &tsrm_tls_table[hash_value];
		while (thread_resources->thread_id != thread_id) {
			last_thread_resources = &thread_resources->next;
			if (thread_resources->next) {
				thread_resources = thread_resources->next;
			} else {
				allocate_new_resource(&thread_resources->next, thread_id);
				tsrm_mutex_unlock(tsmm_mutex);
				return ts_resource_ex(id, &thread_id);
			}
		}
	}

	/* It's possible that the current thread's resources are requested, and that we get here.
	 * This means that the TSRM key pointer and cached pointer are NULL, but there is still
	 * a thread resource associated with this ID in the hashtable. This can occur if a thread
	 * goes away, but its resources are never cleaned up, and then that thread ID is reused.
	 * Since we don't always have a way to know when a thread goes away, we can't clean up
	 * the thread's resources before the new thread spawns.
	 * To solve this issue, we free up the old thread resources gracefully (gracefully
	 * because there might still be open resources, like a database connection, that need to
	 * be shut down cleanly). After freeing them, we create the new resources for this thread
	 * as if the stale resources never existed in the first place. From that point forward,
	 * it is as if that situation never occurred.
	 * This situation isn't that bad in practice, because a child process containing threads
	 * will eventually be respawned anyway by the SAPI, so the stale threads won't last
	 * forever. */
	TSRM_ASSERT(thread_resources->thread_id == thread_id);
	if (thread_id == tsrm_thread_id() && !tsrm_tls_get()) {
		tsrm_tls_entry *next = thread_resources->next;
		/* In case extensions don't use the pointer passed to the dtor, but incorrectly
		 * use the global pointer, we need to set up the global pointer temporarily here. */
		set_thread_local_storage_resource_to(thread_resources);
		/* Free up the old resources from the old thread instance */
		ts_free_resources(thread_resources);
		free(thread_resources);
		/* Allocate a new resource at the same point in the linked list, and relink the next pointer */
		allocate_new_resource(last_thread_resources, thread_id);
		thread_resources = *last_thread_resources;
		thread_resources->next = next;
		/* We don't have to tail-call ts_resource_ex; we can take the fast path to the return
		 * because we already have the correct pointer. */
	}

	tsrm_mutex_unlock(tsmm_mutex);

	/* Read a specific resource from the thread's resources.
	 * This is called outside of a mutex, so we have to be aware of external
	 * changes to the structure as we read it.
	 */
	TSRM_SAFE_RETURN_RSRC(thread_resources->storage, id, thread_resources->count);
}/*}}}*/
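
/* Usage sketch (hypothetical names): passing a NULL thread id fetches the
 * resource for the calling thread via the fast TLS path above; the returned
 * pointer is that thread's private block for the given resource id.
 *
 *     struct my_globals *g =
 *         (struct my_globals *) ts_resource_ex(my_globals_id, NULL);
 *     g->request_count++;
 */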


/* frees all resources allocated for the current thread */
void ts_free_thread(void)
{/*{{{*/
	tsrm_tls_entry *thread_resources;
	THREAD_T thread_id = tsrm_thread_id();
	int hash_value;
	tsrm_tls_entry *last=NULL;

	TSRM_ASSERT(!in_main_thread);

	tsrm_mutex_lock(tsmm_mutex);
	hash_value = THREAD_HASH_OF(thread_id, tsrm_tls_table_size);
	thread_resources = tsrm_tls_table[hash_value];

	while (thread_resources) {
		if (thread_resources->thread_id == thread_id) {
			ts_free_resources(thread_resources);
			if (last) {
				last->next = thread_resources->next;
			} else {
				tsrm_tls_table[hash_value] = thread_resources->next;
			}
			tsrm_tls_set(0);
			free(thread_resources);
			break;
		}
		if (thread_resources->next) {
			last = thread_resources;
		}
		thread_resources = thread_resources->next;
	}
	tsrm_mutex_unlock(tsmm_mutex);
}/*}}}*/
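
/* Usage sketch (hypothetical pthread-style worker): a thread served by TSRM
 * should call ts_free_thread() right before it exits (never from the main
 * thread), so its destructors run and its hash bucket entry is unlinked.
 *
 *     static void *worker(void *arg)
 *     {
 *         // ...use ts_resource_ex() / tsrm_get_ls_cache()...
 *         ts_free_thread();
 *         return NULL;
 *     }
 */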

/* deallocates all occurrences of a given id */
void ts_free_id(ts_rsrc_id id)
{/*{{{*/
	int rsrc_id = TSRM_UNSHUFFLE_RSRC_ID(id);

	tsrm_mutex_lock(tsmm_mutex);

	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Freeing resource id %d", id));

	if (tsrm_tls_table) {
		for (int i=0; i<tsrm_tls_table_size; i++) {
			tsrm_tls_entry *p = tsrm_tls_table[i];

			while (p) {
				if (p->count > rsrc_id && p->storage[rsrc_id]) {
					if (resource_types_table) {
						if (resource_types_table[rsrc_id].dtor) {
							resource_types_table[rsrc_id].dtor(p->storage[rsrc_id]);
						}
						if (!resource_types_table[rsrc_id].fast_offset) {
							free(p->storage[rsrc_id]);
						}
					}
					p->storage[rsrc_id] = NULL;
				}
				p = p->next;
			}
		}
	}
	resource_types_table[rsrc_id].done = 1;

	tsrm_mutex_unlock(tsmm_mutex);

	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Successfully freed resource id %d", id));
}/*}}}*/


/*
 * Utility Functions
 */

/* Obtain the current thread id */
TSRM_API THREAD_T tsrm_thread_id(void)
{/*{{{*/
#ifdef TSRM_WIN32
	return GetCurrentThreadId();
#else
	return pthread_self();
#endif
}/*}}}*/


/* Allocate a mutex */
TSRM_API MUTEX_T tsrm_mutex_alloc(void)
{/*{{{*/
	MUTEX_T mutexp;
#ifdef TSRM_WIN32
	mutexp = malloc(sizeof(CRITICAL_SECTION));
	InitializeCriticalSection(mutexp);
#else
	mutexp = (pthread_mutex_t *)malloc(sizeof(pthread_mutex_t));
	pthread_mutex_init(mutexp,NULL);
#endif
#ifdef THR_DEBUG
	printf("Mutex created thread: %d\n",mythreadid());
#endif
	return( mutexp );
}/*}}}*/


/* Free a mutex */
TSRM_API void tsrm_mutex_free(MUTEX_T mutexp)
{/*{{{*/
	if (mutexp) {
#ifdef TSRM_WIN32
		DeleteCriticalSection(mutexp);
		free(mutexp);
#else
		pthread_mutex_destroy(mutexp);
		free(mutexp);
#endif
	}
#ifdef THR_DEBUG
	printf("Mutex freed thread: %d\n",mythreadid());
#endif
}/*}}}*/


/*
  Lock a mutex.
  A return value of 0 indicates success
*/
TSRM_API int tsrm_mutex_lock(MUTEX_T mutexp)
{/*{{{*/
	TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Mutex locked thread: %ld", tsrm_thread_id()));
#ifdef TSRM_WIN32
	EnterCriticalSection(mutexp);
	return 0;
#else
	return pthread_mutex_lock(mutexp);
#endif
}/*}}}*/


/*
  Unlock a mutex.
  A return value of 0 indicates success
*/
TSRM_API int tsrm_mutex_unlock(MUTEX_T mutexp)
{/*{{{*/
	TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Mutex unlocked thread: %ld", tsrm_thread_id()));
#ifdef TSRM_WIN32
	LeaveCriticalSection(mutexp);
	return 0;
#else
	return pthread_mutex_unlock(mutexp);
#endif
}/*}}}*/
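
/* Usage sketch: the mutex primitives above wrap a CRITICAL_SECTION on Windows
 * and a pthread_mutex_t elsewhere; a lock/unlock pair protects any shared
 * state an embedder keeps outside of TSRM's per-thread storage.
 *
 *     MUTEX_T my_lock = tsrm_mutex_alloc();
 *
 *     tsrm_mutex_lock(my_lock);
 *     // ...touch shared state...
 *     tsrm_mutex_unlock(my_lock);
 *
 *     tsrm_mutex_free(my_lock);
 */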

/*
  Changes the signal mask of the calling thread
*/
#ifdef HAVE_SIGPROCMASK
TSRM_API int tsrm_sigmask(int how, const sigset_t *set, sigset_t *oldset)
{/*{{{*/
	TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Changed sigmask in thread: %ld", tsrm_thread_id()));

	return pthread_sigmask(how, set, oldset);
}/*}}}*/
#endif


TSRM_API void *tsrm_set_new_thread_begin_handler(tsrm_thread_begin_func_t new_thread_begin_handler)
{/*{{{*/
	void *retval = (void *) tsrm_new_thread_begin_handler;

	tsrm_new_thread_begin_handler = new_thread_begin_handler;
	return retval;
}/*}}}*/


TSRM_API void *tsrm_set_new_thread_end_handler(tsrm_thread_end_func_t new_thread_end_handler)
{/*{{{*/
	void *retval = (void *) tsrm_new_thread_end_handler;

	tsrm_new_thread_end_handler = new_thread_end_handler;
	return retval;
}/*}}}*/


TSRM_API void *tsrm_set_shutdown_handler(tsrm_shutdown_func_t shutdown_handler)
{/*{{{*/
	void *retval = (void *) tsrm_shutdown_handler;

	tsrm_shutdown_handler = shutdown_handler;
	return retval;
}/*}}}*/
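
/* Usage sketch (hypothetical handler names): the three setters install
 * process-wide hooks that run when a new thread's resources are created or at
 * TSRM shutdown; each returns the previously installed handler as a void
 * pointer so it can be restored or chained.
 *
 *     void *prev_begin = tsrm_set_new_thread_begin_handler(my_thread_begin);
 *     tsrm_set_new_thread_end_handler(my_thread_end);
 *     tsrm_set_shutdown_handler(my_tsrm_shutdown);
 */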


/*
 * Debug support
 */

#ifdef TSRM_DEBUG
int tsrm_error(int level, const char *format, ...)
{/*{{{*/
	if (level<=tsrm_error_level) {
		va_list args;
		int size;

		fprintf(tsrm_error_file, "TSRM: ");
		va_start(args, format);
		size = vfprintf(tsrm_error_file, format, args);
		va_end(args);
		fprintf(tsrm_error_file, "\n");
		fflush(tsrm_error_file);
		return size;
	} else {
		return 0;
	}
}/*}}}*/
#endif


void tsrm_error_set(int level, const char *debug_filename)
{/*{{{*/
	tsrm_error_level = level;

#ifdef TSRM_DEBUG
	if (tsrm_error_file!=stderr) { /* close files opened earlier */
		fclose(tsrm_error_file);
	}

	if (debug_filename) {
		tsrm_error_file = fopen(debug_filename, "w");
		if (!tsrm_error_file) {
			tsrm_error_file = stderr;
		}
	} else {
		tsrm_error_file = stderr;
	}
#endif
}/*}}}*/

TSRM_API void *tsrm_get_ls_cache(void)
{/*{{{*/
	return tsrm_tls_get();
}/*}}}*/

/* Returns offset of tsrm_ls_cache slot from Thread Control Block address */
TSRM_API size_t tsrm_get_ls_cache_tcb_offset(void)
{/*{{{*/
#if defined(__APPLE__) && defined(__x86_64__)
	// TODO: Implement support for fast JIT ZTS code ???
	return 0;
#elif defined(__x86_64__) && defined(__GNUC__) && !defined(__FreeBSD__) && \
	!defined(__OpenBSD__) && !defined(__MUSL__) && !defined(__HAIKU__)
	size_t ret;

	asm ("movq _tsrm_ls_cache@gottpoff(%%rip),%0"
	     : "=r" (ret));
	return ret;
#elif defined(__i386__) && defined(__GNUC__) && !defined(__FreeBSD__) && \
	!defined(__OpenBSD__) && !defined(__MUSL__) && !defined(__HAIKU__)
	size_t ret;

	asm ("leal _tsrm_ls_cache@ntpoff,%0"
	     : "=r" (ret));
	return ret;
#elif defined(__aarch64__)
	size_t ret;

# ifdef __APPLE__
	// Points to struct TLVDescriptor for _tsrm_ls_cache on macOS.
	asm("adrp %0, #__tsrm_ls_cache@TLVPPAGE\n\t"
	    "ldr %0, [%0, #__tsrm_ls_cache@TLVPPAGEOFF]"
	    : "=r" (ret));
# elif defined(TSRM_TLS_MODEL_DEFAULT)
	/* Surplus Static TLS space isn't guaranteed. */
	ret = 0;
# elif defined(TSRM_TLS_MODEL_INITIAL_EXEC)
	asm("adrp %0, :gottprel:_tsrm_ls_cache\n\t"
	    "ldr %0, [%0, #:gottprel_lo12:_tsrm_ls_cache]"
	    : "=r" (ret));
# elif defined(TSRM_TLS_MODEL_LOCAL_EXEC)
	asm("mov %0, xzr\n\t"
	    "add %0, %0, #:tprel_hi12:_tsrm_ls_cache, lsl #12\n\t"
	    "add %0, %0, #:tprel_lo12_nc:_tsrm_ls_cache"
	    : "=r" (ret));
# else
# error "TSRM TLS model not set"
# endif
	return ret;
#else
	return 0;
#endif
}/*}}}*/

TSRM_API bool tsrm_is_main_thread(void)
{/*{{{*/
	return in_main_thread;
}/*}}}*/

TSRM_API bool tsrm_is_shutdown(void)
{/*{{{*/
	return is_thread_shutdown;
}/*}}}*/

TSRM_API const char *tsrm_api_name(void)
{/*{{{*/
#ifdef TSRM_WIN32
	return "Windows Threads";
#else
	return "POSIX Threads";
#endif
}/*}}}*/

TSRM_API bool tsrm_is_managed_thread(void)
{/*{{{*/
	return tsrm_tls_get() ? true : false;
}/*}}}*/

#endif /* ZTS */