1 /*
2 +----------------------------------------------------------------------+
3 | Thread Safe Resource Manager |
4 +----------------------------------------------------------------------+
5 | Copyright (c) 1999-2011, Andi Gutmans, Sascha Schumann, Zeev Suraski |
6 | This source file is subject to the TSRM license, that is bundled |
7 | with this package in the file LICENSE |
8 +----------------------------------------------------------------------+
9 | Authors: Zeev Suraski <zeev@zend.com> |
10 +----------------------------------------------------------------------+
11 */
12
13 #include "TSRM.h"
14
15 #ifdef ZTS
16
17 #include <stdio.h>
18
19 #if HAVE_STDARG_H
20 #include <stdarg.h>
21 #endif
22
typedef struct _tsrm_tls_entry tsrm_tls_entry;

#if defined(TSRM_WIN32)
/* TSRMLS_CACHE_DEFINE; is already done in Zend, this is being always compiled statically. */
#endif

/* Per-thread resource bookkeeping: one entry per registered thread,
 * chained into a hash bucket of tsrm_tls_table. */
struct _tsrm_tls_entry {
	void **storage;        /* per-resource data pointers, indexed by unshuffled resource id */
	int count;             /* number of slots allocated in storage */
	THREAD_T thread_id;    /* owning thread */
	tsrm_tls_entry *next;  /* next entry in the same hash bucket */
};
35
36
/* Descriptor for one resource type: the per-thread storage size plus the
 * constructor/destructor callbacks registered via ts_allocate_id(). */
typedef struct {
	size_t size;           /* bytes to allocate per thread for this resource */
	ts_allocate_ctor ctor; /* runs on freshly allocated storage (may be NULL) */
	ts_allocate_dtor dtor; /* runs before storage is freed (may be NULL) */
	int done;              /* set by ts_free_id(): slot is retired, do not reuse */
} tsrm_resource_type;
43
44
/* The memory manager table: hash table of per-thread entry chains,
 * indexed by THREAD_HASH_OF(thread_id, tsrm_tls_table_size). */
static tsrm_tls_entry **tsrm_tls_table=NULL;
static int tsrm_tls_table_size;
static ts_rsrc_id id_count;   /* number of resource ids handed out so far */

/* The resource sizes table, indexed by unshuffled resource id. */
static tsrm_resource_type *resource_types_table=NULL;
static int resource_types_table_size;


static MUTEX_T tsmm_mutex;	/* thread-safe memory manager mutex */

/* New thread handlers: optional callbacks fired while a new thread's
 * resources are being created/finalized (see allocate_new_resource()). */
static tsrm_thread_begin_func_t tsrm_new_thread_begin_handler = NULL;
static tsrm_thread_end_func_t tsrm_new_thread_end_handler = NULL;
static tsrm_shutdown_func_t tsrm_shutdown_handler = NULL;

/* Debug support */
int tsrm_error(int level, const char *format, ...);

/* Debug support: current error-reporting level and output stream
 * (configured by tsrm_error_set()). */
static int tsrm_error_level;
static FILE *tsrm_error_file;
68
#if TSRM_DEBUG
/* Debug builds route TSRM_ERROR((level, fmt, ...)) through tsrm_error().
 * Note the double parentheses at call sites: "args" is a complete
 * parenthesized argument list. */
#define TSRM_ERROR(args) tsrm_error args
/* Return the resource at "offset" from a thread's storage vector "array",
 * range-checking the unshuffled id against "range" and logging the result.
 * offset==0 is a special case returning the address of the vector itself.
 * Expands to a return statement: only usable inside functions returning
 * void*. */
#define TSRM_SAFE_RETURN_RSRC(array, offset, range)	\
	{ \
		int unshuffled_offset = TSRM_UNSHUFFLE_RSRC_ID(offset); \
		\
		if (offset==0) { \
			return &array; \
		} else if ((unshuffled_offset)>=0 && (unshuffled_offset)<(range)) { \
			TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Successfully fetched resource id %d for thread id %ld - 0x%0.8X", \
						unshuffled_offset, (long) thread_resources->thread_id, array[unshuffled_offset])); \
			return array[unshuffled_offset]; \
		} else { \
			TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Resource id %d is out of range (%d..%d)", \
						unshuffled_offset, TSRM_SHUFFLE_RSRC_ID(0), TSRM_SHUFFLE_RSRC_ID(thread_resources->count-1))); \
			return NULL; \
		} \
	}
#else
/* Release builds: no logging and no range check on the resource id. */
#define TSRM_ERROR(args)
#define TSRM_SAFE_RETURN_RSRC(array, offset, range)	\
	if (offset==0) { \
		return &array; \
	} else { \
		return array[TSRM_UNSHUFFLE_RSRC_ID(offset)]; \
	}
#endif
96
/* Per-platform thread-local-storage primitives.  Each branch defines a
 * process-wide key plus tsrm_tls_set()/tsrm_tls_get() wrappers used to
 * associate the current thread with its tsrm_tls_entry. */
#if defined(GNUPTH)
static pth_key_t tls_key;
# define tsrm_tls_set(what)		pth_key_setdata(tls_key, (void*)(what))
# define tsrm_tls_get()			pth_key_getdata(tls_key)

#elif defined(PTHREADS)
/* Thread local storage */
static pthread_key_t tls_key;
# define tsrm_tls_set(what)		pthread_setspecific(tls_key, (void*)(what))
# define tsrm_tls_get()			pthread_getspecific(tls_key)

#elif defined(TSRM_ST)
static int tls_key;
# define tsrm_tls_set(what)		st_thread_setspecific(tls_key, (void*)(what))
# define tsrm_tls_get()			st_thread_getspecific(tls_key)

#elif defined(TSRM_WIN32)
static DWORD tls_key;
# define tsrm_tls_set(what)		TlsSetValue(tls_key, (void*)(what))
# define tsrm_tls_get()			TlsGetValue(tls_key)

#elif defined(BETHREADS)
static int32 tls_key;
# define tsrm_tls_set(what)		tls_set(tls_key, (void*)(what))
# define tsrm_tls_get()			(tsrm_tls_entry*)tls_get(tls_key)

#else
/* No TLS available: get always yields NULL, so every lookup takes the
 * slow (mutex + hash table) path. */
# define tsrm_tls_set(what)
# define tsrm_tls_get()			NULL
# warning tsrm_set_interpreter_context is probably broken on this platform
#endif
128
/* Nonzero only in the thread that ran tsrm_startup(); used to make
 * tsrm_shutdown() a no-op in all other threads. */
TSRM_TLS uint8_t in_main_thread = 0;
130
131 /* Startup TSRM (call once for the entire process) */
TSRM_API int tsrm_startup(int expected_threads, int expected_resources, int debug_level, char *debug_filename)
{/*{{{*/
	/* Create the platform-specific TLS key first, so tsrm_tls_set/get
	 * work for all threads created from here on. */
#if defined(GNUPTH)
	pth_init();
	pth_key_create(&tls_key, 0);
#elif defined(PTHREADS)
	pthread_key_create( &tls_key, 0 );
#elif defined(TSRM_ST)
	st_init();
	st_key_create(&tls_key, 0);
#elif defined(TSRM_WIN32)
	tls_key = TlsAlloc();
#elif defined(BETHREADS)
	tls_key = tls_allocate();
#endif

	/* ensure singleton */
	in_main_thread = 1;

	/* tsrm_error_file must be valid before tsrm_error_set(), which may
	 * close a previously opened log file. */
	tsrm_error_file = stderr;
	tsrm_error_set(debug_level, debug_filename);
	tsrm_tls_table_size = expected_threads;

	tsrm_tls_table = (tsrm_tls_entry **) calloc(tsrm_tls_table_size, sizeof(tsrm_tls_entry *));
	if (!tsrm_tls_table) {
		TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Unable to allocate TLS table"));
		return 0;
	}
	id_count=0;

	resource_types_table_size = expected_resources;
	resource_types_table = (tsrm_resource_type *) calloc(resource_types_table_size, sizeof(tsrm_resource_type));
	if (!resource_types_table) {
		TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Unable to allocate resource types table"));
		/* roll back the first allocation so a retry starts clean */
		free(tsrm_tls_table);
		tsrm_tls_table = NULL;
		return 0;
	}

	/* NOTE(review): tsrm_mutex_alloc() result is not checked here — an
	 * OOM would leave tsmm_mutex NULL; confirm callers tolerate that. */
	tsmm_mutex = tsrm_mutex_alloc();

	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Started up TSRM, %d expected threads, %d expected resources", expected_threads, expected_resources));
	return 1;
}/*}}}*/
176
177
178 /* Shutdown TSRM (call once for the entire process) */
TSRM_API void tsrm_shutdown(void)
{/*{{{*/
	int i;

	if (!in_main_thread) {
		/* ensure singleton: only the thread that ran tsrm_startup()
		 * may tear TSRM down */
		return;
	}

	/* Destroy the resources of every thread still registered in the
	 * hash table, running dtors before freeing storage. */
	if (tsrm_tls_table) {
		for (i=0; i<tsrm_tls_table_size; i++) {
			tsrm_tls_entry *p = tsrm_tls_table[i], *next_p;

			while (p) {
				int j;

				next_p = p->next;
				for (j=0; j<p->count; j++) {
					if (p->storage[j]) {
						/* ids retired via ts_free_id() have done==1;
						 * their dtor must not run again */
						if (resource_types_table && !resource_types_table[j].done && resource_types_table[j].dtor) {
							resource_types_table[j].dtor(p->storage[j]);
						}
						free(p->storage[j]);
					}
				}
				free(p->storage);
				free(p);
				p = next_p;
			}
		}
		free(tsrm_tls_table);
		tsrm_tls_table = NULL;
	}
	if (resource_types_table) {
		free(resource_types_table);
		resource_types_table=NULL;
	}
	tsrm_mutex_free(tsmm_mutex);
	tsmm_mutex = NULL;
	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Shutdown TSRM"));
	/* Close the debug log if one was opened (never close stderr). */
	if (tsrm_error_file!=stderr) {
		fclose(tsrm_error_file);
	}
	/* Release the platform TLS key. */
#if defined(GNUPTH)
	pth_kill();
#elif defined(PTHREADS)
	pthread_setspecific(tls_key, 0);
	pthread_key_delete(tls_key);
#elif defined(TSRM_WIN32)
	TlsFree(tls_key);
#endif
	if (tsrm_shutdown_handler) {
		tsrm_shutdown_handler();
	}
	tsrm_new_thread_begin_handler = NULL;
	tsrm_new_thread_end_handler = NULL;
	tsrm_shutdown_handler = NULL;
}/*}}}*/
237
238
239 /* allocates a new thread-safe-resource id */
ts_allocate_id(ts_rsrc_id * rsrc_id,size_t size,ts_allocate_ctor ctor,ts_allocate_dtor dtor)240 TSRM_API ts_rsrc_id ts_allocate_id(ts_rsrc_id *rsrc_id, size_t size, ts_allocate_ctor ctor, ts_allocate_dtor dtor)
241 {/*{{{*/
242 int i;
243
244 TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Obtaining a new resource id, %d bytes", size));
245
246 tsrm_mutex_lock(tsmm_mutex);
247
248 /* obtain a resource id */
249 *rsrc_id = TSRM_SHUFFLE_RSRC_ID(id_count++);
250 TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Obtained resource id %d", *rsrc_id));
251
252 /* store the new resource type in the resource sizes table */
253 if (resource_types_table_size < id_count) {
254 resource_types_table = (tsrm_resource_type *) realloc(resource_types_table, sizeof(tsrm_resource_type)*id_count);
255 if (!resource_types_table) {
256 tsrm_mutex_unlock(tsmm_mutex);
257 TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Unable to allocate storage for resource"));
258 *rsrc_id = 0;
259 return 0;
260 }
261 resource_types_table_size = id_count;
262 }
263 resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].size = size;
264 resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].ctor = ctor;
265 resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].dtor = dtor;
266 resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].done = 0;
267
268 /* enlarge the arrays for the already active threads */
269 for (i=0; i<tsrm_tls_table_size; i++) {
270 tsrm_tls_entry *p = tsrm_tls_table[i];
271
272 while (p) {
273 if (p->count < id_count) {
274 int j;
275
276 p->storage = (void *) realloc(p->storage, sizeof(void *)*id_count);
277 for (j=p->count; j<id_count; j++) {
278 p->storage[j] = (void *) malloc(resource_types_table[j].size);
279 if (resource_types_table[j].ctor) {
280 resource_types_table[j].ctor(p->storage[j]);
281 }
282 }
283 p->count = id_count;
284 }
285 p = p->next;
286 }
287 }
288 tsrm_mutex_unlock(tsmm_mutex);
289
290 TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Successfully allocated new resource id %d", *rsrc_id));
291 return *rsrc_id;
292 }/*}}}*/
293
294
/* Create and initialize a tsrm_tls_entry for "thread_id" and store it at
 * *thread_resources_ptr (a hash-bucket slot or a ->next link).
 *
 * Locking contract: the CALLER must hold tsmm_mutex; this function
 * RELEASES the mutex before returning (see the unlock at the bottom).
 */
static void allocate_new_resource(tsrm_tls_entry **thread_resources_ptr, THREAD_T thread_id)
{/*{{{*/
	int i;

	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Creating data structures for thread %x", thread_id));
	/* NOTE(review): malloc() results are not checked here; OOM would
	 * dereference NULL below — consider hardening. */
	(*thread_resources_ptr) = (tsrm_tls_entry *) malloc(sizeof(tsrm_tls_entry));
	(*thread_resources_ptr)->storage = NULL;
	if (id_count > 0) {
		(*thread_resources_ptr)->storage = (void **) malloc(sizeof(void *)*id_count);
	}
	(*thread_resources_ptr)->count = id_count;
	(*thread_resources_ptr)->thread_id = thread_id;
	(*thread_resources_ptr)->next = NULL;

	/* Set thread local storage to this new thread resources structure */
	tsrm_tls_set(*thread_resources_ptr);

	if (tsrm_new_thread_begin_handler) {
		tsrm_new_thread_begin_handler(thread_id);
	}
	/* Allocate and construct storage for every known resource id;
	 * ids retired via ts_free_id() get a NULL slot. */
	for (i=0; i<id_count; i++) {
		if (resource_types_table[i].done) {
			(*thread_resources_ptr)->storage[i] = NULL;
		} else
		{
			(*thread_resources_ptr)->storage[i] = (void *) malloc(resource_types_table[i].size);
			if (resource_types_table[i].ctor) {
				resource_types_table[i].ctor((*thread_resources_ptr)->storage[i]);
			}
		}
	}

	if (tsrm_new_thread_end_handler) {
		tsrm_new_thread_end_handler(thread_id);
	}

	/* Pairs with the lock taken by our callers (ts_resource_ex() /
	 * tsrm_new_interpreter_context()). */
	tsrm_mutex_unlock(tsmm_mutex);
}/*}}}*/
333
334
335 /* fetches the requested resource for the current thread */
TSRM_API void *ts_resource_ex(ts_rsrc_id id, THREAD_T *th_id)
{/*{{{*/
	THREAD_T thread_id;
	int hash_value;
	tsrm_tls_entry *thread_resources;

	if (!th_id) {
		/* Fast path for looking up the resources for the current
		 * thread. Its used by just about every call to
		 * ts_resource_ex(). This avoids the need for a mutex lock
		 * and our hashtable lookup.
		 */
		thread_resources = tsrm_tls_get();

		if (thread_resources) {
			TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Fetching resource id %d for current thread %d", id, (long) thread_resources->thread_id));
			/* Read a specific resource from the thread's resources.
			 * This is called outside of a mutex, so have to be aware about external
			 * changes to the structure as we read it.
			 */
			TSRM_SAFE_RETURN_RSRC(thread_resources->storage, id, thread_resources->count);
		}
		/* TLS slot empty for this thread: fall through to the slow path. */
		thread_id = tsrm_thread_id();
	} else {
		thread_id = *th_id;
	}

	TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Fetching resource id %d for thread %ld", id, (long) thread_id));
	tsrm_mutex_lock(tsmm_mutex);

	/* Slow path: find (or create) this thread's entry in the hash table. */
	hash_value = THREAD_HASH_OF(thread_id, tsrm_tls_table_size);
	thread_resources = tsrm_tls_table[hash_value];

	if (!thread_resources) {
		/* allocate_new_resource() releases tsmm_mutex before returning,
		 * then the recursive call retries the lookup. */
		allocate_new_resource(&tsrm_tls_table[hash_value], thread_id);
		return ts_resource_ex(id, &thread_id);
	} else {
		 do {
			if (thread_resources->thread_id == thread_id) {
				break;
			}
			if (thread_resources->next) {
				thread_resources = thread_resources->next;
			} else {
				allocate_new_resource(&thread_resources->next, thread_id);
				return ts_resource_ex(id, &thread_id);
				/*
				 * thread_resources = thread_resources->next;
				 * break;
				 */
			}
		 } while (thread_resources);
	}
	tsrm_mutex_unlock(tsmm_mutex);
	/* Read a specific resource from the thread's resources.
	 * This is called outside of a mutex, so have to be aware about external
	 * changes to the structure as we read it.
	 */
	TSRM_SAFE_RETURN_RSRC(thread_resources->storage, id, thread_resources->count);
}/*}}}*/
396
397 /* frees an interpreter context. You are responsible for making sure that
398 * it is not linked into the TSRM hash, and not marked as the current interpreter */
tsrm_free_interpreter_context(void * context)399 void tsrm_free_interpreter_context(void *context)
400 {/*{{{*/
401 tsrm_tls_entry *next, *thread_resources = (tsrm_tls_entry*)context;
402 int i;
403
404 while (thread_resources) {
405 next = thread_resources->next;
406
407 for (i=0; i<thread_resources->count; i++) {
408 if (resource_types_table[i].dtor) {
409 resource_types_table[i].dtor(thread_resources->storage[i]);
410 }
411 }
412 for (i=0; i<thread_resources->count; i++) {
413 free(thread_resources->storage[i]);
414 }
415 free(thread_resources->storage);
416 free(thread_resources);
417 thread_resources = next;
418 }
419 }/*}}}*/
420
tsrm_set_interpreter_context(void * new_ctx)421 void *tsrm_set_interpreter_context(void *new_ctx)
422 {/*{{{*/
423 tsrm_tls_entry *current;
424
425 current = tsrm_tls_get();
426
427 /* TODO: unlink current from the global linked list, and replace it
428 * it with the new context, protected by mutex where/if appropriate */
429
430 /* Set thread local storage to this new thread resources structure */
431 tsrm_tls_set(new_ctx);
432
433 /* return old context, so caller can restore it when they're done */
434 return current;
435 }/*}}}*/
436
437
438 /* allocates a new interpreter context */
tsrm_new_interpreter_context(void)439 void *tsrm_new_interpreter_context(void)
440 {/*{{{*/
441 tsrm_tls_entry *new_ctx, *current;
442 THREAD_T thread_id;
443
444 thread_id = tsrm_thread_id();
445 tsrm_mutex_lock(tsmm_mutex);
446
447 current = tsrm_tls_get();
448
449 allocate_new_resource(&new_ctx, thread_id);
450
451 /* switch back to the context that was in use prior to our creation
452 * of the new one */
453 return tsrm_set_interpreter_context(current);
454 }/*}}}*/
455
456
457 /* frees all resources allocated for the current thread */
void ts_free_thread(void)
{/*{{{*/
	tsrm_tls_entry *thread_resources;
	int i;
	THREAD_T thread_id = tsrm_thread_id();
	int hash_value;
	tsrm_tls_entry *last=NULL;  /* predecessor of the node being examined */

	tsrm_mutex_lock(tsmm_mutex);
	hash_value = THREAD_HASH_OF(thread_id, tsrm_tls_table_size);
	thread_resources = tsrm_tls_table[hash_value];

	while (thread_resources) {
		if (thread_resources->thread_id == thread_id) {
			/* run all dtors before freeing any storage: a dtor may
			 * still reach into sibling resources */
			for (i=0; i<thread_resources->count; i++) {
				if (resource_types_table[i].dtor) {
					resource_types_table[i].dtor(thread_resources->storage[i]);
				}
			}
			for (i=0; i<thread_resources->count; i++) {
				free(thread_resources->storage[i]);
			}
			free(thread_resources->storage);
			/* unlink this entry from the bucket chain */
			if (last) {
				last->next = thread_resources->next;
			} else {
				tsrm_tls_table[hash_value] = thread_resources->next;
			}
			/* clear TLS so a later lookup re-registers the thread */
			tsrm_tls_set(0);
			free(thread_resources);
			break;
		}
		/* "last" is only advanced when a successor exists; when there is
		 * no successor the loop terminates anyway, so this is equivalent
		 * to advancing unconditionally */
		if (thread_resources->next) {
			last = thread_resources;
		}
		thread_resources = thread_resources->next;
	}
	tsrm_mutex_unlock(tsmm_mutex);
}/*}}}*/
497
498
499 /* frees all resources allocated for all threads except current */
void ts_free_worker_threads(void)
{/*{{{*/
	tsrm_tls_entry *thread_resources;
	int i;
	THREAD_T thread_id = tsrm_thread_id();  /* the one thread to keep */
	int hash_value;
	tsrm_tls_entry *last=NULL;  /* predecessor of the node being examined */

	tsrm_mutex_lock(tsmm_mutex);
	hash_value = THREAD_HASH_OF(thread_id, tsrm_tls_table_size);
	/* NOTE(review): only the current thread's hash bucket is walked, so
	 * workers hashing into other buckets are untouched — presumably
	 * intentional for the embedding servers this supports; verify. */
	thread_resources = tsrm_tls_table[hash_value];

	while (thread_resources) {
		if (thread_resources->thread_id != thread_id) {
			/* run all dtors before freeing any storage */
			for (i=0; i<thread_resources->count; i++) {
				if (resource_types_table[i].dtor) {
					resource_types_table[i].dtor(thread_resources->storage[i]);
				}
			}
			for (i=0; i<thread_resources->count; i++) {
				free(thread_resources->storage[i]);
			}
			free(thread_resources->storage);
			/* unlink the entry, then resume from the successor */
			if (last) {
				last->next = thread_resources->next;
			} else {
				tsrm_tls_table[hash_value] = thread_resources->next;
			}
			free(thread_resources);
			if (last) {
				thread_resources = last->next;
			} else {
				thread_resources = tsrm_tls_table[hash_value];
			}
		} else {
			/* current thread: keep it and move on */
			if (thread_resources->next) {
				last = thread_resources;
			}
			thread_resources = thread_resources->next;
		}
	}
	tsrm_mutex_unlock(tsmm_mutex);
}/*}}}*/
543
544
545 /* deallocates all occurrences of a given id */
/* Deallocates all per-thread occurrences of resource "id" (running its
 * dtor first) and retires the id so newly created threads get a NULL
 * slot for it. */
void ts_free_id(ts_rsrc_id id)
{/*{{{*/
	int i;
	int j = TSRM_UNSHUFFLE_RSRC_ID(id);

	tsrm_mutex_lock(tsmm_mutex);

	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Freeing resource id %d", id));

	if (tsrm_tls_table) {
		for (i=0; i<tsrm_tls_table_size; i++) {
			tsrm_tls_entry *p = tsrm_tls_table[i];

			while (p) {
				if (p->count > j && p->storage[j]) {
					if (resource_types_table && resource_types_table[j].dtor) {
						resource_types_table[j].dtor(p->storage[j]);
					}
					free(p->storage[j]);
					p->storage[j] = NULL;
				}
				p = p->next;
			}
		}
	}
	/* Retire the id.  Guard the table pointer and the index: the
	 * original wrote resource_types_table[j].done unconditionally, which
	 * crashes when the table is NULL (already torn down) or when handed
	 * an out-of-range id — the loop above already guards both cases. */
	if (resource_types_table && j >= 0 && j < resource_types_table_size) {
		resource_types_table[j].done = 1;
	}

	tsrm_mutex_unlock(tsmm_mutex);

	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Successfully freed resource id %d", id));
}/*}}}*/
577
578
579
580
581 /*
582 * Utility Functions
583 */
584
585 /* Obtain the current thread id */
/* Obtain the current thread id using the platform's native call. */
TSRM_API THREAD_T tsrm_thread_id(void)
{/*{{{*/
#ifdef TSRM_WIN32
	return GetCurrentThreadId();
#elif defined(GNUPTH)
	return pth_self();
#elif defined(PTHREADS)
	return pthread_self();
#elif defined(NSAPI)
	return systhread_current();
#elif defined(PI3WEB)
	return PIThread_getCurrent();
#elif defined(TSRM_ST)
	return st_thread_self();
#elif defined(BETHREADS)
	return find_thread(NULL);
#endif
}/*}}}*/
604
605
606 /* Allocate a mutex */
/* Allocate and initialize a platform mutex.  The returned handle is
 * released with tsrm_mutex_free(). */
TSRM_API MUTEX_T tsrm_mutex_alloc(void)
{/*{{{*/
	MUTEX_T mutexp;
	/* NOTE(review): the malloc() results below are not checked before
	 * use; OOM would crash in the init call — consider hardening. */
#ifdef TSRM_WIN32
	mutexp = malloc(sizeof(CRITICAL_SECTION));
	InitializeCriticalSection(mutexp);
#elif defined(GNUPTH)
	mutexp = (MUTEX_T) malloc(sizeof(*mutexp));
	pth_mutex_init(mutexp);
#elif defined(PTHREADS)
	mutexp = (pthread_mutex_t *)malloc(sizeof(pthread_mutex_t));
	pthread_mutex_init(mutexp,NULL);
#elif defined(NSAPI)
	mutexp = crit_init();
#elif defined(PI3WEB)
	mutexp = PIPlatform_allocLocalMutex();
#elif defined(TSRM_ST)
	mutexp = st_mutex_new();
#elif defined(BETHREADS)
	/* BeOS benaphore: atomic counter plus a semaphore taken only under
	 * contention */
	mutexp = (beos_ben*)malloc(sizeof(beos_ben));
	mutexp->ben = 0;
	mutexp->sem = create_sem(1, "PHP sempahore");
#endif
#ifdef THR_DEBUG
	printf("Mutex created thread: %d\n",mythreadid());
#endif
	return( mutexp );
}/*}}}*/
635
636
637 /* Free a mutex */
/* Destroy and free a mutex previously obtained from tsrm_mutex_alloc().
 * NULL is tolerated (no-op). */
TSRM_API void tsrm_mutex_free(MUTEX_T mutexp)
{/*{{{*/
	if (mutexp) {
#ifdef TSRM_WIN32
		DeleteCriticalSection(mutexp);
		free(mutexp);
#elif defined(GNUPTH)
		/* pth mutexes need no explicit destroy; just release the memory */
		free(mutexp);
#elif defined(PTHREADS)
		pthread_mutex_destroy(mutexp);
		free(mutexp);
#elif defined(NSAPI)
		crit_terminate(mutexp);
#elif defined(PI3WEB)
		PISync_delete(mutexp);
#elif defined(TSRM_ST)
		st_mutex_destroy(mutexp);
#elif defined(BETHREADS)
		delete_sem(mutexp->sem);
		free(mutexp);
#endif
	}
#ifdef THR_DEBUG
	printf("Mutex freed thread: %d\n",mythreadid());
#endif
}/*}}}*/
664
665
666 /*
667 Lock a mutex.
668 A return value of 0 indicates success
669 */
TSRM_API int tsrm_mutex_lock(MUTEX_T mutexp)
{/*{{{*/
	TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Mutex locked thread: %ld", tsrm_thread_id()));
#ifdef TSRM_WIN32
	EnterCriticalSection(mutexp);
	return 0;
#elif defined(GNUPTH)
	/* pth_mutex_acquire() returns TRUE on success, so invert to the
	 * 0-on-success convention used by this API */
	if (pth_mutex_acquire(mutexp, 0, NULL)) {
		return 0;
	}
	return -1;
#elif defined(PTHREADS)
	return pthread_mutex_lock(mutexp);
#elif defined(NSAPI)
	crit_enter(mutexp);
	return 0;
#elif defined(PI3WEB)
	return PISync_lock(mutexp);
#elif defined(TSRM_ST)
	return st_mutex_lock(mutexp);
#elif defined(BETHREADS)
	/* benaphore: take the semaphore only when the counter shows
	 * contention */
	if (atomic_add(&mutexp->ben, 1) != 0)
		return acquire_sem(mutexp->sem);
	return 0;
#endif
}/*}}}*/
696
697
698 /*
699 Unlock a mutex.
700 A return value of 0 indicates success
701 */
TSRM_API int tsrm_mutex_unlock(MUTEX_T mutexp)
{/*{{{*/
	TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Mutex unlocked thread: %ld", tsrm_thread_id()));
#ifdef TSRM_WIN32
	LeaveCriticalSection(mutexp);
	return 0;
#elif defined(GNUPTH)
	/* pth_mutex_release() returns TRUE on success, so invert to the
	 * 0-on-success convention used by this API */
	if (pth_mutex_release(mutexp)) {
		return 0;
	}
	return -1;
#elif defined(PTHREADS)
	return pthread_mutex_unlock(mutexp);
#elif defined(NSAPI)
	crit_exit(mutexp);
	return 0;
#elif defined(PI3WEB)
	return PISync_unlock(mutexp);
#elif defined(TSRM_ST)
	return st_mutex_unlock(mutexp);
#elif defined(BETHREADS)
	/* benaphore: release the semaphore only if another thread is waiting */
	if (atomic_add(&mutexp->ben, -1) != 1)
		return release_sem(mutexp->sem);
	return 0;
#endif
}/*}}}*/
728
729 /*
730 Changes the signal mask of the calling thread
731 */
732 #ifdef HAVE_SIGPROCMASK
/* Change the signal mask of the calling thread.  Mirrors the
 * sigprocmask()/pthread_sigmask() contract: returns 0 on success. */
TSRM_API int tsrm_sigmask(int how, const sigset_t *set, sigset_t *oldset)
{/*{{{*/
	TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Changed sigmask in thread: %ld", tsrm_thread_id()));
	/* TODO: add support for other APIs */
#ifdef PTHREADS
	/* sigprocmask() is unspecified in multithreaded processes; use the
	 * per-thread call when pthreads is available */
	return pthread_sigmask(how, set, oldset);
#else
	return sigprocmask(how, set, oldset);
#endif
}/*}}}*/
743 #endif
744
745
tsrm_set_new_thread_begin_handler(tsrm_thread_begin_func_t new_thread_begin_handler)746 TSRM_API void *tsrm_set_new_thread_begin_handler(tsrm_thread_begin_func_t new_thread_begin_handler)
747 {/*{{{*/
748 void *retval = (void *) tsrm_new_thread_begin_handler;
749
750 tsrm_new_thread_begin_handler = new_thread_begin_handler;
751 return retval;
752 }/*}}}*/
753
754
tsrm_set_new_thread_end_handler(tsrm_thread_end_func_t new_thread_end_handler)755 TSRM_API void *tsrm_set_new_thread_end_handler(tsrm_thread_end_func_t new_thread_end_handler)
756 {/*{{{*/
757 void *retval = (void *) tsrm_new_thread_end_handler;
758
759 tsrm_new_thread_end_handler = new_thread_end_handler;
760 return retval;
761 }/*}}}*/
762
763
tsrm_set_shutdown_handler(tsrm_shutdown_func_t shutdown_handler)764 TSRM_API void *tsrm_set_shutdown_handler(tsrm_shutdown_func_t shutdown_handler)
765 {/*{{{*/
766 void *retval = (void *) tsrm_shutdown_handler;
767
768 tsrm_shutdown_handler = shutdown_handler;
769 return retval;
770 }/*}}}*/
771
772
773 /*
774 * Debug support
775 */
776
777 #if TSRM_DEBUG
tsrm_error(int level,const char * format,...)778 int tsrm_error(int level, const char *format, ...)
779 {/*{{{*/
780 if (level<=tsrm_error_level) {
781 va_list args;
782 int size;
783
784 fprintf(tsrm_error_file, "TSRM: ");
785 va_start(args, format);
786 size = vfprintf(tsrm_error_file, format, args);
787 va_end(args);
788 fprintf(tsrm_error_file, "\n");
789 fflush(tsrm_error_file);
790 return size;
791 } else {
792 return 0;
793 }
794 }/*}}}*/
795 #endif
796
797
tsrm_error_set(int level,char * debug_filename)798 void tsrm_error_set(int level, char *debug_filename)
799 {/*{{{*/
800 tsrm_error_level = level;
801
802 #if TSRM_DEBUG
803 if (tsrm_error_file!=stderr) { /* close files opened earlier */
804 fclose(tsrm_error_file);
805 }
806
807 if (debug_filename) {
808 tsrm_error_file = fopen(debug_filename, "w");
809 if (!tsrm_error_file) {
810 tsrm_error_file = stderr;
811 }
812 } else {
813 tsrm_error_file = stderr;
814 }
815 #endif
816 }/*}}}*/
817
/* Return the calling thread's TLS resource pointer (the value installed
 * by tsrm_tls_set()); NULL if the thread has no context yet. */
TSRM_API void *tsrm_get_ls_cache(void)
{/*{{{*/
	return tsrm_tls_get();
}/*}}}*/
822
/* Nonzero only in the thread that executed tsrm_startup(). */
TSRM_API uint8_t tsrm_is_main_thread(void)
{/*{{{*/
	return in_main_thread;
}/*}}}*/
827
828 #endif /* ZTS */
829
830 /*
831 * Local variables:
832 * tab-width: 4
833 * c-basic-offset: 4
834 * End:
835 * vim600: sw=4 ts=4 fdm=marker
836 * vim<600: sw=4 ts=4
837 */
838