/*
   +----------------------------------------------------------------------+
   | Thread Safe Resource Manager                                         |
   +----------------------------------------------------------------------+
   | Copyright (c) 1999-2011, Andi Gutmans, Sascha Schumann, Zeev Suraski |
   | This source file is subject to the TSRM license, that is bundled     |
   | with this package in the file LICENSE                                |
   +----------------------------------------------------------------------+
   | Authors: Zeev Suraski <zeev@zend.com>                                |
   +----------------------------------------------------------------------+
*/

#include "TSRM.h"

#ifdef ZTS

#include <stdio.h>

#if HAVE_STDARG_H
#include <stdarg.h>
#endif

typedef struct _tsrm_tls_entry tsrm_tls_entry;

#if defined(TSRM_WIN32)
/* TSRMLS_CACHE_DEFINE; is already done in Zend; this file is always compiled statically. */
#endif

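
/* Per-thread resource bookkeeping.  Entries are kept in tsrm_tls_table,
 * hashed by thread id, with hash collisions chained through 'next'. */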
struct _tsrm_tls_entry {
	void **storage;
	int count;
	THREAD_T thread_id;
	tsrm_tls_entry *next;
};

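
/* Descriptor of one registered resource type.  The 'done' flag is set by
 * ts_free_id() once an id has been released, so that new threads skip
 * allocating it and tsrm_shutdown() does not run its destructor again. */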
typedef struct {
	size_t size;
	ts_allocate_ctor ctor;
	ts_allocate_dtor dtor;
	int done;
} tsrm_resource_type;


/* The memory manager table */
static tsrm_tls_entry **tsrm_tls_table=NULL;
static int tsrm_tls_table_size;
static ts_rsrc_id id_count;

/* The resource sizes table */
static tsrm_resource_type *resource_types_table=NULL;
static int resource_types_table_size;


static MUTEX_T tsmm_mutex;	/* thread-safe memory manager mutex */

/* New thread handlers */
static tsrm_thread_begin_func_t tsrm_new_thread_begin_handler;
static tsrm_thread_end_func_t tsrm_new_thread_end_handler;

/* Debug support */
int tsrm_error(int level, const char *format, ...);

static int tsrm_error_level;
static FILE *tsrm_error_file;

/* Read a resource from a thread's resource storage */
#if TSRM_DEBUG
#define TSRM_ERROR(args) tsrm_error args
#define TSRM_SAFE_RETURN_RSRC(array, offset, range)	\
	{	\
		int unshuffled_offset = TSRM_UNSHUFFLE_RSRC_ID(offset);	\
	\
		if (offset==0) {	\
			return &array;	\
		} else if ((unshuffled_offset)>=0 && (unshuffled_offset)<(range)) {	\
			TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Successfully fetched resource id %d for thread id %ld - 0x%0.8X",	\
						unshuffled_offset, (long) thread_resources->thread_id, array[unshuffled_offset]));	\
			return array[unshuffled_offset];	\
		} else {	\
			TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Resource id %d is out of range (%d..%d)",	\
						unshuffled_offset, TSRM_SHUFFLE_RSRC_ID(0), TSRM_SHUFFLE_RSRC_ID(thread_resources->count-1)));	\
			return NULL;	\
		}	\
	}
#else
#define TSRM_ERROR(args)
#define TSRM_SAFE_RETURN_RSRC(array, offset, range)	\
	if (offset==0) {	\
		return &array;	\
	} else {	\
		return array[TSRM_UNSHUFFLE_RSRC_ID(offset)];	\
	}
#endif
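
/* Resource ids handed out by ts_allocate_id() are "shuffled" (offset), so a
 * raw offset of 0 can be used to return the address of the whole storage
 * array; TSRM_UNSHUFFLE_RSRC_ID() maps an id back to its array index. */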

#if defined(GNUPTH)
static pth_key_t tls_key;
# define tsrm_tls_set(what)	pth_key_setdata(tls_key, (void*)(what))
# define tsrm_tls_get()		pth_key_getdata(tls_key)

#elif defined(PTHREADS)
/* Thread local storage */
static pthread_key_t tls_key;
# define tsrm_tls_set(what)	pthread_setspecific(tls_key, (void*)(what))
# define tsrm_tls_get()		pthread_getspecific(tls_key)

#elif defined(TSRM_ST)
static int tls_key;
# define tsrm_tls_set(what)	st_thread_setspecific(tls_key, (void*)(what))
# define tsrm_tls_get()		st_thread_getspecific(tls_key)

#elif defined(TSRM_WIN32)
static DWORD tls_key;
# define tsrm_tls_set(what)	TlsSetValue(tls_key, (void*)(what))
# define tsrm_tls_get()		TlsGetValue(tls_key)

#elif defined(BETHREADS)
static int32 tls_key;
# define tsrm_tls_set(what)	tls_set(tls_key, (void*)(what))
# define tsrm_tls_get()		(tsrm_tls_entry*)tls_get(tls_key)

#else
# define tsrm_tls_set(what)
# define tsrm_tls_get()		NULL
# warning tsrm_set_interpreter_context is probably broken on this platform
#endif

/* Startup TSRM (call once for the entire process) */
TSRM_API int tsrm_startup(int expected_threads, int expected_resources, int debug_level, char *debug_filename)
{
#if defined(GNUPTH)
	pth_init();
	pth_key_create(&tls_key, 0);
#elif defined(PTHREADS)
	pthread_key_create(&tls_key, 0);
#elif defined(TSRM_ST)
	st_init();
	st_key_create(&tls_key, 0);
#elif defined(TSRM_WIN32)
	tls_key = TlsAlloc();
#elif defined(BETHREADS)
	tls_key = tls_allocate();
#endif

	tsrm_error_file = stderr;
	tsrm_error_set(debug_level, debug_filename);
	tsrm_tls_table_size = expected_threads;

	tsrm_tls_table = (tsrm_tls_entry **) calloc(tsrm_tls_table_size, sizeof(tsrm_tls_entry *));
	if (!tsrm_tls_table) {
		TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Unable to allocate TLS table"));
		return 0;
	}
	id_count = 0;

	resource_types_table_size = expected_resources;
	resource_types_table = (tsrm_resource_type *) calloc(resource_types_table_size, sizeof(tsrm_resource_type));
	if (!resource_types_table) {
		TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Unable to allocate resource types table"));
		free(tsrm_tls_table);
		tsrm_tls_table = NULL;
		return 0;
	}

	tsmm_mutex = tsrm_mutex_alloc();

	tsrm_new_thread_begin_handler = tsrm_new_thread_end_handler = NULL;

	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Started up TSRM, %d expected threads, %d expected resources", expected_threads, expected_resources));
	return 1;
}
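
/* A minimal lifecycle sketch (illustrative only):
 *
 *   tsrm_startup(expected_threads, expected_resources, 0, NULL);
 *   ...register resource ids with ts_allocate_id(), run threads...
 *   tsrm_shutdown();
 */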


/* Shutdown TSRM (call once for the entire process) */
TSRM_API void tsrm_shutdown(void)
{
	int i;

	if (tsrm_tls_table) {
		for (i=0; i<tsrm_tls_table_size; i++) {
			tsrm_tls_entry *p = tsrm_tls_table[i], *next_p;

			while (p) {
				int j;

				next_p = p->next;
				for (j=0; j<p->count; j++) {
					if (p->storage[j]) {
						if (resource_types_table && !resource_types_table[j].done && resource_types_table[j].dtor) {
							resource_types_table[j].dtor(p->storage[j]);
						}
						free(p->storage[j]);
					}
				}
				free(p->storage);
				free(p);
				p = next_p;
			}
		}
		free(tsrm_tls_table);
		tsrm_tls_table = NULL;
	}
	if (resource_types_table) {
		free(resource_types_table);
		resource_types_table = NULL;
	}
	tsrm_mutex_free(tsmm_mutex);
	tsmm_mutex = NULL;
	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Shutdown TSRM"));
	if (tsrm_error_file != stderr) {
		fclose(tsrm_error_file);
	}
#if defined(GNUPTH)
	pth_kill();
#elif defined(PTHREADS)
	pthread_setspecific(tls_key, 0);
	pthread_key_delete(tls_key);
#elif defined(TSRM_WIN32)
	TlsFree(tls_key);
#endif
}


/* allocates a new thread-safe-resource id */
TSRM_API ts_rsrc_id ts_allocate_id(ts_rsrc_id *rsrc_id, size_t size, ts_allocate_ctor ctor, ts_allocate_dtor dtor)
{
	int i;

	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Obtaining a new resource id, %d bytes", size));

	tsrm_mutex_lock(tsmm_mutex);

	/* obtain a resource id */
	*rsrc_id = TSRM_SHUFFLE_RSRC_ID(id_count++);
	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Obtained resource id %d", *rsrc_id));

	/* store the new resource type in the resource sizes table */
	if (resource_types_table_size < id_count) {
		resource_types_table = (tsrm_resource_type *) realloc(resource_types_table, sizeof(tsrm_resource_type)*id_count);
		if (!resource_types_table) {
			tsrm_mutex_unlock(tsmm_mutex);
			TSRM_ERROR((TSRM_ERROR_LEVEL_ERROR, "Unable to allocate storage for resource"));
			*rsrc_id = 0;
			return 0;
		}
		resource_types_table_size = id_count;
	}
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].size = size;
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].ctor = ctor;
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].dtor = dtor;
	resource_types_table[TSRM_UNSHUFFLE_RSRC_ID(*rsrc_id)].done = 0;

	/* enlarge the arrays for the already active threads */
	for (i=0; i<tsrm_tls_table_size; i++) {
		tsrm_tls_entry *p = tsrm_tls_table[i];

		while (p) {
			if (p->count < id_count) {
				int j;

				p->storage = (void **) realloc(p->storage, sizeof(void *)*id_count);
				for (j=p->count; j<id_count; j++) {
					p->storage[j] = (void *) malloc(resource_types_table[j].size);
					if (resource_types_table[j].ctor) {
						resource_types_table[j].ctor(p->storage[j]);
					}
				}
				p->count = id_count;
			}
			p = p->next;
		}
	}
	tsrm_mutex_unlock(tsmm_mutex);

	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Successfully allocated new resource id %d", *rsrc_id));
	return *rsrc_id;
}
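
/* Typical caller pattern (illustrative only; my_globals, my_globals_ctor and
 * my_globals_dtor are hypothetical names, not part of TSRM):
 *
 *   static ts_rsrc_id my_globals_id;
 *   ts_allocate_id(&my_globals_id, sizeof(my_globals),
 *                  (ts_allocate_ctor) my_globals_ctor,
 *                  (ts_allocate_dtor) my_globals_dtor);
 *   my_globals *g = (my_globals *) ts_resource_ex(my_globals_id, NULL);
 */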
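
/* Creates and initializes the per-thread storage array for every resource id
 * registered so far.  Locking contract (visible in the callers): this function
 * is entered with tsmm_mutex held and releases it before returning. */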
static void allocate_new_resource(tsrm_tls_entry **thread_resources_ptr, THREAD_T thread_id)
{
	int i;

	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Creating data structures for thread %x", thread_id));
	(*thread_resources_ptr) = (tsrm_tls_entry *) malloc(sizeof(tsrm_tls_entry));
	(*thread_resources_ptr)->storage = NULL;
	if (id_count > 0) {
		(*thread_resources_ptr)->storage = (void **) malloc(sizeof(void *)*id_count);
	}
	(*thread_resources_ptr)->count = id_count;
	(*thread_resources_ptr)->thread_id = thread_id;
	(*thread_resources_ptr)->next = NULL;

	/* Set thread local storage to this new thread resources structure */
	tsrm_tls_set(*thread_resources_ptr);

	if (tsrm_new_thread_begin_handler) {
		tsrm_new_thread_begin_handler(thread_id);
	}
	for (i=0; i<id_count; i++) {
		if (resource_types_table[i].done) {
			(*thread_resources_ptr)->storage[i] = NULL;
		} else {
			(*thread_resources_ptr)->storage[i] = (void *) malloc(resource_types_table[i].size);
			if (resource_types_table[i].ctor) {
				resource_types_table[i].ctor((*thread_resources_ptr)->storage[i]);
			}
		}
	}

	if (tsrm_new_thread_end_handler) {
		tsrm_new_thread_end_handler(thread_id);
	}

	tsrm_mutex_unlock(tsmm_mutex);
}


/* fetches the requested resource for the current thread */
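/* Lookup strategy: try the thread-local pointer first (no locking); if this
 * thread has no entry yet, hash the thread id into tsrm_tls_table, walk the
 * bucket's chain under tsmm_mutex, allocate an entry on demand, and retry
 * the lookup recursively. */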
TSRM_API void *ts_resource_ex(ts_rsrc_id id, THREAD_T *th_id)
{
	THREAD_T thread_id;
	int hash_value;
	tsrm_tls_entry *thread_resources;

#ifdef NETWARE
	/* The below if block is added for NetWare to fix an abend while unloading PHP
	 * when an Apache unload command is issued on the system console.
	 * While exiting from PHP, at the end, for some reason this function is called
	 * with tsrm_tls_table = NULL. When this happens, the server abends because
	 * tsrm_tls_table is accessed while it is NULL.
	 */
	if (tsrm_tls_table) {
#endif
	if (!th_id) {
		/* Fast path for looking up the resources for the current
		 * thread. It's used by just about every call to
		 * ts_resource_ex(). This avoids the need for a mutex lock
		 * and our hashtable lookup.
		 */
		thread_resources = tsrm_tls_get();

		if (thread_resources) {
			TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Fetching resource id %d for current thread %ld", id, (long) thread_resources->thread_id));
			/* Read a specific resource from the thread's resources.
			 * This is called outside of a mutex, so we have to be aware of external
			 * changes to the structure as we read it.
			 */
			TSRM_SAFE_RETURN_RSRC(thread_resources->storage, id, thread_resources->count);
		}
		thread_id = tsrm_thread_id();
	} else {
		thread_id = *th_id;
	}

	TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Fetching resource id %d for thread %ld", id, (long) thread_id));
	tsrm_mutex_lock(tsmm_mutex);

	hash_value = THREAD_HASH_OF(thread_id, tsrm_tls_table_size);
	thread_resources = tsrm_tls_table[hash_value];

	if (!thread_resources) {
		allocate_new_resource(&tsrm_tls_table[hash_value], thread_id);
		return ts_resource_ex(id, &thread_id);
	} else {
		do {
			if (thread_resources->thread_id == thread_id) {
				break;
			}
			if (thread_resources->next) {
				thread_resources = thread_resources->next;
			} else {
				allocate_new_resource(&thread_resources->next, thread_id);
				return ts_resource_ex(id, &thread_id);
				/*
				 * thread_resources = thread_resources->next;
				 * break;
				 */
			}
		} while (thread_resources);
	}
	tsrm_mutex_unlock(tsmm_mutex);
	/* Read a specific resource from the thread's resources.
	 * This is called outside of a mutex, so we have to be aware of external
	 * changes to the structure as we read it.
	 */
	TSRM_SAFE_RETURN_RSRC(thread_resources->storage, id, thread_resources->count);
#ifdef NETWARE
	}	/* if (tsrm_tls_table) */
#endif
}

/* frees an interpreter context. You are responsible for making sure that
 * it is not linked into the TSRM hash, and not marked as the current interpreter */
void tsrm_free_interpreter_context(void *context)
{
	tsrm_tls_entry *next, *thread_resources = (tsrm_tls_entry*)context;
	int i;

	while (thread_resources) {
		next = thread_resources->next;

		for (i=0; i<thread_resources->count; i++) {
			if (resource_types_table[i].dtor) {
				resource_types_table[i].dtor(thread_resources->storage[i]);
			}
		}
		for (i=0; i<thread_resources->count; i++) {
			free(thread_resources->storage[i]);
		}
		free(thread_resources->storage);
		free(thread_resources);
		thread_resources = next;
	}
}

void *tsrm_set_interpreter_context(void *new_ctx)
{
	tsrm_tls_entry *current;

	current = tsrm_tls_get();

	/* TODO: unlink current from the global linked list, and replace it
	 * with the new context, protected by mutex where/if appropriate */

	/* Set thread local storage to this new thread resources structure */
	tsrm_tls_set(new_ctx);

	/* return old context, so caller can restore it when they're done */
	return current;
}


/* allocates a new interpreter context */
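/* Note: the newly created context is what gets returned, while the context
 * that was active before the call is restored into thread-local storage
 * (allocate_new_resource() installs the new context, and
 * tsrm_set_interpreter_context(current) swaps it back out). */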
void *tsrm_new_interpreter_context(void)
{
	tsrm_tls_entry *new_ctx, *current;
	THREAD_T thread_id;

	thread_id = tsrm_thread_id();
	tsrm_mutex_lock(tsmm_mutex);

	current = tsrm_tls_get();

	allocate_new_resource(&new_ctx, thread_id);

	/* switch back to the context that was in use prior to our creation
	 * of the new one */
	return tsrm_set_interpreter_context(current);
}


/* frees all resources allocated for the current thread */
void ts_free_thread(void)
{
	tsrm_tls_entry *thread_resources;
	int i;
	THREAD_T thread_id = tsrm_thread_id();
	int hash_value;
	tsrm_tls_entry *last=NULL;

	tsrm_mutex_lock(tsmm_mutex);
	hash_value = THREAD_HASH_OF(thread_id, tsrm_tls_table_size);
	thread_resources = tsrm_tls_table[hash_value];

	while (thread_resources) {
		if (thread_resources->thread_id == thread_id) {
			for (i=0; i<thread_resources->count; i++) {
				if (resource_types_table[i].dtor) {
					resource_types_table[i].dtor(thread_resources->storage[i]);
				}
			}
			for (i=0; i<thread_resources->count; i++) {
				free(thread_resources->storage[i]);
			}
			free(thread_resources->storage);
			if (last) {
				last->next = thread_resources->next;
			} else {
				tsrm_tls_table[hash_value] = thread_resources->next;
			}
			tsrm_tls_set(0);
			free(thread_resources);
			break;
		}
		if (thread_resources->next) {
			last = thread_resources;
		}
		thread_resources = thread_resources->next;
	}
	tsrm_mutex_unlock(tsmm_mutex);
}


/* frees all resources allocated for all threads except current */
void ts_free_worker_threads(void)
{
	tsrm_tls_entry *thread_resources;
	int i;
	THREAD_T thread_id = tsrm_thread_id();
	int hash_value;
	tsrm_tls_entry *last=NULL;

	tsrm_mutex_lock(tsmm_mutex);
	hash_value = THREAD_HASH_OF(thread_id, tsrm_tls_table_size);
	thread_resources = tsrm_tls_table[hash_value];

	while (thread_resources) {
		if (thread_resources->thread_id != thread_id) {
			for (i=0; i<thread_resources->count; i++) {
				if (resource_types_table[i].dtor) {
					resource_types_table[i].dtor(thread_resources->storage[i]);
				}
			}
			for (i=0; i<thread_resources->count; i++) {
				free(thread_resources->storage[i]);
			}
			free(thread_resources->storage);
			if (last) {
				last->next = thread_resources->next;
			} else {
				tsrm_tls_table[hash_value] = thread_resources->next;
			}
			free(thread_resources);
			if (last) {
				thread_resources = last->next;
			} else {
				thread_resources = tsrm_tls_table[hash_value];
			}
		} else {
			if (thread_resources->next) {
				last = thread_resources;
			}
			thread_resources = thread_resources->next;
		}
	}
	tsrm_mutex_unlock(tsmm_mutex);
}


/* deallocates all occurrences of a given id */
void ts_free_id(ts_rsrc_id id)
{
	int i;
	int j = TSRM_UNSHUFFLE_RSRC_ID(id);

	tsrm_mutex_lock(tsmm_mutex);

	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Freeing resource id %d", id));

	if (tsrm_tls_table) {
		for (i=0; i<tsrm_tls_table_size; i++) {
			tsrm_tls_entry *p = tsrm_tls_table[i];

			while (p) {
				if (p->count > j && p->storage[j]) {
					if (resource_types_table && resource_types_table[j].dtor) {
						resource_types_table[j].dtor(p->storage[j]);
					}
					free(p->storage[j]);
					p->storage[j] = NULL;
				}
				p = p->next;
			}
		}
	}
	resource_types_table[j].done = 1;

	tsrm_mutex_unlock(tsmm_mutex);

	TSRM_ERROR((TSRM_ERROR_LEVEL_CORE, "Successfully freed resource id %d", id));
}


/*
 * Utility Functions
 */

/* Obtain the current thread id */
TSRM_API THREAD_T tsrm_thread_id(void)
{
#ifdef TSRM_WIN32
	return GetCurrentThreadId();
#elif defined(GNUPTH)
	return pth_self();
#elif defined(PTHREADS)
	return pthread_self();
#elif defined(NSAPI)
	return systhread_current();
#elif defined(PI3WEB)
	return PIThread_getCurrent();
#elif defined(TSRM_ST)
	return st_thread_self();
#elif defined(BETHREADS)
	return find_thread(NULL);
#endif
}


/* Allocate a mutex */
TSRM_API MUTEX_T tsrm_mutex_alloc(void)
{
	MUTEX_T mutexp;
#ifdef TSRM_WIN32
	mutexp = malloc(sizeof(CRITICAL_SECTION));
	InitializeCriticalSection(mutexp);
#elif defined(GNUPTH)
	mutexp = (MUTEX_T) malloc(sizeof(*mutexp));
	pth_mutex_init(mutexp);
#elif defined(PTHREADS)
	mutexp = (pthread_mutex_t *) malloc(sizeof(pthread_mutex_t));
	pthread_mutex_init(mutexp, NULL);
#elif defined(NSAPI)
	mutexp = crit_init();
#elif defined(PI3WEB)
	mutexp = PIPlatform_allocLocalMutex();
#elif defined(TSRM_ST)
	mutexp = st_mutex_new();
#elif defined(BETHREADS)
	mutexp = (beos_ben*) malloc(sizeof(beos_ben));
	mutexp->ben = 0;
	mutexp->sem = create_sem(1, "PHP semaphore");
#endif
#ifdef THR_DEBUG
	printf("Mutex created thread: %d\n", mythreadid());
#endif
	return mutexp;
}


/* Free a mutex */
TSRM_API void tsrm_mutex_free(MUTEX_T mutexp)
{
	if (mutexp) {
#ifdef TSRM_WIN32
		DeleteCriticalSection(mutexp);
		free(mutexp);
#elif defined(GNUPTH)
		free(mutexp);
#elif defined(PTHREADS)
		pthread_mutex_destroy(mutexp);
		free(mutexp);
#elif defined(NSAPI)
		crit_terminate(mutexp);
#elif defined(PI3WEB)
		PISync_delete(mutexp);
#elif defined(TSRM_ST)
		st_mutex_destroy(mutexp);
#elif defined(BETHREADS)
		delete_sem(mutexp->sem);
		free(mutexp);
#endif
	}
#ifdef THR_DEBUG
	printf("Mutex freed thread: %d\n", mythreadid());
#endif
}
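
/* In the BETHREADS branches below, the ben/sem pair acts as a "benaphore":
 * atomic_add() on 'ben' counts contenders, and the semaphore is only touched
 * when another thread is actually holding or waiting for the lock. */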


/*
  Lock a mutex.
  A return value of 0 indicates success
*/
TSRM_API int tsrm_mutex_lock(MUTEX_T mutexp)
{
	TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Mutex locked thread: %ld", tsrm_thread_id()));
#ifdef TSRM_WIN32
	EnterCriticalSection(mutexp);
	return 0;
#elif defined(GNUPTH)
	if (pth_mutex_acquire(mutexp, 0, NULL)) {
		return 0;
	}
	return -1;
#elif defined(PTHREADS)
	return pthread_mutex_lock(mutexp);
#elif defined(NSAPI)
	crit_enter(mutexp);
	return 0;
#elif defined(PI3WEB)
	return PISync_lock(mutexp);
#elif defined(TSRM_ST)
	return st_mutex_lock(mutexp);
#elif defined(BETHREADS)
	if (atomic_add(&mutexp->ben, 1) != 0)
		return acquire_sem(mutexp->sem);
	return 0;
#endif
}


/*
  Unlock a mutex.
  A return value of 0 indicates success
*/
TSRM_API int tsrm_mutex_unlock(MUTEX_T mutexp)
{
	TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Mutex unlocked thread: %ld", tsrm_thread_id()));
#ifdef TSRM_WIN32
	LeaveCriticalSection(mutexp);
	return 0;
#elif defined(GNUPTH)
	if (pth_mutex_release(mutexp)) {
		return 0;
	}
	return -1;
#elif defined(PTHREADS)
	return pthread_mutex_unlock(mutexp);
#elif defined(NSAPI)
	crit_exit(mutexp);
	return 0;
#elif defined(PI3WEB)
	return PISync_unlock(mutexp);
#elif defined(TSRM_ST)
	return st_mutex_unlock(mutexp);
#elif defined(BETHREADS)
	if (atomic_add(&mutexp->ben, -1) != 1)
		return release_sem(mutexp->sem);
	return 0;
#endif
}
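
/* Minimal usage sketch (illustrative only):
 *
 *   MUTEX_T m = tsrm_mutex_alloc();
 *   if (tsrm_mutex_lock(m) == 0) {
 *       ...critical section...
 *       tsrm_mutex_unlock(m);
 *   }
 *   tsrm_mutex_free(m);
 */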

/*
  Changes the signal mask of the calling thread
*/
#ifdef HAVE_SIGPROCMASK
TSRM_API int tsrm_sigmask(int how, const sigset_t *set, sigset_t *oldset)
{
	TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Changed sigmask in thread: %ld", tsrm_thread_id()));
	/* TODO: add support for other APIs */
#ifdef PTHREADS
	return pthread_sigmask(how, set, oldset);
#else
	return sigprocmask(how, set, oldset);
#endif
}
#endif


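/* The handlers below are invoked from allocate_new_resource(): the begin
 * handler runs before a new thread's resources are constructed, the end
 * handler afterwards.  Each setter returns the previously installed handler. */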
TSRM_API void *tsrm_set_new_thread_begin_handler(tsrm_thread_begin_func_t new_thread_begin_handler)
{
	void *retval = (void *) tsrm_new_thread_begin_handler;

	tsrm_new_thread_begin_handler = new_thread_begin_handler;
	return retval;
}


TSRM_API void *tsrm_set_new_thread_end_handler(tsrm_thread_end_func_t new_thread_end_handler)
{
	void *retval = (void *) tsrm_new_thread_end_handler;

	tsrm_new_thread_end_handler = new_thread_end_handler;
	return retval;
}


/*
 * Debug support
 */

#if TSRM_DEBUG
int tsrm_error(int level, const char *format, ...)
{
	if (level <= tsrm_error_level) {
		va_list args;
		int size;

		fprintf(tsrm_error_file, "TSRM: ");
		va_start(args, format);
		size = vfprintf(tsrm_error_file, format, args);
		va_end(args);
		fprintf(tsrm_error_file, "\n");
		fflush(tsrm_error_file);
		return size;
	} else {
		return 0;
	}
}
#endif


void tsrm_error_set(int level, char *debug_filename)
{
	tsrm_error_level = level;

#if TSRM_DEBUG
	if (tsrm_error_file != stderr) { /* close files opened earlier */
		fclose(tsrm_error_file);
	}

	if (debug_filename) {
		tsrm_error_file = fopen(debug_filename, "w");
		if (!tsrm_error_file) {
			tsrm_error_file = stderr;
		}
	} else {
		tsrm_error_file = stderr;
	}
#endif
}

TSRM_API void *tsrm_get_ls_cache(void)
{
	return tsrm_tls_get();
}

#endif /* ZTS */