Ruby 2.0.0p451 (2014-02-24 revision 45167)
thread.c
1 /**********************************************************************
2 
3  thread.c -
4 
5  $Author: nagachika $
6 
7  Copyright (C) 2004-2007 Koichi Sasada
8 
9 **********************************************************************/
10 
11 /*
12  YARV Thread Design
13 
14  model 1: Userlevel Thread
15  Same as the traditional ruby thread.
16 
17  model 2: Native Thread with Global VM lock
18  Uses pthreads (or Windows threads); Ruby threads run concurrently.
19 
20  model 3: Native Thread with fine grain lock
21  Uses pthreads; Ruby threads run concurrently or in parallel.
22 
23 ------------------------------------------------------------------------
24 
25  model 2:
26  Only the thread that holds the mutex (GVL: Global VM Lock or Giant
27  VM Lock) can run. When thread scheduling occurs, the running thread
28  releases the GVL. If the running thread attempts a blocking operation,
29  it must release the GVL so that another thread can continue. After the
30  blocking operation, the thread must check interrupts (RUBY_VM_CHECK_INTS).
31 
32  Each VM can run in parallel.
33 
34  Ruby threads are scheduled by the OS thread scheduler.
35 
36 ------------------------------------------------------------------------
37 
38  model 3:
39  All threads run concurrently or in parallel, and exclusive access
40  control is needed to touch shared objects. For example, to access a
41  String or Array object, a fine-grained lock must be taken every time.
42  */
43 
44 
45 /*
46  * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
47  * 2.15 or later with _FORTIFY_SOURCE > 0.
48  * However, the implementation is wrong. Even though Linux's select(2)
49  * supports large fd sizes (> FD_SETSIZE), it wrongly assumes fd is always
50  * less than FD_SETSIZE (i.e. 1024). Then, when HAVE_RB_FD_INIT is enabled,
51  * it doesn't work correctly and makes the program abort. Therefore we need
52  * to disable _FORTIFY_SOURCE until glibc fixes it.
53  */
54 #undef _FORTIFY_SOURCE
55 #undef __USE_FORTIFY_LEVEL
56 #define __USE_FORTIFY_LEVEL 0
57 
58 /* for model 2 */
59 
60 #include "eval_intern.h"
61 #include "gc.h"
62 #include "internal.h"
63 #include "ruby/io.h"
64 #include "ruby/thread.h"
65 
66 #ifndef USE_NATIVE_THREAD_PRIORITY
67 #define USE_NATIVE_THREAD_PRIORITY 0
68 #define RUBY_THREAD_PRIORITY_MAX 3
69 #define RUBY_THREAD_PRIORITY_MIN -3
70 #endif
71 
72 #ifndef THREAD_DEBUG
73 #define THREAD_DEBUG 0
74 #endif
75 
76 #define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))
77 #define TIMET_MIN (~(time_t)0 <= 0 ? (time_t)(((unsigned_time_t)1) << (sizeof(time_t) * CHAR_BIT - 1)) : (time_t)0)
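/*
 * The two macros above derive the extrema of time_t without knowing its
 * signedness up front: `~(time_t)0 <= 0' is true only for a signed time_t,
 * and the shift is done in an unsigned type of the same width
 * (unsigned_time_t, defined elsewhere in the tree) to avoid signed
 * overflow. A minimal standalone sketch of the same trick; the
 * unsigned_time_t typedef here is an assumption for illustration:
 *
 *   #include <stdio.h>
 *   #include <time.h>
 *   typedef unsigned long long unsigned_time_t; // stand-in; width must match time_t
 *   int main(void)
 *   {
 *       time_t max = (~(time_t)0 <= 0)
 *           ? (time_t)((~(unsigned_time_t)0) >> 1)  // signed: 0x7fff... pattern
 *           : (time_t)(~(unsigned_time_t)0);        // unsigned: 0xffff... pattern
 *       printf("TIMET_MAX = %lld\n", (long long)max);
 *       return 0;
 *   }
 */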
78 
81 
85 
86 static void sleep_timeval(rb_thread_t *th, struct timeval time, int spurious_check);
87 static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check);
88 static void sleep_forever(rb_thread_t *th, int nodeadlock, int spurious_check);
89 static double timeofday(void);
90 static int rb_threadptr_dead(rb_thread_t *th);
91 static void rb_check_deadlock(rb_vm_t *vm);
92 static int rb_threadptr_pending_interrupt_empty_p(rb_thread_t *th);
93 
94 #define eKillSignal INT2FIX(0)
95 #define eTerminateSignal INT2FIX(1)
96 static volatile int system_working = 1;
97 
98 #define closed_stream_error GET_VM()->special_exceptions[ruby_error_closed_stream]
99 
100 inline static void
101 st_delete_wrap(st_table *table, st_data_t key)
102 {
103  st_delete(table, &key, 0);
104 }
105 
106 /********************************************************************************/
107 
108 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
109 
110 struct rb_blocking_region_buffer {
111  enum rb_thread_status prev_status;
112  struct rb_unblock_callback oldubf;
113 };
114 
115 static int set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
116  struct rb_unblock_callback *old, int fail_if_interrupted);
117 static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old);
118 
119 static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
120  rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
121 static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
122 
123 #ifdef __ia64
124 #define RB_GC_SAVE_MACHINE_REGISTER_STACK(th) \
125  do{(th)->machine_register_stack_end = rb_ia64_bsp();}while(0)
126 #else
127 #define RB_GC_SAVE_MACHINE_REGISTER_STACK(th)
128 #endif
129 #define RB_GC_SAVE_MACHINE_CONTEXT(th) \
130  do { \
131  FLUSH_REGISTER_WINDOWS; \
132  RB_GC_SAVE_MACHINE_REGISTER_STACK(th); \
133  setjmp((th)->machine_regs); \
134  SET_MACHINE_STACK_END(&(th)->machine_stack_end); \
135  } while (0)
136 
137 #define GVL_UNLOCK_BEGIN() do { \
138  rb_thread_t *_th_stored = GET_THREAD(); \
139  RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
140  gvl_release(_th_stored->vm);
141 
142 #define GVL_UNLOCK_END() \
143  gvl_acquire(_th_stored->vm, _th_stored); \
144  rb_thread_set_current(_th_stored); \
145 } while(0)
146 
147 #ifdef __GNUC__
148 #define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
149 #else
150 #define only_if_constant(expr, notconst) notconst
151 #endif
152 #define BLOCKING_REGION(exec, ubf, ubfarg, fail_if_interrupted) do { \
153  rb_thread_t *__th = GET_THREAD(); \
154  struct rb_blocking_region_buffer __region; \
155  if (blocking_region_begin(__th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
156  /* always return true unless fail_if_interrupted */ \
157  !only_if_constant(fail_if_interrupted, TRUE)) { \
158  exec; \
159  blocking_region_end(__th, &__region); \
160  }; \
161 } while(0)
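/*
 * Usage sketch for the BLOCKING_REGION macro above (internal to this file).
 * The pattern mirrors call_without_gvl() defined later; `blocking_io' and
 * `cancel_io' are hypothetical helpers, not part of this file:
 *
 *   static void blocking_io(void *arg) { ... read(2)/poll(2)/etc ... }
 *   static void cancel_io(void *arg)   { ... wake the blocked fd up ... }
 *
 *   BLOCKING_REGION({
 *       blocking_io(arg);          // runs with the GVL released
 *   }, cancel_io, arg, FALSE);     // cancel_io is the ubf; FALSE = don't
 *                                  // fail if already interrupted
 */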
162 
163 #if THREAD_DEBUG
164 #ifdef HAVE_VA_ARGS_MACRO
165 void rb_thread_debug(const char *file, int line, const char *fmt, ...);
166 #define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
167 #define POSITION_FORMAT "%s:%d:"
168 #define POSITION_ARGS ,file, line
169 #else
170 void rb_thread_debug(const char *fmt, ...);
171 #define thread_debug rb_thread_debug
172 #define POSITION_FORMAT
173 #define POSITION_ARGS
174 #endif
175 
176 # if THREAD_DEBUG < 0
177 static int rb_thread_debug_enabled;
178 
179 /*
180  * call-seq:
181  * Thread.DEBUG -> num
182  *
183  * Returns the thread debug level. Available only if compiled with
184  * THREAD_DEBUG=-1.
185  */
186 
187 static VALUE
188 rb_thread_s_debug(void)
189 {
190  return INT2NUM(rb_thread_debug_enabled);
191 }
192 
193 /*
194  * call-seq:
195  * Thread.DEBUG = num
196  *
197  * Sets the thread debug level. Available only if compiled with
198  * THREAD_DEBUG=-1.
199  */
200 
201 static VALUE
202 rb_thread_s_debug_set(VALUE self, VALUE val)
203 {
204  rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;
205  return val;
206 }
207 # else
208 # define rb_thread_debug_enabled THREAD_DEBUG
209 # endif
210 #else
211 #define thread_debug if(0)printf
212 #endif
213 
214 #ifndef __ia64
215 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
216 #endif
217 NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
218  VALUE *register_stack_start));
219 static void timer_thread_function(void *);
220 
221 #if defined(_WIN32)
222 #include "thread_win32.c"
223 
224 #define DEBUG_OUT() \
225  WaitForSingleObject(&debug_mutex, INFINITE); \
226  printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
227  fflush(stdout); \
228  ReleaseMutex(&debug_mutex);
229 
230 #elif defined(HAVE_PTHREAD_H)
231 #include "thread_pthread.c"
232 
233 #define DEBUG_OUT() \
234  pthread_mutex_lock(&debug_mutex); \
235  printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
236  fflush(stdout); \
237  pthread_mutex_unlock(&debug_mutex);
238 
239 #else
240 #error "unsupported thread type"
241 #endif
242 
243 #if THREAD_DEBUG
244 static int debug_mutex_initialized = 1;
245 static rb_thread_lock_t debug_mutex;
246 
247 void
248 rb_thread_debug(
249 #ifdef HAVE_VA_ARGS_MACRO
250  const char *file, int line,
251 #endif
252  const char *fmt, ...)
253 {
254  va_list args;
255  char buf[BUFSIZ];
256 
257  if (!rb_thread_debug_enabled) return;
258 
259  if (debug_mutex_initialized == 1) {
260  debug_mutex_initialized = 0;
261  native_mutex_initialize(&debug_mutex);
262  }
263 
264  va_start(args, fmt);
265  vsnprintf(buf, BUFSIZ, fmt, args);
266  va_end(args);
267 
268  DEBUG_OUT();
269 }
270 #endif
271 
272 void
273 rb_vm_gvl_destroy(rb_vm_t *vm)
274 {
275  gvl_release(vm);
276  gvl_destroy(vm);
277  native_mutex_destroy(&vm->thread_destruct_lock);
278 }
279 
280 void
281 rb_thread_lock_unlock(rb_thread_lock_t *lock)
282 {
283  native_mutex_unlock(lock);
284 }
285 
286 void
287 rb_thread_lock_destroy(rb_thread_lock_t *lock)
288 {
289  native_mutex_destroy(lock);
290 }
291 
292 static int
293 set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
294  struct rb_unblock_callback *old, int fail_if_interrupted)
295 {
296  check_ints:
297  if (fail_if_interrupted) {
298  if (RUBY_VM_INTERRUPTED_ANY(th)) {
299  return FALSE;
300  }
301  }
302  else {
303  RUBY_VM_CHECK_INTS(th);
304  }
305 
306  native_mutex_lock(&th->interrupt_lock);
307  if (RUBY_VM_INTERRUPTED_ANY(th)) {
308  native_mutex_unlock(&th->interrupt_lock);
309  goto check_ints;
310  }
311  else {
312  if (old) *old = th->unblock;
313  th->unblock.func = func;
314  th->unblock.arg = arg;
315  }
316  native_mutex_unlock(&th->interrupt_lock);
317 
318  return TRUE;
319 }
320 
321 static void
322 reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old)
323 {
324  native_mutex_lock(&th->interrupt_lock);
325  th->unblock = *old;
326  native_mutex_unlock(&th->interrupt_lock);
327 }
328 
329 static void
330 rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
331 {
332  native_mutex_lock(&th->interrupt_lock);
333  if (trap)
334  RUBY_VM_SET_TRAP_INTERRUPT(th);
335  else
336  RUBY_VM_SET_INTERRUPT(th);
337  if (th->unblock.func) {
338  (th->unblock.func)(th->unblock.arg);
339  }
340  else {
341  /* none */
342  }
343  native_mutex_unlock(&th->interrupt_lock);
344 }
345 
346 void
347 rb_threadptr_interrupt(rb_thread_t *th)
348 {
349  rb_threadptr_interrupt_common(th, 0);
350 }
351 
352 void
353 rb_threadptr_trap_interrupt(rb_thread_t *th)
354 {
355  rb_threadptr_interrupt_common(th, 1);
356 }
357 
358 static int
359 terminate_i(st_data_t key, st_data_t val, rb_thread_t *main_thread)
360 {
361  VALUE thval = key;
362  rb_thread_t *th;
363  GetThreadPtr(thval, th);
364 
365  if (th != main_thread) {
366  thread_debug("terminate_i: %p\n", (void *)th);
367  rb_threadptr_pending_interrupt_enque(th, eTerminateSignal);
368  rb_threadptr_interrupt(th);
369  }
370  else {
371  thread_debug("terminate_i: main thread (%p)\n", (void *)th);
372  }
373  return ST_CONTINUE;
374 }
375 
376 typedef struct rb_mutex_struct
377 {
378  rb_thread_lock_t lock;
379  rb_thread_cond_t cond;
380  struct rb_thread_struct volatile *th;
381  int cond_waiting;
382  struct rb_mutex_struct *next_mutex;
383  int allow_trap;
384 } rb_mutex_t;
385 
386 static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
387 static void rb_mutex_abandon_keeping_mutexes(rb_thread_t *th);
388 static void rb_mutex_abandon_locking_mutex(rb_thread_t *th);
389 static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th);
390 
391 void
392 rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
393 {
394  const char *err;
395  rb_mutex_t *mutex;
396  rb_mutex_t *mutexes = th->keeping_mutexes;
397 
398  while (mutexes) {
399  mutex = mutexes;
400  /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
401  mutexes); */
402  mutexes = mutex->next_mutex;
403  err = rb_mutex_unlock_th(mutex, th);
404  if (err) rb_bug("invalid keeping_mutexes: %s", err);
405  }
406 }
407 
408 void
409 rb_thread_terminate_all(void)
410 {
411  rb_thread_t *th = GET_THREAD(); /* main thread */
412  rb_vm_t *vm = th->vm;
413 
414  if (vm->main_thread != th) {
415  rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
416  (void *)vm->main_thread, (void *)th);
417  }
418 
419  /* unlock all locking mutexes */
420  rb_threadptr_unlock_all_locking_mutexes(th);
421 
422  retry:
423  thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
424  st_foreach(vm->living_threads, terminate_i, (st_data_t)th);
425 
426  while (!rb_thread_alone()) {
427  int state;
428 
429  TH_PUSH_TAG(th);
430  if ((state = TH_EXEC_TAG()) == 0) {
431  native_sleep(th, 0);
432  RUBY_VM_CHECK_INTS_BLOCKING(th);
433  }
434  TH_POP_TAG();
435 
436  if (state) {
437  goto retry;
438  }
439  }
440 }
441 
442 static void
443 thread_cleanup_func_before_exec(void *th_ptr)
444 {
445  rb_thread_t *th = th_ptr;
446  th->status = THREAD_KILLED;
447  th->machine_stack_start = th->machine_stack_end = 0;
448 #ifdef __ia64
449  th->machine_register_stack_start = th->machine_register_stack_end = 0;
450 #endif
451 }
452 
453 static void
454 thread_cleanup_func(void *th_ptr, int atfork)
455 {
456  rb_thread_t *th = th_ptr;
457 
458  th->locking_mutex = Qfalse;
459  thread_cleanup_func_before_exec(th_ptr);
460 
461  /*
462  * Unfortunately, we can't release native threading resource at fork
463  * because libc may have unstable locking state therefore touching
464  * a threading resource may cause a deadlock.
465  */
466  if (atfork)
467  return;
468 
469  native_mutex_destroy(&th->interrupt_lock);
470  native_thread_destroy(th);
471 }
472 
473 static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
474 
475 void
476 ruby_thread_init_stack(rb_thread_t *th)
477 {
478  native_thread_init_stack(th);
479 }
480 
481 static int
482 thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
483 {
484  int state;
485  VALUE args = th->first_args;
486  rb_proc_t *proc;
487  rb_thread_list_t *join_list;
488  rb_thread_t *main_th;
489  VALUE errinfo = Qnil;
490 # ifdef USE_SIGALTSTACK
491  void rb_register_sigaltstack(rb_thread_t *th);
492 
493  rb_register_sigaltstack(th);
494 # endif
495 
496  if (th == th->vm->main_thread)
497  rb_bug("thread_start_func_2 must not be used for main thread");
498 
499  ruby_thread_set_native(th);
500 
501  th->machine_stack_start = stack_start;
502 #ifdef __ia64
503  th->machine_register_stack_start = register_stack_start;
504 #endif
505  thread_debug("thread start: %p\n", (void *)th);
506 
507  gvl_acquire(th->vm, th);
508  {
509  thread_debug("thread start (get lock): %p\n", (void *)th);
510  rb_thread_set_current(th);
511 
512  TH_PUSH_TAG(th);
513  if ((state = EXEC_TAG()) == 0) {
514  SAVE_ROOT_JMPBUF(th, {
515  if (!th->first_func) {
516  GetProcPtr(th->first_proc, proc);
517  th->errinfo = Qnil;
518  th->root_lep = rb_vm_ep_local_ep(proc->block.ep);
519  th->root_svar = Qnil;
520  EXEC_EVENT_HOOK(th, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, Qundef);
521  th->value = rb_vm_invoke_proc(th, proc, (int)RARRAY_LEN(args), RARRAY_PTR(args), 0);
522  EXEC_EVENT_HOOK(th, RUBY_EVENT_THREAD_END, th->self, 0, 0, Qundef);
523  }
524  else {
525  th->value = (*th->first_func)((void *)args);
526  }
527  });
528  }
529  else {
530  errinfo = th->errinfo;
531  if (state == TAG_FATAL) {
532  /* fatal error within this thread, need to stop whole script */
533  }
534  else if (th->safe_level >= 4) {
535  /* Ignore it. Main thread shouldn't be harmed by an untrusted thread. */
536  errinfo = Qnil;
537  }
538  else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
539  /* exit on main_thread. */
540  }
541  else if (th->vm->thread_abort_on_exception ||
542  th->abort_on_exception || RTEST(ruby_debug)) {
543  /* exit on main_thread */
544  }
545  else {
546  errinfo = Qnil;
547  }
548  th->value = Qnil;
549  }
550 
551  th->status = THREAD_KILLED;
552  thread_debug("thread end: %p\n", (void *)th);
553 
554  main_th = th->vm->main_thread;
555  if (RB_TYPE_P(errinfo, T_OBJECT)) {
556  /* treat with normal error object */
557  rb_threadptr_raise(main_th, 1, &errinfo);
558  }
559  TH_POP_TAG();
560 
561  /* locking_mutex must be Qfalse */
562  if (th->locking_mutex != Qfalse) {
563  rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
564  (void *)th, th->locking_mutex);
565  }
566 
567  /* delete self other than main thread from living_threads */
568  st_delete_wrap(th->vm->living_threads, th->self);
569  if (rb_thread_alone()) {
570  /* I'm last thread. wake up main thread from rb_thread_terminate_all */
571  rb_threadptr_interrupt(main_th);
572  }
573 
574  /* wake up joining threads */
575  join_list = th->join_list;
576  while (join_list) {
577  rb_threadptr_interrupt(join_list->th);
578  switch (join_list->th->status) {
579  case THREAD_STOPPED: case THREAD_STOPPED_FOREVER:
580  join_list->th->status = THREAD_RUNNABLE;
581  default: break;
582  }
583  join_list = join_list->next;
584  }
585 
586  rb_threadptr_unlock_all_locking_mutexes(th);
587  rb_check_deadlock(th->vm);
588 
589  if (!th->root_fiber) {
590  rb_thread_recycle_stack_release(th->stack);
591  th->stack = 0;
592  }
593  }
594  native_mutex_lock(&th->vm->thread_destruct_lock);
595  /* make sure vm->running_thread never point me after this point.*/
596  th->vm->running_thread = NULL;
597  native_mutex_unlock(&th->vm->thread_destruct_lock);
598  thread_cleanup_func(th, FALSE);
599  gvl_release(th->vm);
600 
601  return 0;
602 }
603 
604 static VALUE
605 thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(ANYARGS))
606 {
607  rb_thread_t *th, *current_th = GET_THREAD();
608  int err;
609 
610  if (OBJ_FROZEN(GET_THREAD()->thgroup)) {
611  rb_raise(rb_eThreadError,
612  "can't start a new thread (frozen ThreadGroup)");
613  }
614  GetThreadPtr(thval, th);
615 
616  /* setup thread environment */
617  th->first_func = fn;
618  th->first_proc = fn ? Qfalse : rb_block_proc();
619  th->first_args = args; /* GC: shouldn't put before above line */
620 
621  th->priority = current_th->priority;
622  th->thgroup = current_th->thgroup;
623 
624  th->pending_interrupt_queue = rb_ary_tmp_new(0);
625  th->pending_interrupt_queue_checked = 0;
626  th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
627  RBASIC(th->pending_interrupt_mask_stack)->klass = 0;
628 
629  th->interrupt_mask = 0;
630 
631  native_mutex_initialize(&th->interrupt_lock);
632 
633  /* kick thread */
634  err = native_thread_create(th);
635  if (err) {
636  th->status = THREAD_KILLED;
637  rb_raise(rb_eThreadError, "can't create Thread (%d)", err);
638  }
639  st_insert(th->vm->living_threads, thval, (st_data_t) th->thread_id);
640  return thval;
641 }
642 
643 /*
644  * call-seq:
645  * Thread.new { ... } -> thread
646  * Thread.new(*args, &proc) -> thread
647  * Thread.new(*args) { |args| ... } -> thread
648  *
649  * Creates a new thread executing the given block.
650  *
651  * Any +args+ given to ::new will be passed to the block:
652  *
653  * arr = []
654  * a, b, c = 1, 2, 3
655  * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
656  * arr #=> [1, 2, 3]
657  *
658  * A ThreadError exception is raised if ::new is called without a block.
659  *
660  * If you're going to subclass Thread, be sure to call super in your
661  * +initialize+ method, otherwise a ThreadError will be raised.
662  */
663 static VALUE
664 thread_s_new(int argc, VALUE *argv, VALUE klass)
665 {
666  rb_thread_t *th;
667  VALUE thread = rb_thread_alloc(klass);
668 
669  if (GET_VM()->main_thread->status == THREAD_KILLED)
670  rb_raise(rb_eThreadError, "can't alloc thread");
671 
672  rb_obj_call_init(thread, argc, argv);
673  GetThreadPtr(thread, th);
674  if (!th->first_args) {
675  rb_raise(rb_eThreadError, "uninitialized thread - check `%s#initialize'",
676  rb_class2name(klass));
677  }
678  return thread;
679 }
680 
681 /*
682  * call-seq:
683  * Thread.start([args]*) {|args| block } -> thread
684  * Thread.fork([args]*) {|args| block } -> thread
685  *
686  * Basically the same as ::new. However, if class Thread is subclassed, then
687  * calling +start+ in that subclass will not invoke the subclass's
688  * +initialize+ method.
689  */
690 
691 static VALUE
692 thread_start(VALUE klass, VALUE args)
693 {
694  return thread_create_core(rb_thread_alloc(klass), args, 0);
695 }
696 
697 /* :nodoc: */
698 static VALUE
699 thread_initialize(VALUE thread, VALUE args)
700 {
701  rb_thread_t *th;
702  if (!rb_block_given_p()) {
703  rb_raise(rb_eThreadError, "must be called with a block");
704  }
705  GetThreadPtr(thread, th);
706  if (th->first_args) {
707  VALUE proc = th->first_proc, line, loc;
708  const char *file;
709  if (!proc || !RTEST(loc = rb_proc_location(proc))) {
710  rb_raise(rb_eThreadError, "already initialized thread");
711  }
712  file = RSTRING_PTR(RARRAY_PTR(loc)[0]);
713  if (NIL_P(line = RARRAY_PTR(loc)[1])) {
714  rb_raise(rb_eThreadError, "already initialized thread - %s",
715  file);
716  }
717  rb_raise(rb_eThreadError, "already initialized thread - %s:%d",
718  file, NUM2INT(line));
719  }
720  return thread_create_core(thread, args, 0);
721 }
722 
723 VALUE
724 rb_thread_create(VALUE (*fn)(ANYARGS), void *arg)
725 {
726  return thread_create_core(rb_thread_alloc(rb_cThread), (VALUE)arg, fn);
727 }
728 
729 
730 /* +infty, for this purpose */
731 #define DELAY_INFTY 1E30
732 
733 struct join_arg {
734  rb_thread_t *target, *waiting;
735  double limit;
736  int forever;
737 };
738 
739 static VALUE
740 remove_from_join_list(VALUE arg)
741 {
742  struct join_arg *p = (struct join_arg *)arg;
743  rb_thread_t *target_th = p->target, *th = p->waiting;
744 
745  if (target_th->status != THREAD_KILLED) {
746  rb_thread_list_t **p = &target_th->join_list;
747 
748  while (*p) {
749  if ((*p)->th == th) {
750  *p = (*p)->next;
751  break;
752  }
753  p = &(*p)->next;
754  }
755  }
756 
757  return Qnil;
758 }
759 
760 static VALUE
761 thread_join_sleep(VALUE arg)
762 {
763  struct join_arg *p = (struct join_arg *)arg;
764  rb_thread_t *target_th = p->target, *th = p->waiting;
765  double now, limit = p->limit;
766 
767  while (target_th->status != THREAD_KILLED) {
768  if (p->forever) {
769  sleep_forever(th, 1, 0);
770  }
771  else {
772  now = timeofday();
773  if (now > limit) {
774  thread_debug("thread_join: timeout (thid: %p)\n",
775  (void *)target_th->thread_id);
776  return Qfalse;
777  }
778  sleep_wait_for_interrupt(th, limit - now, 0);
779  }
780  thread_debug("thread_join: interrupted (thid: %p)\n",
781  (void *)target_th->thread_id);
782  }
783  return Qtrue;
784 }
785 
786 static VALUE
787 thread_join(rb_thread_t *target_th, double delay)
788 {
789  rb_thread_t *th = GET_THREAD();
790  struct join_arg arg;
791 
792  if (th == target_th) {
793  rb_raise(rb_eThreadError, "Target thread must not be current thread");
794  }
795  if (GET_VM()->main_thread == target_th) {
796  rb_raise(rb_eThreadError, "Target thread must not be main thread");
797  }
798 
799  arg.target = target_th;
800  arg.waiting = th;
801  arg.limit = timeofday() + delay;
802  arg.forever = delay == DELAY_INFTY;
803 
804  thread_debug("thread_join (thid: %p)\n", (void *)target_th->thread_id);
805 
806  if (target_th->status != THREAD_KILLED) {
807  rb_thread_list_t list;
808  list.next = target_th->join_list;
809  list.th = th;
810  target_th->join_list = &list;
811  if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
812  remove_from_join_list, (VALUE)&arg)) {
813  return Qnil;
814  }
815  }
816 
817  thread_debug("thread_join: success (thid: %p)\n",
818  (void *)target_th->thread_id);
819 
820  if (target_th->errinfo != Qnil) {
821  VALUE err = target_th->errinfo;
822 
823  if (FIXNUM_P(err)) {
824  /* */
825  }
826  else if (RB_TYPE_P(target_th->errinfo, T_NODE)) {
827  rb_exc_raise(rb_vm_make_jump_tag_but_local_jump(
828  GET_THROWOBJ_STATE(err), GET_THROWOBJ_VAL(err)));
829  }
830  else {
831  /* normal exception */
832  rb_exc_raise(err);
833  }
834  }
835  return target_th->self;
836 }
837 
838 /*
839  * call-seq:
840  * thr.join -> thr
841  * thr.join(limit) -> thr
842  *
843  * The calling thread will suspend execution and run <i>thr</i>. Does not
844  * return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
845  * the time limit expires, <code>nil</code> will be returned, otherwise
846  * <i>thr</i> is returned.
847  *
848  * Any threads not joined will be killed when the main program exits. If
849  * <i>thr</i> had previously raised an exception and the
850  * <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
851  * (so the exception has not yet been processed) it will be processed at this
852  * time.
853  *
854  * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
855  * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
856  * x.join # Let x thread finish, a will be killed on exit.
857  *
858  * <em>produces:</em>
859  *
860  * axyz
861  *
862  * The following example illustrates the <i>limit</i> parameter.
863  *
864  * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
865  * puts "Waiting" until y.join(0.15)
866  *
867  * <em>produces:</em>
868  *
869  * tick...
870  * Waiting
871  * tick...
872  * Waitingtick...
873  *
874  *
875  * tick...
876  */
877 
878 static VALUE
879 thread_join_m(int argc, VALUE *argv, VALUE self)
880 {
881  rb_thread_t *target_th;
882  double delay = DELAY_INFTY;
883  VALUE limit;
884 
885  GetThreadPtr(self, target_th);
886 
887  rb_scan_args(argc, argv, "01", &limit);
888  if (!NIL_P(limit)) {
889  delay = rb_num2dbl(limit);
890  }
891 
892  return thread_join(target_th, delay);
893 }
894 
895 /*
896  * call-seq:
897  * thr.value -> obj
898  *
899  * Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
900  * its value.
901  *
902  * a = Thread.new { 2 + 2 }
903  * a.value #=> 4
904  */
905 
906 static VALUE
907 thread_value(VALUE self)
908 {
909  rb_thread_t *th;
910  GetThreadPtr(self, th);
911  thread_join(th, DELAY_INFTY);
912  return th->value;
913 }
914 
915 /*
916  * Thread Scheduling
917  */
918 
919 static struct timeval
920 double2timeval(double d)
921 {
922  struct timeval time;
923 
924  if (isinf(d)) {
925  time.tv_sec = TIMET_MAX;
926  time.tv_usec = 0;
927  return time;
928  }
929 
930  time.tv_sec = (int)d;
931  time.tv_usec = (int)((d - (int)d) * 1e6);
932  if (time.tv_usec < 0) {
933  time.tv_usec += (int)1e6;
934  time.tv_sec -= 1;
935  }
936  return time;
937 }
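/*
 * Worked examples of the conversion above (tv_usec is normalized into
 * [0, 999999]):
 *
 *   double2timeval(1.25)  -> { tv_sec = 1,  tv_usec = 250000 }
 *   double2timeval(-0.25) -> { tv_sec = 0,  tv_usec = -250000 } at first,
 *                            then the negative-usec branch yields
 *                            { tv_sec = -1, tv_usec = 750000 }
 */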
938 
939 static void
940 sleep_forever(rb_thread_t *th, int deadlockable, int spurious_check)
941 {
942  enum rb_thread_status prev_status = th->status;
943  enum rb_thread_status status = deadlockable ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
944 
945  th->status = status;
946  RUBY_VM_CHECK_INTS_BLOCKING(th);
947  while (th->status == status) {
948  if (deadlockable) {
949  th->vm->sleeper++;
950  rb_check_deadlock(th->vm);
951  }
952  native_sleep(th, 0);
953  if (deadlockable) {
954  th->vm->sleeper--;
955  }
956  RUBY_VM_CHECK_INTS_BLOCKING(th);
957  if (!spurious_check)
958  break;
959  }
960  th->status = prev_status;
961 }
962 
963 static void
964 getclockofday(struct timeval *tp)
965 {
966 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
967  struct timespec ts;
968 
969  if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
970  tp->tv_sec = ts.tv_sec;
971  tp->tv_usec = ts.tv_nsec / 1000;
972  } else
973 #endif
974  {
975  gettimeofday(tp, NULL);
976  }
977 }
978 
979 static void
980 sleep_timeval(rb_thread_t *th, struct timeval tv, int spurious_check)
981 {
982  struct timeval to, tvn;
983  enum rb_thread_status prev_status = th->status;
984 
985  getclockofday(&to);
986  if (TIMET_MAX - tv.tv_sec < to.tv_sec)
987  to.tv_sec = TIMET_MAX;
988  else
989  to.tv_sec += tv.tv_sec;
990  if ((to.tv_usec += tv.tv_usec) >= 1000000) {
991  if (to.tv_sec == TIMET_MAX)
992  to.tv_usec = 999999;
993  else {
994  to.tv_sec++;
995  to.tv_usec -= 1000000;
996  }
997  }
998 
999  th->status = THREAD_STOPPED;
1000  RUBY_VM_CHECK_INTS_BLOCKING(th);
1001  while (th->status == THREAD_STOPPED) {
1002  native_sleep(th, &tv);
1003  RUBY_VM_CHECK_INTS_BLOCKING(th);
1004  getclockofday(&tvn);
1005  if (to.tv_sec < tvn.tv_sec) break;
1006  if (to.tv_sec == tvn.tv_sec && to.tv_usec <= tvn.tv_usec) break;
1007  thread_debug("sleep_timeval: %ld.%.6ld > %ld.%.6ld\n",
1008  (long)to.tv_sec, (long)to.tv_usec,
1009  (long)tvn.tv_sec, (long)tvn.tv_usec);
1010  tv.tv_sec = to.tv_sec - tvn.tv_sec;
1011  if ((tv.tv_usec = to.tv_usec - tvn.tv_usec) < 0) {
1012  --tv.tv_sec;
1013  tv.tv_usec += 1000000;
1014  }
1015  if (!spurious_check)
1016  break;
1017  }
1018  th->status = prev_status;
1019 }
1020 
1021 void
1022 rb_thread_sleep_forever(void)
1023 {
1024  thread_debug("rb_thread_sleep_forever\n");
1025  sleep_forever(GET_THREAD(), 0, 1);
1026 }
1027 
1028 static void
1029 rb_thread_sleep_deadly(void)
1030 {
1031  thread_debug("rb_thread_sleep_deadly\n");
1032  sleep_forever(GET_THREAD(), 1, 1);
1033 }
1034 
1035 static double
1036 timeofday(void)
1037 {
1038 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1039  struct timespec tp;
1040 
1041  if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
1042  return (double)tp.tv_sec + (double)tp.tv_nsec * 1e-9;
1043  } else
1044 #endif
1045  {
1046  struct timeval tv;
1047  gettimeofday(&tv, NULL);
1048  return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
1049  }
1050 }
1051 
1052 static void
1053 sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check)
1054 {
1055  sleep_timeval(th, double2timeval(sleepsec), spurious_check);
1056 }
1057 
1058 static void
1059 sleep_for_polling(rb_thread_t *th)
1060 {
1061  struct timeval time;
1062  time.tv_sec = 0;
1063  time.tv_usec = 100 * 1000; /* 0.1 sec */
1064  sleep_timeval(th, time, 1);
1065 }
1066 
1067 void
1068 rb_thread_wait_for(struct timeval time)
1069 {
1070  rb_thread_t *th = GET_THREAD();
1071  sleep_timeval(th, time, 1);
1072 }
1073 
1074 void
1075 rb_thread_polling(void)
1076 {
1077  if (!rb_thread_alone()) {
1078  rb_thread_t *th = GET_THREAD();
1079  RUBY_VM_CHECK_INTS_BLOCKING(th);
1080  sleep_for_polling(th);
1081  }
1082 }
1083 
1084 /*
1085  * CAUTION: This function causes thread switching.
1086  * rb_thread_check_ints() checks ruby's interrupts.
1087  * Some interrupts need thread switching, invoking handlers,
1088  * and so on.
1089  */
1090 
1091 void
1092 rb_thread_check_ints(void)
1093 {
1094  RUBY_VM_CHECK_INTS_BLOCKING(GET_THREAD());
1095 }
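/*
 * Typical use from a long-running C loop that holds the GVL: call
 * rb_thread_check_ints() periodically so Thread#kill, Thread#raise and
 * signal handlers get a chance to run. A minimal sketch; `item' and
 * `process' are hypothetical:
 *
 *   for (i = 0; i < n; i++) {
 *       process(item[i]);
 *       rb_thread_check_ints();  // may switch threads or raise
 *   }
 */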
1096 
1097 /*
1098  * Hidden API for tcl/tk wrapper.
1099  * There is no guarantee that it will be maintained.
1100  */
1101 int
1102 rb_thread_check_trap_pending(void)
1103 {
1104  return rb_signal_buff_size() != 0;
1105 }
1106 
1107 /* This function can be called in blocking region. */
1108 int
1109 rb_thread_interrupted(VALUE thval)
1110 {
1111  rb_thread_t *th;
1112  GetThreadPtr(thval, th);
1113  return (int)RUBY_VM_INTERRUPTED(th);
1114 }
1115 
1116 void
1117 rb_thread_sleep(int sec)
1118 {
1119  rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
1120 }
1121 
1122 static void
1123 rb_thread_schedule_limits(unsigned long limits_us)
1124 {
1125  thread_debug("rb_thread_schedule\n");
1126  if (!rb_thread_alone()) {
1127  rb_thread_t *th = GET_THREAD();
1128 
1129  if (th->running_time_us >= limits_us) {
1130  thread_debug("rb_thread_schedule/switch start\n");
1131  RB_GC_SAVE_MACHINE_CONTEXT(th);
1132  gvl_yield(th->vm, th);
1133  rb_thread_set_current(th);
1134  thread_debug("rb_thread_schedule/switch done\n");
1135  }
1136  }
1137 }
1138 
1139 void
1140 rb_thread_schedule(void)
1141 {
1142  rb_thread_t *cur_th = GET_THREAD();
1143  rb_thread_schedule_limits(0);
1144 
1145  if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(cur_th))) {
1146  rb_threadptr_execute_interrupts(cur_th, 0);
1147  }
1148 }
1149 
1150 /* blocking region */
1151 
1152 static inline int
1153 blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
1154  rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1155 {
1156  region->prev_status = th->status;
1157  if (set_unblock_function(th, ubf, arg, &region->oldubf, fail_if_interrupted)) {
1158  th->blocking_region_buffer = region;
1159  th->status = THREAD_STOPPED;
1160  thread_debug("enter blocking region (%p)\n", (void *)th);
1161  RB_GC_SAVE_MACHINE_CONTEXT(th);
1162  gvl_release(th->vm);
1163  return TRUE;
1164  }
1165  else {
1166  return FALSE;
1167  }
1168 }
1169 
1170 static inline void
1171 blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
1172 {
1173  gvl_acquire(th->vm, th);
1174  rb_thread_set_current(th);
1175  thread_debug("leave blocking region (%p)\n", (void *)th);
1176  remove_signal_thread_list(th);
1177  th->blocking_region_buffer = 0;
1178  reset_unblock_function(th, &region->oldubf);
1179  if (th->status == THREAD_STOPPED) {
1180  th->status = region->prev_status;
1181  }
1182 }
1183 
1184 struct rb_blocking_region_buffer *
1185 rb_thread_blocking_region_begin(void)
1186 {
1187  rb_thread_t *th = GET_THREAD();
1188  struct rb_blocking_region_buffer *region = ALLOC(struct rb_blocking_region_buffer);
1189  blocking_region_begin(th, region, ubf_select, th, FALSE);
1190  return region;
1191 }
1192 
1193 void
1194 rb_thread_blocking_region_end(struct rb_blocking_region_buffer *region)
1195 {
1196  int saved_errno = errno;
1197  rb_thread_t *th = ruby_thread_from_native();
1198  blocking_region_end(th, region);
1199  xfree(region);
1200  RUBY_VM_CHECK_INTS_BLOCKING(th);
1201  errno = saved_errno;
1202 }
1203 
1204 static void *
1205 call_without_gvl(void *(*func)(void *), void *data1,
1206  rb_unblock_function_t *ubf, void *data2, int fail_if_interrupted)
1207 {
1208  void *val = 0;
1209 
1210  rb_thread_t *th = GET_THREAD();
1211  int saved_errno = 0;
1212 
1213  th->waiting_fd = -1;
1214  if (ubf == RUBY_UBF_IO || ubf == RUBY_UBF_PROCESS) {
1215  ubf = ubf_select;
1216  data2 = th;
1217  }
1218 
1219  BLOCKING_REGION({
1220  val = func(data1);
1221  saved_errno = errno;
1222  }, ubf, data2, fail_if_interrupted);
1223 
1224  if (!fail_if_interrupted) {
1225  RUBY_VM_CHECK_INTS_BLOCKING(th);
1226  }
1227 
1228  errno = saved_errno;
1229 
1230  return val;
1231 }
1232 
1233 /*
1234  * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1235  * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1236  * without interrupt processing.
1237  *
1238  * rb_thread_call_without_gvl() does:
1239  * (1) Check interrupts.
1240  * (2) release GVL.
1241  * Other Ruby threads may run in parallel.
1242  * (3) call func with data1
1243  * (4) acquire GVL.
1244  * Other Ruby threads can not run in parallel any more.
1245  * (5) Check interrupts.
1246  *
1247  * rb_thread_call_without_gvl2() does:
1248  * (1) Check interrupt and return if interrupted.
1249  * (2) release GVL.
1250  * (3) call func with data1 and a pointer to the flags.
1251  * (4) acquire GVL.
1252  *
1253  * If another thread interrupts this thread (Thread#kill, signal delivery,
1254  * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1255  * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1256  * toggling a cancellation flag, canceling the invocation of a call inside
1257  * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1258  *
1259  * There are built-in ubfs and you can specify these ubfs:
1260  *
1261  * * RUBY_UBF_IO: ubf for IO operation
1262  * * RUBY_UBF_PROCESS: ubf for process operation
1263  *
1264  * However, we can not guarantee our built-in ubfs interrupt your `func()'
1265  * correctly. Be careful to use rb_thread_call_without_gvl(). If you don't
1266  * provide proper ubf(), your program will not stop for Control+C or other
1267  * shutdown events.
1268  *
1269  * "Check interrupts" on above list means that check asynchronous
1270  * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1271  * request, and so on) and call corresponding procedures
1272  * (such as `trap' for signals, raise an exception for Thread#raise).
1273  * If `func()' has finished by the time interrupts arrive, you may want to
1274  * skip interrupt checking. For example, assume the following func() reads data from a file.
1275  *
1276  * read_func(...) {
1277  * // (a) before read
1278  * read(buffer); // (b) reading
1279  * // (c) after read
1280  * }
1281  *
1282  * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1283  * `read_func()' and interrupts are checked. However, if an interrupt occurs
1284  * at (c), after the *read* operation is completed, checking interrupts is
1285  * harmful because it causes an irrevocable side effect: the read data will
1286  * vanish. To avoid such problems, `read_func()' should be used with
1287  * `rb_thread_call_without_gvl2()'.
1288  *
1289  * If `rb_thread_call_without_gvl2()' detects an interrupt, it returns
1290  * immediately. This function does not report when the execution was interrupted.
1291  * For example, there are 4 possible timings: (a), (b), (c) and before calling
1292  * read_func(). You need to record the progress of read_func() and check
1293  * it after `rb_thread_call_without_gvl2()'. You may need to call
1294  * `rb_thread_check_ints()' correctly, or your program can not process
1295  * asynchronous events such as `trap' handlers.
1296  *
1297  * NOTE: You can not execute most of the Ruby C API and touch Ruby
1298  * objects in `func()' and `ubf()', including raising an
1299  * exception, because the current thread doesn't hold the GVL
1300  * (this causes synchronization problems). If you need to
1301  * call ruby functions, either use rb_thread_call_with_gvl()
1302  * or read the source code of the C APIs and confirm their safety
1303  * yourself.
1304  *
1305  * NOTE: In short, this API is difficult to use safely. I recommend you
1306  * use other ways if you have them. We lack experience using this API.
1307  * Please report any problems related to it.
1308  *
1309  * NOTE: Releasing GVL and re-acquiring GVL may be expensive operations
1310  * for a short running `func()'. Be sure to benchmark and use this
1311  * mechanism when `func()' consumes enough time.
1312  *
1313  * Safe C API:
1314  * * rb_thread_interrupted() - check interrupt flag
1315  * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1316  * they will work without GVL, and may acquire GVL when GC is needed.
1317  */
1318 void *
1319 rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
1320  rb_unblock_function_t *ubf, void *data2)
1321 {
1322  return call_without_gvl(func, data1, ubf, data2, TRUE);
1323 }
1324 
1325 void *
1326 rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
1327  rb_unblock_function_t *ubf, void *data2)
1328 {
1329  return call_without_gvl(func, data1, ubf, data2, FALSE);
1330 }
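/*
 * Usage sketch for C extensions: run a blocking syscall with the GVL
 * released. This is an illustrative example, not code from this file;
 * struct my_read_args and my_read_nogvl() are hypothetical names.
 *
 *   #include <ruby/thread.h>
 *   #include <unistd.h>
 *
 *   struct my_read_args { int fd; char *buf; size_t len; ssize_t ret; };
 *
 *   static void *
 *   my_read_nogvl(void *p)
 *   {
 *       struct my_read_args *a = p;
 *       a->ret = read(a->fd, a->buf, a->len);  // may block; GVL is released
 *       return NULL;
 *   }
 *
 *   // with the GVL held:
 *   //   struct my_read_args a = { fd, buf, sizeof(buf), 0 };
 *   //   rb_thread_call_without_gvl(my_read_nogvl, &a, RUBY_UBF_IO, 0);
 *   //   // a.ret now holds read(2)'s result; interrupts were checked on return
 */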
1331 
1332 VALUE
1333 rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
1334 {
1335  VALUE val = Qundef; /* shouldn't be used */
1336  rb_thread_t *th = GET_THREAD();
1337  int saved_errno = 0;
1338  int state;
1339 
1340  th->waiting_fd = fd;
1341 
1342  TH_PUSH_TAG(th);
1343  if ((state = EXEC_TAG()) == 0) {
1344  BLOCKING_REGION({
1345  val = func(data1);
1346  saved_errno = errno;
1347  }, ubf_select, th, FALSE);
1348  }
1349  TH_POP_TAG();
1350 
1351  /* always clear waiting_fd */
1352  th->waiting_fd = -1;
1353 
1354  if (state) {
1355  JUMP_TAG(state);
1356  }
1357  /* TODO: check func() */
1358  RUBY_VM_CHECK_INTS_BLOCKING(th);
1359 
1360  errno = saved_errno;
1361 
1362  return val;
1363 }
1364 
1365 VALUE
1366 rb_thread_blocking_region(
1367  rb_blocking_function_t *func, void *data1,
1368  rb_unblock_function_t *ubf, void *data2)
1369 {
1370  void *(*f)(void*) = (void *(*)(void*))func;
1371  return (VALUE)rb_thread_call_without_gvl(f, data1, ubf, data2);
1372 }
1373 
1374 /*
1375  * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
1376  *
1377  * After releasing GVL using rb_thread_blocking_region() or
1378  * rb_thread_call_without_gvl() you can not access Ruby values or invoke
1379  * methods. If you need to access Ruby you must use this function
1380  * rb_thread_call_with_gvl().
1381  *
1382  * This function rb_thread_call_with_gvl() does:
1383  * (1) acquire GVL.
1384  * (2) call passed function `func'.
1385  * (3) release GVL.
1386  * (4) return a value which is returned at (2).
1387  *
1388  * NOTE: You should not return a Ruby object at (2) because such an
1389  * object will not be marked.
1390  *
1391  * NOTE: If an exception is raised in `func', this function DOES NOT
1392  * protect (catch) the exception. If you have any resources
1393  * which should be freed before throwing an exception, use
1394  * rb_protect() in `func' and return a value which indicates that
1395  * an exception was raised.
1396  *
1397  * NOTE: This function should not be called by a thread which was not
1398  * created as Ruby thread (created by Thread.new or so). In other
1399  * words, this function *DOES NOT* associate or convert a NON-Ruby
1400  * thread to a Ruby thread.
1401  */
1402 void *
1403 rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
1404 {
1405  rb_thread_t *th = ruby_thread_from_native();
1406  struct rb_blocking_region_buffer *brb;
1407  struct rb_unblock_callback prev_unblock;
1408  void *r;
1409 
1410  if (th == 0) {
1411  /* An error occurred, but we can't use rb_bug()
1412  * because this thread is not Ruby's thread.
1413  * What should we do?
1414  */
1415 
1416  fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1417  exit(EXIT_FAILURE);
1418  }
1419 
1420  brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
1421  prev_unblock = th->unblock;
1422 
1423  if (brb == 0) {
1424  rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
1425  }
1426 
1427  blocking_region_end(th, brb);
1428  /* enter to Ruby world: You can access Ruby values, methods and so on. */
1429  r = (*func)(data1);
1430  /* leave from Ruby world: You can not access Ruby values, etc. */
1431  blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
1432  return r;
1433 }
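/*
 * Usage sketch: calling back into Ruby from code that released the GVL.
 * `log_from_c' and `worker_nogvl' are hypothetical; note that the message
 * crosses the boundary as a C string, and the Ruby object built inside
 * log_from_c() is not returned outward (see the NOTE above about marking).
 *
 *   static void *
 *   log_from_c(void *msg)
 *   {
 *       rb_funcall(rb_stderr, rb_intern("puts"), 1,
 *                  rb_str_new_cstr((const char *)msg));
 *       return NULL;
 *   }
 *
 *   static void *
 *   worker_nogvl(void *arg)
 *   {
 *       // ... long computation without the GVL ...
 *       rb_thread_call_with_gvl(log_from_c, (void *)"halfway done");
 *       // ... continue without the GVL ...
 *       return NULL;
 *   }
 */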
1434 
1435 /*
1436  * ruby_thread_has_gvl_p - check if current native thread has GVL.
1437  *
1438  ***
1439  *** This API is EXPERIMENTAL!
1440  *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1441  ***
1442  */
1443 
1444 int
1445 ruby_thread_has_gvl_p(void)
1446 {
1447  rb_thread_t *th = ruby_thread_from_native();
1448 
1449  if (th && th->blocking_region_buffer == 0) {
1450  return 1;
1451  }
1452  else {
1453  return 0;
1454  }
1455 }
1456 
1457 /*
1458  * call-seq:
1459  * Thread.pass -> nil
1460  *
1461  * Give the thread scheduler a hint to pass execution to another thread.
1462  * A running thread may or may not switch; it depends on the OS and processor.
1463  */
1464 
1465 static VALUE
1466 thread_s_pass(VALUE klass)
1467 {
1468  rb_thread_schedule();
1469  return Qnil;
1470 }
1471 
1472 /*****************************************************/
1473 
1474 /*
1475  * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
1476  *
1477  * Async events such as an exception thrown by Thread#raise,
1478  * Thread#kill and thread termination (after main thread termination)
1479  * will be queued to th->pending_interrupt_queue.
1480  * - clear: clear the queue.
1481  * - enque: enqueue an err object into the queue.
1482  * - deque: dequeue an err object from the queue.
1483  * - active_p: return 1 if the queue should be checked.
1484  *
1485  * All rb_threadptr_pending_interrupt_* functions are called by
1486  * a GVL acquired thread, of course.
1487  * Note that all "rb_" prefix APIs need GVL to call.
1488  */
1489 
1490 void
1491 rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
1492 {
1493  rb_ary_clear(th->pending_interrupt_queue);
1494 }
1495 
1496 void
1497 rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
1498 {
1499  rb_ary_push(th->pending_interrupt_queue, v);
1500  th->pending_interrupt_queue_checked = 0;
1501 }
1502 
1503 enum handle_interrupt_timing {
1504  INTERRUPT_NONE,
1505  INTERRUPT_IMMEDIATE,
1506  INTERRUPT_ON_BLOCKING,
1507  INTERRUPT_NEVER
1508 };
1509 
1510 static enum handle_interrupt_timing
1511 rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
1512 {
1513  VALUE mask;
1514  long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
1515  VALUE *mask_stack = RARRAY_PTR(th->pending_interrupt_mask_stack);
1516  VALUE ancestors = rb_mod_ancestors(err); /* TODO: GC guard */
1517  long ancestors_len = RARRAY_LEN(ancestors);
1518  VALUE *ancestors_ptr = RARRAY_PTR(ancestors);
1519  int i, j;
1520 
1521  for (i=0; i<mask_stack_len; i++) {
1522  mask = mask_stack[mask_stack_len-(i+1)];
1523 
1524  for (j=0; j<ancestors_len; j++) {
1525  VALUE klass = ancestors_ptr[j];
1526  VALUE sym;
1527 
1528  /* TODO: remove rb_intern() */
1529  if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
1530  if (sym == sym_immediate) {
1531  return INTERRUPT_IMMEDIATE;
1532  }
1533  else if (sym == sym_on_blocking) {
1534  return INTERRUPT_ON_BLOCKING;
1535  }
1536  else if (sym == sym_never) {
1537  return INTERRUPT_NEVER;
1538  }
1539  else {
1540  rb_raise(rb_eThreadError, "unknown mask signature");
1541  }
1542  }
1543  }
1544  /* try to next mask */
1545  }
1546  return INTERRUPT_NONE;
1547 }
1548 
1549 static int
1550 rb_threadptr_pending_interrupt_empty_p(rb_thread_t *th)
1551 {
1552  return RARRAY_LEN(th->pending_interrupt_queue) == 0;
1553 }
1554 
1555 static int
1556 rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
1557 {
1558  int i;
1559  for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
1560  VALUE e = RARRAY_PTR(th->pending_interrupt_queue)[i];
1561  if (rb_class_inherited_p(e, err)) {
1562  return TRUE;
1563  }
1564  }
1565  return FALSE;
1566 }
1567 
1568 static VALUE
1569 rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
1570 {
1571 #if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
1572  int i;
1573 
1574  for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
1575  VALUE err = RARRAY_PTR(th->pending_interrupt_queue)[i];
1576 
1577  enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
1578 
1579  switch (mask_timing) {
1580  case INTERRUPT_ON_BLOCKING:
1581  if (timing != INTERRUPT_ON_BLOCKING) {
1582  break;
1583  }
1584  /* fall through */
1585  case INTERRUPT_NONE: /* default: IMMEDIATE */
1586  case INTERRUPT_IMMEDIATE:
1587  rb_ary_delete_at(th->pending_interrupt_queue, i);
1588  return err;
1589  case INTERRUPT_NEVER:
1590  break;
1591  }
1592  }
1593 
1594  th->pending_interrupt_queue_checked = 1;
1595  return Qundef;
1596 #else
1597  VALUE err = rb_ary_shift(th->pending_interrupt_queue);
1598  if (rb_threadptr_pending_interrupt_empty_p(th)) {
1599  th->pending_interrupt_queue_checked = 1;
1600  }
1601  return err;
1602 #endif
1603 }
1604 
1605 int
1606 rb_threadptr_pending_interrupt_active_p(rb_thread_t *th)
1607 {
1608  /*
1609  * For optimization, we don't check the async errinfo queue
1610  * unless it or the thread's interrupt mask has changed
1611  * since the last check.
1612  */
1613  if (th->pending_interrupt_queue_checked) {
1614  return 0;
1615  }
1616 
1617  if (rb_threadptr_pending_interrupt_empty_p(th)) {
1618  return 0;
1619  }
1620 
1621  return 1;
1622 }
1623 
1624 static int
1625 handle_interrupt_arg_check_i(VALUE key, VALUE val)
1626 {
1627  if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
1628  rb_raise(rb_eArgError, "unknown mask signature");
1629  }
1630 
1631  return ST_CONTINUE;
1632 }
1633 
1634 /*
1635  * call-seq:
1636  * Thread.handle_interrupt(hash) { ... } -> result of the block
1637  *
1638  * Changes asynchronous interrupt timing.
1639  *
1640  * _interrupt_ means an asynchronous event and its corresponding procedure,
1641  * invoked by Thread#raise, Thread#kill, signal trap (not supported yet)
1642  * and main thread termination (if the main thread terminates, then all
1643  * other threads will be killed).
1644  *
1645  * The given +hash+ has pairs like <code>ExceptionClass =>
1646  * :TimingSymbol</code>. Where the ExceptionClass is the interrupt handled by
1647  * the given block. The TimingSymbol can be one of the following symbols:
1648  *
1649  * [+:immediate+] Invoke interrupts immediately.
1650  * [+:on_blocking+] Invoke interrupts during a _BlockingOperation_.
1651  * [+:never+] Never invoke any interrupts.
1652  *
1653  * _BlockingOperation_ means that the operation will block the calling thread,
1654  * such as read and write. In the CRuby implementation, a _BlockingOperation_
1655  * is any operation executed without the GVL.
1656  *
1657  * Masked asynchronous interrupts are delayed until they are enabled.
1658  * This method is similar to sigprocmask(3).
1659  *
1660  * === NOTE
1661  *
1662  * Asynchronous interrupts are difficult to use.
1663  *
1664  * If you need to communicate between threads, please consider using another mechanism such as Queue.
1665  *
1666  * Or use them only with a deep understanding of this method.
1667  *
1668  * === Usage
1669  *
1670  * In this example, we can guard against Thread#raise exceptions.
1671  *
1672  * Using the +:never+ TimingSymbol the RuntimeError exception will always be
1673  * ignored in the first block of the main thread. In the second
1674  * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
1675  *
1676  * th = Thread.new do
1677  * Thread.handle_interrupt(RuntimeError => :never) {
1678  * begin
1679  * # You can write resource allocation code safely.
1680  * Thread.handle_interrupt(RuntimeError => :immediate) {
1681  * # ...
1682  * }
1683  * ensure
1684  * # You can write resource deallocation code safely.
1685  * end
1686  * }
1687  * end
1688  * Thread.pass
1689  * # ...
1690  * th.raise "stop"
1691  *
1692  * While we are ignoring the RuntimeError exception, it's safe to write our
1693  * resource allocation code. Then, the ensure block is where we can safely
1694  * deallocate our resources.
1695  *
1696  * ==== Guarding from TimeoutError
1697  *
1698  * In the next example, we will guard against the TimeoutError exception. This
1699  * will help prevent leaking resources when TimeoutError exceptions occur
1700  * during a normal ensure clause. For this example we use the help of the
1701  * standard library Timeout, from lib/timeout.rb
1702  *
1703  * require 'timeout'
1704  * Thread.handle_interrupt(TimeoutError => :never) {
1705  * timeout(10){
1706  * # TimeoutError doesn't occur here
1707  * Thread.handle_interrupt(TimeoutError => :on_blocking) {
1708  * # possible to be killed by TimeoutError
1709  * # while blocking operation
1710  * }
1711  * # TimeoutError doesn't occur here
1712  * }
1713  * }
1714  *
1715  * In the first part of the +timeout+ block, we can rely on TimeoutError being
1716  * ignored. Then in the <code>TimeoutError => :on_blocking</code> block, any
1717  * operation that will block the calling thread is susceptible to a
1718  * TimeoutError exception being raised.
1719  *
1720  * ==== Stack control settings
1721  *
1722  * It's possible to stack multiple levels of ::handle_interrupt blocks in order
1723  * to control more than one ExceptionClass and TimingSymbol at a time.
1724  *
1725  * Thread.handle_interrupt(FooError => :never) {
1726  * Thread.handle_interrupt(BarError => :never) {
1727  * # FooError and BarError are prohibited.
1728  * }
1729  * }
1730  *
1731  * ==== Inheritance with ExceptionClass
1732  *
1733  * All exceptions inherited from the ExceptionClass parameter will be considered.
1734  *
1735  * Thread.handle_interrupt(Exception => :never) {
1736  * # all exceptions inherited from Exception are prohibited.
1737  * }
1738  *
1739  */
1740 static VALUE
1741 rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
1742 {
1743  VALUE mask;
1744  rb_thread_t *th = GET_THREAD();
1745  VALUE r = Qnil;
1746  int state;
1747 
1748  if (!rb_block_given_p()) {
1749  rb_raise(rb_eArgError, "block is needed.");
1750  }
1751 
1752  mask = rb_convert_type(mask_arg, T_HASH, "Hash", "to_hash");
1753  rb_hash_foreach(mask, handle_interrupt_arg_check_i, 0);
1754  rb_ary_push(th->pending_interrupt_mask_stack, mask);
1755  if (!rb_threadptr_pending_interrupt_empty_p(th)) {
1756  th->pending_interrupt_queue_checked = 0;
1757  RUBY_VM_SET_INTERRUPT(th);
1758  }
1759 
1760  TH_PUSH_TAG(th);
1761  if ((state = EXEC_TAG()) == 0) {
1762  r = rb_yield(Qnil);
1763  }
1764  TH_POP_TAG();
1765 
1766  rb_ary_pop(th->pending_interrupt_mask_stack);
1767  if (!rb_threadptr_pending_interrupt_empty_p(th)) {
1768  th->pending_interrupt_queue_checked = 0;
1769  RUBY_VM_SET_INTERRUPT(th);
1770  }
1771 
1772  RUBY_VM_CHECK_INTS(th);
1773 
1774  if (state) {
1775  JUMP_TAG(state);
1776  }
1777 
1778  return r;
1779 }
1780 
1781 /*
1782  * call-seq:
1783  * target_thread.pending_interrupt?(error = nil) -> true/false
1784  *
1785  * Returns whether or not the asynchronous queue is empty for the target thread.
1786  *
1787  * If +error+ is given, then check only for +error+ type deferred events.
1788  *
1789  * See ::pending_interrupt? for more information.
1790  */
1791 static VALUE
1792 rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
1793 {
1794  rb_thread_t *target_th;
1795 
1796  GetThreadPtr(target_thread, target_th);
1797 
1798  if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
1799  return Qfalse;
1800  }
1801  else {
1802  if (argc == 1) {
1803  VALUE err;
1804  rb_scan_args(argc, argv, "01", &err);
1805  if (!rb_obj_is_kind_of(err, rb_cModule)) {
1806  rb_raise(rb_eTypeError, "class or module required for rescue clause");
1807  }
1808  if (rb_threadptr_pending_interrupt_include_p(target_th, err)) {
1809  return Qtrue;
1810  }
1811  else {
1812  return Qfalse;
1813  }
1814  }
1815  return Qtrue;
1816  }
1817 }
1818 
1819 /*
1820  * call-seq:
1821  * Thread.pending_interrupt?(error = nil) -> true/false
1822  *
1823  * Returns whether or not the asynchronous queue is empty.
1824  *
1825  * Since Thread::handle_interrupt can be used to defer asynchronous events,
1826  * this method can be used to determine if there are any deferred events.
1827  *
1828  * If you find this method returns true, then you may finish +:never+ blocks.
1829  *
1830  * For example, the following method processes deferred asynchronous events
1831  * immediately.
1832  *
1833  * def Thread.kick_interrupt_immediately
1834  * Thread.handle_interrupt(Object => :immediate) {
1835  * Thread.pass
1836  * }
1837  * end
1838  *
1839  * If +error+ is given, then check only for +error+ type deferred events.
1840  *
1841  * === Usage
1842  *
1843  * th = Thread.new{
1844  * Thread.handle_interrupt(RuntimeError => :on_blocking){
1845  * while true
1846  * ...
1847  * # reach safe point to invoke interrupt
1848  * if Thread.pending_interrupt?
1849  * Thread.handle_interrupt(Object => :immediate){}
1850  * end
1851  * ...
1852  * end
1853  * }
1854  * }
1855  * ...
1856  * th.raise # stop thread
1857  *
1858  * This example can also be written as the following, which you should use to
1859  * avoid asynchronous interrupts.
1860  *
1861  * flag = true
1862  * th = Thread.new{
1863  * Thread.handle_interrupt(RuntimeError => :on_blocking){
1864  * while true
1865  * ...
1866  * # reach safe point to invoke interrupt
1867  * break if flag == false
1868  * ...
1869  * end
1870  * }
1871  * }
1872  * ...
1873  * flag = false # stop thread
1874  */
1875 
1876 static VALUE
1877 rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
1878 {
1879  return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
1880 }
1881 
1882 static void
1883 rb_threadptr_to_kill(rb_thread_t *th)
1884 {
1885  rb_threadptr_pending_interrupt_clear(th);
1886  th->status = THREAD_RUNNABLE;
1887  th->to_kill = 1;
1888  th->errinfo = INT2FIX(TAG_FATAL);
1889  TH_JUMP_TAG(th, TAG_FATAL);
1890 }
1891 
1892 void
1893 rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
1894 {
1895  if (th->raised_flag) return;
1896 
1897  while (1) {
1898  rb_atomic_t interrupt;
1899  rb_atomic_t old;
1900  int sig;
1901  int timer_interrupt;
1902  int pending_interrupt;
1903  int finalizer_interrupt;
1904  int trap_interrupt;
1905 
1906  do {
1907  interrupt = th->interrupt_flag;
1908  old = ATOMIC_CAS(th->interrupt_flag, interrupt, interrupt & th->interrupt_mask);
1909  } while (old != interrupt);
1910 
1911  interrupt &= (rb_atomic_t)~th->interrupt_mask;
1912  if (!interrupt)
1913  return;
1914 
1915  timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
1916  pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
1917  finalizer_interrupt = interrupt & FINALIZER_INTERRUPT_MASK;
1918  trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
1919 
1920  /* signal handling */
1921  if (trap_interrupt && (th == th->vm->main_thread)) {
1922  enum rb_thread_status prev_status = th->status;
1923  th->status = THREAD_RUNNABLE;
1924  while ((sig = rb_get_next_signal()) != 0) {
1925  rb_signal_exec(th, sig);
1926  }
1927  th->status = prev_status;
1928  }
1929 
1930  /* exception from another thread */
1931  if (pending_interrupt && rb_threadptr_pending_interrupt_active_p(th)) {
1932  VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
1933  thread_debug("rb_thread_execute_interrupts: %"PRIdVALUE"\n", err);
1934 
1935  if (err == Qundef) {
1936  /* no error */
1937  }
1938  else if (err == eKillSignal /* Thread#kill received */ ||
1939  err == eTerminateSignal /* Terminate thread */ ||
1940  err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
1941  rb_threadptr_to_kill(th);
1942  }
1943  else {
1944  /* set runnable if th was slept. */
1945  if (th->status == THREAD_STOPPED ||
1946  th->status == THREAD_STOPPED_FOREVER)
1947  th->status = THREAD_RUNNABLE;
1948  rb_exc_raise(err);
1949  }
1950  }
1951 
1952  if (finalizer_interrupt) {
1953  rb_gc_finalize_deferred();
1954  }
1955 
1956  if (timer_interrupt) {
1957  unsigned long limits_us = TIME_QUANTUM_USEC;
1958 
1959  if (th->priority > 0)
1960  limits_us <<= th->priority;
1961  else
1962  limits_us >>= -th->priority;
1963 
1964  if (th->status == THREAD_RUNNABLE)
1965  th->running_time_us += TIME_QUANTUM_USEC;
1966 
1967  EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0, Qundef);
1968 
1969  rb_thread_schedule_limits(limits_us);
1970  }
1971  }
1972 }
1973 
1974 void
1975 rb_thread_execute_interrupts(VALUE thval)
1976 {
1977  rb_thread_t *th;
1978  GetThreadPtr(thval, th);
1979  rb_threadptr_execute_interrupts(th, 1);
1980 }
1981 
1982 static void
1983 rb_threadptr_ready(rb_thread_t *th)
1984 {
1985  rb_threadptr_interrupt(th);
1986 }
1987 
1988 static VALUE
1989 rb_threadptr_raise(rb_thread_t *th, int argc, VALUE *argv)
1990 {
1991  VALUE exc;
1992 
1993  if (rb_threadptr_dead(th)) {
1994  return Qnil;
1995  }
1996 
1997  if (argc == 0) {
1998  exc = rb_exc_new(rb_eRuntimeError, 0, 0);
1999  }
2000  else {
2001  exc = rb_make_exception(argc, argv);
2002  }
2003  rb_threadptr_pending_interrupt_enque(th, exc);
2004  rb_threadptr_interrupt(th);
2005  return Qnil;
2006 }
2007 
2008 void
2009 rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2010 {
2011  VALUE argv[2];
2012 
2013  argv[0] = rb_eSignal;
2014  argv[1] = INT2FIX(sig);
2015  rb_threadptr_raise(th->vm->main_thread, 2, argv);
2016 }
2017 
2018 void
2019 rb_threadptr_signal_exit(rb_thread_t *th)
2020 {
2021  VALUE argv[2];
2022 
2023  argv[0] = rb_eSystemExit;
2024  argv[1] = rb_str_new2("exit");
2025  rb_threadptr_raise(th->vm->main_thread, 2, argv);
2026 }
2027 
2028 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
2029 #define USE_SIGALTSTACK
2030 #endif
2031 
2032 void
2033 ruby_thread_stack_overflow(rb_thread_t *th)
2034 {
2035  th->raised_flag = 0;
2036 #ifdef USE_SIGALTSTACK
2037  rb_exc_raise(sysstack_error);
2038 #else
2039  th->errinfo = sysstack_error;
2040  TH_JUMP_TAG(th, TAG_RAISE);
2041 #endif
2042 }
2043 
2044 int
2045 rb_threadptr_set_raised(rb_thread_t *th)
2046 {
2047  if (th->raised_flag & RAISED_EXCEPTION) {
2048  return 1;
2049  }
2050  th->raised_flag |= RAISED_EXCEPTION;
2051  return 0;
2052 }
2053 
2054 int
2055 rb_threadptr_reset_raised(rb_thread_t *th)
2056 {
2057  if (!(th->raised_flag & RAISED_EXCEPTION)) {
2058  return 0;
2059  }
2060  th->raised_flag &= ~RAISED_EXCEPTION;
2061  return 1;
2062 }
2063 
2064 static int
2065 thread_fd_close_i(st_data_t key, st_data_t val, st_data_t data)
2066 {
2067  int fd = (int)data;
2068  rb_thread_t *th;
2069  GetThreadPtr((VALUE)key, th);
2070 
2071  if (th->waiting_fd == fd) {
2072  VALUE err = th->vm->special_exceptions[ruby_error_closed_stream];
2073  rb_threadptr_pending_interrupt_enque(th, err);
2074  rb_threadptr_interrupt(th);
2075  }
2076  return ST_CONTINUE;
2077 }
2078 
2079 void
2080 rb_thread_fd_close(int fd)
2081 {
2082  st_foreach(GET_THREAD()->vm->living_threads, thread_fd_close_i, (st_index_t)fd);
2083 }
2084 
2085 /*
2086  * call-seq:
2087  * thr.raise
2088  * thr.raise(string)
2089  * thr.raise(exception [, string [, array]])
2090  *
2091  * Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
2092  * caller does not have to be <i>thr</i>.
2093  *
2094  * Thread.abort_on_exception = true
2095  * a = Thread.new { sleep(200) }
2096  * a.raise("Gotcha")
2097  *
2098  * <em>produces:</em>
2099  *
2100  * prog.rb:3: Gotcha (RuntimeError)
2101  * from prog.rb:2:in `initialize'
2102  * from prog.rb:2:in `new'
2103  * from prog.rb:2
2104  */
2105 
2106 static VALUE
2107 thread_raise_m(int argc, VALUE *argv, VALUE self)
2108 {
2109  rb_thread_t *target_th;
2110  rb_thread_t *th = GET_THREAD();
2111  GetThreadPtr(self, target_th);
2112  rb_threadptr_raise(target_th, argc, argv);
2113 
2114  /* To perform Thread.current.raise as Kernel.raise */
2115  if (th == target_th) {
2116  RUBY_VM_CHECK_INTS(th);
2117  }
2118  return Qnil;
2119 }
2120 
2121 
2122 /*
2123  * call-seq:
2124  * thr.exit -> thr or nil
2125  * thr.kill -> thr or nil
2126  * thr.terminate -> thr or nil
2127  *
2128  * Terminates <i>thr</i> and schedules another thread to be run. If this thread
2129  * is already marked to be killed, <code>exit</code> returns the
2130  * <code>Thread</code>. If this is the main thread, or the last thread, exits
2131  * the process.
2132  */
2133 
2134 VALUE
2135 rb_thread_kill(VALUE thread)
2136 {
2137  rb_thread_t *th;
2138 
2139  GetThreadPtr(thread, th);
2140 
2141  if (th != GET_THREAD() && th->safe_level < 4) {
2142  rb_secure(4);
2143  }
2144  if (th->to_kill || th->status == THREAD_KILLED) {
2145  return thread;
2146  }
2147  if (th == th->vm->main_thread) {
2148  rb_exit(EXIT_SUCCESS);
2149  }
2150 
2151  thread_debug("rb_thread_kill: %p (%p)\n", (void *)th, (void *)th->thread_id);
2152 
2153  if (th == GET_THREAD()) {
2154  /* kill myself immediately */
2155  rb_threadptr_to_kill(th);
2156  }
2157  else {
2158  rb_threadptr_pending_interrupt_enque(th, eKillSignal);
2159  rb_threadptr_interrupt(th);
2160  }
2161  return thread;
2162 }
2163 
2164 
2165 /*
2166  * call-seq:
2167  * Thread.kill(thread) -> thread
2168  *
2169  * Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
2170  *
2171  * count = 0
2172  * a = Thread.new { loop { count += 1 } }
2173  * sleep(0.1) #=> 0
2174  * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2175  * count #=> 93947
2176  * a.alive? #=> false
2177  */
2178 
2179 static VALUE
2180 rb_thread_s_kill(VALUE obj, VALUE th)
2181 {
2182  return rb_thread_kill(th);
2183 }
2184 
2185 
2186 /*
2187  * call-seq:
2188  * Thread.exit -> thread
2189  *
2190  * Terminates the currently running thread and schedules another thread to be
2191  * run. If this thread is already marked to be killed, <code>exit</code>
2192  * returns the <code>Thread</code>. If this is the main thread, or the last
2193  * thread, exits the process.
2194  */
2195 
2196 static VALUE
2197 rb_thread_exit(void)
2198 {
2199  rb_thread_t *th = GET_THREAD();
2200  return rb_thread_kill(th->self);
2201 }
2202 
2203 
2204 /*
2205  * call-seq:
2206  * thr.wakeup -> thr
2207  *
2208  * Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
2209  * I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
2210  *
2211  * c = Thread.new { Thread.stop; puts "hey!" }
2212  * sleep 0.1 while c.status!='sleep'
2213  * c.wakeup
2214  * c.join
2215  *
2216  * <em>produces:</em>
2217  *
2218  * hey!
2219  */
2220 
2221 VALUE
2222 rb_thread_wakeup(VALUE thread)
2223 {
2224  if (!RTEST(rb_thread_wakeup_alive(thread))) {
2225  rb_raise(rb_eThreadError, "killed thread");
2226  }
2227  return thread;
2228 }
2229 
2230 VALUE
2231 rb_thread_wakeup_alive(VALUE thread)
2232 {
2233  rb_thread_t *th;
2234  GetThreadPtr(thread, th);
2235 
2236  if (th->status == THREAD_KILLED) {
2237  return Qnil;
2238  }
2239  rb_threadptr_ready(th);
2240  if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
2241  th->status = THREAD_RUNNABLE;
2242  return thread;
2243 }
2244 
2245 
2246 /*
2247  * call-seq:
2248  * thr.run -> thr
2249  *
2250  * Wakes up <i>thr</i>, making it eligible for scheduling.
2251  *
2252  * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2253  * sleep 0.1 while a.status!='sleep'
2254  * puts "Got here"
2255  * a.run
2256  * a.join
2257  *
2258  * <em>produces:</em>
2259  *
2260  * a
2261  * Got here
2262  * c
2263  */
2264 
2265 VALUE
2267 {
2268  rb_thread_wakeup(thread);
2270  return thread;
2271 }
2272 
2273 
2274 /*
2275  * call-seq:
2276  * Thread.stop -> nil
2277  *
2278  * Stops execution of the current thread, putting it into a ``sleep'' state,
2279  * and schedules execution of another thread.
2280  *
2281  * a = Thread.new { print "a"; Thread.stop; print "c" }
2282  * sleep 0.1 while a.status!='sleep'
2283  * print "b"
2284  * a.run
2285  * a.join
2286  *
2287  * <em>produces:</em>
2288  *
2289  * abc
2290  */
2291 
2292 VALUE
2294 {
2295  if (rb_thread_alone()) {
2297  "stopping only thread\n\tnote: use sleep to stop forever");
2298  }
2300  return Qnil;
2301 }
2302 
2303 static int
2305 {
2306  VALUE ary = (VALUE)data;
2307  rb_thread_t *th;
2308  GetThreadPtr((VALUE)key, th);
2309 
2310  switch (th->status) {
2311  case THREAD_RUNNABLE:
2312  case THREAD_STOPPED:
2314  rb_ary_push(ary, th->self);
2315  default:
2316  break;
2317  }
2318  return ST_CONTINUE;
2319 }
2320 
2321 /********************************************************************/
2322 
2323 /*
2324  * call-seq:
2325  * Thread.list -> array
2326  *
2327  * Returns an array of <code>Thread</code> objects for all threads that are
2328  * either runnable or stopped.
2329  *
2330  * Thread.new { sleep(200) }
2331  * Thread.new { 1000000.times {|i| i*i } }
2332  * Thread.new { Thread.stop }
2333  * Thread.list.each {|t| p t}
2334  *
2335  * <em>produces:</em>
2336  *
2337  * #<Thread:0x401b3e84 sleep>
2338  * #<Thread:0x401b3f38 run>
2339  * #<Thread:0x401b3fb0 sleep>
2340  * #<Thread:0x401bdf4c run>
2341  */
2342 
2343 VALUE
2345 {
2346  VALUE ary = rb_ary_new();
2347  st_foreach(GET_THREAD()->vm->living_threads, thread_list_i, ary);
2348  return ary;
2349 }
2350 
2351 VALUE
2353 {
2354  return GET_THREAD()->self;
2355 }
2356 
2357 /*
2358  * call-seq:
2359  * Thread.current -> thread
2360  *
2361  * Returns the currently executing thread.
2362  *
2363  * Thread.current #=> #<Thread:0x401bdf4c run>
2364  */
2365 
2366 static VALUE
2368 {
2369  return rb_thread_current();
2370 }
2371 
2372 VALUE
2374 {
2375  return GET_THREAD()->vm->main_thread->self;
2376 }
2377 
2378 /*
2379  * call-seq:
2380  * Thread.main -> thread
2381  *
2382  * Returns the main thread.
2383  */
2384 
2385 static VALUE
2387 {
2388  return rb_thread_main();
2389 }
2390 
2391 
2392 /*
2393  * call-seq:
2394  * Thread.abort_on_exception -> true or false
2395  *
2396  * Returns the status of the global ``abort on exception'' condition. The
2397  * default is <code>false</code>. When set to <code>true</code>, or if the
2398  * global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
2399  * command line option <code>-d</code> was specified) all threads will abort
2400  * (the process will <code>exit(0)</code>) if an exception is raised in any
2401  * thread. See also <code>Thread::abort_on_exception=</code>.
2402  */
2403 
2404 static VALUE
2406 {
2408 }
2409 
2410 
2411 /*
2412  * call-seq:
2413  * Thread.abort_on_exception= boolean -> true or false
2414  *
2415  * When set to <code>true</code>, all threads will abort if an exception is
2416  * raised. Returns the new state.
2417  *
2418  * Thread.abort_on_exception = true
2419  * t1 = Thread.new do
2420  * puts "In new thread"
2421  * raise "Exception from thread"
2422  * end
2423  * sleep(1)
2424  * puts "not reached"
2425  *
2426  * <em>produces:</em>
2427  *
2428  * In new thread
2429  * prog.rb:4: Exception from thread (RuntimeError)
2430  * from prog.rb:2:in `initialize'
2431  * from prog.rb:2:in `new'
2432  * from prog.rb:2
2433  */
2434 
2435 static VALUE
2437 {
2438  rb_secure(4);
2440  return val;
2441 }
2442 
2443 
2444 /*
2445  * call-seq:
2446  * thr.abort_on_exception -> true or false
2447  *
2448  * Returns the status of the thread-local ``abort on exception'' condition for
2449  * <i>thr</i>. The default is <code>false</code>. See also
2450  * <code>Thread::abort_on_exception=</code>.
2451  */
2452 
2453 static VALUE
2455 {
2456  rb_thread_t *th;
2457  GetThreadPtr(thread, th);
2458  return th->abort_on_exception ? Qtrue : Qfalse;
2459 }
2460 
2461 
2462 /*
2463  * call-seq:
2464  * thr.abort_on_exception= boolean -> true or false
2465  *
2466  * When set to <code>true</code>, causes all threads (including the main
2467  * program) to abort if an exception is raised in <i>thr</i>. The process will
2468  * effectively <code>exit(0)</code>.
2469  */
2470 
2471 static VALUE
2473 {
2474  rb_thread_t *th;
2475  rb_secure(4);
2476 
2477  GetThreadPtr(thread, th);
2478  th->abort_on_exception = RTEST(val);
2479  return val;
2480 }
2481 
2482 
2483 /*
2484  * call-seq:
2485  * thr.group -> thgrp or nil
2486  *
2487  * Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
2488  * the thread is not a member of any group.
2489  *
2490  * Thread.main.group #=> #<ThreadGroup:0x4029d914>
2491  */
2492 
2493 VALUE
2495 {
2496  rb_thread_t *th;
2497  VALUE group;
2498  GetThreadPtr(thread, th);
2499  group = th->thgroup;
2500 
2501  if (!group) {
2502  group = Qnil;
2503  }
2504  return group;
2505 }
2506 
2507 static const char *
2509 {
2510  switch (th->status) {
2511  case THREAD_RUNNABLE:
2512  if (th->to_kill)
2513  return "aborting";
2514  else
2515  return "run";
2516  case THREAD_STOPPED:
2518  return "sleep";
2519  case THREAD_KILLED:
2520  return "dead";
2521  default:
2522  return "unknown";
2523  }
2524 }
2525 
2526 static int
2528 {
2529  return th->status == THREAD_KILLED;
2530 }
2531 
2532 
2533 /*
2534  * call-seq:
2535  * thr.status -> string, false or nil
2536  *
2537  * Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
2538  * sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
2539  * ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
2540  * <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
2541  * terminated with an exception.
2542  *
2543  * a = Thread.new { raise("die now") }
2544  * b = Thread.new { Thread.stop }
2545  * c = Thread.new { Thread.exit }
2546  * d = Thread.new { sleep }
2547  * d.kill #=> #<Thread:0x401b3678 aborting>
2548  * a.status #=> nil
2549  * b.status #=> "sleep"
2550  * c.status #=> false
2551  * d.status #=> "aborting"
2552  * Thread.current.status #=> "run"
2553  */
2554 
2555 static VALUE
2557 {
2558  rb_thread_t *th;
2559  GetThreadPtr(thread, th);
2560 
2561  if (rb_threadptr_dead(th)) {
2562  if (!NIL_P(th->errinfo) && !FIXNUM_P(th->errinfo)
2563  /* TODO */ ) {
2564  return Qnil;
2565  }
2566  return Qfalse;
2567  }
2568  return rb_str_new2(thread_status_name(th));
2569 }
2570 
2571 
2572 /*
2573  * call-seq:
2574  * thr.alive? -> true or false
2575  *
2576  * Returns <code>true</code> if <i>thr</i> is running or sleeping.
2577  *
2578  * thr = Thread.new { }
2579  * thr.join #=> #<Thread:0x401b3fb0 dead>
2580  * Thread.current.alive? #=> true
2581  * thr.alive? #=> false
2582  */
2583 
2584 static VALUE
2586 {
2587  rb_thread_t *th;
2588  GetThreadPtr(thread, th);
2589 
2590  if (rb_threadptr_dead(th))
2591  return Qfalse;
2592  return Qtrue;
2593 }
2594 
2595 /*
2596  * call-seq:
2597  * thr.stop? -> true or false
2598  *
2599  * Returns <code>true</code> if <i>thr</i> is dead or sleeping.
2600  *
2601  * a = Thread.new { Thread.stop }
2602  * b = Thread.current
2603  * a.stop? #=> true
2604  * b.stop? #=> false
2605  */
2606 
2607 static VALUE
2609 {
2610  rb_thread_t *th;
2611  GetThreadPtr(thread, th);
2612 
2613  if (rb_threadptr_dead(th))
2614  return Qtrue;
2615  if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
2616  return Qtrue;
2617  return Qfalse;
2618 }
2619 
2620 /*
2621  * call-seq:
2622  * thr.safe_level -> integer
2623  *
2624  * Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
2625  * levels can help when implementing sandboxes which run insecure code.
2626  *
2627  * thr = Thread.new { $SAFE = 3; sleep }
2628  * Thread.current.safe_level #=> 0
2629  * thr.safe_level #=> 3
2630  */
2631 
2632 static VALUE
2634 {
2635  rb_thread_t *th;
2636  GetThreadPtr(thread, th);
2637 
2638  return INT2NUM(th->safe_level);
2639 }
2640 
2641 /*
2642  * call-seq:
2643  * thr.inspect -> string
2644  *
2645  * Dumps the name, id, and status of _thr_ to a string.
2646  */
2647 
2648 static VALUE
2650 {
2651  const char *cname = rb_obj_classname(thread);
2652  rb_thread_t *th;
2653  const char *status;
2654  VALUE str;
2655 
2656  GetThreadPtr(thread, th);
2657  status = thread_status_name(th);
2658  str = rb_sprintf("#<%s:%p %s>", cname, (void *)thread, status);
2659  OBJ_INFECT(str, thread);
2660 
2661  return str;
2662 }
2663 
2664 VALUE
2666 {
2667  rb_thread_t *th;
2668  st_data_t val;
2669 
2670  GetThreadPtr(thread, th);
2671  if (rb_safe_level() >= 4 && th != GET_THREAD()) {
2672  rb_raise(rb_eSecurityError, "Insecure: thread locals");
2673  }
2674  if (!th->local_storage) {
2675  return Qnil;
2676  }
2677  if (st_lookup(th->local_storage, id, &val)) {
2678  return (VALUE)val;
2679  }
2680  return Qnil;
2681 }
2682 
2683 /*
2684  * call-seq:
2685  * thr[sym] -> obj or nil
2686  *
2687  * Attribute Reference---Returns the value of a fiber-local variable (the current thread's root
2688  * fiber if not explicitly inside a Fiber), using either a symbol or a string name.
2689  * If the specified variable does not exist, returns <code>nil</code>.
2690  *
2691  * [
2692  * Thread.new { Thread.current["name"] = "A" },
2693  * Thread.new { Thread.current[:name] = "B" },
2694  * Thread.new { Thread.current["name"] = "C" }
2695  * ].each do |th|
2696  * th.join
2697  * puts "#{th.inspect}: #{th[:name]}"
2698  * end
2699  *
2700  * <em>produces:</em>
2701  *
2702  * #<Thread:0x00000002a54220 dead>: A
2703  * #<Thread:0x00000002a541a8 dead>: B
2704  * #<Thread:0x00000002a54130 dead>: C
2705  *
2706  * Thread#[] and Thread#[]= are not thread-local but fiber-local.
2707  * This distinction did not exist in Ruby 1.8 because
2708  * fibers were not available until Ruby 1.9.
2709  * Ruby 1.9 makes these methods fiber-local in order to preserve
2710  * the following idiom for dynamic scope.
2711  *
2712  * def meth(newvalue)
2713  * begin
2714  * oldvalue = Thread.current[:name]
2715  * Thread.current[:name] = newvalue
2716  * yield
2717  * ensure
2718  * Thread.current[:name] = oldvalue
2719  * end
2720  * end
2721  *
2722  * The idiom would not work as a dynamic scope if the methods were
2723  * thread-local and a given block switched fibers.
2724  *
2725  * f = Fiber.new {
2726  * meth(1) {
2727  * Fiber.yield
2728  * }
2729  * }
2730  * meth(2) {
2731  * f.resume
2732  * }
2733  * f.resume
2734  * p Thread.current[:name]
2735  * #=> nil if fiber-local
2736  * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
2737  *
2738  * For thread-local variables, please see <code>Thread#thread_variable_get</code>
2739  * and <code>Thread#thread_variable_set</code>.
2740  *
2741  */
2742 
2743 static VALUE
2745 {
2746  return rb_thread_local_aref(thread, rb_to_id(id));
2747 }
2748 
2749 VALUE
2751 {
2752  rb_thread_t *th;
2753  GetThreadPtr(thread, th);
2754 
2755  if (rb_safe_level() >= 4 && th != GET_THREAD()) {
2756  rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
2757  }
2758  if (OBJ_FROZEN(thread)) {
2759  rb_error_frozen("thread locals");
2760  }
2761  if (!th->local_storage) {
2763  }
2764  if (NIL_P(val)) {
2765  st_delete_wrap(th->local_storage, id);
2766  return Qnil;
2767  }
2768  st_insert(th->local_storage, id, val);
2769  return val;
2770 }
2771 
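/*
 * Illustrative sketch (not part of the original source): how the two C-level
 * helpers above are typically called from extension code. The key name
 * "mykey" is an assumption for the example.
 *
 *    VALUE thr = rb_thread_current();
 *    rb_thread_local_aset(thr, rb_intern("mykey"), rb_str_new_cstr("value"));
 *    rb_thread_local_aref(thr, rb_intern("mykey"));   // => "value"
 */
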
2772 /*
2773  * call-seq:
2774  * thr[sym] = obj -> obj
2775  *
2776  * Attribute Assignment---Sets or creates the value of a fiber-local variable,
2777  * using either a symbol or a string. See also <code>Thread#[]</code>. For
2778  * thread-local variables, please see <code>Thread#thread_variable_set</code>
2779  * and <code>Thread#thread_variable_get</code>.
2780  */
2781 
2782 static VALUE
2784 {
2785  return rb_thread_local_aset(self, rb_to_id(id), val);
2786 }
2787 
2788 /*
2789  * call-seq:
2790  * thr.thread_variable_get(key) -> obj or nil
2791  *
2792  * Returns the value of a thread local variable that has been set. Note that
2793  * these are different from fiber local values. For fiber local values,
2794  * please see Thread#[] and Thread#[]=.
2795  *
2796  * Thread local values are carried along with threads, and do not respect
2797  * fibers. For example:
2798  *
2799  * Thread.new {
2800  * Thread.current.thread_variable_set("foo", "bar") # set a thread local
2801  * Thread.current["foo"] = "bar" # set a fiber local
2802  *
2803  * Fiber.new {
2804  * Fiber.yield [
2805  * Thread.current.thread_variable_get("foo"), # get the thread local
2806  * Thread.current["foo"], # get the fiber local
2807  * ]
2808  * }.resume
2809  * }.join.value # => ['bar', nil]
2810  *
2811  * The value "bar" is returned for the thread local, whereas nil is returned
2812  * for the fiber local. The fiber is executed in the same thread, so the
2813  * thread local values are available.
2814  *
2815  * See also Thread#[]
2816  */
2817 
2818 static VALUE
2820 {
2821  VALUE locals;
2822  rb_thread_t *th;
2823 
2824  GetThreadPtr(thread, th);
2825 
2826  if (rb_safe_level() >= 4 && th != GET_THREAD()) {
2827  rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
2828  }
2829 
2830  locals = rb_iv_get(thread, "locals");
2831  return rb_hash_aref(locals, ID2SYM(rb_to_id(id)));
2832 }
2833 
2834 /*
2835  * call-seq:
2836  * thr.thread_variable_set(key, value)
2837  *
2838  * Sets a thread local with +key+ to +value+. Note that these are local to
2839  * threads, and not to fibers. Please see Thread#thread_variable_get and
2840  * Thread#[] for more information.
2841  */
2842 
2843 static VALUE
2845 {
2846  VALUE locals;
2847  rb_thread_t *th;
2848 
2849  GetThreadPtr(thread, th);
2850 
2851  if (rb_safe_level() >= 4 && th != GET_THREAD()) {
2852  rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
2853  }
2854  if (OBJ_FROZEN(thread)) {
2855  rb_error_frozen("thread locals");
2856  }
2857 
2858  locals = rb_iv_get(thread, "locals");
2859  return rb_hash_aset(locals, ID2SYM(rb_to_id(id)), val);
2860 }
2861 
2862 /*
2863  * call-seq:
2864  * thr.key?(sym) -> true or false
2865  *
2866  * Returns <code>true</code> if the given string (or symbol) exists as a
2867  * fiber-local variable.
2868  *
2869  * me = Thread.current
2870  * me[:oliver] = "a"
2871  * me.key?(:oliver) #=> true
2872  * me.key?(:stanley) #=> false
2873  */
2874 
2875 static VALUE
2877 {
2878  rb_thread_t *th;
2879  ID id = rb_to_id(key);
2880 
2881  GetThreadPtr(self, th);
2882 
2883  if (!th->local_storage) {
2884  return Qfalse;
2885  }
2886  if (st_lookup(th->local_storage, id, 0)) {
2887  return Qtrue;
2888  }
2889  return Qfalse;
2890 }
2891 
2892 static int
2894 {
2895  rb_ary_push(ary, ID2SYM(key));
2896  return ST_CONTINUE;
2897 }
2898 
2899 static int
2901 {
2902  return (int)vm->living_threads->num_entries;
2903 }
2904 
2905 int
2907 {
2908  int num = 1;
2909  if (GET_THREAD()->vm->living_threads) {
2910  num = vm_living_thread_num(GET_THREAD()->vm);
2911  thread_debug("rb_thread_alone: %d\n", num);
2912  }
2913  return num == 1;
2914 }
2915 
2916 /*
2917  * call-seq:
2918  * thr.keys -> array
2919  *
2920  * Returns an array of the names of the fiber-local variables (as Symbols).
2921  *
2922  * thr = Thread.new do
2923  * Thread.current[:cat] = 'meow'
2924  * Thread.current["dog"] = 'woof'
2925  * end
2926  * thr.join #=> #<Thread:0x401b3f10 dead>
2927  * thr.keys #=> [:dog, :cat]
2928  */
2929 
2930 static VALUE
2932 {
2933  rb_thread_t *th;
2934  VALUE ary = rb_ary_new();
2935  GetThreadPtr(self, th);
2936 
2937  if (th->local_storage) {
2939  }
2940  return ary;
2941 }
2942 
2943 static int
2945 {
2946  rb_ary_push(ary, key);
2947  return ST_CONTINUE;
2948 }
2949 
2950 /*
2951  * call-seq:
2952  * thr.thread_variables -> array
2953  *
2954  * Returns an array of the names of the thread-local variables (as Symbols).
2955  *
2956  * thr = Thread.new do
2957  * Thread.current.thread_variable_set(:cat, 'meow')
2958  * Thread.current.thread_variable_set("dog", 'woof')
2959  * end
2960  * thr.join #=> #<Thread:0x401b3f10 dead>
2961  * thr.thread_variables #=> [:dog, :cat]
2962  *
2963  * Note that these are not fiber local variables. Please see Thread#[] and
2964  * Thread#thread_variable_get for more details.
2965  */
2966 
2967 static VALUE
2969 {
2970  VALUE locals;
2971  VALUE ary;
2972 
2973  locals = rb_iv_get(thread, "locals");
2974  ary = rb_ary_new();
2975  rb_hash_foreach(locals, keys_i, ary);
2976 
2977  return ary;
2978 }
2979 
2980 /*
2981  * call-seq:
2982  * thr.thread_variable?(key) -> true or false
2983  *
2984  * Returns <code>true</code> if the given string (or symbol) exists as a
2985  * thread-local variable.
2986  *
2987  * me = Thread.current
2988  * me.thread_variable_set(:oliver, "a")
2989  * me.thread_variable?(:oliver) #=> true
2990  * me.thread_variable?(:stanley) #=> false
2991  *
2992  * Note that these are not fiber local variables. Please see Thread#[] and
2993  * Thread#thread_variable_get for more details.
2994  */
2995 
2996 static VALUE
2998 {
2999  VALUE locals;
3000 
3001  locals = rb_iv_get(thread, "locals");
3002 
3003  if (!RHASH(locals)->ntbl)
3004  return Qfalse;
3005 
3006  if (st_lookup(RHASH(locals)->ntbl, ID2SYM(rb_to_id(key)), 0)) {
3007  return Qtrue;
3008  }
3009 
3010  return Qfalse;
3011 }
3012 
3013 /*
3014  * call-seq:
3015  * thr.priority -> integer
3016  *
3017  * Returns the priority of <i>thr</i>. The default is inherited from the
3018  * thread that created the new thread, or zero for the
3019  * initial main thread; higher-priority threads will run more frequently
3020  * than lower-priority threads (but lower-priority threads can also run).
3021 
3022  * This is just a hint for the Ruby thread scheduler. It may be ignored on
3023  * some platforms.
3024  *
3025  * Thread.current.priority #=> 0
3026  */
3027 
3028 static VALUE
3030 {
3031  rb_thread_t *th;
3032  GetThreadPtr(thread, th);
3033  return INT2NUM(th->priority);
3034 }
3035 
3036 
3037 /*
3038  * call-seq:
3039  * thr.priority= integer -> thr
3040  *
3041  * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3042  * will run more frequently than lower-priority threads (but lower-priority
3043  * threads can also run).
3044  *
3045  * This is just a hint for the Ruby thread scheduler. It may be ignored on
3046  * some platforms.
3047  *
3048  * count1 = count2 = 0
3049  * a = Thread.new do
3050  * loop { count1 += 1 }
3051  * end
3052  * a.priority = -1
3053  *
3054  * b = Thread.new do
3055  * loop { count2 += 1 }
3056  * end
3057  * b.priority = -2
3058  * sleep 1 #=> 1
3059  * count1 #=> 622504
3060  * count2 #=> 5832
3061  */
3062 
3063 static VALUE
3065 {
3066  rb_thread_t *th;
3067  int priority;
3068  GetThreadPtr(thread, th);
3069 
3070  rb_secure(4);
3071 
3072 #if USE_NATIVE_THREAD_PRIORITY
3073  th->priority = NUM2INT(prio);
3074  native_thread_apply_priority(th);
3075 #else
3076  priority = NUM2INT(prio);
3077  if (priority > RUBY_THREAD_PRIORITY_MAX) {
3078  priority = RUBY_THREAD_PRIORITY_MAX;
3079  }
3080  else if (priority < RUBY_THREAD_PRIORITY_MIN) {
3081  priority = RUBY_THREAD_PRIORITY_MIN;
3082  }
3083  th->priority = priority;
3084 #endif
3085  return INT2NUM(th->priority);
3086 }
3087 
3088 /* for IO */
3089 
3090 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
3091 
3092 /*
3093  * Several Unix platforms support file descriptors bigger than FD_SETSIZE
3094  * in the select(2) system call.
3095  *
3096  * - Linux 2.2.12 (?)
3097  * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
3098  * select(2) documents how to allocate fd_set dynamically.
3099  * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
3100  * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
3101  * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
3102  * select(2) documents how to allocate fd_set dynamically.
3103  * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
3104  * - HP-UX documents how to allocate fd_set dynamically.
3105  * http://docs.hp.com/en/B2355-60105/select.2.html
3106  * - Solaris 8 has select_large_fdset
3107  * - Mac OS X 10.7 (Lion)
3108  * select(2) returns EINVAL if nfds is greater than FD_SETSIZE and
3109  * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
3110  * http://developer.apple.com/library/mac/#releasenotes/Darwin/SymbolVariantsRelNotes/_index.html
3111  *
3112  * When fd_set is not big enough to hold big file descriptors,
3113  * it should be allocated dynamically.
3114  * Note that this assumes fd_set is structured as a bitmap.
3115 
3116  * rb_fd_init allocates the memory.
3117  * rb_fd_term frees the memory.
3118  * rb_fd_set may re-allocate the bitmap.
3119  *
3120  * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
3121  */
3122 
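/*
 * Minimal usage sketch (not part of the original source) of the rb_fdset_t
 * API implemented below; `fd` is assumed to be a valid descriptor obtained
 * elsewhere. rb_fd_set() grows the bitmap on demand, so descriptors above
 * FD_SETSIZE are accepted.
 *
 *    rb_fdset_t fds;
 *    rb_fd_init(&fds);              // allocate and zero the initial bitmap
 *    rb_fd_set(fd, &fds);           // may reallocate the bitmap
 *    if (rb_fd_isset(fd, &fds)) { ... }
 *    rb_fd_term(&fds);              // release the allocated memory
 */
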
3123 void
3124 rb_fd_init(rb_fdset_t *fds)
3125 {
3126  fds->maxfd = 0;
3127  fds->fdset = ALLOC(fd_set);
3128  FD_ZERO(fds->fdset);
3129 }
3130 
3131 void
3133 {
3134  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3135 
3136  if (size < sizeof(fd_set))
3137  size = sizeof(fd_set);
3138  dst->maxfd = src->maxfd;
3139  dst->fdset = xmalloc(size);
3140  memcpy(dst->fdset, src->fdset, size);
3141 }
3142 
3143 void
3144 rb_fd_term(rb_fdset_t *fds)
3145 {
3146  if (fds->fdset) xfree(fds->fdset);
3147  fds->maxfd = 0;
3148  fds->fdset = 0;
3149 }
3150 
3151 void
3152 rb_fd_zero(rb_fdset_t *fds)
3153 {
3154  if (fds->fdset)
3155  MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
3156 }
3157 
3158 static void
3159 rb_fd_resize(int n, rb_fdset_t *fds)
3160 {
3161  size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
3162  size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
3163 
3164  if (m < sizeof(fd_set)) m = sizeof(fd_set);
3165  if (o < sizeof(fd_set)) o = sizeof(fd_set);
3166 
3167  if (m > o) {
3168  fds->fdset = xrealloc(fds->fdset, m);
3169  memset((char *)fds->fdset + o, 0, m - o);
3170  }
3171  if (n >= fds->maxfd) fds->maxfd = n + 1;
3172 }
3173 
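/*
 * Worked example for rb_fd_resize() above (illustrative, assuming 64-bit
 * fd_mask and FD_SETSIZE == 1024): covering fd n = 1024 needs
 * howmany(1025, 64) = 17 fd_mask words, i.e. 136 bytes, so the default
 * 128-byte fd_set is reallocated and the 8 new bytes are zeroed before the
 * caller applies FD_SET.
 */
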
3174 void
3175 rb_fd_set(int n, rb_fdset_t *fds)
3176 {
3177  rb_fd_resize(n, fds);
3178  FD_SET(n, fds->fdset);
3179 }
3180 
3181 void
3182 rb_fd_clr(int n, rb_fdset_t *fds)
3183 {
3184  if (n >= fds->maxfd) return;
3185  FD_CLR(n, fds->fdset);
3186 }
3187 
3188 int
3189 rb_fd_isset(int n, const rb_fdset_t *fds)
3190 {
3191  if (n >= fds->maxfd) return 0;
3192  return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
3193 }
3194 
3195 void
3196 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
3197 {
3198  size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
3199 
3200  if (size < sizeof(fd_set)) size = sizeof(fd_set);
3201  dst->maxfd = max;
3202  dst->fdset = xrealloc(dst->fdset, size);
3203  memcpy(dst->fdset, src, size);
3204 }
3205 
3206 static void
3207 rb_fd_rcopy(fd_set *dst, rb_fdset_t *src)
3208 {
3209  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3210 
3211  if (size > sizeof(fd_set)) {
3212  rb_raise(rb_eArgError, "too large fdsets");
3213  }
3214  memcpy(dst, rb_fd_ptr(src), sizeof(fd_set));
3215 }
3216 
3217 void
3218 rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
3219 {
3220  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3221 
3222  if (size < sizeof(fd_set))
3223  size = sizeof(fd_set);
3224  dst->maxfd = src->maxfd;
3225  dst->fdset = xrealloc(dst->fdset, size);
3226  memcpy(dst->fdset, src->fdset, size);
3227 }
3228 
3229 #ifdef __native_client__
3230 int select(int nfds, fd_set *readfds, fd_set *writefds,
3231  fd_set *exceptfds, struct timeval *timeout);
3232 #endif
3233 
3234 int
3235 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
3236 {
3237  fd_set *r = NULL, *w = NULL, *e = NULL;
3238  if (readfds) {
3239  rb_fd_resize(n - 1, readfds);
3240  r = rb_fd_ptr(readfds);
3241  }
3242  if (writefds) {
3243  rb_fd_resize(n - 1, writefds);
3244  w = rb_fd_ptr(writefds);
3245  }
3246  if (exceptfds) {
3247  rb_fd_resize(n - 1, exceptfds);
3248  e = rb_fd_ptr(exceptfds);
3249  }
3250  return select(n, r, w, e, timeout);
3251 }
3252 
3253 #undef FD_ZERO
3254 #undef FD_SET
3255 #undef FD_CLR
3256 #undef FD_ISSET
3257 
3258 #define FD_ZERO(f) rb_fd_zero(f)
3259 #define FD_SET(i, f) rb_fd_set((i), (f))
3260 #define FD_CLR(i, f) rb_fd_clr((i), (f))
3261 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
3262 
3263 #elif defined(_WIN32)
3264 
3265 void
3266 rb_fd_init(rb_fdset_t *set)
3267 {
3268  set->capa = FD_SETSIZE;
3269  set->fdset = ALLOC(fd_set);
3270  FD_ZERO(set->fdset);
3271 }
3272 
3273 void
3275 {
3276  rb_fd_init(dst);
3277  rb_fd_dup(dst, src);
3278 }
3279 
3280 static void
3281 rb_fd_rcopy(fd_set *dst, rb_fdset_t *src)
3282 {
3283  int max = rb_fd_max(src);
3284 
3285  /* we assume src is the result of select() with dst, so dst should be
3286  * larger than or equal to src. */
3287  if (max > FD_SETSIZE || (UINT)max > dst->fd_count) {
3288  rb_raise(rb_eArgError, "too large fdsets");
3289  }
3290 
3291  memcpy(dst->fd_array, src->fdset->fd_array, sizeof(src->fdset->fd_array[0]) * max); /* copy whole SOCKET entries, not bytes */
3292  dst->fd_count = max;
3293 }
3294 
3295 void
3296 rb_fd_term(rb_fdset_t *set)
3297 {
3298  xfree(set->fdset);
3299  set->fdset = NULL;
3300  set->capa = 0;
3301 }
3302 
3303 void
3304 rb_fd_set(int fd, rb_fdset_t *set)
3305 {
3306  unsigned int i;
3307  SOCKET s = rb_w32_get_osfhandle(fd);
3308 
3309  for (i = 0; i < set->fdset->fd_count; i++) {
3310  if (set->fdset->fd_array[i] == s) {
3311  return;
3312  }
3313  }
3314  if (set->fdset->fd_count >= (unsigned)set->capa) {
3315  set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
3316  set->fdset = xrealloc(set->fdset, sizeof(unsigned int) + sizeof(SOCKET) * set->capa);
3317  }
3318  set->fdset->fd_array[set->fdset->fd_count++] = s;
3319 }
3320 
3321 #undef FD_ZERO
3322 #undef FD_SET
3323 #undef FD_CLR
3324 #undef FD_ISSET
3325 
3326 #define FD_ZERO(f) rb_fd_zero(f)
3327 #define FD_SET(i, f) rb_fd_set((i), (f))
3328 #define FD_CLR(i, f) rb_fd_clr((i), (f))
3329 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
3330 
3331 #else
3332 #define rb_fd_rcopy(d, s) (*(d) = *(s))
3333 #endif
3334 
3335 static int
3336 do_select(int n, rb_fdset_t *read, rb_fdset_t *write, rb_fdset_t *except,
3337  struct timeval *timeout)
3338 {
3340  int lerrno;
3341  rb_fdset_t UNINITIALIZED_VAR(orig_read);
3342  rb_fdset_t UNINITIALIZED_VAR(orig_write);
3343  rb_fdset_t UNINITIALIZED_VAR(orig_except);
3344  double limit = 0;
3345  struct timeval wait_rest;
3346  rb_thread_t *th = GET_THREAD();
3347 
3348  if (timeout) {
3349  limit = timeofday();
3350  limit += (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
3351  wait_rest = *timeout;
3352  timeout = &wait_rest;
3353  }
3354 
3355  if (read)
3356  rb_fd_init_copy(&orig_read, read);
3357  if (write)
3358  rb_fd_init_copy(&orig_write, write);
3359  if (except)
3360  rb_fd_init_copy(&orig_except, except);
3361 
3362  retry:
3363  lerrno = 0;
3364 
3365  BLOCKING_REGION({
3366  result = native_fd_select(n, read, write, except, timeout, th);
3367  if (result < 0) lerrno = errno;
3368  }, ubf_select, th, FALSE);
3369 
3371 
3372  errno = lerrno;
3373 
3374  if (result < 0) {
3375  switch (errno) {
3376  case EINTR:
3377 #ifdef ERESTART
3378  case ERESTART:
3379 #endif
3380  if (read)
3381  rb_fd_dup(read, &orig_read);
3382  if (write)
3383  rb_fd_dup(write, &orig_write);
3384  if (except)
3385  rb_fd_dup(except, &orig_except);
3386 
3387  if (timeout) {
3388  double d = limit - timeofday();
3389 
3390  wait_rest.tv_sec = (time_t)d;
3391  wait_rest.tv_usec = (int)((d-(double)wait_rest.tv_sec)*1e6);
3392  if (wait_rest.tv_sec < 0) wait_rest.tv_sec = 0;
3393  if (wait_rest.tv_usec < 0) wait_rest.tv_usec = 0;
3394  }
3395 
3396  goto retry;
3397  default:
3398  break;
3399  }
3400  }
3401 
3402  if (read)
3403  rb_fd_term(&orig_read);
3404  if (write)
3405  rb_fd_term(&orig_write);
3406  if (except)
3407  rb_fd_term(&orig_except);
3408 
3409  return result;
3410 }
3411 
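/*
 * Note (illustrative, not part of the original source): on EINTR/ERESTART
 * the retry path above restores the caller's fd sets from the saved copies
 * and shrinks the timeout to the time still remaining. For example, a 2.5s
 * timeout interrupted after 1.0s leaves limit - timeofday() ~= 1.5, so
 * wait_rest becomes roughly {tv_sec = 1, tv_usec = 500000} for the retry.
 */
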
3412 static void
3413 rb_thread_wait_fd_rw(int fd, int read)
3414 {
3415  int result = 0;
3416  int events = read ? RB_WAITFD_IN : RB_WAITFD_OUT;
3417 
3418  thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
3419 
3420  if (fd < 0) {
3421  rb_raise(rb_eIOError, "closed stream");
3422  }
3423 
3424  result = rb_wait_for_single_fd(fd, events, NULL);
3425  if (result < 0) {
3426  rb_sys_fail(0);
3427  }
3428 
3429  thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
3430 }
3431 
3432 void
3434 {
3435  rb_thread_wait_fd_rw(fd, 1);
3436 }
3437 
3438 int
3440 {
3441  rb_thread_wait_fd_rw(fd, 0);
3442  return TRUE;
3443 }
3444 
3445 int
3446 rb_thread_select(int max, fd_set * read, fd_set * write, fd_set * except,
3447  struct timeval *timeout)
3448 {
3449  rb_fdset_t fdsets[3];
3450  rb_fdset_t *rfds = NULL;
3451  rb_fdset_t *wfds = NULL;
3452  rb_fdset_t *efds = NULL;
3453  int retval;
3454 
3455  if (read) {
3456  rfds = &fdsets[0];
3457  rb_fd_init(rfds);
3458  rb_fd_copy(rfds, read, max);
3459  }
3460  if (write) {
3461  wfds = &fdsets[1];
3462  rb_fd_init(wfds);
3463  rb_fd_copy(wfds, write, max);
3464  }
3465  if (except) {
3466  efds = &fdsets[2];
3467  rb_fd_init(efds);
3468  rb_fd_copy(efds, except, max);
3469  }
3470 
3471  retval = rb_thread_fd_select(max, rfds, wfds, efds, timeout);
3472 
3473  if (rfds) {
3474  rb_fd_rcopy(read, rfds);
3475  rb_fd_term(rfds);
3476  }
3477  if (wfds) {
3478  rb_fd_rcopy(write, wfds);
3479  rb_fd_term(wfds);
3480  }
3481  if (efds) {
3482  rb_fd_rcopy(except, efds);
3483  rb_fd_term(efds);
3484  }
3485 
3486  return retval;
3487 }
3488 
3489 int
3490 rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
3491  struct timeval *timeout)
3492 {
3493  if (!read && !write && !except) {
3494  if (!timeout) {
3496  return 0;
3497  }
3498  rb_thread_wait_for(*timeout);
3499  return 0;
3500  }
3501 
3502  if (read) {
3503  rb_fd_resize(max - 1, read);
3504  }
3505  if (write) {
3506  rb_fd_resize(max - 1, write);
3507  }
3508  if (except) {
3509  rb_fd_resize(max - 1, except);
3510  }
3511  return do_select(max, read, write, except, timeout);
3512 }
3513 
3514 /*
3515  * poll() is supported by many OSes, but so far Linux is the only
3516  * one we know of that supports using poll() in all places select()
3517  * would work.
3518  */
3519 #if defined(HAVE_POLL) && defined(__linux__)
3520 # define USE_POLL
3521 #endif
3522 
3523 #ifdef USE_POLL
3524 
3525 /* The same as the Linux kernel. TODO: make a platform-independent definition. */
3526 #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
3527 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
3528 #define POLLEX_SET (POLLPRI)
3529 
3530 #ifndef HAVE_PPOLL
3531 /* TODO: don't ignore sigmask */
3532 int
3533 ppoll(struct pollfd *fds, nfds_t nfds,
3534  const struct timespec *ts, const sigset_t *sigmask)
3535 {
3536  int timeout_ms;
3537 
3538  if (ts) {
3539  int tmp, tmp2;
3540 
3541  if (ts->tv_sec > TIMET_MAX/1000)
3542  timeout_ms = -1;
3543  else {
3544  tmp = ts->tv_sec * 1000;
3545  tmp2 = ts->tv_nsec / (1000 * 1000);
3546  if (TIMET_MAX - tmp < tmp2)
3547  timeout_ms = -1;
3548  else
3549  timeout_ms = tmp + tmp2;
3550  }
3551  }
3552  else
3553  timeout_ms = -1;
3554 
3555  return poll(fds, nfds, timeout_ms);
3556 }
3557 #endif
3558 
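/*
 * Worked example for the ppoll() fallback above (illustrative):
 * ts = {tv_sec = 1, tv_nsec = 500000000} yields timeout_ms = 1000 + 500 = 1500.
 * On arithmetic overflow, or when ts is NULL, timeout_ms becomes -1, which
 * poll(2) interprets as an infinite wait; the emulation saturates rather
 * than truncates.
 */
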
3559 /*
3560  * returns a mask of events
3561  */
3562 int
3563 rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
3564 {
3565  struct pollfd fds;
3566  int result = 0, lerrno;
3567  double limit = 0;
3568  struct timespec ts;
3569  struct timespec *timeout = NULL;
3570  rb_thread_t *th = GET_THREAD();
3571 
3572  if (tv) {
3573  ts.tv_sec = tv->tv_sec;
3574  ts.tv_nsec = tv->tv_usec * 1000;
3575  limit = timeofday();
3576  limit += (double)tv->tv_sec + (double)tv->tv_usec * 1e-6;
3577  timeout = &ts;
3578  }
3579 
3580  fds.fd = fd;
3581  fds.events = (short)events;
3582 
3583 retry:
3584  lerrno = 0;
3585  BLOCKING_REGION({
3586  result = ppoll(&fds, 1, timeout, NULL);
3587  if (result < 0) lerrno = errno;
3588  }, ubf_select, th, FALSE);
3589 
3591 
3592  if (result < 0) {
3593  errno = lerrno;
3594  switch (errno) {
3595  case EINTR:
3596 #ifdef ERESTART
3597  case ERESTART:
3598 #endif
3599  if (timeout) {
3600  double d = limit - timeofday();
3601 
3602  ts.tv_sec = (long)d;
3603  ts.tv_nsec = (long)((d - (double)ts.tv_sec) * 1e9);
3604  if (ts.tv_sec < 0)
3605  ts.tv_sec = 0;
3606  if (ts.tv_nsec < 0)
3607  ts.tv_nsec = 0;
3608  }
3609  goto retry;
3610  }
3611  return -1;
3612  }
3613 
3614  if (fds.revents & POLLNVAL) {
3615  errno = EBADF;
3616  return -1;
3617  }
3618 
3619  /*
3620  * POLLIN and POLLOUT have different meanings from select(2)'s read/write bits.
3621  * Therefore we need to fix them up.
3622  */
3623  result = 0;
3624  if (fds.revents & POLLIN_SET)
3625  result |= RB_WAITFD_IN;
3626  if (fds.revents & POLLOUT_SET)
3627  result |= RB_WAITFD_OUT;
3628  if (fds.revents & POLLEX_SET)
3629  result |= RB_WAITFD_PRI;
3630 
3631  return result;
3632 }
3633 #else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
3634 static rb_fdset_t *
3636 {
3637  rb_fd_init(fds);
3638  rb_fd_set(fd, fds);
3639 
3640  return fds;
3641 }
3642 
3643 struct select_args {
3644  union {
3645  int fd;
3646  int error;
3647  } as;
3651  struct timeval *tv;
3652 };
3653 
3654 static VALUE
3656 {
3657  struct select_args *args = (struct select_args *)ptr;
3658  int r;
3659 
3660  r = rb_thread_fd_select(args->as.fd + 1,
3661  args->read, args->write, args->except, args->tv);
3662  if (r == -1)
3663  args->as.error = errno;
3664  if (r > 0) {
3665  r = 0;
3666  if (args->read && rb_fd_isset(args->as.fd, args->read))
3667  r |= RB_WAITFD_IN;
3668  if (args->write && rb_fd_isset(args->as.fd, args->write))
3669  r |= RB_WAITFD_OUT;
3670  if (args->except && rb_fd_isset(args->as.fd, args->except))
3671  r |= RB_WAITFD_PRI;
3672  }
3673  return (VALUE)r;
3674 }
3675 
3676 static VALUE
3678 {
3679  struct select_args *args = (struct select_args *)ptr;
3680 
3681  if (args->read) rb_fd_term(args->read);
3682  if (args->write) rb_fd_term(args->write);
3683  if (args->except) rb_fd_term(args->except);
3684 
3685  return (VALUE)-1;
3686 }
3687 
3688 int
3689 rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
3690 {
3691  rb_fdset_t rfds, wfds, efds;
3692  struct select_args args;
3693  int r;
3694  VALUE ptr = (VALUE)&args;
3695 
3696  args.as.fd = fd;
3697  args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
3698  args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
3699  args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
3700  args.tv = tv;
3701 
3702  r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
3703  if (r == -1)
3704  errno = args.as.error;
3705 
3706  return r;
3707 }
3708 #endif /* ! USE_POLL */
3709 
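/*
 * Minimal usage sketch (not part of the original source) that applies to
 * either implementation of rb_wait_for_single_fd() above; `fd` is assumed
 * to be a valid descriptor.
 *
 *    struct timeval tv = { 1, 0 };   // one-second timeout
 *    int rv = rb_wait_for_single_fd(fd, RB_WAITFD_IN, &tv);
 *    if (rv < 0)
 *        rb_sys_fail(0);             // errno has been set
 *    else if (rv & RB_WAITFD_IN)
 *        ;                           // fd is readable
 *    // rv == 0 means the timeout expired
 */
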
3710 /*
3711  * for GC
3712  */
3713 
3714 #ifdef USE_CONSERVATIVE_STACK_END
3715 void
3717 {
3718  VALUE stack_end;
3719  *stack_end_p = &stack_end;
3720 }
3721 #endif
3722 
3723 
3724 /*
3725  *
3726  */
3727 
3728 void
3730 {
3731  /* mth must be main_thread */
3732  if (rb_signal_buff_size() > 0) {
3733  /* wakeup main thread */
3735  }
3736 }
3737 
3738 static void
3740 {
3741  rb_vm_t *vm = GET_VM(); /* TODO: fix me for Multi-VM */
3742 
3743  /*
3744  * Tricky: thread_destruct_lock doesn't close a race against
3745  * vm->running_thread switch. However, it guarantees vm->running_thread
3746  * points to a valid pointer or NULL.
3747  */
3748  native_mutex_lock(&vm->thread_destruct_lock);
3749  /* for time slice */
3750  if (vm->running_thread)
3752  native_mutex_unlock(&vm->thread_destruct_lock);
3753 
3754  /* check signal */
3756 
3757 #if 0
3758  /* prove profiler */
3759  if (vm->prove_profile.enable) {
3760  rb_thread_t *th = vm->running_thread;
3761 
3762  if (vm->during_gc) {
3763  /* GC prove profiling */
3764  }
3765  }
3766 #endif
3767 }
3768 
3769 void
3771 {
3772  if (timer_thread_id && native_stop_timer_thread(close_anyway)) {
3773  native_reset_timer_thread();
3774  }
3775 }
3776 
3777 void
3779 {
3780  native_reset_timer_thread();
3781 }
3782 
3783 void
3785 {
3786  system_working = 1;
3787  rb_thread_create_timer_thread();
3788 }
3789 
3790 static int
3792 {
3793  int i;
3794  VALUE lines = (VALUE)val;
3795 
3796  for (i = 0; i < RARRAY_LEN(lines); i++) {
3797  if (RARRAY_PTR(lines)[i] != Qnil) {
3798  RARRAY_PTR(lines)[i] = INT2FIX(0);
3799  }
3800  }
3801  return ST_CONTINUE;
3802 }
3803 
3804 static void
3806 {
3807  VALUE coverages = rb_get_coverages();
3808  if (RTEST(coverages)) {
3809  st_foreach(RHASH_TBL(coverages), clear_coverage_i, 0);
3810  }
3811 }
3812 
3813 static void
3815 {
3816  rb_thread_t *th = GET_THREAD();
3817  rb_vm_t *vm = th->vm;
3818  VALUE thval = th->self;
3819  vm->main_thread = th;
3820 
3821  gvl_atfork(th->vm);
3822  st_foreach(vm->living_threads, atfork, (st_data_t)th);
3823  st_clear(vm->living_threads);
3824  st_insert(vm->living_threads, thval, (st_data_t)th->thread_id);
3825  vm->sleeper = 0;
3826  clear_coverage();
3827 }
3828 
3829 static int
3831 {
3832  VALUE thval = key;
3833  rb_thread_t *th;
3834  GetThreadPtr(thval, th);
3835 
3836  if (th != (rb_thread_t *)current_th) {
3840  }
3841  return ST_CONTINUE;
3842 }
3843 
3844 void
3846 {
3848  GET_THREAD()->join_list = NULL;
3849 
3850  /* We don't want to reproduce CVE-2003-0900. */
3852 }
3853 
3854 static int
3856 {
3857  VALUE thval = key;
3858  rb_thread_t *th;
3859  GetThreadPtr(thval, th);
3860 
3861  if (th != (rb_thread_t *)current_th) {
3863  }
3864  return ST_CONTINUE;
3865 }
3866 
3867 void
3869 {
3871 }
3872 
3873 struct thgroup {
3876 };
3877 
3878 static size_t
3879 thgroup_memsize(const void *ptr)
3880 {
3881  return ptr ? sizeof(struct thgroup) : 0;
3882 }
3883 
3885  "thgroup",
3887 };
3888 
3889 /*
3890  * Document-class: ThreadGroup
3891  *
3892  * <code>ThreadGroup</code> provides a means of keeping track of a number of
3893  * threads as a group. A <code>Thread</code> can belong to only one
3894  * <code>ThreadGroup</code> at a time; adding a thread to a new group will
3895  * remove it from any previous group.
3896  *
3897  * Newly created threads belong to the same group as the thread from which they
3898  * were created.
3899  */
3900 
3901 /*
3902  * Document-const: Default
3903  *
3904  * The default ThreadGroup created when Ruby starts; all Threads belong to it
3905  * by default.
3906  */
3907 static VALUE
3909 {
3910  VALUE group;
3911  struct thgroup *data;
3912 
3913  group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
3914  data->enclosed = 0;
3915  data->group = group;
3916 
3917  return group;
3918 }
3919 
3923 };
3924 
3925 static int
3927 {
3928  VALUE thread = (VALUE)key;
3929  VALUE ary = ((struct thgroup_list_params *)data)->ary;
3930  VALUE group = ((struct thgroup_list_params *)data)->group;
3931  rb_thread_t *th;
3932  GetThreadPtr(thread, th);
3933 
3934  if (th->thgroup == group) {
3935  rb_ary_push(ary, thread);
3936  }
3937  return ST_CONTINUE;
3938 }
3939 
3940 /*
3941  * call-seq:
3942  * thgrp.list -> array
3943  *
3944  * Returns an array of all existing <code>Thread</code> objects that belong to
3945  * this group.
3946  *
3947  * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
3948  */
3949 
3950 static VALUE
3952 {
3953  VALUE ary = rb_ary_new();
3954  struct thgroup_list_params param;
3955 
3956  param.ary = ary;
3957  param.group = group;
3958  st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t) & param);
3959  return ary;
3960 }
3961 
3962 
3963 /*
3964  * call-seq:
3965  * thgrp.enclose -> thgrp
3966  *
3967  * Prevents threads from being added to or removed from the receiving
3968  * <code>ThreadGroup</code>. New threads can still be started in an enclosed
3969  * <code>ThreadGroup</code>.
3970  *
3971  * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
3972  * thr = Thread::new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
3973  * tg = ThreadGroup::new #=> #<ThreadGroup:0x402752d4>
3974  * tg.add thr
3975  *
3976  * <em>produces:</em>
3977  *
3978  * ThreadError: can't move from the enclosed thread group
3979  */
3980 
3981 static VALUE
3983 {
3984  struct thgroup *data;
3985 
3986  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
3987  data->enclosed = 1;
3988 
3989  return group;
3990 }
3991 
3992 
3993 /*
3994  * call-seq:
3995  * thgrp.enclosed? -> true or false
3996  *
3997  * Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
3998  * ThreadGroup#enclose.
3999  */
4000 
4001 static VALUE
4003 {
4004  struct thgroup *data;
4005 
4006  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4007  if (data->enclosed)
4008  return Qtrue;
4009  return Qfalse;
4010 }
4011 
4012 
4013 /*
4014  * call-seq:
4015  * thgrp.add(thread) -> thgrp
4016  *
4017  * Adds the given <em>thread</em> to this group, removing it from any other
4018  * group to which it may have previously belonged.
4019  *
4020  * puts "Initial group is #{ThreadGroup::Default.list}"
4021  * tg = ThreadGroup.new
4022  * t1 = Thread.new { sleep }
4023  * t2 = Thread.new { sleep }
4024  * puts "t1 is #{t1}"
4025  * puts "t2 is #{t2}"
4026  * tg.add(t1)
4027  * puts "Initial group now #{ThreadGroup::Default.list}"
4028  * puts "tg group now #{tg.list}"
4029  *
4030  * <em>produces:</em>
4031  *
4032  * Initial group is #<Thread:0x401bdf4c>
4033  * t1 is #<Thread:0x401b3c90>
4034  * t2 is #<Thread:0x401b3c18>
4035  * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4036  * tg group now #<Thread:0x401b3c90>
4037  */
4038 
4039 static VALUE
4041 {
4042  rb_thread_t *th;
4043  struct thgroup *data;
4044 
4045  rb_secure(4);
4046  GetThreadPtr(thread, th);
4047 
4048  if (OBJ_FROZEN(group)) {
4049  rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4050  }
4051  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4052  if (data->enclosed) {
4053  rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4054  }
4055 
4056  if (!th->thgroup) {
4057  return Qnil;
4058  }
4059 
4060  if (OBJ_FROZEN(th->thgroup)) {
4061  rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4062  }
4064  if (data->enclosed) {
4066  "can't move from the enclosed thread group");
4067  }
4068 
4069  th->thgroup = group;
4070  return group;
4071 }
4072 
4073 
4074 /*
4075  * Document-class: Mutex
4076  *
4077  * Mutex implements a simple semaphore that can be used to coordinate access to
4078  * shared data from multiple concurrent threads.
4079  *
4080  * Example:
4081  *
4082  * require 'thread'
4083  * semaphore = Mutex.new
4084  *
4085  * a = Thread.new {
4086  * semaphore.synchronize {
4087  * # access shared resource
4088  * }
4089  * }
4090  *
4091  * b = Thread.new {
4092  * semaphore.synchronize {
4093  * # access shared resource
4094  * }
4095  * }
4096  *
4097  */
4098 
4099 #define GetMutexPtr(obj, tobj) \
4100  TypedData_Get_Struct((obj), rb_mutex_t, &mutex_data_type, (tobj))
4101 
4102 #define mutex_mark NULL
4103 
4104 static void
4105 mutex_free(void *ptr)
4106 {
4107  if (ptr) {
4108  rb_mutex_t *mutex = ptr;
4109  if (mutex->th) {
4110  /* rb_warn("free locked mutex"); */
4111  const char *err = rb_mutex_unlock_th(mutex, mutex->th);
4112  if (err) rb_bug("%s", err);
4113  }
4114  native_mutex_destroy(&mutex->lock);
4115  native_cond_destroy(&mutex->cond);
4116  }
4117  ruby_xfree(ptr);
4118 }
4119 
4120 static size_t
4121 mutex_memsize(const void *ptr)
4122 {
4123  return ptr ? sizeof(rb_mutex_t) : 0;
4124 }
4125 
4127  "mutex",
4129 };
4130 
4131 VALUE
4133 {
4134  if (rb_typeddata_is_kind_of(obj, &mutex_data_type)) {
4135  return Qtrue;
4136  }
4137  else {
4138  return Qfalse;
4139  }
4140 }
4141 
4142 static VALUE
4144 {
4145  VALUE volatile obj;
4146  rb_mutex_t *mutex;
4147 
4148  obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
4149  native_mutex_initialize(&mutex->lock);
4150  native_cond_initialize(&mutex->cond, RB_CONDATTR_CLOCK_MONOTONIC);
4151  return obj;
4152 }
4153 
4154 /*
4155  * call-seq:
4156  * Mutex.new -> mutex
4157  *
4158  * Creates a new Mutex
4159  */
4160 static VALUE
4162 {
4163  return self;
4164 }
4165 
4166 VALUE
4168 {
4169  return mutex_alloc(rb_cMutex);
4170 }
4171 
4172 /*
4173  * call-seq:
4174  * mutex.locked? -> true or false
4175  *
4176  * Returns +true+ if this lock is currently held by some thread.
4177  */
4178 VALUE
4180 {
4181  rb_mutex_t *mutex;
4182  GetMutexPtr(self, mutex);
4183  return mutex->th ? Qtrue : Qfalse;
4184 }
4185 
4186 static void
4188 {
4189  rb_mutex_t *mutex;
4190  GetMutexPtr(self, mutex);
4191 
4192  if (th->keeping_mutexes) {
4193  mutex->next_mutex = th->keeping_mutexes;
4194  }
4195  th->keeping_mutexes = mutex;
4196 }
4197 
4198 /*
4199  * call-seq:
4200  * mutex.try_lock -> true or false
4201  *
4202  * Attempts to obtain the lock and returns immediately. Returns +true+ if the
4203  * lock was granted.
4204  */
4205 VALUE
4207 {
4208  rb_mutex_t *mutex;
4209  VALUE locked = Qfalse;
4210  GetMutexPtr(self, mutex);
4211 
4212  native_mutex_lock(&mutex->lock);
4213  if (mutex->th == 0) {
4214  mutex->th = GET_THREAD();
4215  locked = Qtrue;
4216 
4217  mutex_locked(GET_THREAD(), self);
4218  }
4219  native_mutex_unlock(&mutex->lock);
4220 
4221  return locked;
4222 }
4223 
4224 static int
4225 lock_func(rb_thread_t *th, rb_mutex_t *mutex, int timeout_ms)
4226 {
4227  int interrupted = 0;
4228  int err = 0;
4229 
4230  mutex->cond_waiting++;
4231  for (;;) {
4232  if (!mutex->th) {
4233  mutex->th = th;
4234  break;
4235  }
4236  if (RUBY_VM_INTERRUPTED(th)) {
4237  interrupted = 1;
4238  break;
4239  }
4240  if (err == ETIMEDOUT) {
4241  interrupted = 2;
4242  break;
4243  }
4244 
4245  if (timeout_ms) {
4246  struct timespec timeout_rel;
4247  struct timespec timeout;
4248 
4249  timeout_rel.tv_sec = 0;
4250  timeout_rel.tv_nsec = timeout_ms * 1000 * 1000;
4251  timeout = native_cond_timeout(&mutex->cond, timeout_rel);
4252  err = native_cond_timedwait(&mutex->cond, &mutex->lock, &timeout);
4253  }
4254  else {
4255  native_cond_wait(&mutex->cond, &mutex->lock);
4256  err = 0;
4257  }
4258  }
4259  mutex->cond_waiting--;
4260 
4261  return interrupted;
4262 }
4263 
4264 static void
4265 lock_interrupt(void *ptr)
4266 {
4267  rb_mutex_t *mutex = (rb_mutex_t *)ptr;
4268  native_mutex_lock(&mutex->lock);
4269  if (mutex->cond_waiting > 0)
4270  native_cond_broadcast(&mutex->cond);
4271  native_mutex_unlock(&mutex->lock);
4272 }
4273 
4274 /*
4275  * At most one thread may use cond_timedwait and watch for deadlock
4276  * periodically. Multiple polling threads (i.e. concurrent deadlock checks)
4277  * introduce new race conditions. [Bug #6278] [ruby-core:44275]
4278  */
4280 
4281 /*
4282  * call-seq:
4283  * mutex.lock -> self
4284  *
4285  * Attempts to grab the lock and waits if it isn't available.
4286  * Raises +ThreadError+ if +mutex+ was locked by the current thread.
4287  */
4288 VALUE
4290 {
4291  rb_thread_t *th = GET_THREAD();
4292  rb_mutex_t *mutex;
4293  GetMutexPtr(self, mutex);
4294 
4295  /* When running trap handler */
4296  if (!mutex->allow_trap && th->interrupt_mask & TRAP_INTERRUPT_MASK) {
4297  rb_raise(rb_eThreadError, "can't be called from trap context");
4298  }
4299 
4300  if (rb_mutex_trylock(self) == Qfalse) {
4301  if (mutex->th == GET_THREAD()) {
4302  rb_raise(rb_eThreadError, "deadlock; recursive locking");
4303  }
4304 
4305  while (mutex->th != th) {
4306  int interrupted;
4307  enum rb_thread_status prev_status = th->status;
4308  volatile int timeout_ms = 0;
4309  struct rb_unblock_callback oldubf;
4310 
4311  set_unblock_function(th, lock_interrupt, mutex, &oldubf, FALSE);
4313  th->locking_mutex = self;
4314 
4315  native_mutex_lock(&mutex->lock);
4316  th->vm->sleeper++;
4317  /*
4318  * Careful! While some contended threads are in lock_func(),
4319  * vm->sleeper is an unstable value. We have to avoid both deadlock
4320  * and busy looping.
4321  */
4322  if ((vm_living_thread_num(th->vm) == th->vm->sleeper) &&
4323  !patrol_thread) {
4324  timeout_ms = 100;
4325  patrol_thread = th;
4326  }
4327 
4328  GVL_UNLOCK_BEGIN();
4329  interrupted = lock_func(th, mutex, (int)timeout_ms);
4330  native_mutex_unlock(&mutex->lock);
4331  GVL_UNLOCK_END();
4332 
4333  if (patrol_thread == th)
4334  patrol_thread = NULL;
4335 
4336  reset_unblock_function(th, &oldubf);
4337 
4338  th->locking_mutex = Qfalse;
4339  if (mutex->th && interrupted == 2) {
4340  rb_check_deadlock(th->vm);
4341  }
4342  if (th->status == THREAD_STOPPED_FOREVER) {
4343  th->status = prev_status;
4344  }
4345  th->vm->sleeper--;
4346 
4347  if (mutex->th == th) mutex_locked(th, self);
4348 
4349  if (interrupted) {
4351  }
4352  }
4353  }
4354  return self;
4355 }
4356 
4357 /*
4358  * call-seq:
4359  * mutex.owned? -> true or false
4360  *
4361  * Returns +true+ if this lock is currently held by the current thread.
4362  * <em>This API is experimental, and subject to change.</em>
4363  */
4364 VALUE
4366 {
4367  VALUE owned = Qfalse;
4368  rb_thread_t *th = GET_THREAD();
4369  rb_mutex_t *mutex;
4370 
4371  GetMutexPtr(self, mutex);
4372 
4373  if (mutex->th == th)
4374  owned = Qtrue;
4375 
4376  return owned;
4377 }
4378 
4379 static const char *
4381 {
4382  const char *err = NULL;
4383 
4384  native_mutex_lock(&mutex->lock);
4385 
4386  if (mutex->th == 0) {
4387  err = "Attempt to unlock a mutex which is not locked";
4388  }
4389  else if (mutex->th != th) {
4390  err = "Attempt to unlock a mutex which is locked by another thread";
4391  }
4392  else {
4393  mutex->th = 0;
4394  if (mutex->cond_waiting > 0)
4395  native_cond_signal(&mutex->cond);
4396  }
4397 
4398  native_mutex_unlock(&mutex->lock);
4399 
4400  if (!err) {
4401  rb_mutex_t *volatile *th_mutex = &th->keeping_mutexes;
4402  while (*th_mutex != mutex) {
4403  th_mutex = &(*th_mutex)->next_mutex;
4404  }
4405  *th_mutex = mutex->next_mutex;
4406  mutex->next_mutex = NULL;
4407  }
4408 
4409  return err;
4410 }
4411 
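/*
 * Note (illustrative, not part of the original source): the removal loop
 * above uses a pointer-to-pointer so the head and interior cases need no
 * special-casing. th_mutex starts at &th->keeping_mutexes; after the loop it
 * addresses whichever link currently points at `mutex`, and the single
 * assignment *th_mutex = mutex->next_mutex unlinks it from the list.
 */
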
4412 /*
4413  * call-seq:
4414  * mutex.unlock -> self
4415  *
4416  * Releases the lock.
4417  * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
4418  */
4419 VALUE
4421 {
4422  const char *err;
4423  rb_mutex_t *mutex;
4424  GetMutexPtr(self, mutex);
4425 
4426  err = rb_mutex_unlock_th(mutex, GET_THREAD());
4427  if (err) rb_raise(rb_eThreadError, "%s", err);
4428 
4429  return self;
4430 }
4431 
4432 static void
4434 {
4435  if (th->keeping_mutexes) {
4437  }
4438  th->keeping_mutexes = NULL;
4439 }
4440 
4441 static void
4443 {
4444  rb_mutex_t *mutex;
4445 
4446  if (!th->locking_mutex) return;
4447 
4448  GetMutexPtr(th->locking_mutex, mutex);
4449  if (mutex->th == th)
4450  rb_mutex_abandon_all(mutex);
4451  th->locking_mutex = Qfalse;
4452 }
4453 
4454 static void
4456 {
4457  rb_mutex_t *mutex;
4458 
4459  while (mutexes) {
4460  mutex = mutexes;
4461  mutexes = mutex->next_mutex;
4462  mutex->th = 0;
4463  mutex->next_mutex = 0;
4464  }
4465 }
4466 
4467 static VALUE
4469 {
4470  sleep_forever(GET_THREAD(), 1, 0); /* permit spurious check */
4471  return Qnil;
4472 }
4473 
4474 static VALUE
4476 {
4477  struct timeval *t = (struct timeval *)time;
4478  sleep_timeval(GET_THREAD(), *t, 0); /* permit spurious check */
4479  return Qnil;
4480 }
4481 
4482 VALUE
4484 {
4485  time_t beg, end;
4486  struct timeval t;
4487 
4488  if (!NIL_P(timeout)) {
4489  t = rb_time_interval(timeout);
4490  }
4491  rb_mutex_unlock(self);
4492  beg = time(0);
4493  if (NIL_P(timeout)) {
4495  }
4496  else {
4498  }
4499  end = time(0) - beg;
4500  return INT2FIX(end);
4501 }
4502 
4503 /*
4504  * call-seq:
4505  * mutex.sleep(timeout = nil) -> number
4506  *
4507  * Releases the lock and sleeps for +timeout+ seconds if it is given and
4508  * non-nil, or forever otherwise. Raises +ThreadError+ if +mutex+ wasn't
4509  * locked by the current thread.
4510  *
4511  * Note that this method can wake up without an explicit Thread#wakeup call;
4512  * for example, when the thread receives a signal.
4513  */
4514 static VALUE
4516 {
4517  VALUE timeout;
4518 
4519  rb_scan_args(argc, argv, "01", &timeout);
4520  return rb_mutex_sleep(self, timeout);
4521 }
4522 
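/*
 * A hedged Ruby-level sketch (not part of the original source). Note that
 * the lock is re-acquired after the sleep, even when the sleep is cut short:
 *
 *    m = Mutex.new
 *    m.synchronize do
 *      m.sleep(0.1)   # unlocks m, sleeps ~0.1s, then locks m again
 *    end
 */
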
4523 /*
4524  * call-seq:
4525  * mutex.synchronize { ... } -> result of the block
4526  *
4527  * Obtains a lock, runs the block, and releases the lock when the block
4528  * completes. See the example under +Mutex+.
4529  */
4530 
4531 VALUE
4533 {
4534  rb_mutex_lock(mutex);
4535  return rb_ensure(func, arg, rb_mutex_unlock, mutex);
4536 }
4537 
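/*
 * Minimal C-level sketch (not part of the original source) of the helper
 * above; the callback name `critical_body` and its argument are illustrative.
 * The lock is released even if the callback raises, via rb_ensure().
 *
 *    static VALUE
 *    critical_body(VALUE arg)
 *    {
 *        // ... touch the state guarded by the mutex ...
 *        return Qnil;
 *    }
 *
 *    rb_mutex_synchronize(mutex, critical_body, (VALUE)0);
 */
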
4538 /*
4539  * call-seq:
4540  * mutex.synchronize { ... } -> result of the block
4541  *
4542  * Obtains a lock, runs the block, and releases the lock when the block
4543  * completes. See the example under +Mutex+.
4544  */
4545 static VALUE
4547 {
4548  if (!rb_block_given_p()) {
4549  rb_raise(rb_eThreadError, "must be called with a block");
4550  }
4551 
4552  return rb_mutex_synchronize(self, rb_yield, Qundef);
4553 }
4554 
4555 void rb_mutex_allow_trap(VALUE self, int val)
4556 {
4557  rb_mutex_t *m;
4558  GetMutexPtr(self, m);
4559 
4560  m->allow_trap = val;
4561 }
4562 
4563 /*
4564  * Document-class: ThreadShield
4565  */
4566 static void
4568 {
4569  rb_gc_mark((VALUE)ptr);
4570 }
4571 
4573  "thread_shield",
4574  {thread_shield_mark, 0, 0,},
4575 };
4576 
4577 static VALUE
4579 {
4580  return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
4581 }
4582 
4583 #define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
4584 #define THREAD_SHIELD_WAITING_MASK (FL_USER0|FL_USER1|FL_USER2|FL_USER3|FL_USER4|FL_USER5|FL_USER6|FL_USER7|FL_USER8|FL_USER9|FL_USER10|FL_USER11|FL_USER12|FL_USER13|FL_USER14|FL_USER15|FL_USER16|FL_USER17|FL_USER18|FL_USER19)
4585 #define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
4586 #define rb_thread_shield_waiting(b) (int)((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT)
4587 
4588 static inline void
4590 {
4591  unsigned int w = rb_thread_shield_waiting(b);
4592  w++;
4594  rb_raise(rb_eRuntimeError, "waiting count overflow");
4595  RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4596  RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4597 }
4598 
4599 static inline void
4601 {
4602  unsigned int w = rb_thread_shield_waiting(b);
4603  if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
4604  w--;
4605  RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4606  RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4607 }
4608 
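/*
 * Note (illustrative, not part of the original source): the waiting count is
 * packed into the FL_USER0..FL_USER19 bits of the shield's flags, so for a
 * count w the stored bits are (w << THREAD_SHIELD_WAITING_SHIFT) masked by
 * THREAD_SHIELD_WAITING_MASK, and rb_thread_shield_waiting() shifts them
 * back down.
 */
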
4609 VALUE
4611 {
4612  VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
4613  rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
4614  return thread_shield;
4615 }
4616 
4617 /*
4618  * Wait on a thread shield.
4619  *
4620  * Returns
4621  * true: acquired the thread shield
4622  * false: the thread shield was destroyed and no other threads are waiting
4623  * nil: the thread shield was destroyed but it is still in use
4624  */
4625 VALUE
4627 {
4628  VALUE mutex = GetThreadShieldPtr(self);
4629  rb_mutex_t *m;
4630 
4631  if (!mutex) return Qfalse;
4632  GetMutexPtr(mutex, m);
4633  if (m->th == GET_THREAD()) return Qnil;
4635  rb_mutex_lock(mutex);
4637  if (DATA_PTR(self)) return Qtrue;
4638  rb_mutex_unlock(mutex);
4639  return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
4640 }
4641 
4642 /*
4643  * Release a thread shield, and return true if it has waiting threads.
4644  */
4645 VALUE
4647 {
4648  VALUE mutex = GetThreadShieldPtr(self);
4649  rb_mutex_unlock(mutex);
4650  return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
4651 }
4652 
4653 /*
4654  * Release and destroy a thread shield, and return true if it has waiting threads.
4655  */
4656 VALUE
4658 {
4659  VALUE mutex = GetThreadShieldPtr(self);
4660  DATA_PTR(self) = 0;
4661  rb_mutex_unlock(mutex);
4662  return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
4663 }
4664 
4665 /* variables for recursive traversals */
4667 
4668 /*
4669  * Returns the current "recursive list" used to detect recursion.
4670  * This list is a hash table, unique for the current thread and for
4671  * the current __callee__.
4672  */
4673 
4674 static VALUE
4676 {
4677  volatile VALUE hash = rb_thread_local_aref(rb_thread_current(), recursive_key);
4679  VALUE list;
4680  if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
4681  hash = rb_hash_new();
4682  OBJ_UNTRUST(hash);
4683  rb_thread_local_aset(rb_thread_current(), recursive_key, hash);
4684  list = Qnil;
4685  }
4686  else {
4687  list = rb_hash_aref(hash, sym);
4688  }
4689  if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
4690  list = rb_hash_new();
4691  OBJ_UNTRUST(list);
4692  rb_hash_aset(hash, sym, list);
4693  }
4694  return list;
4695 }
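
/*
 * For illustration, the resulting per-thread bookkeeping in Ruby-ish
 * notation (not literal code from this file):
 *
 *   Thread.current[:__recursive_key__] ==
 *     { :inspect => { obj_id_1 => true,             # single object
 *                     obj_id_2 => paired_obj_id,    # one pair
 *                     obj_id_3 => { paired_id_a => true, ... } } }
 */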
4696 
4697 /*
4698  * Returns Qtrue iff obj_id (or the pair <obj_id, paired_obj_id>) is already
4699  * in the recursion list.
4700  * Assumes the recursion list is valid.
4701  */
4702 
4703 static VALUE
4704 recursive_check(VALUE list, VALUE obj_id, VALUE paired_obj_id)
4705 {
4706 #if SIZEOF_LONG == SIZEOF_VOIDP
4707  #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
4708 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4709  #define OBJ_ID_EQL(obj_id, other) (RB_TYPE_P((obj_id), T_BIGNUM) ? \
4710  rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
4711 #endif
4712 
4713  VALUE pair_list = rb_hash_lookup2(list, obj_id, Qundef);
4714  if (pair_list == Qundef)
4715  return Qfalse;
4716  if (paired_obj_id) {
4717  if (!RB_TYPE_P(pair_list, T_HASH)) {
4718  if (!OBJ_ID_EQL(paired_obj_id, pair_list))
4719  return Qfalse;
4720  }
4721  else {
4722  if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
4723  return Qfalse;
4724  }
4725  }
4726  return Qtrue;
4727 }
4728 
4729 /*
4730  * Pushes obj_id (or the pair <obj_id, paired_obj_id>) onto the recursion list.
4731  * For a single obj_id, it sets list[obj_id] to Qtrue.
4732  * For a pair, it sets list[obj_id] to paired_obj_id if possible,
4733  * otherwise list[obj_id] becomes a hash like:
4734  * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
4735  * Assumes the recursion list is valid.
4736  */
4737 
4738 static void
4739 recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
4740 {
4741  VALUE pair_list;
4742 
4743  if (!paired_obj) {
4744  rb_hash_aset(list, obj, Qtrue);
4745  }
4746  else if ((pair_list = rb_hash_lookup2(list, obj, Qundef)) == Qundef) {
4747  rb_hash_aset(list, obj, paired_obj);
4748  }
4749  else {
4750  if (!RB_TYPE_P(pair_list, T_HASH)){
4751  VALUE other_paired_obj = pair_list;
4752  pair_list = rb_hash_new();
4753  OBJ_UNTRUST(pair_list);
4754  rb_hash_aset(pair_list, other_paired_obj, Qtrue);
4755  rb_hash_aset(list, obj, pair_list);
4756  }
4757  rb_hash_aset(pair_list, paired_obj, Qtrue);
4758  }
4759 }
4760 
4761 /*
4762  * Pops obj_id (or the pair <obj_id, paired_obj_id>) from the recursion list.
4763  * For a pair, if list[obj_id] is a hash, then paired_obj_id is
4764  * removed from the hash and no attempt is made to simplify
4765  * list[obj_id] from {only_one_paired_id => true} to only_one_paired_id.
4766  * Assumes the recursion list is valid.
4767  */
4768 
4769 static void
4770 recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
4771 {
4772  if (paired_obj) {
4773  VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
4774  if (pair_list == Qundef) {
4775  VALUE symname = rb_inspect(ID2SYM(rb_frame_this_func()));
4776  VALUE thrname = rb_inspect(rb_thread_current());
4777  rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list for %s in %s",
4778  StringValuePtr(symname), StringValuePtr(thrname));
4779  }
4780  if (RB_TYPE_P(pair_list, T_HASH)) {
4781  rb_hash_delete(pair_list, paired_obj);
4782  if (!RHASH_EMPTY_P(pair_list)) {
4783  return; /* keep the hash until it is empty */
4784  }
4785  }
4786  }
4787  rb_hash_delete(list, obj);
4788 }
4789 
4790 struct exec_recursive_params {
4791  VALUE (*func) (VALUE, VALUE, int);
4792  VALUE list;
4793  VALUE obj;
4794  VALUE objid;
4795  VALUE pairid;
4796  VALUE arg;
4797 };
4798 
4799 static VALUE
4800 exec_recursive_i(VALUE tag, struct exec_recursive_params *p)
4801 {
4802  VALUE result = Qundef;
4803  int state;
4804 
4805  recursive_push(p->list, p->objid, p->pairid);
4806  PUSH_TAG();
4807  if ((state = EXEC_TAG()) == 0) {
4808  result = (*p->func)(p->obj, p->arg, FALSE);
4809  }
4810  POP_TAG();
4811  recursive_pop(p->list, p->objid, p->pairid);
4812  if (state)
4813  JUMP_TAG(state);
4814  return result;
4815 }
4816 
4817 /*
4818  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4819  * current method is called recursively on obj, or on the pair <obj, pairid>.
4820  * If outer is 0, then the innermost func will be called with recursive set
4821  * to TRUE; otherwise the outermost func will be called. In the latter case,
4822  * all inner calls to func are short-circuited by throw.
4823  * Implementation details: the value thrown is the recursion list, which is
4824  * specific to the current method and unlikely to be caught anywhere else.
4825  * list[recursive_key] is used as a flag for the outermost call.
4826  */
4827 
4828 static VALUE
4829 exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
4830 {
4831  VALUE result = Qundef;
4832  struct exec_recursive_params p;
4833  int outermost;
4834  p.list = recursive_list_access();
4835  p.objid = rb_obj_id(obj);
4836  p.obj = obj;
4837  p.pairid = pairid;
4838  p.arg = arg;
4839  outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
4840 
4841  if (recursive_check(p.list, p.objid, pairid)) {
4842  if (outer && !outermost) {
4843  rb_throw_obj(p.list, p.list);
4844  }
4845  return (*func)(obj, arg, TRUE);
4846  }
4847  else {
4848  p.func = func;
4849 
4850  if (outermost) {
4851  recursive_push(p.list, ID2SYM(recursive_key), 0);
4852  result = rb_catch_obj(p.list, exec_recursive_i, (VALUE)&p);
4853  recursive_pop(p.list, ID2SYM(recursive_key), 0);
4854  if (result == p.list) {
4855  result = (*func)(obj, arg, TRUE);
4856  }
4857  }
4858  else {
4859  result = exec_recursive_i(0, &p);
4860  }
4861  }
4862  *(volatile struct exec_recursive_params *)&p; /* keep p alive across the setjmp/longjmp in EXEC_TAG/JUMP_TAG */
4863  return result;
4864 }
4865 
4866 /*
4867  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4868  * current method is called recursively on obj
4869  */
4870 
4871 VALUE
4872 rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
4873 {
4874  return exec_recursive(func, obj, 0, arg, 0);
4875 }
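
/*
 * A minimal usage sketch for rb_exec_recursive() (inspect_i and
 * safe_inspect are illustrative names, not part of this file): the callback
 * receives recursive != 0 when it is re-entered on an object already being
 * traversed, and can substitute a placeholder instead of looping forever.
 */
static VALUE
inspect_i(VALUE obj, VALUE arg, int recursive)
{
    if (recursive) return rb_str_new2("[...]");  /* cycle detected */
    return rb_inspect(obj);
}

static VALUE
safe_inspect(VALUE obj)
{
    return rb_exec_recursive(inspect_i, obj, 0);
}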
4876 
4877 /*
4878  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4879  * current method is called recursively on the ordered pair <obj, paired_obj>
4880  */
4881 
4882 VALUE
4883 rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
4884 {
4885  return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 0);
4886 }
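
/*
 * Sketch of the paired variant, in the style of recursion-safe container
 * comparison (pair_eq_i and pair_eq are illustrative names): recursion is
 * tracked on the ordered pair <obj1, obj2>, so comparing two distinct
 * cyclic structures still terminates.
 */
static VALUE
pair_eq_i(VALUE obj1, VALUE obj2, int recursive)
{
    if (recursive) return Qtrue;   /* treat a detected cycle as equal */
    return rb_equal(obj1, obj2);
}

static VALUE
pair_eq(VALUE obj1, VALUE obj2)
{
    return rb_exec_recursive_paired(pair_eq_i, obj1, obj2, obj2);
}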
4887 
4888 /*
4889  * If recursion is detected on the current method and obj, the outermost
4890  * func will be called with (obj, arg, Qtrue). All inner func will be
4891  * short-circuited using throw.
4892  */
4893 
4894 VALUE
4895 rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
4896 {
4897  return exec_recursive(func, obj, 0, arg, 1);
4898 }
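
/*
 * Sketch of the outermost variant (hash_i is an illustrative name): only
 * the outermost call on a cyclic object sees recursive != 0; inner calls
 * are unwound by throw.  This is the shape recursion-safe #hash
 * implementations use to return one stable value for a whole cycle.
 */
static VALUE
hash_i(VALUE obj, VALUE arg, int recursive)
{
    if (recursive) return INT2FIX(0);  /* one constant for the whole cycle */
    return rb_funcall(obj, rb_intern("hash"), 0);
}
/* used as: rb_exec_recursive_outer(hash_i, obj, 0); */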
4899 
4900 /*
4901  * If recursion is detected on the current method, obj and paired_obj,
4902  * the outermost func will be called with (obj, arg, Qtrue). All inner
4903  * func will be short-circuited using throw.
4904  */
4905 
4906 VALUE
4907 rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
4908 {
4909  return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 1);
4910 }
4911 
4912 /*
4913  * call-seq:
4914  * thr.backtrace -> array
4915  *
4916  * Returns the current backtrace of the target thread.
4917  *
4918  */
4919 
4920 static VALUE
4921 rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
4922 {
4923  return vm_thread_backtrace(argc, argv, thval);
4924 }
4925 
4926 /* call-seq:
4927  * thr.backtrace_locations(*args) -> array or nil
4928  *
4929  * Returns the execution stack for the target thread---an array containing
4930  * backtrace location objects.
4931  *
4932  * See Thread::Backtrace::Location for more information.
4933  *
4934  * This method behaves similarly to Kernel#caller_locations except it applies
4935  * to a specific thread.
4936  */
4937 static VALUE
4938 rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
4939 {
4940  return vm_thread_backtrace_locations(argc, argv, thval);
4941 }
4942 
4943 /*
4944  * Document-class: ThreadError
4945  *
4946  * Raised when an invalid operation is attempted on a thread.
4947  *
4948  * For example, when no other thread has been started:
4949  *
4950  * Thread.stop
4951  *
4952  * <em>raises the exception:</em>
4953  *
4954  * ThreadError: stopping only thread
4955  */
4956 
4957 /*
4958  * +Thread+ encapsulates the behavior of a thread of
4959  * execution, including the main thread of the Ruby script.
4960  *
4961  * In the descriptions of the methods in this class, the parameter _sym_
4962  * refers to a symbol, which is either a quoted string or a
4963  * +Symbol+ (such as <code>:name</code>).
4964  */
4965 
4966 void
4967 Init_Thread(void)
4968 {
4969 #undef rb_intern
4970 #define rb_intern(str) rb_intern_const(str)
4971 
4972  VALUE cThGroup;
4973  rb_thread_t *th = GET_THREAD();
4974 
4975  sym_never = ID2SYM(rb_intern("never"));
4976  sym_immediate = ID2SYM(rb_intern("immediate"));
4977  sym_on_blocking = ID2SYM(rb_intern("on_blocking"));
4978 
4979  rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
4980  rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
4981  rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
4982  rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
4983  rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
4984  rb_define_singleton_method(rb_cThread, "stop", rb_thread_stop, 0);
4985  rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
4986  rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
4987  rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
4988  rb_define_singleton_method(rb_cThread, "list", rb_thread_list, 0);
4989  rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
4990  rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
4991 #if THREAD_DEBUG < 0
4992  rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
4993  rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
4994 #endif
4995  rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
4996  rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
4997  rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
4998 
4999  rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5000  rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5001  rb_define_method(rb_cThread, "join", thread_join_m, -1);
5002  rb_define_method(rb_cThread, "value", thread_value, 0);
5003  rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5004  rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5005  rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5006  rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5007  rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5008  rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5009  rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5010  rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5011  rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5012  rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5013  rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5014  rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5015  rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5016  rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5017  rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5018  rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5019  rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5020  rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5021  rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5022  rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5023  rb_define_method(rb_cThread, "safe_level", rb_thread_safe_level, 0);
5024  rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5025  rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5026  rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5027 
5028  rb_define_method(rb_cThread, "inspect", rb_thread_inspect, 0);
5029 
5030  closed_stream_error = rb_exc_new2(rb_eIOError, "stream closed");
5031  OBJ_TAINT(closed_stream_error);
5032  OBJ_FREEZE(closed_stream_error);
5033 
5034  cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5035  rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5036  rb_define_method(cThGroup, "list", thgroup_list, 0);
5037  rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5038  rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5039  rb_define_method(cThGroup, "add", thgroup_add, 1);
5040 
5041  {
5042  th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
5043  rb_define_const(cThGroup, "Default", th->thgroup);
5044  }
5045 
5046  rb_cMutex = rb_define_class("Mutex", rb_cObject);
5047  rb_define_alloc_func(rb_cMutex, mutex_alloc);
5048  rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
5049  rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
5050  rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
5051  rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
5052  rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
5053  rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
5054  rb_define_method(rb_cMutex, "synchronize", rb_mutex_synchronize_m, 0);
5055  rb_define_method(rb_cMutex, "owned?", rb_mutex_owned_p, 0);
5056 
5057  recursive_key = rb_intern("__recursive_key__");
5058  rb_eThreadError = rb_define_class("ThreadError", rb_eStandardError);
5059 
5060  /* init thread core */
5061  {
5062  /* main thread setting */
5063  {
5064  /* acquire global vm lock */
5065  gvl_init(th->vm);
5066  gvl_acquire(th->vm, th);
5067  native_mutex_initialize(&th->vm->thread_destruct_lock);
5068  native_mutex_initialize(&th->interrupt_lock);
5069 
5070  th->pending_interrupt_queue = rb_ary_tmp_new(0);
5071  th->pending_interrupt_queue_checked = 0;
5072  th->pending_interrupt_mask_stack = rb_ary_tmp_new(0);
5073 
5074  th->interrupt_mask = 0;
5075  }
5076  }
5077 
5078  rb_thread_create_timer_thread();
5079 
5080  /* suppress warnings on cygwin, mingw and mswin.*/
5081  (void)native_mutex_trylock;
5082 }
5083 
5084 int
5085 ruby_native_thread_p(void)
5086 {
5087  rb_thread_t *th = ruby_thread_from_native();
5088 
5089  return th != 0;
5090 }
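
/*
 * Usage sketch (native_event_cb is an illustrative name): a callback that
 * may fire on a foreign native thread can use ruby_native_thread_p() to
 * bail out instead of touching the VM.  Note that even on a Ruby-registered
 * thread, calling most rb_* functions additionally requires holding the GVL
 * (see rb_thread_call_with_gvl()).
 */
static void
native_event_cb(void *data)
{
    if (!ruby_native_thread_p()) {
        return;  /* this native thread is unknown to the Ruby VM */
    }
    /* ... may interact with the VM from here, GVL rules permitting ... */
}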
5091 
5092 static int
5093 check_deadlock_i(st_data_t key, st_data_t val, int *found)
5094 {
5095  VALUE thval = key;
5096  rb_thread_t *th;
5097  GetThreadPtr(thval, th);
5098  GetThreadPtr(thval, th);
5099  if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th)) {
5100  *found = 1;
5101  }
5102  else if (th->locking_mutex) {
5103  rb_mutex_t *mutex;
5104  GetMutexPtr(th->locking_mutex, mutex);
5105 
5106  native_mutex_lock(&mutex->lock);
5107  if (mutex->th == th || (!mutex->th && mutex->cond_waiting)) {
5108  *found = 1;
5109  }
5110  native_mutex_unlock(&mutex->lock);
5111  }
5112 
5113  return (*found) ? ST_STOP : ST_CONTINUE;
5114 }
5115 
5116 #ifdef DEBUG_DEADLOCK_CHECK
5117 static int
5118 debug_i(st_data_t key, st_data_t val, int *found)
5119 {
5120  VALUE thval = key;
5121  rb_thread_t *th;
5122  GetThreadPtr(thval, th);
5123 
5124  printf("th:%p %d %d", th, th->status, th->interrupt_flag);
5125  if (th->locking_mutex) {
5126  rb_mutex_t *mutex;
5127  GetMutexPtr(th->locking_mutex, mutex);
5128 
5129  native_mutex_lock(&mutex->lock);
5130  printf(" %p %d\n", mutex->th, mutex->cond_waiting);
5131  native_mutex_unlock(&mutex->lock);
5132  }
5133  else
5134  puts("");
5135 
5136  return ST_CONTINUE;
5137 }
5138 #endif
5139 
5140 static void
5141 rb_check_deadlock(rb_vm_t *vm)
5142 {
5143  int found = 0;
5144 
5145  if (vm_living_thread_num(vm) > vm->sleeper) return;
5146  if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5147  if (patrol_thread && patrol_thread != GET_THREAD()) return;
5148 
5150 
5151  if (!found) {
5152  VALUE argv[2];
5153  argv[0] = rb_eFatal;
5154  argv[1] = rb_str_new2("No live threads left. Deadlock?");
5155 #ifdef DEBUG_DEADLOCK_CHECK
5156  printf("%d %d %p %p\n", vm->living_threads->num_entries, vm->sleeper, GET_THREAD(), vm->main_thread);
5157  st_foreach(vm->living_threads, debug_i, (st_data_t)0);
5158 #endif
5159  vm->sleeper--;
5160  rb_threadptr_raise(vm->main_thread, 2, argv);
5161  }
5162 }
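
/*
 * For illustration, a Ruby one-liner that trips this check: the main thread
 * joins a child that stops forever, so every living thread is sleeping with
 * no one left to wake it, and the fatal "No live threads left. Deadlock?"
 * error is delivered to the main thread:
 *
 *   Thread.new { Thread.stop }.join
 */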
5163 
5164 static void
5165 update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
5166 {
5167  VALUE coverage = GET_THREAD()->cfp->iseq->coverage;
5168  if (coverage && RBASIC(coverage)->klass == 0) {
5169  long line = rb_sourceline() - 1;
5170  long count;
5171  if (RARRAY_PTR(coverage)[line] == Qnil) {
5172  return;
5173  }
5174  count = FIX2LONG(RARRAY_PTR(coverage)[line]) + 1;
5175  if (POSFIXABLE(count)) {
5176  RARRAY_PTR(coverage)[line] = LONG2FIX(count);
5177  }
5178  }
5179 }
5180 
5181 VALUE
5182 rb_get_coverages(void)
5183 {
5184  return GET_VM()->coverages;
5185 }
5186 
5187 void
5188 rb_set_coverages(VALUE coverages)
5189 {
5190  GET_VM()->coverages = coverages;
5191  rb_add_event_hook(update_coverage, RUBY_EVENT_COVERAGE, Qnil);
5192 }
5193 
5194 void
5195 rb_reset_coverages(void)
5196 {
5197  GET_VM()->coverages = Qfalse;
5198  rb_remove_event_hook(update_coverage);
5199 }
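
/*
 * Sketch of how these hooks are driven (this mirrors the coverage
 * extension's approach; enable_coverage is an illustrative name):
 * installing a coverages hash via rb_set_coverages() registers
 * update_coverage() above as a RUBY_EVENT_COVERAGE hook, and
 * rb_reset_coverages() tears it down again.
 */
static void
enable_coverage(void)
{
    VALUE coverages = rb_get_coverages();
    if (!RTEST(coverages)) {
        rb_set_coverages(rb_hash_new());  /* event hook is added here */
    }
}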
5200 
5201 VALUE
5202 rb_uninterruptible(VALUE (*b_proc)(ANYARGS), VALUE data)
5203 {
5204  VALUE interrupt_mask = rb_hash_new();
5205  rb_thread_t *cur_th = GET_THREAD();
5206 
5207  rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5208  rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5209 
5210  return rb_ensure(b_proc, data, rb_ary_pop, cur_th->pending_interrupt_mask_stack);
5211 }
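
/*
 * Usage sketch (critical_body and call_critical are illustrative names):
 * rb_uninterruptible() defers asynchronous interrupts (Thread#raise,
 * Thread#kill, signal-delivered exceptions) for the duration of b_proc by
 * pushing an { Object => :never } entry onto the pending-interrupt mask
 * stack, popping it via rb_ensure() on the way out.
 */
static VALUE
critical_body(VALUE data)
{
    /* ... work that must not be interrupted part-way through ... */
    return Qnil;
}

static VALUE
call_critical(void)
{
    return rb_uninterruptible(critical_body, Qnil);
}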