Ruby  2.4.2p198(2017-09-14revision59899)
thread.c
Go to the documentation of this file.
1 /**********************************************************************
2 
3  thread.c -
4 
5  $Author: nagachika $
6 
7  Copyright (C) 2004-2007 Koichi Sasada
8 
9 **********************************************************************/
10 
11 /*
12  YARV Thread Design
13 
14  model 1: Userlevel Thread
15  Same as traditional ruby thread.
16 
17  model 2: Native Thread with Global VM lock
18  Using pthread (or Windows thread) and Ruby threads run concurrent.
19 
20  model 3: Native Thread with fine grain lock
21  Using pthread and Ruby threads run concurrent or parallel.
22 
23 ------------------------------------------------------------------------
24 
25  model 2:
26  Only a thread that holds the mutex (GVL: Global VM Lock, a.k.a. Giant
27  VM Lock) can run. When scheduling, the running thread releases the GVL.
28  If the running thread attempts a blocking operation, it must release the
29  GVL so that another thread can continue. After the blocking operation,
30  the thread must check for interrupts (RUBY_VM_CHECK_INTS).
31 
32  Every VM can run parallel.
33 
34  Ruby threads are scheduled by OS thread scheduler.
35 
36 ------------------------------------------------------------------------
37 
38  model 3:
39  All threads run concurrently or in parallel, so exclusive access control
40  is needed to reach shared objects. For example, to access a String or
41  Array object, a fine-grained lock must be taken every time.
42  */
43 
44 
45 /*
46  * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
47  * 2.15 or later and set _FORTIFY_SOURCE > 0.
48  * However, the implementation is wrong. Even though Linux's select(2)
49  * supports large fd size (>FD_SETSIZE), it wrongly assumes fd is always
50  * less than FD_SETSIZE (i.e. 1024). And then when enabling HAVE_RB_FD_INIT,
51  * it doesn't work correctly and makes program abort. Therefore we need to
52  * disable FORTIFY_SOURCE until glibc fixes it.
53  */
54 #undef _FORTIFY_SOURCE
55 #undef __USE_FORTIFY_LEVEL
56 #define __USE_FORTIFY_LEVEL 0
57 
58 /* for model 2 */
59 
60 #include "eval_intern.h"
61 #include "gc.h"
62 #include "timev.h"
63 #include "ruby/io.h"
64 #include "ruby/thread.h"
65 #include "ruby/thread_native.h"
66 #include "internal.h"
67 
68 #ifndef USE_NATIVE_THREAD_PRIORITY
69 #define USE_NATIVE_THREAD_PRIORITY 0
70 #define RUBY_THREAD_PRIORITY_MAX 3
71 #define RUBY_THREAD_PRIORITY_MIN -3
72 #endif
73 
74 #ifndef THREAD_DEBUG
75 #define THREAD_DEBUG 0
76 #endif
77 
79 
83 static ID id_locals;
84 
85 static void sleep_timeval(rb_thread_t *th, struct timeval time, int spurious_check);
86 static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check);
87 static void sleep_forever(rb_thread_t *th, int nodeadlock, int spurious_check);
89 static double timeofday(void);
90 static int rb_threadptr_dead(rb_thread_t *th);
91 static void rb_check_deadlock(rb_vm_t *vm);
93 
94 #define eKillSignal INT2FIX(0)
95 #define eTerminateSignal INT2FIX(1)
96 static volatile int system_working = 1;
97 
98 struct waiting_fd {
99  struct list_node wfd_node; /* <=> vm.waiting_fds */
101  int fd;
102 };
103 
104 inline static void
106 {
107  st_delete(table, &key, 0);
108 }
109 
110 /********************************************************************************/
111 
112 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
113 
115  enum rb_thread_status prev_status;
116  struct rb_unblock_callback oldubf;
117 };
118 
120  struct rb_unblock_callback *old, int fail_if_interrupted);
121 static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old);
122 
123 static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
124  rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
125 static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
126 
127 #ifdef __ia64
128 #define RB_GC_SAVE_MACHINE_REGISTER_STACK(th) \
129  do{(th)->machine.register_stack_end = rb_ia64_bsp();}while(0)
130 #else
131 #define RB_GC_SAVE_MACHINE_REGISTER_STACK(th)
132 #endif
133 #define RB_GC_SAVE_MACHINE_CONTEXT(th) \
134  do { \
135  FLUSH_REGISTER_WINDOWS; \
136  RB_GC_SAVE_MACHINE_REGISTER_STACK(th); \
137  setjmp((th)->machine.regs); \
138  SET_MACHINE_STACK_END(&(th)->machine.stack_end); \
139  } while (0)
140 
141 #define GVL_UNLOCK_BEGIN() do { \
142  rb_thread_t *_th_stored = GET_THREAD(); \
143  RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
144  gvl_release(_th_stored->vm);
145 
146 #define GVL_UNLOCK_END() \
147  gvl_acquire(_th_stored->vm, _th_stored); \
148  rb_thread_set_current(_th_stored); \
149 } while(0)
150 
151 #ifdef __GNUC__
152 #ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
153 #define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
154 #else
155 #define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
156 #endif
157 #else
158 #define only_if_constant(expr, notconst) notconst
159 #endif
160 #define BLOCKING_REGION(exec, ubf, ubfarg, fail_if_interrupted) do { \
161  rb_thread_t *__th = GET_THREAD(); \
162  struct rb_blocking_region_buffer __region; \
163  if (blocking_region_begin(__th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
164  /* always return true unless fail_if_interrupted */ \
165  !only_if_constant(fail_if_interrupted, TRUE)) { \
166  exec; \
167  blocking_region_end(__th, &__region); \
168  }; \
169 } while(0)
170 
171 #define RUBY_VM_CHECK_INTS_BLOCKING(th) vm_check_ints_blocking(th)
172 static inline void
174 {
176  if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(th))) return;
177  }
178  else {
180 
182  }
184 }
185 
186 static int
188 {
189  return (int)vm->living_thread_num;
190 }
191 
192 #if THREAD_DEBUG
193 #ifdef HAVE_VA_ARGS_MACRO
194 void rb_thread_debug(const char *file, int line, const char *fmt, ...);
195 #define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
196 #define POSITION_FORMAT "%s:%d:"
197 #define POSITION_ARGS ,file, line
198 #else
199 void rb_thread_debug(const char *fmt, ...);
200 #define thread_debug rb_thread_debug
201 #define POSITION_FORMAT
202 #define POSITION_ARGS
203 #endif
204 
205 # ifdef NON_SCALAR_THREAD_ID
206 static const char *
207 fill_thread_id_string(rb_nativethread_id_t thid, rb_thread_id_string_t buf)
208 {
209  extern const char ruby_digitmap[];
210  size_t i;
211 
212  buf[0] = '0';
213  buf[1] = 'x';
214  for (i = 0; i < sizeof(thid); i++) {
215 # ifdef LITTLE_ENDIAN
216  size_t j = sizeof(thid) - i - 1;
217 # else
218  size_t j = i;
219 # endif
220  unsigned char c = (unsigned char)((char *)&thid)[j];
221  buf[2 + i * 2] = ruby_digitmap[(c >> 4) & 0xf];
222  buf[3 + i * 2] = ruby_digitmap[c & 0xf];
223  }
224  buf[sizeof(rb_thread_id_string_t)-1] = '\0';
225  return buf;
226 }
227 # define fill_thread_id_str(th) fill_thread_id_string((th)->thread_id, (th)->thread_id_string)
228 # define thread_id_str(th) ((th)->thread_id_string)
229 # define PRI_THREAD_ID "s"
230 # endif
231 
232 # if THREAD_DEBUG < 0
233 static int rb_thread_debug_enabled;
234 
235 /*
236  * call-seq:
237  * Thread.DEBUG -> num
238  *
239  * Returns the thread debug level. Available only if compiled with
240  * THREAD_DEBUG=-1.
241  */
242 
243 static VALUE
244 rb_thread_s_debug(void)
245 {
246  return INT2NUM(rb_thread_debug_enabled);
247 }
248 
249 /*
250  * call-seq:
251  * Thread.DEBUG = num
252  *
253  * Sets the thread debug level. Available only if compiled with
254  * THREAD_DEBUG=-1.
255  */
256 
257 static VALUE
258 rb_thread_s_debug_set(VALUE self, VALUE val)
259 {
260  rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;
261  return val;
262 }
263 # else
264 # define rb_thread_debug_enabled THREAD_DEBUG
265 # endif
266 #else
267 #define thread_debug if(0)printf
268 #endif
269 
270 #ifndef fill_thread_id_str
271 # define fill_thread_id_string(thid, buf) (thid)
272 # define fill_thread_id_str(th) (void)0
273 # define thread_id_str(th) ((void *)(th)->thread_id)
274 # define PRI_THREAD_ID "p"
275 #endif
276 
277 #ifndef __ia64
278 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
279 #endif
280 NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
281  VALUE *register_stack_start));
282 static void timer_thread_function(void *);
283 
284 #if defined(_WIN32)
285 #include "thread_win32.c"
286 
287 #define DEBUG_OUT() \
288  WaitForSingleObject(&debug_mutex, INFINITE); \
289  printf(POSITION_FORMAT"%#lx - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
290  fflush(stdout); \
291  ReleaseMutex(&debug_mutex);
292 
293 #elif defined(HAVE_PTHREAD_H)
294 #include "thread_pthread.c"
295 
296 #define DEBUG_OUT() \
297  pthread_mutex_lock(&debug_mutex); \
298  printf(POSITION_FORMAT"%"PRI_THREAD_ID" - %s" POSITION_ARGS, \
299  fill_thread_id_string(pthread_self(), thread_id_string), buf); \
300  fflush(stdout); \
301  pthread_mutex_unlock(&debug_mutex);
302 
303 #else
304 #error "unsupported thread type"
305 #endif
306 
307 #if THREAD_DEBUG
308 static int debug_mutex_initialized = 1;
309 static rb_nativethread_lock_t debug_mutex;
310 
311 void
312 rb_thread_debug(
313 #ifdef HAVE_VA_ARGS_MACRO
314  const char *file, int line,
315 #endif
316  const char *fmt, ...)
317 {
318  va_list args;
319  char buf[BUFSIZ];
320 #ifdef NON_SCALAR_THREAD_ID
321  rb_thread_id_string_t thread_id_string;
322 #endif
323 
324  if (!rb_thread_debug_enabled) return;
325 
326  if (debug_mutex_initialized == 1) {
327  debug_mutex_initialized = 0;
328  native_mutex_initialize(&debug_mutex);
329  }
330 
331  va_start(args, fmt);
332  vsnprintf(buf, BUFSIZ, fmt, args);
333  va_end(args);
334 
335  DEBUG_OUT();
336 }
337 #endif
338 
339 #include "thread_sync.c"
340 
341 void
343 {
344  gvl_release(vm);
345  gvl_destroy(vm);
346  native_mutex_destroy(&vm->thread_destruct_lock);
347 }
348 
/* Public C API: initialize a native (OS-level) lock.
 * Thin wrapper over the platform layer's native_mutex_initialize(). */
349 void
350 rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
351 {
352  native_mutex_initialize(lock);
353 }
354 
/* Public C API: destroy a native lock previously initialized with
 * rb_nativethread_lock_initialize(). */
355 void
356 rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
357 {
358  native_mutex_destroy(lock);
359 }
360 
/* Public C API: acquire a native lock (blocks until available). */
361 void
362 rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
363 {
364  native_mutex_lock(lock);
365 }
366 
/* Public C API: release a native lock acquired with
 * rb_nativethread_lock_lock(). */
367 void
368 rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
369 {
370  native_mutex_unlock(lock);
371 }
372 
373 static int
375  struct rb_unblock_callback *old, int fail_if_interrupted)
376 {
377  do {
378  if (fail_if_interrupted) {
379  if (RUBY_VM_INTERRUPTED_ANY(th)) {
380  return FALSE;
381  }
382  }
383  else {
384  RUBY_VM_CHECK_INTS(th);
385  }
386 
387  native_mutex_lock(&th->interrupt_lock);
388  } while (RUBY_VM_INTERRUPTED_ANY(th) &&
389  (native_mutex_unlock(&th->interrupt_lock), TRUE));
390 
391  if (old) *old = th->unblock;
392  th->unblock.func = func;
393  th->unblock.arg = arg;
394  native_mutex_unlock(&th->interrupt_lock);
395 
396  return TRUE;
397 }
398 
399 static void
401 {
402  native_mutex_lock(&th->interrupt_lock);
403  th->unblock = *old;
404  native_mutex_unlock(&th->interrupt_lock);
405 }
406 
407 static void
409 {
410  native_mutex_lock(&th->interrupt_lock);
411  if (trap)
413  else
415  if (th->unblock.func) {
416  (th->unblock.func)(th->unblock.arg);
417  }
418  else {
419  /* none */
420  }
421  native_cond_signal(&th->interrupt_cond);
422  native_mutex_unlock(&th->interrupt_lock);
423 }
424 
425 void
427 {
429 }
430 
431 void
433 {
435 }
436 
437 static void
438 terminate_all(rb_vm_t *vm, const rb_thread_t *main_thread)
439 {
440  rb_thread_t *th = 0;
441 
442  list_for_each(&vm->living_threads, th, vmlt_node) {
443  if (th != main_thread) {
444  thread_debug("terminate_i: %p\n", (void *)th);
447  }
448  else {
449  thread_debug("terminate_i: main thread (%p)\n", (void *)th);
450  }
451  }
452 }
453 
454 void
456 {
457  const char *err;
458  rb_mutex_t *mutex;
459  rb_mutex_t *mutexes = th->keeping_mutexes;
460 
461  while (mutexes) {
462  mutex = mutexes;
463  /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
464  mutexes); */
465  mutexes = mutex->next_mutex;
466  err = rb_mutex_unlock_th(mutex, th);
467  if (err) rb_bug("invalid keeping_mutexes: %s", err);
468  }
469 }
470 
471 void
473 {
474  rb_thread_t *volatile th = GET_THREAD(); /* main thread */
475  rb_vm_t *volatile vm = th->vm;
476  volatile int sleeping = 0;
477 
478  if (vm->main_thread != th) {
479  rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
480  (void *)vm->main_thread, (void *)th);
481  }
482 
483  /* unlock all locking mutexes */
485 
486  TH_PUSH_TAG(th);
487  if (TH_EXEC_TAG() == 0) {
488  retry:
489  thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
490  terminate_all(vm, th);
491 
492  while (vm_living_thread_num(vm) > 1) {
493  /*
494  * Thread exiting routine in thread_start_func_2 notify
495  * me when the last sub-thread exit.
496  */
497  sleeping = 1;
498  native_sleep(th, 0);
500  sleeping = 0;
501  }
502  }
503  else {
504  /*
505  * When caught an exception (e.g. Ctrl+C), let's broadcast
506  * kill request again to ensure killing all threads even
507  * if they are blocked on sleep, mutex, etc.
508  */
509  if (sleeping) {
510  sleeping = 0;
511  goto retry;
512  }
513  }
514  TH_POP_TAG();
515 }
516 
517 static void
519 {
520  rb_thread_t *th = th_ptr;
521  th->status = THREAD_KILLED;
522  th->machine.stack_start = th->machine.stack_end = 0;
523 #ifdef __ia64
524  th->machine.register_stack_start = th->machine.register_stack_end = 0;
525 #endif
526 }
527 
528 static void
529 thread_cleanup_func(void *th_ptr, int atfork)
530 {
531  rb_thread_t *th = th_ptr;
532 
533  th->locking_mutex = Qfalse;
535 
536  /*
537  * Unfortunately, we can't release native threading resource at fork
538  * because libc may have unstable locking state therefore touching
539  * a threading resource may cause a deadlock.
540  */
541  if (atfork)
542  return;
543 
544  native_mutex_destroy(&th->interrupt_lock);
545  native_thread_destroy(th);
546 }
547 
548 static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
549 static VALUE rb_thread_inspect(VALUE thread);
550 
551 void
553 {
554  native_thread_init_stack(th);
555 }
556 
557 const VALUE *
559 {
560  const VALUE *ep = vm_proc_ep(proc);
561 
562  if (ep) {
563  return rb_vm_ep_local_ep(ep);
564  }
565  else {
566  return NULL;
567  }
568 }
569 
570 static void
572 {
573  native_set_thread_name(th);
574  if (!th->first_func) {
575  rb_proc_t *proc;
576  GetProcPtr(th->first_proc, proc);
577  th->errinfo = Qnil;
579  th->root_svar = Qfalse;
581  th->value = rb_vm_invoke_proc(th, proc,
582  (int)RARRAY_LEN(args), RARRAY_CONST_PTR(args),
584  EXEC_EVENT_HOOK(th, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
585  }
586  else {
587  th->value = (*th->first_func)((void *)args);
588  }
589 }
590 
591 static int
592 thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
593 {
594  int state;
595  VALUE args = th->first_args;
596  rb_thread_list_t *join_list;
597  rb_thread_t *main_th;
598  VALUE errinfo = Qnil;
599 # ifdef USE_SIGALTSTACK
600  void rb_register_sigaltstack(rb_thread_t *th);
601 
602  rb_register_sigaltstack(th);
603 # endif
604 
605  if (th == th->vm->main_thread)
606  rb_bug("thread_start_func_2 must not be used for main thread");
607 
608  ruby_thread_set_native(th);
609 
610  th->machine.stack_start = stack_start;
611 #ifdef __ia64
612  th->machine.register_stack_start = register_stack_start;
613 #endif
614  thread_debug("thread start: %p\n", (void *)th);
615 
616  gvl_acquire(th->vm, th);
617  {
618  thread_debug("thread start (get lock): %p\n", (void *)th);
620 
621  TH_PUSH_TAG(th);
622  if ((state = EXEC_TAG()) == 0) {
623  SAVE_ROOT_JMPBUF(th, thread_do_start(th, args));
624  }
625  else {
626  errinfo = th->errinfo;
627  if (state == TAG_FATAL) {
628  /* fatal error within this thread, need to stop whole script */
629  }
630  else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
631  /* exit on main_thread. */
632  }
633  else if (th->vm->thread_abort_on_exception ||
634  th->abort_on_exception || RTEST(ruby_debug)) {
635  /* exit on main_thread */
636  }
637  else if (th->report_on_exception) {
638  VALUE mesg = rb_thread_inspect(th->self);
639  rb_str_cat_cstr(mesg, " terminated with exception:\n");
640  rb_write_error_str(mesg);
641  rb_threadptr_error_print(th, errinfo);
642  errinfo = Qnil;
643  }
644  else {
645  errinfo = Qnil;
646  }
647  th->value = Qnil;
648  }
649 
650  th->status = THREAD_KILLED;
651  thread_debug("thread end: %p\n", (void *)th);
652 
653  main_th = th->vm->main_thread;
654  if (main_th == th) {
655  ruby_stop(0);
656  }
657  if (RB_TYPE_P(errinfo, T_OBJECT)) {
658  /* treat with normal error object */
659  rb_threadptr_raise(main_th, 1, &errinfo);
660  }
661  TH_POP_TAG();
662 
663  /* locking_mutex must be Qfalse */
664  if (th->locking_mutex != Qfalse) {
665  rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
666  (void *)th, th->locking_mutex);
667  }
668 
669  /* delete self other than main thread from living_threads */
670  rb_vm_living_threads_remove(th->vm, th);
671  if (main_th->status == THREAD_KILLED && rb_thread_alone()) {
672  /* I'm last thread. wake up main thread from rb_thread_terminate_all */
673  rb_threadptr_interrupt(main_th);
674  }
675 
676  /* wake up joining threads */
677  join_list = th->join_list;
678  while (join_list) {
679  rb_threadptr_interrupt(join_list->th);
680  switch (join_list->th->status) {
682  join_list->th->status = THREAD_RUNNABLE;
683  default: break;
684  }
685  join_list = join_list->next;
686  }
687 
689  rb_check_deadlock(th->vm);
690 
692  th->stack = NULL;
693  }
694  native_mutex_lock(&th->vm->thread_destruct_lock);
695  /* make sure vm->running_thread never point me after this point.*/
696  th->vm->running_thread = NULL;
697  native_mutex_unlock(&th->vm->thread_destruct_lock);
699  gvl_release(th->vm);
700 
701  return 0;
702 }
703 
704 static VALUE
706 {
707  rb_thread_t *th, *current_th = GET_THREAD();
708  int err;
709 
710  if (OBJ_FROZEN(current_th->thgroup)) {
712  "can't start a new thread (frozen ThreadGroup)");
713  }
714  GetThreadPtr(thval, th);
715 
716  /* setup thread environment */
717  th->first_func = fn;
718  th->first_proc = fn ? Qfalse : rb_block_proc();
719  th->first_args = args; /* GC: shouldn't put before above line */
720 
721  th->priority = current_th->priority;
722  th->thgroup = current_th->thgroup;
723 
728 
729  th->interrupt_mask = 0;
730 
731  native_mutex_initialize(&th->interrupt_lock);
732  native_cond_initialize(&th->interrupt_cond, RB_CONDATTR_CLOCK_MONOTONIC);
734 
735  /* kick thread */
736  err = native_thread_create(th);
737  if (err) {
738  th->status = THREAD_KILLED;
739  rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
740  }
742  return thval;
743 }
744 
745 #define threadptr_initialized(th) ((th)->first_args != 0)
746 
747 /*
748  * call-seq:
749  * Thread.new { ... } -> thread
750  * Thread.new(*args, &proc) -> thread
751  * Thread.new(*args) { |args| ... } -> thread
752  *
753  * Creates a new thread executing the given block.
754  *
755  * Any +args+ given to ::new will be passed to the block:
756  *
757  * arr = []
758  * a, b, c = 1, 2, 3
759  * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
760  * arr #=> [1, 2, 3]
761  *
762  * A ThreadError exception is raised if ::new is called without a block.
763  *
764  * If you're going to subclass Thread, be sure to call super in your
765  * +initialize+ method, otherwise a ThreadError will be raised.
766  */
767 static VALUE
769 {
770  rb_thread_t *th;
771  VALUE thread = rb_thread_alloc(klass);
772 
773  if (GET_VM()->main_thread->status == THREAD_KILLED)
774  rb_raise(rb_eThreadError, "can't alloc thread");
775 
776  rb_obj_call_init(thread, argc, argv);
777  GetThreadPtr(thread, th);
778  if (!threadptr_initialized(th)) {
779  rb_raise(rb_eThreadError, "uninitialized thread - check `%"PRIsVALUE"#initialize'",
780  klass);
781  }
782  return thread;
783 }
784 
785 /*
786  * call-seq:
787  * Thread.start([args]*) {|args| block } -> thread
788  * Thread.fork([args]*) {|args| block } -> thread
789  *
790  * Basically the same as ::new. However, if class Thread is subclassed, then
791  * calling +start+ in that subclass will not invoke the subclass's
792  * +initialize+ method.
793  */
794 
795 static VALUE
797 {
798  return thread_create_core(rb_thread_alloc(klass), args, 0);
799 }
800 
801 /* :nodoc: */
802 static VALUE
804 {
805  rb_thread_t *th;
806  if (!rb_block_given_p()) {
807  rb_raise(rb_eThreadError, "must be called with a block");
808  }
809  GetThreadPtr(thread, th);
810  if (th->first_args) {
811  VALUE proc = th->first_proc, loc;
812  if (!proc || !RTEST(loc = rb_proc_location(proc))) {
813  rb_raise(rb_eThreadError, "already initialized thread");
814  }
816  "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
817  RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
818  }
819  return thread_create_core(thread, args, 0);
820 }
821 
822 VALUE
824 {
826 }
827 
828 
829 /* +infty, for this purpose */
830 #define DELAY_INFTY 1E30
831 
832 struct join_arg {
833  rb_thread_t *target, *waiting;
834  double delay;
835 };
836 
837 static VALUE
839 {
840  struct join_arg *p = (struct join_arg *)arg;
841  rb_thread_t *target_th = p->target, *th = p->waiting;
842 
843  if (target_th->status != THREAD_KILLED) {
844  rb_thread_list_t **p = &target_th->join_list;
845 
846  while (*p) {
847  if ((*p)->th == th) {
848  *p = (*p)->next;
849  break;
850  }
851  p = &(*p)->next;
852  }
853  }
854 
855  return Qnil;
856 }
857 
858 static VALUE
860 {
861  struct join_arg *p = (struct join_arg *)arg;
862  rb_thread_t *target_th = p->target, *th = p->waiting;
863  const int forever = p->delay == DELAY_INFTY;
864  const double limit = forever ? 0 : timeofday() + p->delay;
865 
866  while (target_th->status != THREAD_KILLED) {
867  if (forever) {
869  }
870  else {
871  double now = timeofday();
872  if (now > limit) {
873  thread_debug("thread_join: timeout (thid: %"PRI_THREAD_ID")\n",
874  thread_id_str(target_th));
875  return Qfalse;
876  }
877  sleep_wait_for_interrupt(th, limit - now, 0);
878  }
879  thread_debug("thread_join: interrupted (thid: %"PRI_THREAD_ID")\n",
880  thread_id_str(target_th));
881  }
882  return Qtrue;
883 }
884 
885 static VALUE
886 thread_join(rb_thread_t *target_th, double delay)
887 {
889  struct join_arg arg;
890 
891  if (th == target_th) {
892  rb_raise(rb_eThreadError, "Target thread must not be current thread");
893  }
894  if (GET_VM()->main_thread == target_th) {
895  rb_raise(rb_eThreadError, "Target thread must not be main thread");
896  }
897 
898  arg.target = target_th;
899  arg.waiting = th;
900  arg.delay = delay;
901 
902  thread_debug("thread_join (thid: %"PRI_THREAD_ID")\n", thread_id_str(target_th));
903 
904  if (target_th->status != THREAD_KILLED) {
906  list.next = target_th->join_list;
907  list.th = th;
908  target_th->join_list = &list;
909  if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
910  remove_from_join_list, (VALUE)&arg)) {
911  return Qnil;
912  }
913  }
914 
915  thread_debug("thread_join: success (thid: %"PRI_THREAD_ID")\n",
916  thread_id_str(target_th));
917 
918  if (target_th->errinfo != Qnil) {
919  VALUE err = target_th->errinfo;
920 
921  if (FIXNUM_P(err)) {
922  switch (err) {
923  case INT2FIX(TAG_FATAL):
924  /* OK. killed. */
925  break;
926  default:
927  rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
928  }
929  }
930  else if (THROW_DATA_P(target_th->errinfo)) {
931  rb_bug("thread_join: THROW_DATA should not reach here.");
932  }
933  else {
934  /* normal exception */
935  rb_exc_raise(err);
936  }
937  }
938  return target_th->self;
939 }
940 
941 /*
942  * call-seq:
943  * thr.join -> thr
944  * thr.join(limit) -> thr
945  *
946  * The calling thread will suspend execution and run this +thr+.
947  *
948  * Does not return until +thr+ exits or until the given +limit+ seconds have
949  * passed.
950  *
951  * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
952  * returned.
953  *
954  * Any threads not joined will be killed when the main program exits.
955  *
956  * If +thr+ had previously raised an exception and the ::abort_on_exception or
957  * $DEBUG flags are not set, (so the exception has not yet been processed), it
958  * will be processed at this time.
959  *
960  * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
961  * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
962  * x.join # Let thread x finish, thread a will be killed on exit.
963  * #=> "axyz"
964  *
965  * The following example illustrates the +limit+ parameter.
966  *
967  * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
968  * puts "Waiting" until y.join(0.15)
969  *
970  * This will produce:
971  *
972  * tick...
973  * Waiting
974  * tick...
975  * Waiting
976  * tick...
977  * tick...
978  */
979 
980 static VALUE
982 {
983  rb_thread_t *target_th;
984  double delay = DELAY_INFTY;
985  VALUE limit;
986 
987  GetThreadPtr(self, target_th);
988 
989  rb_scan_args(argc, argv, "01", &limit);
990  if (!NIL_P(limit)) {
991  delay = rb_num2dbl(limit);
992  }
993 
994  return thread_join(target_th, delay);
995 }
996 
997 /*
998  * call-seq:
999  * thr.value -> obj
1000  *
1001  * Waits for +thr+ to complete, using #join, and returns its value or raises
1002  * the exception which terminated the thread.
1003  *
1004  * a = Thread.new { 2 + 2 }
1005  * a.value #=> 4
1006  *
1007  * b = Thread.new { raise 'something went wrong' }
1008  * b.value #=> RuntimeError: something went wrong
1009  */
1010 
1011 static VALUE
1013 {
1014  rb_thread_t *th;
1015  GetThreadPtr(self, th);
1016  thread_join(th, DELAY_INFTY);
1017  return th->value;
1018 }
1019 
1020 /*
1021  * Thread Scheduling
1022  */
1023 
1024 /*
1025  * The type of tv_sec in struct timeval is time_t in POSIX.
1026  * But several systems violate POSIX.
1027  *
1028  * OpenBSD 5.2 (amd64):
1029  * time_t: int (signed 32bit integer)
1030  * tv_sec: long (signed 64bit integer)
1031  *
1032  * MinGW-w64 (x64):
1033  * time_t: long long (signed 64bit integer)
1034  * tv_sec: long (signed 32bit integer)
1035  */
1036 
1037 #if SIGNEDNESS_OF_TIME_T < 0 /* signed */
1038 # define TIMEVAL_SEC_MAX SIGNED_INTEGER_MAX(TYPEOF_TIMEVAL_TV_SEC)
1039 # define TIMEVAL_SEC_MIN SIGNED_INTEGER_MIN(TYPEOF_TIMEVAL_TV_SEC)
1040 #elif SIGNEDNESS_OF_TIME_T > 0 /* unsigned */
1041 # define TIMEVAL_SEC_MAX ((TYPEOF_TIMEVAL_TV_SEC)(~(unsigned_time_t)0))
1042 # define TIMEVAL_SEC_MIN ((TYPEOF_TIMEVAL_TV_SEC)0)
1043 #endif
1044 
1045 static struct timeval
1047 {
1048  /* assume timeval.tv_sec has same signedness as time_t */
1049  const double TIMEVAL_SEC_MAX_PLUS_ONE = (2*(double)(TIMEVAL_SEC_MAX/2+1));
1050 
1051  struct timeval time;
1052 
1053  if (TIMEVAL_SEC_MAX_PLUS_ONE <= d) {
1054  time.tv_sec = TIMEVAL_SEC_MAX;
1055  time.tv_usec = 999999;
1056  }
1057  else if (d <= TIMEVAL_SEC_MIN) {
1058  time.tv_sec = TIMEVAL_SEC_MIN;
1059  time.tv_usec = 0;
1060  }
1061  else {
1062  time.tv_sec = (TYPEOF_TIMEVAL_TV_SEC)d;
1063  time.tv_usec = (int)((d - (time_t)d) * 1e6);
1064  if (time.tv_usec < 0) {
1065  time.tv_usec += (int)1e6;
1066  time.tv_sec -= 1;
1067  }
1068  }
1069  return time;
1070 }
1071 
1072 static void
1073 sleep_forever(rb_thread_t *th, int deadlockable, int spurious_check)
1074 {
1075  enum rb_thread_status prev_status = th->status;
1076  enum rb_thread_status status = deadlockable ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
1077 
1078  th->status = status;
1080  while (th->status == status) {
1081  if (deadlockable) {
1082  th->vm->sleeper++;
1083  rb_check_deadlock(th->vm);
1084  }
1085  native_sleep(th, 0);
1086  if (deadlockable) {
1087  th->vm->sleeper--;
1088  }
1090  if (!spurious_check)
1091  break;
1092  }
1093  th->status = prev_status;
1094 }
1095 
1096 static void
1098 {
1099 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1100  struct timespec ts;
1101 
1102  if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
1103  tp->tv_sec = ts.tv_sec;
1104  tp->tv_usec = (int)(ts.tv_nsec / 1000);
1105  }
1106  else
1107 #endif
1108  {
1109  gettimeofday(tp, NULL);
1110  }
1111 }
1112 
1113 static void
1114 sleep_timeval(rb_thread_t *th, struct timeval tv, int spurious_check)
1115 {
1116  struct timeval to, tvn;
1117  enum rb_thread_status prev_status = th->status;
1118 
1119  getclockofday(&to);
1120  if (TIMEVAL_SEC_MAX - tv.tv_sec < to.tv_sec)
1121  to.tv_sec = TIMEVAL_SEC_MAX;
1122  else
1123  to.tv_sec += tv.tv_sec;
1124  if ((to.tv_usec += tv.tv_usec) >= 1000000) {
1125  if (to.tv_sec == TIMEVAL_SEC_MAX)
1126  to.tv_usec = 999999;
1127  else {
1128  to.tv_sec++;
1129  to.tv_usec -= 1000000;
1130  }
1131  }
1132 
1133  th->status = THREAD_STOPPED;
1135  while (th->status == THREAD_STOPPED) {
1136  native_sleep(th, &tv);
1138  getclockofday(&tvn);
1139  if (to.tv_sec < tvn.tv_sec) break;
1140  if (to.tv_sec == tvn.tv_sec && to.tv_usec <= tvn.tv_usec) break;
1141  thread_debug("sleep_timeval: %"PRI_TIMET_PREFIX"d.%.6ld > %"PRI_TIMET_PREFIX"d.%.6ld\n",
1142  (time_t)to.tv_sec, (long)to.tv_usec,
1143  (time_t)tvn.tv_sec, (long)tvn.tv_usec);
1144  tv.tv_sec = to.tv_sec - tvn.tv_sec;
1145  if ((tv.tv_usec = to.tv_usec - tvn.tv_usec) < 0) {
1146  --tv.tv_sec;
1147  tv.tv_usec += 1000000;
1148  }
1149  if (!spurious_check)
1150  break;
1151  }
1152  th->status = prev_status;
1153 }
1154 
1155 void
1157 {
1158  thread_debug("rb_thread_sleep_forever\n");
1160 }
1161 
1162 void
1164 {
1165  thread_debug("rb_thread_sleep_deadly\n");
1167 }
1168 
1169 static void
1171 {
1172  thread_debug("rb_thread_sleep_deadly_allow_spurious_wakeup\n");
1174 }
1175 
1176 static double
1178 {
1179 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1180  struct timespec tp;
1181 
1182  if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
1183  return (double)tp.tv_sec + (double)tp.tv_nsec * 1e-9;
1184  }
1185  else
1186 #endif
1187  {
1188  struct timeval tv;
1189  gettimeofday(&tv, NULL);
1190  return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
1191  }
1192 }
1193 
1194 static void
1195 sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check)
1196 {
1197  sleep_timeval(th, double2timeval(sleepsec), spurious_check);
1198 }
1199 
1200 void
1202 {
1203  rb_thread_t *th = GET_THREAD();
1204  sleep_timeval(th, time, 1);
1205 }
1206 
1207 /*
1208  * CAUTION: This function causes thread switching.
1209  * rb_thread_check_ints() check ruby's interrupts.
1210  * some interrupt needs thread switching/invoke handlers,
1211  * and so on.
1212  */
1213 
1214 void
1216 {
1217  rb_thread_t *th = GET_THREAD();
1219 }
1220 
1221 /*
1222  * Hidden API for tcl/tk wrapper.
1223  * There is no guarantee to perpetuate it.
1224  */
1225 int
1227 {
1228  return rb_signal_buff_size() != 0;
1229 }
1230 
1231 /* This function can be called in blocking region. */
1232 int
1234 {
1235  rb_thread_t *th;
1236  GetThreadPtr(thval, th);
1237  return (int)RUBY_VM_INTERRUPTED(th);
1238 }
1239 
1240 void
1242 {
1244 }
1245 
1246 static void
1247 rb_thread_schedule_limits(unsigned long limits_us)
1248 {
1249  thread_debug("rb_thread_schedule\n");
1250  if (!rb_thread_alone()) {
1251  rb_thread_t *th = GET_THREAD();
1252 
1253  if (th->running_time_us >= limits_us) {
1254  thread_debug("rb_thread_schedule/switch start\n");
1256  gvl_yield(th->vm, th);
1258  thread_debug("rb_thread_schedule/switch done\n");
1259  }
1260  }
1261 }
1262 
1263 void
1265 {
1266  rb_thread_t *cur_th = GET_THREAD();
1268 
1269  RUBY_VM_CHECK_INTS(cur_th);
1270 }
1271 
1272 /* blocking region */
1273 
1274 static inline int
1276  rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1277 {
1278  region->prev_status = th->status;
1279  if (set_unblock_function(th, ubf, arg, &region->oldubf, fail_if_interrupted)) {
1280  th->blocking_region_buffer = region;
1281  th->status = THREAD_STOPPED;
1282  thread_debug("enter blocking region (%p)\n", (void *)th);
1284  gvl_release(th->vm);
1285  return TRUE;
1286  }
1287  else {
1288  return FALSE;
1289  }
1290 }
1291 
1292 static inline void
1294 {
1295  gvl_acquire(th->vm, th);
1297  thread_debug("leave blocking region (%p)\n", (void *)th);
1298  unregister_ubf_list(th);
1299  th->blocking_region_buffer = 0;
1300  reset_unblock_function(th, &region->oldubf);
1301  if (th->status == THREAD_STOPPED) {
1302  th->status = region->prev_status;
1303  }
1304 }
1305 
/*
 * Common implementation behind rb_thread_call_without_gvl() and
 * rb_thread_call_without_gvl2(): run func(data1) inside a blocking
 * region (GVL released), preserving errno across the region.
 * `fail_if_interrupted` selects the gvl2 semantics (give up rather
 * than process interrupts).
 */
static void *
call_without_gvl(void *(*func)(void *), void *data1,
		 rb_unblock_function_t *ubf, void *data2, int fail_if_interrupted)
{
    void *val = 0;

    rb_thread_t *th = GET_THREAD();
    int saved_errno = 0;

    /* The RUBY_UBF_IO / RUBY_UBF_PROCESS placeholders stand for the
     * built-in select-based unblocking function, which needs the
     * thread pointer as its argument. */
    if (ubf == RUBY_UBF_IO || ubf == RUBY_UBF_PROCESS) {
	ubf = ubf_select;
	data2 = th;
    }

    BLOCKING_REGION({
	val = func(data1);
	saved_errno = errno; /* capture before the GVL is reacquired */
    }, ubf, data2, fail_if_interrupted);

    if (!fail_if_interrupted) {
	/* NOTE(review): a statement appears to be missing here in this
	 * extract -- upstream performs a blocking interrupt check at
	 * this point; confirm against the repository copy. */
    }

    errno = saved_errno;

    return val;
}
1333 
1334 /*
1335  * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1336  * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1337  * without interrupt process.
1338  *
1339  * rb_thread_call_without_gvl() does:
1340  * (1) Check interrupts.
1341  * (2) release GVL.
1342  * Other Ruby threads may run in parallel.
1343  * (3) call func with data1
1344  * (4) acquire GVL.
1345  * Other Ruby threads can not run in parallel any more.
1346  * (5) Check interrupts.
1347  *
1348  * rb_thread_call_without_gvl2() does:
1349  * (1) Check interrupt and return if interrupted.
1350  * (2) release GVL.
1351  * (3) call func with data1 and a pointer to the flags.
1352  * (4) acquire GVL.
1353  *
1354  * If another thread interrupts this thread (Thread#kill, signal delivery,
1355  * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1356  * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1357  * toggling a cancellation flag, canceling the invocation of a call inside
1358  * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1359  *
1360  * There are built-in ubfs and you can specify these ubfs:
1361  *
1362  * * RUBY_UBF_IO: ubf for IO operation
1363  * * RUBY_UBF_PROCESS: ubf for process operation
1364  *
1365  * However, we can not guarantee our built-in ubfs interrupt your `func()'
1366  * correctly. Be careful to use rb_thread_call_without_gvl(). If you don't
1367  * provide proper ubf(), your program will not stop for Control+C or other
1368  * shutdown events.
1369  *
1370  * "Check interrupts" on above list means checking asynchronous
1371  * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1372  * request, and so on) and calling corresponding procedures
1373  * (such as `trap' for signals, raise an exception for Thread#raise).
1374  * If `func()' finished and received interrupts, you may skip interrupt
 * checking. For example, assume the following func() which reads data from a file.
1376  *
1377  * read_func(...) {
1378  * // (a) before read
1379  * read(buffer); // (b) reading
1380  * // (c) after read
1381  * }
1382  *
1383  * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1384  * `read_func()' and interrupts are checked. However, if an interrupt occurs
1385  * at (c), after *read* operation is completed, checking interrupts is harmful
1386  * because it causes irrevocable side-effect, the read data will vanish. To
1387  * avoid such problem, the `read_func()' should be used with
1388  * `rb_thread_call_without_gvl2()'.
1389  *
1390  * If `rb_thread_call_without_gvl2()' detects interrupt, it returns
1391  * immediately. This function does not show when the execution was interrupted.
1392  * For example, there are 4 possible timing (a), (b), (c) and before calling
1393  * read_func(). You need to record progress of a read_func() and check
1394  * the progress after `rb_thread_call_without_gvl2()'. You may need to call
 * `rb_thread_check_ints()' correctly, or your program will not be able to
 * perform proper handling of events such as `trap' and so on.
1397  *
1398  * NOTE: You can not execute most of Ruby C API and touch Ruby
1399  * objects in `func()' and `ubf()', including raising an
1400  * exception, because current thread doesn't acquire GVL
1401  * (it causes synchronization problems). If you need to
1402  * call ruby functions either use rb_thread_call_with_gvl()
1403  * or read source code of C APIs and confirm safety by
1404  * yourself.
1405  *
1406  * NOTE: In short, this API is difficult to use safely. I recommend you
1407  * use other ways if you have. We lack experiences to use this API.
1408  * Please report your problem related on it.
1409  *
1410  * NOTE: Releasing GVL and re-acquiring GVL may be expensive operations
1411  * for a short running `func()'. Be sure to benchmark and use this
1412  * mechanism when `func()' consumes enough time.
1413  *
1414  * Safe C API:
1415  * * rb_thread_interrupted() - check interrupt flag
1416  * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1417  * they will work without GVL, and may acquire GVL when GC is needed.
1418  */
1419 void *
1420 rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
1421  rb_unblock_function_t *ubf, void *data2)
1422 {
1423  return call_without_gvl(func, data1, ubf, data2, TRUE);
1424 }
1425 
1426 void *
1427 rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
1428  rb_unblock_function_t *ubf, void *data2)
1429 {
1430  return call_without_gvl(func, data1, ubf, data2, FALSE);
1431 }
1432 
1433 VALUE
1435 {
1436  volatile VALUE val = Qundef; /* shouldn't be used */
1437  rb_vm_t *vm = GET_VM();
1438  rb_thread_t *th = GET_THREAD();
1439  volatile int saved_errno = 0;
1440  int state;
1441  struct waiting_fd wfd;
1442 
1443  wfd.fd = fd;
1444  wfd.th = th;
1445  list_add(&vm->waiting_fds, &wfd.wfd_node);
1446 
1447  TH_PUSH_TAG(th);
1448  if ((state = EXEC_TAG()) == 0) {
1449  BLOCKING_REGION({
1450  val = func(data1);
1451  saved_errno = errno;
1452  }, ubf_select, th, FALSE);
1453  }
1454  TH_POP_TAG();
1455 
1456  /* must be deleted before jump */
1457  list_del(&wfd.wfd_node);
1458 
1459  if (state) {
1460  TH_JUMP_TAG(th, state);
1461  }
1462  /* TODO: check func() */
1464 
1465  errno = saved_errno;
1466 
1467  return val;
1468 }
1469 
1470 /*
1471  * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
1472  *
1473  * After releasing GVL using
1474  * rb_thread_call_without_gvl() you can not access Ruby values or invoke
1475  * methods. If you need to access Ruby you must use this function
1476  * rb_thread_call_with_gvl().
1477  *
1478  * This function rb_thread_call_with_gvl() does:
1479  * (1) acquire GVL.
1480  * (2) call passed function `func'.
1481  * (3) release GVL.
1482  * (4) return a value which is returned at (2).
1483  *
1484  * NOTE: You should not return Ruby object at (2) because such Object
1485  * will not be marked.
1486  *
1487  * NOTE: If an exception is raised in `func', this function DOES NOT
1488  * protect (catch) the exception. If you have any resources
1489  * which should free before throwing exception, you need use
1490  * rb_protect() in `func' and return a value which represents
1491  * exception was raised.
1492  *
1493  * NOTE: This function should not be called by a thread which was not
1494  * created as Ruby thread (created by Thread.new or so). In other
1495  * words, this function *DOES NOT* associate or convert a NON-Ruby
1496  * thread to a Ruby thread.
1497  */
void *
rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
{
    rb_thread_t *th = ruby_thread_from_native();
    struct rb_blocking_region_buffer *brb;
    struct rb_unblock_callback prev_unblock;
    void *r;

    if (th == 0) {
	/* Error has occurred, but we can't use rb_bug()
	 * because this thread is not Ruby's thread.
	 * What should we do?
	 */

	fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
	exit(EXIT_FAILURE);
    }

    /* NOTE(review): the assignment of `brb` (taken from
     * th->blocking_region_buffer upstream) is missing from this
     * extract; the NULL check below depends on it -- confirm. */
    prev_unblock = th->unblock;

    /* A NULL region buffer means we already hold the GVL: misuse. */
    if (brb == 0) {
	rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
    }

    blocking_region_end(th, brb);
    /* enter to Ruby world: You can access Ruby values, methods and so on. */
    r = (*func)(data1);
    /* leave from Ruby world: You can not access Ruby values, etc. */
    blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
    return r;
}
1530 
1531 /*
1532  * ruby_thread_has_gvl_p - check if current native thread has GVL.
1533  *
1534  ***
1535  *** This API is EXPERIMENTAL!
1536  *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1537  ***
1538  */
1539 
1540 int
1542 {
1543  rb_thread_t *th = ruby_thread_from_native();
1544 
1545  if (th && th->blocking_region_buffer == 0) {
1546  return 1;
1547  }
1548  else {
1549  return 0;
1550  }
1551 }
1552 
1553 /*
1554  * call-seq:
1555  * Thread.pass -> nil
1556  *
1557  * Give the thread scheduler a hint to pass execution to another thread.
1558  * A running thread may or may not switch, it depends on OS and processor.
1559  */
1560 
1561 static VALUE
1563 {
1565  return Qnil;
1566 }
1567 
1568 /*****************************************************/
1569 
1570 /*
1571  * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
1572  *
1573  * Async events such as an exception thrown by Thread#raise,
1574  * Thread#kill and thread termination (after main thread termination)
1575  * will be queued to th->pending_interrupt_queue.
1576  * - clear: clear the queue.
1577  * - enque: enqueue err object into queue.
1578  * - deque: dequeue err object from queue.
1579  * - active_p: return 1 if the queue should be checked.
1580  *
1581  * All rb_threadptr_pending_interrupt_* functions are called by
1582  * a GVL acquired thread, of course.
1583  * Note that all "rb_" prefix APIs need GVL to call.
1584  */
1585 
1586 void
1588 {
1590 }
1591 
1592 void
1594 {
1597 }
1598 
1599 static void
1601 {
1602  if (!th->pending_interrupt_queue) {
1603  rb_raise(rb_eThreadError, "uninitialized thread");
1604  }
1605 }
1606 
1612 };
1613 
/*
 * Search the thread's handle_interrupt mask stack (innermost mask
 * first) for a timing symbol registered for `err` or one of its
 * ancestors.  Returns the matching timing, or INTERRUPT_NONE when no
 * mask applies.
 */
/* NOTE(review): the signature line is missing from this extract;
 * judging from the body it takes (rb_thread_t *th, VALUE err) --
 * confirm against the repository copy. */
static enum handle_interrupt_timing
{
    VALUE mask;
    long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
    const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
    VALUE ancestors = rb_mod_ancestors(err); /* TODO: GC guard */
    long ancestors_len = RARRAY_LEN(ancestors);
    const VALUE *ancestors_ptr = RARRAY_CONST_PTR(ancestors);
    int i, j;

    /* Walk masks innermost-first; the first matching class wins. */
    for (i=0; i<mask_stack_len; i++) {
	mask = mask_stack[mask_stack_len-(i+1)];

	for (j=0; j<ancestors_len; j++) {
	    VALUE klass = ancestors_ptr[j];
	    VALUE sym;

	    /* TODO: remove rb_intern() */
	    if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
		if (sym == sym_immediate) {
		    return INTERRUPT_IMMEDIATE;
		}
		else if (sym == sym_on_blocking) {
		    return INTERRUPT_ON_BLOCKING;
		}
		else if (sym == sym_never) {
		    return INTERRUPT_NEVER;
		}
		else {
		    rb_raise(rb_eThreadError, "unknown mask signature");
		}
	    }
	}
	/* try to next mask */
    }
    return INTERRUPT_NONE;
}
1652 
1653 static int
1655 {
1656  return RARRAY_LEN(th->pending_interrupt_queue) == 0;
1657 }
1658 
1659 static int
1661 {
1662  int i;
1663  for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
1665  if (rb_class_inherited_p(e, err)) {
1666  return TRUE;
1667  }
1668  }
1669  return FALSE;
1670 }
1671 
1672 static VALUE
1674 {
1675 #if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
1676  int i;
1677 
1678  for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
1680 
1682 
1683  switch (mask_timing) {
1684  case INTERRUPT_ON_BLOCKING:
1685  if (timing != INTERRUPT_ON_BLOCKING) {
1686  break;
1687  }
1688  /* fall through */
1689  case INTERRUPT_NONE: /* default: IMMEDIATE */
1690  case INTERRUPT_IMMEDIATE:
1692  return err;
1693  case INTERRUPT_NEVER:
1694  break;
1695  }
1696  }
1697 
1699  return Qundef;
1700 #else
1704  }
1705  return err;
1706 #endif
1707 }
1708 
1709 int
1711 {
1712  /*
1713  * For optimization, we don't check async errinfo queue
1714  * if the queue and the thread interrupt mask were not changed
1715  * since last check.
1716  */
1718  return 0;
1719  }
1720 
1722  return 0;
1723  }
1724 
1725  return 1;
1726 }
1727 
/*
 * Hash-iteration callback validating one "ExceptionClass =>
 * :TimingSymbol" pair of Thread.handle_interrupt's argument and
 * copying it into the identity hash accumulated in *maskp.
 */
/* NOTE(review): the signature line is missing from this extract; the
 * body uses (key, val, args) in the standard rb_hash_foreach callback
 * shape -- confirm against the repository copy. */
static int
{
    VALUE *maskp = (VALUE *)args;

    /* Only the three documented timing symbols are accepted. */
    if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
	rb_raise(rb_eArgError, "unknown mask signature");
    }

    /* Lazily create the mask hash on the first valid pair. */
    if (!*maskp) {
	*maskp = rb_ident_hash_new();
    }
    rb_hash_aset(*maskp, key, val);

    return ST_CONTINUE;
}
1744 
1745 /*
1746  * call-seq:
1747  * Thread.handle_interrupt(hash) { ... } -> result of the block
1748  *
1749  * Changes asynchronous interrupt timing.
1750  *
1751  * _interrupt_ means asynchronous event and corresponding procedure
1752  * by Thread#raise, Thread#kill, signal trap (not supported yet)
1753  * and main thread termination (if main thread terminates, then all
1754  * other thread will be killed).
1755  *
1756  * The given +hash+ has pairs like <code>ExceptionClass =>
1757  * :TimingSymbol</code>. Where the ExceptionClass is the interrupt handled by
1758  * the given block. The TimingSymbol can be one of the following symbols:
1759  *
1760  * [+:immediate+] Invoke interrupts immediately.
1761  * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
1762  * [+:never+] Never invoke all interrupts.
1763  *
1764  * _BlockingOperation_ means that the operation will block the calling thread,
1765  * such as read and write. On CRuby implementation, _BlockingOperation_ is any
1766  * operation executed without GVL.
1767  *
1768  * Masked asynchronous interrupts are delayed until they are enabled.
1769  * This method is similar to sigprocmask(3).
1770  *
1771  * === NOTE
1772  *
1773  * Asynchronous interrupts are difficult to use.
1774  *
1775  * If you need to communicate between threads, please consider to use another way such as Queue.
1776  *
1777  * Or use them with deep understanding about this method.
1778  *
1779  * === Usage
1780  *
1781  * In this example, we can guard from Thread#raise exceptions.
1782  *
1783  * Using the +:never+ TimingSymbol the RuntimeError exception will always be
1784  * ignored in the first block of the main thread. In the second
1785  * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
1786  *
1787  * th = Thread.new do
1788  * Thread.handle_interrupt(RuntimeError => :never) {
1789  * begin
1790  * # You can write resource allocation code safely.
1791  * Thread.handle_interrupt(RuntimeError => :immediate) {
1792  * # ...
1793  * }
1794  * ensure
1795  * # You can write resource deallocation code safely.
1796  * end
1797  * }
1798  * end
1799  * Thread.pass
1800  * # ...
1801  * th.raise "stop"
1802  *
1803  * While we are ignoring the RuntimeError exception, it's safe to write our
1804  * resource allocation code. Then, the ensure block is where we can safely
1805  * deallocate your resources.
1806  *
1807  * ==== Guarding from Timeout::Error
1808  *
1809  * In the next example, we will guard from the Timeout::Error exception. This
1810  * will help prevent from leaking resources when Timeout::Error exceptions occur
1811  * during normal ensure clause. For this example we use the help of the
1812  * standard library Timeout, from lib/timeout.rb
1813  *
1814  * require 'timeout'
1815  * Thread.handle_interrupt(Timeout::Error => :never) {
1816  * timeout(10){
1817  * # Timeout::Error doesn't occur here
1818  * Thread.handle_interrupt(Timeout::Error => :on_blocking) {
1819  * # possible to be killed by Timeout::Error
1820  * # while blocking operation
1821  * }
1822  * # Timeout::Error doesn't occur here
1823  * }
1824  * }
1825  *
1826  * In the first part of the +timeout+ block, we can rely on Timeout::Error being
1827  * ignored. Then in the <code>Timeout::Error => :on_blocking</code> block, any
1828  * operation that will block the calling thread is susceptible to a
1829  * Timeout::Error exception being raised.
1830  *
1831  * ==== Stack control settings
1832  *
1833  * It's possible to stack multiple levels of ::handle_interrupt blocks in order
1834  * to control more than one ExceptionClass and TimingSymbol at a time.
1835  *
1836  * Thread.handle_interrupt(FooError => :never) {
1837  * Thread.handle_interrupt(BarError => :never) {
1838  * # FooError and BarError are prohibited.
1839  * }
1840  * }
1841  *
1842  * ==== Inheritance with ExceptionClass
1843  *
1844  * All exceptions inherited from the ExceptionClass parameter will be considered.
1845  *
1846  * Thread.handle_interrupt(Exception => :never) {
1847  * # all exceptions inherited from Exception are prohibited.
1848  * }
1849  *
1850  */
1851 static VALUE
1853 {
1854  VALUE mask;
1855  rb_thread_t *th = GET_THREAD();
1856  volatile VALUE r = Qnil;
1857  int state;
1858 
1859  if (!rb_block_given_p()) {
1860  rb_raise(rb_eArgError, "block is needed.");
1861  }
1862 
1863  mask = 0;
1864  mask_arg = rb_convert_type(mask_arg, T_HASH, "Hash", "to_hash");
1866  if (!mask) {
1867  return rb_yield(Qnil);
1868  }
1869  OBJ_FREEZE_RAW(mask);
1874  }
1875 
1876  TH_PUSH_TAG(th);
1877  if ((state = EXEC_TAG()) == 0) {
1878  r = rb_yield(Qnil);
1879  }
1880  TH_POP_TAG();
1881 
1886  }
1887 
1888  RUBY_VM_CHECK_INTS(th);
1889 
1890  if (state) {
1891  TH_JUMP_TAG(th, state);
1892  }
1893 
1894  return r;
1895 }
1896 
1897 /*
1898  * call-seq:
1899  * target_thread.pending_interrupt?(error = nil) -> true/false
1900  *
1901  * Returns whether or not the asynchronous queue is empty for the target thread.
1902  *
1903  * If +error+ is given, then check only for +error+ type deferred events.
1904  *
1905  * See ::pending_interrupt? for more information.
1906  */
/* NOTE(review): the signature line is missing from this extract; the
 * body uses (argc, argv, target_thread) -- confirm against the
 * repository copy. */
static VALUE
{
    rb_thread_t *target_th;

    GetThreadPtr(target_thread, target_th);

    /* A never-initialized queue cannot hold pending interrupts. */
    if (!target_th->pending_interrupt_queue) {
	return Qfalse;
    }
    if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
	return Qfalse;
    }
    else {
	if (argc == 1) {
	    VALUE err;
	    rb_scan_args(argc, argv, "01", &err);
	    if (!rb_obj_is_kind_of(err, rb_cModule)) {
		rb_raise(rb_eTypeError, "class or module required for rescue clause");
	    }
	    /* Filtered query: report only `err` (or subclass) events. */
	    if (rb_threadptr_pending_interrupt_include_p(target_th, err)) {
		return Qtrue;
	    }
	    else {
		return Qfalse;
	    }
	}
	return Qtrue;
    }
}
1937 
1938 /*
1939  * call-seq:
1940  * Thread.pending_interrupt?(error = nil) -> true/false
1941  *
1942  * Returns whether or not the asynchronous queue is empty.
1943  *
1944  * Since Thread::handle_interrupt can be used to defer asynchronous events,
1945  * this method can be used to determine if there are any deferred events.
1946  *
1947  * If you find this method returns true, then you may finish +:never+ blocks.
1948  *
1949  * For example, the following method processes deferred asynchronous events
1950  * immediately.
1951  *
1952  * def Thread.kick_interrupt_immediately
1953  * Thread.handle_interrupt(Object => :immediate) {
1954  * Thread.pass
1955  * }
1956  * end
1957  *
1958  * If +error+ is given, then check only for +error+ type deferred events.
1959  *
1960  * === Usage
1961  *
1962  * th = Thread.new{
1963  * Thread.handle_interrupt(RuntimeError => :on_blocking){
1964  * while true
1965  * ...
1966  * # reach safe point to invoke interrupt
1967  * if Thread.pending_interrupt?
1968  * Thread.handle_interrupt(Object => :immediate){}
1969  * end
1970  * ...
1971  * end
1972  * }
1973  * }
1974  * ...
1975  * th.raise # stop thread
1976  *
1977  * This example can also be written as the following, which you should use to
1978  * avoid asynchronous interrupts.
1979  *
1980  * flag = true
1981  * th = Thread.new{
1982  * Thread.handle_interrupt(RuntimeError => :on_blocking){
1983  * while true
1984  * ...
1985  * # reach safe point to invoke interrupt
1986  * break if flag == false
1987  * ...
1988  * end
1989  * }
1990  * }
1991  * ...
1992  * flag = false # stop thread
1993  */
1994 
1995 static VALUE
1997 {
1998  return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
1999 }
2000 
2001 static void
2003 {
2005  th->status = THREAD_RUNNABLE;
2006  th->to_kill = 1;
2007  th->errinfo = INT2FIX(TAG_FATAL);
2008  TH_JUMP_TAG(th, TAG_FATAL);
2009 }
2010 
/*
 * Atomically fetch and clear the thread's deliverable interrupt bits.
 * Bits covered by th->interrupt_mask stay pending in interrupt_flag;
 * only the unmasked bits are returned.
 */
/* NOTE(review): the signature line is missing from this extract; the
 * body reads only fields of an rb_thread_t *th -- confirm. */
static inline rb_atomic_t
{
    rb_atomic_t interrupt;
    rb_atomic_t old;

    /* CAS loop: retry until interrupt_flag is swapped without a
     * concurrent writer changing it in between. */
    do {
	interrupt = th->interrupt_flag;
	old = ATOMIC_CAS(th->interrupt_flag, interrupt, interrupt & th->interrupt_mask);
    } while (old != interrupt);
    return interrupt & (rb_atomic_t)~th->interrupt_mask;
}
2023 
/*
 * Deliver all pending interrupts to the thread: postponed jobs, trap
 * handlers (main thread only), cross-thread exceptions, and timer
 * (scheduling) interrupts.  Loops until no deliverable bits remain.
 */
/* NOTE(review): the function name line is missing from this extract. */
void
{
    rb_atomic_t interrupt;
    int postponed_job_interrupt = 0;

    /* Already handling a raised exception; do not recurse. */
    if (th->raised_flag) return;

    while ((interrupt = threadptr_get_interrupts(th)) != 0) {
	int sig;
	int timer_interrupt;
	int pending_interrupt;
	int trap_interrupt;

	timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
	pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
	postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
	trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;

	if (postponed_job_interrupt) {
	    /* NOTE(review): statement missing from this extract --
	     * presumably flushes the postponed-job queue; confirm. */
	}

	/* signal handling */
	if (trap_interrupt && (th == th->vm->main_thread)) {
	    enum rb_thread_status prev_status = th->status;
	    th->status = THREAD_RUNNABLE;
	    while ((sig = rb_get_next_signal()) != 0) {
		rb_signal_exec(th, sig);
	    }
	    th->status = prev_status;
	}

	/* exception from another thread */
	if (pending_interrupt && rb_threadptr_pending_interrupt_active_p(th)) {
	    /* NOTE(review): the declaration of `err` (dequeued from the
	     * pending-interrupt queue upstream) is missing here. */
	    thread_debug("rb_thread_execute_interrupts: %"PRIdVALUE"\n", err);

	    if (err == Qundef) {
		/* no error */
	    }
	    else if (err == eKillSignal /* Thread#kill received */ ||
		     err == eTerminateSignal /* Terminate thread */ ||
		     err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
		/* NOTE(review): kill-handling statement missing from
		 * this extract. */
	    }
	    else {
		/* set runnable if th was slept. */
		if (th->status == THREAD_STOPPED ||
		/* NOTE(review): the remainder of this condition is
		 * missing from this extract (the `||` above is left
		 * dangling); confirm against the repository copy. */
		th->status = THREAD_RUNNABLE;
		rb_exc_raise(err);
	    }
	}

	if (timer_interrupt) {
	    unsigned long limits_us = TIME_QUANTUM_USEC;

	    /* Higher priority gets a longer time slice, lower a shorter one. */
	    if (th->priority > 0)
		limits_us <<= th->priority;
	    else
		limits_us >>= -th->priority;

	    if (th->status == THREAD_RUNNABLE)
		th->running_time_us += TIME_QUANTUM_USEC;

	    rb_thread_schedule_limits(limits_us);
	}
    }
}
2096 
2097 void
2099 {
2100  rb_thread_t *th;
2101  GetThreadPtr(thval, th);
2103 }
2104 
2105 static void
2107 {
2109 }
2110 
2112 
/*
 * Build an exception from argc/argv (RuntimeError when empty) and
 * deliver it to thread `th`.  Returns Qnil; dead targets are ignored.
 */
/* NOTE(review): the signature line is missing from this extract. */
static VALUE
{
    VALUE exc;

    if (rb_threadptr_dead(th)) {
	return Qnil;
    }

    if (argc == 0) {
	exc = rb_exc_new(rb_eRuntimeError, 0, 0);
    }
    else {
	exc = rb_make_exception(argc, argv);
    }

    /* making an exception object can switch thread,
       so we need to check thread deadness again */
    if (rb_threadptr_dead(th)) {
	return Qnil;
    }

    /* NOTE(review): delivery statements missing from this extract --
     * upstream enqueues `exc` as a pending interrupt and interrupts
     * the target thread; confirm. */
    return Qnil;
}
2140 
/*
 * Raise SignalException(sig) in the VM's main thread; called on
 * signal delivery.
 */
/* NOTE(review): the signature line is missing from this extract; the
 * body uses (rb_thread_t *th, int sig) -- confirm. */
void
{
    VALUE argv[2];

    argv[0] = rb_eSignal;
    argv[1] = INT2FIX(sig);
    rb_threadptr_raise(th->vm->main_thread, 2, argv);
}
2150 
/*
 * Raise SystemExit("exit") in the VM's main thread to shut the
 * process down cleanly.
 */
/* NOTE(review): the signature line is missing from this extract; the
 * body reads th->vm, so it takes an rb_thread_t *th -- confirm. */
void
{
    VALUE argv[2];

    argv[0] = rb_eSystemExit;
    argv[1] = rb_str_new2("exit");
    rb_threadptr_raise(th->vm->main_thread, 2, argv);
}
2160 
2161 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
2162 #define USE_SIGALTSTACK
2163 #endif
2164 
2166 void
2168 {
2169  th->raised_flag = 0;
2170 #ifdef USE_SIGALTSTACK
2171  if (!rb_during_gc()) {
2173  }
2174 #endif
2175  th->errinfo = sysstack_error;
2176  TH_JUMP_TAG(th, TAG_RAISE);
2177 }
2178 
2179 int
2181 {
2182  if (th->raised_flag & RAISED_EXCEPTION) {
2183  return 1;
2184  }
2186  return 0;
2187 }
2188 
/*
 * Clear the RAISED_EXCEPTION flag bit.  Returns 1 when the bit was
 * set and this call cleared it, 0 when it was already clear.
 */
/* NOTE(review): the signature line is missing from this extract. */
int
{
    if (!(th->raised_flag & RAISED_EXCEPTION)) {
	return 0;
    }
    th->raised_flag &= ~RAISED_EXCEPTION;
    return 1;
}
2198 
/*
 * Notify every thread blocked on `fd` that the descriptor is being
 * closed.  Returns nonzero while at least one waiter was found.
 */
/* NOTE(review): the signature line is missing from this extract; the
 * body compares against an `int fd` parameter -- confirm. */
int
{
    rb_vm_t *vm = GET_THREAD()->vm;
    struct waiting_fd *wfd = 0;
    int busy;

    busy = 0;
    list_for_each(&vm->waiting_fds, wfd, wfd_node) {
	if (wfd->fd == fd) {
	    rb_thread_t *th = wfd->th;
	    VALUE err;

	    busy = 1;
	    if (!th) {
		continue;
	    }
	    /* Detach the waiter so it is only notified once. */
	    wfd->th = 0;
	    /* NOTE(review): statements missing from this extract --
	     * upstream raises an IOError into the waiting thread here;
	     * confirm against the repository copy. */
	}
    }
    return busy;
}
2224 
/*
 * Block until no thread waits on `fd` any more, yielding the
 * scheduler between checks.
 */
/* NOTE(review): the signature line is missing from this extract. */
void
{
    while (rb_notify_fd_close(fd)) rb_thread_schedule();
}
2230 
2231 /*
2232  * call-seq:
2233  * thr.raise
2234  * thr.raise(string)
2235  * thr.raise(exception [, string [, array]])
2236  *
2237  * Raises an exception from the given thread. The caller does not have to be
2238  * +thr+. See Kernel#raise for more information.
2239  *
2240  * Thread.abort_on_exception = true
2241  * a = Thread.new { sleep(200) }
2242  * a.raise("Gotcha")
2243  *
2244  * This will produce:
2245  *
2246  * prog.rb:3: Gotcha (RuntimeError)
2247  * from prog.rb:2:in `initialize'
2248  * from prog.rb:2:in `new'
2249  * from prog.rb:2
2250  */
2251 
/* NOTE(review): the signature line is missing from this extract; the
 * body uses (argc, argv, self) in the usual method shape -- confirm. */
static VALUE
{
    rb_thread_t *target_th;
    rb_thread_t *th = GET_THREAD();
    GetThreadPtr(self, target_th);
    /* NOTE(review): a statement is missing here in this extract
     * (upstream validates the target's interrupt queue); confirm. */
    rb_threadptr_raise(target_th, argc, argv);

    /* To perform Thread.current.raise as Kernel.raise */
    if (th == target_th) {
	RUBY_VM_CHECK_INTS(th);
    }
    return Qnil;
}
2267 
2268 
2269 /*
2270  * call-seq:
2271  * thr.exit -> thr or nil
2272  * thr.kill -> thr or nil
2273  * thr.terminate -> thr or nil
2274  *
2275  * Terminates +thr+ and schedules another thread to be run.
2276  *
2277  * If this thread is already marked to be killed, #exit returns the Thread.
2278  *
2279  * If this is the main thread, or the last thread, exits the process.
2280  */
2281 
/* NOTE(review): the function name line is missing from this extract
 * (this is the Thread#kill implementation); confirm. */
VALUE
{
    rb_thread_t *th;

    GetThreadPtr(thread, th);

    /* Already dying or dead: nothing further to do. */
    if (th->to_kill || th->status == THREAD_KILLED) {
	return thread;
    }
    if (th == th->vm->main_thread) {
	/* NOTE(review): main-thread handling statement missing from
	 * this extract (upstream exits the process here); confirm. */
    }

    thread_debug("rb_thread_kill: %p (%"PRI_THREAD_ID")\n", (void *)th, thread_id_str(th));

    if (th == GET_THREAD()) {
	/* kill myself immediately */
	/* NOTE(review): statement missing from this extract. */
    }
    else {
	/* NOTE(review): asynchronous kill-delivery statements missing
	 * from this extract; confirm against the repository copy. */
    }
    return thread;
}
2309 
/*
 * Return TRUE when `thread` has been marked for kill or is already
 * dead, FALSE otherwise.
 */
/* NOTE(review): the signature line is missing from this extract. */
int
{
    rb_thread_t *th;

    GetThreadPtr(thread, th);

    if (th->to_kill || th->status == THREAD_KILLED) {
	return TRUE;
    }
    return FALSE;
}
2322 
2323 /*
2324  * call-seq:
2325  * Thread.kill(thread) -> thread
2326  *
2327  * Causes the given +thread+ to exit, see also Thread::exit.
2328  *
2329  * count = 0
2330  * a = Thread.new { loop { count += 1 } }
2331  * sleep(0.1) #=> 0
2332  * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2333  * count #=> 93947
2334  * a.alive? #=> false
2335  */
2336 
/* NOTE(review): the signature line is missing from this extract
 * (Thread.kill singleton entry); confirm. */
static VALUE
{
    /* Delegates to the instance-level kill implementation. */
    return rb_thread_kill(th);
}
2342 
2343 
2344 /*
2345  * call-seq:
2346  * Thread.exit -> thread
2347  *
2348  * Terminates the currently running thread and schedules another thread to be
2349  * run.
2350  *
2351  * If this thread is already marked to be killed, ::exit returns the Thread.
2352  *
2353  * If this is the main thread, or the last thread, exit the process.
2354  */
2355 
/* NOTE(review): the signature line is missing from this extract
 * (Thread.exit singleton entry); confirm. */
static VALUE
{
    /* Kill the calling thread itself. */
    rb_thread_t *th = GET_THREAD();
    return rb_thread_kill(th->self);
}
2362 
2363 
2364 /*
2365  * call-seq:
2366  * thr.wakeup -> thr
2367  *
2368  * Marks a given thread as eligible for scheduling, however it may still
2369  * remain blocked on I/O.
2370  *
2371  * *Note:* This does not invoke the scheduler, see #run for more information.
2372  *
2373  * c = Thread.new { Thread.stop; puts "hey!" }
2374  * sleep 0.1 while c.status!='sleep'
2375  * c.wakeup
2376  * c.join
2377  * #=> "hey!"
2378  */
2379 
/* NOTE(review): the function name line is missing from this extract
 * (Thread#wakeup implementation); confirm. */
VALUE
{
    /* rb_thread_wakeup_alive() returns nil for a dead target. */
    if (!RTEST(rb_thread_wakeup_alive(thread))) {
	rb_raise(rb_eThreadError, "killed thread");
    }
    return thread;
}
2388 
/*
 * Make `thread` eligible for scheduling again.  Returns the thread,
 * or Qnil when it is already killed.
 */
/* NOTE(review): the function name line is missing from this extract. */
VALUE
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (th->status == THREAD_KILLED) {
	return Qnil;
    }
    /* Interrupt any sleep, then mark runnable if it was stopped. */
    rb_threadptr_ready(th);
    if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
	th->status = THREAD_RUNNABLE;
    return thread;
}
2403 
2404 
2405 /*
2406  * call-seq:
2407  * thr.run -> thr
2408  *
2409  * Wakes up +thr+, making it eligible for scheduling.
2410  *
2411  * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2412  * sleep 0.1 while a.status!='sleep'
2413  * puts "Got here"
2414  * a.run
2415  * a.join
2416  *
2417  * This will produce:
2418  *
2419  * a
2420  * Got here
2421  * c
2422  *
2423  * See also the instance method #wakeup.
2424  */
2425 
2426 VALUE
2428 {
2429  rb_thread_wakeup(thread);
2431  return thread;
2432 }
2433 
2434 
2435 /*
2436  * call-seq:
2437  * Thread.stop -> nil
2438  *
2439  * Stops execution of the current thread, putting it into a ``sleep'' state,
2440  * and schedules execution of another thread.
2441  *
2442  * a = Thread.new { print "a"; Thread.stop; print "c" }
2443  * sleep 0.1 while a.status!='sleep'
2444  * print "b"
2445  * a.run
2446  * a.join
2447  * #=> "abc"
2448  */
2449 
2450 VALUE
2452 {
2453  if (rb_thread_alone()) {
2455  "stopping only thread\n\tnote: use sleep to stop forever");
2456  }
2458  return Qnil;
2459 }
2460 
2461 /********************************************************************/
2462 
2463 /*
2464  * call-seq:
2465  * Thread.list -> array
2466  *
2467  * Returns an array of Thread objects for all threads that are either runnable
2468  * or stopped.
2469  *
2470  * Thread.new { sleep(200) }
2471  * Thread.new { 1000000.times {|i| i*i } }
2472  * Thread.new { Thread.stop }
2473  * Thread.list.each {|t| p t}
2474  *
2475  * This will produce:
2476  *
2477  * #<Thread:0x401b3e84 sleep>
2478  * #<Thread:0x401b3f38 run>
2479  * #<Thread:0x401b3fb0 sleep>
2480  * #<Thread:0x401bdf4c run>
2481  */
2482 
2483 VALUE
2485 {
2486  VALUE ary = rb_ary_new();
2487  rb_vm_t *vm = GET_THREAD()->vm;
2488  rb_thread_t *th = 0;
2489 
2490  list_for_each(&vm->living_threads, th, vmlt_node) {
2491  switch (th->status) {
2492  case THREAD_RUNNABLE:
2493  case THREAD_STOPPED:
2495  rb_ary_push(ary, th->self);
2496  default:
2497  break;
2498  }
2499  }
2500  return ary;
2501 }
2502 
/* Return the Thread object for the calling (current) thread. */
/* NOTE(review): the function name line is missing from this extract. */
VALUE
{
    return GET_THREAD()->self;
}
2508 
2509 /*
2510  * call-seq:
2511  * Thread.current -> thread
2512  *
2513  * Returns the currently executing thread.
2514  *
2515  * Thread.current #=> #<Thread:0x401bdf4c run>
2516  */
2517 
2518 static VALUE
2520 {
2521  return rb_thread_current();
2522 }
2523 
/* Return the Thread object for the VM's main thread. */
/* NOTE(review): the function name line is missing from this extract. */
VALUE
{
    return GET_THREAD()->vm->main_thread->self;
}
2529 
2530 /*
2531  * call-seq:
2532  * Thread.main -> thread
2533  *
2534  * Returns the main thread.
2535  */
2536 
2537 static VALUE
2539 {
2540  return rb_thread_main();
2541 }
2542 
2543 
2544 /*
2545  * call-seq:
2546  * Thread.abort_on_exception -> true or false
2547  *
2548  * Returns the status of the global ``abort on exception'' condition.
2549  *
2550  * The default is +false+.
2551  *
2552  * When set to +true+, if any thread is aborted by an exception, the
2553  * raised exception will be re-raised in the main thread.
2554  *
2555  * Can also be specified by the global $DEBUG flag or command line option
2556  * +-d+.
2557  *
2558  * See also ::abort_on_exception=.
2559  *
2560  * There is also an instance level method to set this for a specific thread,
2561  * see #abort_on_exception.
2562  */
2563 
/* Thread.abort_on_exception: reads the process-global flag from the VM.
 * NOTE(review): signature line lost in extraction. */
2564 static VALUE
2566 {
2567  return GET_THREAD()->vm->thread_abort_on_exception ? Qtrue : Qfalse;
2568 }
2569 
2570 
2571 /*
2572  * call-seq:
2573  * Thread.abort_on_exception= boolean -> true or false
2574  *
2575  * When set to +true+, if any thread is aborted by an exception, the
2576  * raised exception will be re-raised in the main thread.
2577  * Returns the new state.
2578  *
2579  * Thread.abort_on_exception = true
2580  * t1 = Thread.new do
2581  * puts "In new thread"
2582  * raise "Exception from thread"
2583  * end
2584  * sleep(1)
2585  * puts "not reached"
2586  *
2587  * This will produce:
2588  *
2589  * In new thread
2590  * prog.rb:4: Exception from thread (RuntimeError)
2591  * from prog.rb:2:in `initialize'
2592  * from prog.rb:2:in `new'
2593  * from prog.rb:2
2594  *
2595  * See also ::abort_on_exception.
2596  *
2597  * There is also an instance level method to set this for a specific thread,
2598  * see #abort_on_exception=.
2599  */
2600 
/* Thread.abort_on_exception=: stores the truthiness of val into the VM-global
 * flag and returns val unchanged.
 * NOTE(review): signature line lost in extraction. */
2601 static VALUE
2603 {
2604  GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
2605  return val;
2606 }
2607 
2608 
2609 /*
2610  * call-seq:
2611  * thr.abort_on_exception -> true or false
2612  *
2613  * Returns the status of the thread-local ``abort on exception'' condition for
2614  * this +thr+.
2615  *
2616  * The default is +false+.
2617  *
2618  * See also #abort_on_exception=.
2619  *
2620  * There is also a class level method to set this for all threads, see
2621  * ::abort_on_exception.
2622  */
2623 
/* Thread#abort_on_exception: reads the per-thread flag.
 * NOTE(review): signature line lost in extraction. */
2624 static VALUE
2626 {
2627  rb_thread_t *th;
2628  GetThreadPtr(thread, th);
2629  return th->abort_on_exception ? Qtrue : Qfalse;
2630 }
2631 
2632 
2633 /*
2634  * call-seq:
2635  * thr.abort_on_exception= boolean -> true or false
2636  *
2637  * When set to +true+, if this +thr+ is aborted by an exception, the
2638  * raised exception will be re-raised in the main thread.
2639  *
2640  * See also #abort_on_exception.
2641  *
2642  * There is also a class level method to set this for all threads, see
2643  * ::abort_on_exception=.
2644  */
2645 
/* Thread#abort_on_exception=: sets the per-thread flag; returns val unchanged.
 * NOTE(review): signature line lost in extraction. */
2646 static VALUE
2648 {
2649  rb_thread_t *th;
2650 
2651  GetThreadPtr(thread, th);
2652  th->abort_on_exception = RTEST(val);
2653  return val;
2654 }
2655 
2656 
2657 /*
2658  * call-seq:
2659  * Thread.report_on_exception -> true or false
2660  *
2661  * Returns the status of the global ``report on exception'' condition.
2662  *
2663  * The default is +false+.
2664  *
2665  * When set to +true+, all threads will report the exception if an
2666  * exception is raised in any thread.
2667  *
2668  * See also ::report_on_exception=.
2669  *
2670  * There is also an instance level method to set this for a specific thread,
2671  * see #report_on_exception.
2672  */
2673 
/* Thread.report_on_exception: reads the process-global flag from the VM.
 * NOTE(review): signature line lost in extraction. */
2674 static VALUE
2676 {
2677  return GET_THREAD()->vm->thread_report_on_exception ? Qtrue : Qfalse;
2678 }
2679 
2680 
2681 /*
2682  * call-seq:
2683  * Thread.report_on_exception= boolean -> true or false
2684  *
2685  * When set to +true+, all threads will report the exception if an
2686  * exception is raised. Returns the new state.
2687  *
2688  * Thread.report_on_exception = true
2689  * t1 = Thread.new do
2690  * puts "In new thread"
2691  * raise "Exception from thread"
2692  * end
2693  * sleep(1)
2694  * puts "In the main thread"
2695  *
2696  * This will produce:
2697  *
2698  * In new thread
2699  * prog.rb:4: Exception from thread (RuntimeError)
2700  * from prog.rb:2:in `initialize'
2701  * from prog.rb:2:in `new'
2702  * from prog.rb:2
2703  * In the main thread
2704  *
2705  * See also ::report_on_exception.
2706  *
2707  * There is also an instance level method to set this for a specific thread,
2708  * see #report_on_exception=.
2709  */
2710 
/* Thread.report_on_exception=: stores the truthiness of val into the VM-global
 * flag and returns val unchanged.
 * NOTE(review): signature line lost in extraction. */
2711 static VALUE
2713 {
2714  GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
2715  return val;
2716 }
2717 
2718 
2719 /*
2720  * call-seq:
2721  * thr.report_on_exception -> true or false
2722  *
2723  * Returns the status of the thread-local ``report on exception'' condition for
2724  * this +thr+.
2725  *
2726  * The default is +false+.
2727  *
2728  * See also #report_on_exception=.
2729  *
2730  * There is also a class level method to set this for all threads, see
2731  * ::report_on_exception.
2732  */
2733 
/* Thread#report_on_exception: reads the per-thread flag.
 * NOTE(review): signature line lost in extraction. */
2734 static VALUE
2736 {
2737  rb_thread_t *th;
2738  GetThreadPtr(thread, th);
2739  return th->report_on_exception ? Qtrue : Qfalse;
2740 }
2741 
2742 
2743 /*
2744  * call-seq:
2745  * thr.report_on_exception= boolean -> true or false
2746  *
2747  * When set to +true+, all threads (including the main program) will
2748  * report the exception if an exception is raised in this +thr+.
2749  *
2750  * See also #report_on_exception.
2751  *
2752  * There is also a class level method to set this for all threads, see
2753  * ::report_on_exception=.
2754  */
2755 
/* Thread#report_on_exception=: sets the per-thread flag; returns val unchanged.
 * NOTE(review): signature line lost in extraction. */
2756 static VALUE
2758 {
2759  rb_thread_t *th;
2760 
2761  GetThreadPtr(thread, th);
2762  th->report_on_exception = RTEST(val);
2763  return val;
2764 }
2765 
2766 
2767 /*
2768  * call-seq:
2769  * thr.group -> thgrp or nil
2770  *
2771  * Returns the ThreadGroup which contains the given thread, or returns +nil+
2772  * if +thr+ is not a member of any group.
2773  *
2774  * Thread.main.group #=> #<ThreadGroup:0x4029d914>
2775  */
2776 
/* Thread#group: returns th->thgroup, normalizing an unset (zero) group to nil
 * so Ruby callers never see a bogus VALUE.
 * NOTE(review): signature line (rb_thread_group?) lost in extraction. */
2777 VALUE
2779 {
2780  rb_thread_t *th;
2781  VALUE group;
2782  GetThreadPtr(thread, th);
2783  group = th->thgroup;
2784 
2785  if (!group) {
2786  group = Qnil;
2787  }
2788  return group;
2789 }
2790 
/* Maps a thread's status enum to the human-readable name used by #status and
 * #inspect ("run"/"aborting"/"sleep"/"sleep_forever"/"dead"/"unknown").
 * NOTE(review): the extraction lost the signature line (original 2792, taking
 * (rb_thread_t *th, int detail)?) and original line 2800 — presumably
 * "case THREAD_STOPPED_FOREVER:", which the "sleep_forever" branch and the
 * fallthrough to "sleep" depend on. Verify against upstream thread.c. */
2791 static const char *
2793 {
2794  switch (th->status) {
2795  case THREAD_RUNNABLE:
2796  if (th->to_kill)
2797  return "aborting";
2798  else
2799  return "run";
2801  if (detail) return "sleep_forever";
2802  case THREAD_STOPPED:
2803  return "sleep";
2804  case THREAD_KILLED:
2805  return "dead";
2806  default:
2807  return "unknown";
2808  }
2809 }
2810 
/* True iff the thread has been killed (status == THREAD_KILLED).
 * NOTE(review): signature line lost in extraction. */
2811 static int
2813 {
2814  return th->status == THREAD_KILLED;
2815 }
2816 
2817 
2818 /*
2819  * call-seq:
2820  * thr.status -> string, false or nil
2821  *
2822  * Returns the status of +thr+.
2823  *
2824  * [<tt>"sleep"</tt>]
2825  * Returned if this thread is sleeping or waiting on I/O
2826  * [<tt>"run"</tt>]
2827  * When this thread is executing
2828  * [<tt>"aborting"</tt>]
2829  * If this thread is aborting
2830  * [+false+]
2831  * When this thread is terminated normally
2832  * [+nil+]
2833  * If terminated with an exception.
2834  *
2835  * a = Thread.new { raise("die now") }
2836  * b = Thread.new { Thread.stop }
2837  * c = Thread.new { Thread.exit }
2838  * d = Thread.new { sleep }
2839  * d.kill #=> #<Thread:0x401b3678 aborting>
2840  * a.status #=> nil
2841  * b.status #=> "sleep"
2842  * c.status #=> false
2843  * d.status #=> "aborting"
2844  * Thread.current.status #=> "run"
2845  *
2846  * See also the instance methods #alive? and #stop?
2847  */
2848 
/* Thread#status: for a dead thread returns nil if it terminated with a real
 * exception (errinfo set and not a Fixnum sentinel), false otherwise; for a
 * live thread returns the short status string.
 * NOTE(review): signature line lost in extraction; "/ * TODO * /" is in the
 * original source. */
2849 static VALUE
2851 {
2852  rb_thread_t *th;
2853  GetThreadPtr(thread, th);
2854 
2855  if (rb_threadptr_dead(th)) {
2856  if (!NIL_P(th->errinfo) && !FIXNUM_P(th->errinfo)
2857  /* TODO */ ) {
2858  return Qnil;
2859  }
2860  return Qfalse;
2861  }
2862  return rb_str_new2(thread_status_name(th, FALSE));
2863 }
2864 
2865 
2866 /*
2867  * call-seq:
2868  * thr.alive? -> true or false
2869  *
2870  * Returns +true+ if +thr+ is running or sleeping.
2871  *
2872  * thr = Thread.new { }
2873  * thr.join #=> #<Thread:0x401b3fb0 dead>
2874  * Thread.current.alive? #=> true
2875  * thr.alive? #=> false
2876  *
2877  * See also #stop? and #status.
2878  */
2879 
/* Thread#alive?: true unless the thread is dead.
 * NOTE(review): signature line lost in extraction. */
2880 static VALUE
2882 {
2883  rb_thread_t *th;
2884  GetThreadPtr(thread, th);
2885 
2886  if (rb_threadptr_dead(th))
2887  return Qfalse;
2888  return Qtrue;
2889 }
2890 
2891 /*
2892  * call-seq:
2893  * thr.stop? -> true or false
2894  *
2895  * Returns +true+ if +thr+ is dead or sleeping.
2896  *
2897  * a = Thread.new { Thread.stop }
2898  * b = Thread.current
2899  * a.stop? #=> true
2900  * b.stop? #=> false
2901  *
2902  * See also #alive? and #status.
2903  */
2904 
/* Thread#stop?: true when the thread is dead or stopped (either timed or
 * forever). NOTE(review): signature line lost in extraction. */
2905 static VALUE
2907 {
2908  rb_thread_t *th;
2909  GetThreadPtr(thread, th);
2910 
2911  if (rb_threadptr_dead(th))
2912  return Qtrue;
2913  if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
2914  return Qtrue;
2915  return Qfalse;
2916 }
2917 
2918 /*
2919  * call-seq:
2920  * thr.safe_level -> integer
2921  *
2922  * Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
2923  * levels can help when implementing sandboxes which run insecure code.
2924  *
2925  * thr = Thread.new { $SAFE = 1; sleep }
2926  * Thread.current.safe_level #=> 0
2927  * thr.safe_level #=> 1
2928  */
2929 
/* Thread#safe_level: returns th->safe_level as an Integer.
 * NOTE(review): signature line lost in extraction. */
2930 static VALUE
2932 {
2933  rb_thread_t *th;
2934  GetThreadPtr(thread, th);
2935 
2936  return INT2NUM(th->safe_level);
2937 }
2938 
2939 /*
2940  * call-seq:
2941  * thr.name -> string
2942  *
2943  * Returns the name of the thread.
2944  */
2945 
/* Thread#name: returns th->name (a String, or presumably nil when unset —
 * see the NIL_P check in the setter below).
 * NOTE(review): signature line lost in extraction. */
2946 static VALUE
2948 {
2949  rb_thread_t *th;
2950  GetThreadPtr(thread, th);
2951  return th->name;
2952 }
2953 
2954 /*
2955  * call-seq:
2956  * thr.name=(name) -> string
2957  *
2958  * Sets the given name on the Ruby thread.
2959  * On some platforms, the name may also be set on the underlying pthread and/or reported to the kernel.
2960  */
2961 
/* Thread#name=: validates a non-nil name (must be a NUL-free String in an
 * ASCII-compatible encoding), stores a frozen copy in th->name, and — where
 * SET_ANOTHER_THREAD_NAME is available — pushes the C string down to the
 * native thread once it is initialized.
 * NOTE(review): signature line (taking (VALUE thread, VALUE name)?) lost in
 * extraction. */
2962 static VALUE
2964 {
2965 #ifdef SET_ANOTHER_THREAD_NAME
2966  const char *s = "";
2967 #endif
2968  rb_thread_t *th;
2969  GetThreadPtr(thread, th);
2970  if (!NIL_P(name)) {
2971  rb_encoding *enc;
2972  StringValueCStr(name);
2973  enc = rb_enc_get(name);
2974  if (!rb_enc_asciicompat(enc)) {
2975  rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
2976  rb_enc_name(enc));
2977  }
2978  name = rb_str_new_frozen(name);
2979 #ifdef SET_ANOTHER_THREAD_NAME
2980  s = RSTRING_PTR(name);
2981 #endif
2982  }
2983  th->name = name;
2984 #if defined(SET_ANOTHER_THREAD_NAME)
2985  if (threadptr_initialized(th)) {
2986  SET_ANOTHER_THREAD_NAME(th->thread_id, s);
2987  }
2988 #endif
2989  return name;
2990 }
2991 
2992 /*
2993  * call-seq:
2994  * thr.inspect -> string
2995  *
2996  * Dump the name, id, and status of _thr_ to a string.
2997  */
2998 
/* Thread#inspect: formats "#<ClassName:0xADDR[@name][@file:line] status>".
 * The file:line part comes from the thread's start proc when it was created
 * from a proc rather than a C function; the location array is explicitly
 * recycled since it is a throwaway temporary.
 * NOTE(review): signature line lost in extraction. */
2999 static VALUE
3001 {
3002  VALUE cname = rb_class_path(rb_obj_class(thread));
3003  rb_thread_t *th;
3004  const char *status;
3005  VALUE str;
3006 
3007  GetThreadPtr(thread, th);
3008  status = thread_status_name(th, TRUE);
3009  str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3010  if (!NIL_P(th->name)) {
3011  rb_str_catf(str, "@%"PRIsVALUE, th->name);
3012  }
3013  if (!th->first_func && th->first_proc) {
3014  VALUE loc = rb_proc_location(th->first_proc);
3015  if (!NIL_P(loc)) {
3016  const VALUE *ptr = RARRAY_CONST_PTR(loc);
3017  rb_str_catf(str, "@%"PRIsVALUE":%"PRIsVALUE, ptr[0], ptr[1]);
3018  rb_gc_force_recycle(loc);
3019  }
3020  }
3021  rb_str_catf(str, " %s>", status);
3022  OBJ_INFECT(str, thread);
3023 
3024  return str;
3025 }
3026 
3027 /* variables for recursive traversals */
3029 
/* Fiber/thread-local storage read: the reserved recursive_key maps to the
 * recursion-guard hash; any other ID is looked up in th->local_storage,
 * returning Qnil when absent or when no storage table exists yet.
 * NOTE(review): signature line (taking (rb_thread_t *th, ID id)?) lost in
 * extraction. */
3030 static VALUE
3032 {
3033  if (id == recursive_key) {
3034  return th->local_storage_recursive_hash;
3035  }
3036  else {
3037  st_data_t val;
3038 
3039  if (th->local_storage && st_lookup(th->local_storage, id, &val)) {
3040  return (VALUE)val;
3041  }
3042  else {
3043  return Qnil;
3044  }
3045  }
3046 }
3047 
/* C API wrapper: resolves the Thread object to its rb_thread_t and delegates
 * to threadptr_local_aref.
 * NOTE(review): signature line lost in extraction. */
3048 VALUE
3050 {
3051  rb_thread_t *th;
3052  GetThreadPtr(thread, th);
3053  return threadptr_local_aref(th, id);
3054 }
3055 
3056 /*
3057  * call-seq:
3058  * thr[sym] -> obj or nil
3059  *
3060  * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3061  * if not explicitly inside a Fiber), using either a symbol or a string name.
3062  * If the specified variable does not exist, returns +nil+.
3063  *
3064  * [
3065  * Thread.new { Thread.current["name"] = "A" },
3066  * Thread.new { Thread.current[:name] = "B" },
3067  * Thread.new { Thread.current["name"] = "C" }
3068  * ].each do |th|
3069  * th.join
3070  * puts "#{th.inspect}: #{th[:name]}"
3071  * end
3072  *
3073  * This will produce:
3074  *
3075  * #<Thread:0x00000002a54220 dead>: A
3076  * #<Thread:0x00000002a541a8 dead>: B
3077  * #<Thread:0x00000002a54130 dead>: C
3078  *
3079  * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3080  * This confusion did not exist in Ruby 1.8 because
3081  * fibers are only available since Ruby 1.9.
3082  * Ruby 1.9 chose to make these methods fiber-local in order to preserve
3083  * the following idiom for dynamic scope.
3084  *
3085  * def meth(newvalue)
3086  * begin
3087  * oldvalue = Thread.current[:name]
3088  * Thread.current[:name] = newvalue
3089  * yield
3090  * ensure
3091  * Thread.current[:name] = oldvalue
3092  * end
3093  * end
3094  *
3095  * The idiom may not work as dynamic scope if the methods are thread-local
3096  * and a given block switches fiber.
3097  *
3098  * f = Fiber.new {
3099  * meth(1) {
3100  * Fiber.yield
3101  * }
3102  * }
3103  * meth(2) {
3104  * f.resume
3105  * }
3106  * f.resume
3107  * p Thread.current[:name]
3108  * #=> nil if fiber-local
3109  * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3110  *
3111  * For thread-local variables, please see #thread_variable_get and
3112  * #thread_variable_set.
3113  *
3114  */
3115 
/* Thread#[]: converts the key to an ID (nil for a never-interned name, i.e.
 * the variable cannot exist) and reads the fiber-local value.
 * NOTE(review): signature line lost in extraction. */
3116 static VALUE
3118 {
3119  ID id = rb_check_id(&key);
3120  if (!id) return Qnil;
3121  return rb_thread_local_aref(thread, id);
3122 }
3123 
/* Fiber/thread-local storage write: recursive_key targets the recursion-guard
 * slot; a nil value deletes the entry; anything else is inserted into
 * th->local_storage.
 * NOTE(review): the extraction lost the signature line and two body lines —
 * original 3128 (presumably "th->local_storage_recursive_hash = val;") and
 * original 3138 (presumably "th->local_storage = st_init_numtable();",
 * lazily creating the table). Verify against upstream thread.c. */
3124 static VALUE
3126 {
3127  if (id == recursive_key) {
3129  return val;
3130  }
3131  else if (NIL_P(val)) {
3132  if (!th->local_storage) return Qnil;
3133  st_delete_wrap(th->local_storage, id);
3134  return Qnil;
3135  }
3136  else {
3137  if (!th->local_storage) {
3139  }
3140  st_insert(th->local_storage, id, val);
3141  return val;
3142  }
3143 }
3144 
/* C API wrapper: rejects writes to a frozen Thread, then delegates to
 * threadptr_local_aset.
 * NOTE(review): signature line lost in extraction. */
3145 VALUE
3147 {
3148  rb_thread_t *th;
3149  GetThreadPtr(thread, th);
3150 
3151  if (OBJ_FROZEN(thread)) {
3152  rb_error_frozen("thread locals");
3153  }
3154 
3155  return threadptr_local_aset(th, id, val);
3156 }
3157 
3158 /*
3159  * call-seq:
3160  * thr[sym] = obj -> obj
3161  *
3162  * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3163  * using either a symbol or a string.
3164  *
3165  * See also Thread#[].
3166  *
3167  * For thread-local variables, please see #thread_variable_set and
3168  * #thread_variable_get.
3169  */
3170 
/* Thread#[]=: interns the key (rb_to_id raises on invalid names) and stores
 * the fiber-local value. NOTE(review): signature line lost in extraction. */
3171 static VALUE
3173 {
3174  return rb_thread_local_aset(self, rb_to_id(id), val);
3175 }
3176 
3177 /*
3178  * call-seq:
3179  * thr.thread_variable_get(key) -> obj or nil
3180  *
3181  * Returns the value of a thread local variable that has been set. Note that
3182  * these are different than fiber local values. For fiber local values,
3183  * please see Thread#[] and Thread#[]=.
3184  *
3185  * Thread local values are carried along with threads, and do not respect
3186  * fibers. For example:
3187  *
3188  * Thread.new {
3189  * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3190  * Thread.current["foo"] = "bar" # set a fiber local
3191  *
3192  * Fiber.new {
3193  * Fiber.yield [
3194  * Thread.current.thread_variable_get("foo"), # get the thread local
3195  * Thread.current["foo"], # get the fiber local
3196  * ]
3197  * }.resume
3198  * }.join.value # => ['bar', nil]
3199  *
3200  * The value "bar" is returned for the thread local, where nil is returned
3201  * for the fiber local. The fiber is executed in the same thread, so the
3202  * thread local values are available.
3203  */
3204 
/* Thread#thread_variable_get: thread-locals (unlike fiber-locals) live in a
 * Hash stored in the Thread's id_locals ivar, keyed by Symbol.
 * NOTE(review): signature line lost in extraction. */
3205 static VALUE
3207 {
3208  VALUE locals;
3209 
3210  locals = rb_ivar_get(thread, id_locals);
3211  return rb_hash_aref(locals, rb_to_symbol(key));
3212 }
3213 
3214 /*
3215  * call-seq:
3216  * thr.thread_variable_set(key, value)
3217  *
3218  * Sets a thread local with +key+ to +value+. Note that these are local to
3219  * threads, and not to fibers. Please see Thread#thread_variable_get and
3220  * Thread#[] for more information.
3221  */
3222 
/* Thread#thread_variable_set: rejects a frozen Thread, then stores val in the
 * id_locals Hash under the symbolized key.
 * NOTE(review): signature line lost in extraction. */
3223 static VALUE
3225 {
3226  VALUE locals;
3227 
3228  if (OBJ_FROZEN(thread)) {
3229  rb_error_frozen("thread locals");
3230  }
3231 
3232  locals = rb_ivar_get(thread, id_locals);
3233  return rb_hash_aset(locals, rb_to_symbol(id), val);
3234 }
3235 
3236 /*
3237  * call-seq:
3238  * thr.key?(sym) -> true or false
3239  *
3240  * Returns +true+ if the given string (or symbol) exists as a fiber-local
3241  * variable.
3242  *
3243  * me = Thread.current
3244  * me[:oliver] = "a"
3245  * me.key?(:oliver) #=> true
3246  * me.key?(:stanley) #=> false
3247  */
3248 
/* Thread#key?: true iff the (already-interned) key exists in the thread's
 * fiber-local st table; an un-interned name or missing table means false.
 * NOTE(review): signature line lost in extraction. */
3249 static VALUE
3251 {
3252  rb_thread_t *th;
3253  ID id = rb_check_id(&key);
3254 
3255  GetThreadPtr(self, th);
3256 
3257  if (!id || !th->local_storage) {
3258  return Qfalse;
3259  }
3260  if (st_lookup(th->local_storage, id, 0)) {
3261  return Qtrue;
3262  }
3263  return Qfalse;
3264 }
3265 
/* st_foreach callback for Thread#keys: pushes each storage key as a Symbol.
 * NOTE(review): signature line (taking (ID key, VALUE value, VALUE ary)?)
 * lost in extraction. */
3266 static int
3268 {
3269  rb_ary_push(ary, ID2SYM(key));
3270  return ST_CONTINUE;
3271 }
3272 
/* C API: true iff exactly one thread is living in the VM.
 * NOTE(review): signature line (rb_thread_alone?) lost in extraction. */
3273 int
3275 {
3276  return vm_living_thread_num(GET_VM()) == 1;
3277 }
3278 
3279 /*
3280  * call-seq:
3281  * thr.keys -> array
3282  *
3283  * Returns an array of the names of the fiber-local variables (as Symbols).
3284  *
3285  * thr = Thread.new do
3286  * Thread.current[:cat] = 'meow'
3287  * Thread.current["dog"] = 'woof'
3288  * end
3289  * thr.join #=> #<Thread:0x401b3f10 dead>
3290  * thr.keys #=> [:dog, :cat]
3291  */
3292 
/* Thread#keys: collects all fiber-local variable names into a new Array.
 * NOTE(review): the extraction lost the signature line and original line 3301
 * — presumably "st_foreach(th->local_storage, thread_keys_i, (st_data_t)ary);"
 * which fills the array. Verify against upstream thread.c. */
3293 static VALUE
3295 {
3296  rb_thread_t *th;
3297  VALUE ary = rb_ary_new();
3298  GetThreadPtr(self, th);
3299 
3300  if (th->local_storage) {
3302  }
3303  return ary;
3304 }
3305 
/* rb_hash_foreach callback for Thread#thread_variables: pushes each key
 * (already a Symbol) verbatim.
 * NOTE(review): signature line lost in extraction. */
3306 static int
3308 {
3309  rb_ary_push(ary, key);
3310  return ST_CONTINUE;
3311 }
3312 
3313 /*
3314  * call-seq:
3315  * thr.thread_variables -> array
3316  *
3317  * Returns an array of the names of the thread-local variables (as Symbols).
3318  *
3319  * thr = Thread.new do
3320  * Thread.current.thread_variable_set(:cat, 'meow')
3321  * Thread.current.thread_variable_set("dog", 'woof')
3322  * end
3323  * thr.join #=> #<Thread:0x401b3f10 dead>
3324  * thr.thread_variables #=> [:dog, :cat]
3325  *
3326  * Note that these are not fiber local variables. Please see Thread#[] and
3327  * Thread#thread_variable_get for more details.
3328  */
3329 
/* Thread#thread_variables: iterates the id_locals Hash and returns its keys.
 * NOTE(review): signature line lost in extraction. */
3330 static VALUE
3332 {
3333  VALUE locals;
3334  VALUE ary;
3335 
3336  locals = rb_ivar_get(thread, id_locals);
3337  ary = rb_ary_new();
3338  rb_hash_foreach(locals, keys_i, ary);
3339 
3340  return ary;
3341 }
3342 
3343 /*
3344  * call-seq:
3345  * thr.thread_variable?(key) -> true or false
3346  *
3347  * Returns +true+ if the given string (or symbol) exists as a thread-local
3348  * variable.
3349  *
3350  * me = Thread.current
3351  * me.thread_variable_set(:oliver, "a")
3352  * me.thread_variable?(:oliver) #=> true
3353  * me.thread_variable?(:stanley) #=> false
3354  *
3355  * Note that these are not fiber local variables. Please see Thread#[] and
3356  * Thread#thread_variable_get for more details.
3357  */
3358 
/* Thread#thread_variable?: membership test against the id_locals Hash,
 * probing its underlying st table directly (a Hash with no ntbl is empty).
 * NOTE(review): signature line lost in extraction. */
3359 static VALUE
3361 {
3362  VALUE locals;
3363  ID id = rb_check_id(&key);
3364 
3365  if (!id) return Qfalse;
3366 
3367  locals = rb_ivar_get(thread, id_locals);
3368 
3369  if (!RHASH(locals)->ntbl)
3370  return Qfalse;
3371 
3372  if (st_lookup(RHASH(locals)->ntbl, ID2SYM(id), 0)) {
3373  return Qtrue;
3374  }
3375 
3376  return Qfalse;
3377 }
3378 
3379 /*
3380  * call-seq:
3381  * thr.priority -> integer
3382  *
3383  * Returns the priority of <i>thr</i>. The default is inherited from the
3384  * thread that created the new thread, or zero for the
3385  * initial main thread; higher-priority threads will run more frequently
3386  * than lower-priority threads (but lower-priority threads can also run).
3387  *
3388  * This is just a hint for the Ruby thread scheduler. It may be ignored on some
3389  * platforms.
3390  *
3391  * Thread.current.priority #=> 0
3392  */
3393 
/* Thread#priority: returns th->priority as an Integer.
 * NOTE(review): signature line lost in extraction. */
3394 static VALUE
3396 {
3397  rb_thread_t *th;
3398  GetThreadPtr(thread, th);
3399  return INT2NUM(th->priority);
3400 }
3401 
3402 
3403 /*
3404  * call-seq:
3405  * thr.priority= integer -> thr
3406  *
3407  * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3408  * will run more frequently than lower-priority threads (but lower-priority
3409  * threads can also run).
3410  *
3411  * This is just a hint for the Ruby thread scheduler. It may be ignored on some
3412  * platforms.
3413  *
3414  * count1 = count2 = 0
3415  * a = Thread.new do
3416  * loop { count1 += 1 }
3417  * end
3418  * a.priority = -1
3419  *
3420  * b = Thread.new do
3421  * loop { count2 += 1 }
3422  * end
3423  * b.priority = -2
3424  * sleep 1 #=> 1
3425  * count1 #=> 622504
3426  * count2 #=> 5832
3427  */
3428 
/* Thread#priority=: with native thread priorities the value is passed through
 * to the OS; otherwise it is clamped to [RUBY_THREAD_PRIORITY_MIN, MAX].
 * Returns the (possibly clamped) stored priority, not the raw argument.
 * NOTE(review): signature line lost in extraction. */
3429 static VALUE
3431 {
3432  rb_thread_t *th;
3433  int priority;
3434  GetThreadPtr(thread, th);
3435 
3436 
3437 #if USE_NATIVE_THREAD_PRIORITY
3438  th->priority = NUM2INT(prio);
3439  native_thread_apply_priority(th);
3440 #else
3441  priority = NUM2INT(prio);
3442  if (priority > RUBY_THREAD_PRIORITY_MAX) {
3443  priority = RUBY_THREAD_PRIORITY_MAX;
3444  }
3445  else if (priority < RUBY_THREAD_PRIORITY_MIN) {
3446  priority = RUBY_THREAD_PRIORITY_MIN;
3447  }
3448  th->priority = priority;
3449 #endif
3450  return INT2NUM(th->priority);
3451 }
3452 
3453 /* for IO */
3454 
3455 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
3456 
3457 /*
3458  * several Unix platforms support file descriptors bigger than FD_SETSIZE
3459  * in select(2) system call.
3460  *
3461  * - Linux 2.2.12 (?)
3462  * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
3463  * select(2) documents how to allocate fd_set dynamically.
3464  * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
3465  * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
3466  * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
3467  * select(2) documents how to allocate fd_set dynamically.
3468  * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
3469  * - HP-UX documents how to allocate fd_set dynamically.
3470  * http://docs.hp.com/en/B2355-60105/select.2.html
3471  * - Solaris 8 has select_large_fdset
3472  * - Mac OS X 10.7 (Lion)
3473  * select(2) returns EINVAL if nfds is greater than FD_SET_SIZE and
3474  * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
3475  * http://developer.apple.com/library/mac/#releasenotes/Darwin/SymbolVariantsRelNotes/_index.html
3476  *
3477  * When fd_set is not big enough to hold big file descriptors,
3478  * it should be allocated dynamically.
3479  * Note that this assumes fd_set is structured as bitmap.
3480  *
3481  * rb_fd_init allocates the memory.
3482  * rb_fd_term free the memory.
3483  * rb_fd_set may re-allocates bitmap.
3484  *
3485  * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
3486  */
3487 
3488 void
3489 rb_fd_init(rb_fdset_t *fds)
3490 {
3491  fds->maxfd = 0;
3492  fds->fdset = ALLOC(fd_set);
3493  FD_ZERO(fds->fdset);
3494 }
3495 
/* Initialize dst as a fresh copy of src: allocate a buffer big enough for
 * src's bitmap (never smaller than a plain fd_set) and duplicate the bits.
 * NOTE(review): signature line (rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t
 * *src)?) lost in extraction. */
3496 void
3498 {
3499  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3500 
3501  if (size < sizeof(fd_set))
3502  size = sizeof(fd_set);
3503  dst->maxfd = src->maxfd;
3504  dst->fdset = xmalloc(size);
3505  memcpy(dst->fdset, src->fdset, size);
3506 }
3507 
3508 void
3509 rb_fd_term(rb_fdset_t *fds)
3510 {
3511  if (fds->fdset) xfree(fds->fdset);
3512  fds->maxfd = 0;
3513  fds->fdset = 0;
3514 }
3515 
3516 void
3517 rb_fd_zero(rb_fdset_t *fds)
3518 {
3519  if (fds->fdset)
3520  MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
3521 }
3522 
3523 static void
3524 rb_fd_resize(int n, rb_fdset_t *fds)
3525 {
3526  size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
3527  size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
3528 
3529  if (m < sizeof(fd_set)) m = sizeof(fd_set);
3530  if (o < sizeof(fd_set)) o = sizeof(fd_set);
3531 
3532  if (m > o) {
3533  fds->fdset = xrealloc(fds->fdset, m);
3534  memset((char *)fds->fdset + o, 0, m - o);
3535  }
3536  if (n >= fds->maxfd) fds->maxfd = n + 1;
3537 }
3538 
3539 void
3540 rb_fd_set(int n, rb_fdset_t *fds)
3541 {
3542  rb_fd_resize(n, fds);
3543  FD_SET(n, fds->fdset);
3544 }
3545 
3546 void
3547 rb_fd_clr(int n, rb_fdset_t *fds)
3548 {
3549  if (n >= fds->maxfd) return;
3550  FD_CLR(n, fds->fdset);
3551 }
3552 
3553 int
3554 rb_fd_isset(int n, const rb_fdset_t *fds)
3555 {
3556  if (n >= fds->maxfd) return 0;
3557  return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
3558 }
3559 
3560 void
3561 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
3562 {
3563  size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
3564 
3565  if (size < sizeof(fd_set)) size = sizeof(fd_set);
3566  dst->maxfd = max;
3567  dst->fdset = xrealloc(dst->fdset, size);
3568  memcpy(dst->fdset, src, size);
3569 }
3570 
3571 void
3572 rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
3573 {
3574  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3575 
3576  if (size < sizeof(fd_set))
3577  size = sizeof(fd_set);
3578  dst->maxfd = src->maxfd;
3579  dst->fdset = xrealloc(dst->fdset, size);
3580  memcpy(dst->fdset, src->fdset, size);
3581 }
3582 
3583 #ifdef __native_client__
3584 int select(int nfds, fd_set *readfds, fd_set *writefds,
3585  fd_set *exceptfds, struct timeval *timeout);
3586 #endif
3587 
3588 int
3589 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
3590 {
3591  fd_set *r = NULL, *w = NULL, *e = NULL;
3592  if (readfds) {
3593  rb_fd_resize(n - 1, readfds);
3594  r = rb_fd_ptr(readfds);
3595  }
3596  if (writefds) {
3597  rb_fd_resize(n - 1, writefds);
3598  w = rb_fd_ptr(writefds);
3599  }
3600  if (exceptfds) {
3601  rb_fd_resize(n - 1, exceptfds);
3602  e = rb_fd_ptr(exceptfds);
3603  }
3604  return select(n, r, w, e, timeout);
3605 }
3606 
3607 #if defined __GNUC__ && __GNUC__ >= 6
3608 #define rb_fd_no_init(fds) ASSUME(!(fds)->maxfd)
3609 #endif
3610 
3611 #undef FD_ZERO
3612 #undef FD_SET
3613 #undef FD_CLR
3614 #undef FD_ISSET
3615 
3616 #define FD_ZERO(f) rb_fd_zero(f)
3617 #define FD_SET(i, f) rb_fd_set((i), (f))
3618 #define FD_CLR(i, f) rb_fd_clr((i), (f))
3619 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
3620 
3621 #elif defined(_WIN32)
3622 
3623 void
3624 rb_fd_init(rb_fdset_t *set)
3625 {
3626  set->capa = FD_SETSIZE;
3627  set->fdset = ALLOC(fd_set);
3628  FD_ZERO(set->fdset);
3629 }
3630 
/* (Win32) Initialize dst, then copy src into it via rb_fd_dup.
 * NOTE(review): signature line (rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t
 * *src)?) lost in extraction. */
3631 void
3633 {
3634  rb_fd_init(dst);
3635  rb_fd_dup(dst, src);
3636 }
3637 
3638 void
3639 rb_fd_term(rb_fdset_t *set)
3640 {
3641  xfree(set->fdset);
3642  set->fdset = NULL;
3643  set->capa = 0;
3644 }
3645 
3646 void
3647 rb_fd_set(int fd, rb_fdset_t *set)
3648 {
3649  unsigned int i;
3650  SOCKET s = rb_w32_get_osfhandle(fd);
3651 
3652  for (i = 0; i < set->fdset->fd_count; i++) {
3653  if (set->fdset->fd_array[i] == s) {
3654  return;
3655  }
3656  }
3657  if (set->fdset->fd_count >= (unsigned)set->capa) {
3658  set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
3659  set->fdset = xrealloc(set->fdset, sizeof(unsigned int) + sizeof(SOCKET) * set->capa);
3660  }
3661  set->fdset->fd_array[set->fdset->fd_count++] = s;
3662 }
3663 
3664 #undef FD_ZERO
3665 #undef FD_SET
3666 #undef FD_CLR
3667 #undef FD_ISSET
3668 
3669 #define FD_ZERO(f) rb_fd_zero(f)
3670 #define FD_SET(i, f) rb_fd_set((i), (f))
3671 #define FD_CLR(i, f) rb_fd_clr((i), (f))
3672 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
3673 
3674 #endif
3675 
3676 #ifndef rb_fd_no_init
3677 #define rb_fd_no_init(fds) (void)(fds)
3678 #endif
3679 
/* True for errno values (EINTR, and ERESTART where defined) after which a
 * blocking syscall should simply be retried.
 * NOTE(review): signature line (retryable(int e)?) lost in extraction. */
3680 static inline int
3682 {
3683  if (e == EINTR) return TRUE;
3684 #ifdef ERESTART
3685  if (e == ERESTART) return TRUE;
3686 #endif
3687  return FALSE;
3688 }
3689 
3690 #define restore_fdset(fds1, fds2) \
3691  ((fds1) ? rb_fd_dup(fds1, fds2) : (void)0)
3692 
/*
 * Shrink *timeout to the time remaining until the absolute deadline `limit`
 * (seconds, in timeofday()'s clock).  Negative components are clamped to
 * zero; a NULL timeout (wait forever) is left untouched.
 */
static inline void
update_timeval(struct timeval *timeout, double limit)
{
    double rest;

    if (!timeout) return;

    rest = limit - timeofday();
    timeout->tv_sec = (time_t)rest;
    timeout->tv_usec = (int)((rest - (double)timeout->tv_sec) * 1e6);
    if (timeout->tv_sec < 0) timeout->tv_sec = 0;
    if (timeout->tv_usec < 0) timeout->tv_usec = 0;
}
3705 
/* Core retrying select loop: snapshots each requested fd set, runs
 * native_fd_select inside a BLOCKING_REGION (GVL released, ubf_select as the
 * unblocking function), and on a retryable errno restores the sets from the
 * snapshots and shrinks the timeout toward the absolute deadline before
 * looping.  Snapshots are torn down before returning select's result.
 * NOTE(review): the extraction lost original line 3749 inside the do/while —
 * presumably "RUBY_VM_CHECK_INTS_BLOCKING(th);" (interrupt check after the
 * blocking region). Verify against upstream thread.c. */
3706 static int
3707 do_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds,
3708  rb_fdset_t *exceptfds, struct timeval *timeout)
3709 {
3710  int MAYBE_UNUSED(result);
3711  int lerrno;
3712  rb_fdset_t MAYBE_UNUSED(orig_read);
3713  rb_fdset_t MAYBE_UNUSED(orig_write);
3714  rb_fdset_t MAYBE_UNUSED(orig_except);
3715  double limit = 0;
3716  struct timeval wait_rest;
3717  rb_thread_t *th = GET_THREAD();
3718 
3719 #define do_select_update() \
3720  (restore_fdset(readfds, &orig_read), \
3721  restore_fdset(writefds, &orig_write), \
3722  restore_fdset(exceptfds, &orig_except), \
3723  update_timeval(timeout, limit), \
3724  TRUE)
3725 
3726  if (timeout) {
3727  limit = timeofday();
3728  limit += (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
3729  wait_rest = *timeout;
3730  timeout = &wait_rest;
3731  }
3732 
3733 #define fd_init_copy(f) \
3734  (f##fds) ? rb_fd_init_copy(&orig_##f, f##fds) : rb_fd_no_init(&orig_##f)
3735  fd_init_copy(read);
3736  fd_init_copy(write);
3737  fd_init_copy(except);
3738 #undef fd_init_copy
3739 
3740  do {
3741  lerrno = 0;
3742 
3743  BLOCKING_REGION({
3744  result = native_fd_select(n, readfds, writefds, exceptfds,
3745  timeout, th);
3746  if (result < 0) lerrno = errno;
3747  }, ubf_select, th, FALSE);
3748 
3750  } while (result < 0 && retryable(errno = lerrno) && do_select_update());
3751 
3752 #define fd_term(f) if (f##fds) rb_fd_term(&orig_##f)
3753  fd_term(read);
3754  fd_term(write);
3755  fd_term(except);
3756 #undef fd_term
3757 
3758  return result;
3759 }
3760 
3761 static void
3762 rb_thread_wait_fd_rw(int fd, int read)
3763 {
3764  int result = 0;
3765  int events = read ? RB_WAITFD_IN : RB_WAITFD_OUT;
3766 
3767  thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
3768 
3769  if (fd < 0) {
3770  rb_raise(rb_eIOError, "closed stream");
3771  }
3772 
3773  result = rb_wait_for_single_fd(fd, events, NULL);
3774  if (result < 0) {
3775  rb_sys_fail(0);
3776  }
3777 
3778  thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
3779 }
3780 
/* Public wrapper: wait until `fd` is readable.
 * NOTE(review): signature line (rb_thread_wait_fd(int fd)?) lost in
 * extraction. */
3781 void
3783 {
3784  rb_thread_wait_fd_rw(fd, 1);
3785 }
3786 
/* Public wrapper: wait until `fd` is writable; always returns TRUE (the wait
 * raises on failure rather than returning).
 * NOTE(review): signature line (rb_thread_fd_writable(int fd)?) lost in
 * extraction. */
3787 int
3789 {
3790  rb_thread_wait_fd_rw(fd, 0);
3791  return TRUE;
3792 }
3793 
/* Scheduler-aware select: with no sets at all this degenerates to a plain
 * sleep (bounded by *timeout, or presumably forever when timeout is NULL);
 * otherwise each set is resized to cover `max` descriptors and do_select runs
 * the retrying wait.
 * NOTE(review): the extraction lost original line 3800 inside the
 * "!timeout" branch — presumably "rb_thread_sleep_forever();". Verify against
 * upstream thread.c. */
3794 int
3795 rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
3796  struct timeval *timeout)
3797 {
3798  if (!read && !write && !except) {
3799  if (!timeout) {
3801  return 0;
3802  }
3803  rb_thread_wait_for(*timeout);
3804  return 0;
3805  }
3806 
3807  if (read) {
3808  rb_fd_resize(max - 1, read);
3809  }
3810  if (write) {
3811  rb_fd_resize(max - 1, write);
3812  }
3813  if (except) {
3814  rb_fd_resize(max - 1, except);
3815  }
3816  return do_select(max, read, write, except, timeout);
3817 }
3818 
3819 /*
3820  * poll() is supported by many OSes, but so far Linux is the only
3821  * one we know of that supports using poll() in all places select()
3822  * would work.
3823  */
3824 #if defined(HAVE_POLL) && defined(__linux__)
3825 # define USE_POLL
3826 #endif
3827 
3828 #ifdef USE_POLL
3829 
3830 /* The same with linux kernel. TODO: make platform independent definition. */
3831 #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
3832 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
3833 #define POLLEX_SET (POLLPRI)
3834 
3835 #ifndef HAVE_PPOLL
3836 /* TODO: don't ignore sigmask */
/*
 * Fallback ppoll(2) emulation built on poll(2) for platforms without a
 * native implementation.  `sigmask` is currently ignored (see TODO above).
 *
 * The struct timespec timeout is converted to poll's millisecond
 * resolution.  Nanoseconds are rounded *up* to the next millisecond so the
 * emulation never wakes before the requested deadline (truncating down
 * could cause premature wakeups and needless re-polling by callers).  A
 * timeout too large to represent as an int of milliseconds degrades to an
 * infinite wait (-1), preserving the original overflow handling; a NULL
 * timespec also means wait forever, matching real ppoll(2).
 */
int
ppoll(struct pollfd *fds, nfds_t nfds,
      const struct timespec *ts, const sigset_t *sigmask)
{
    int timeout_ms;

    if (ts) {
	int tmp, tmp2;

	if (ts->tv_sec > INT_MAX/1000)
	    timeout_ms = -1;	/* unrepresentable: treat as infinite */
	else {
	    tmp = (int)(ts->tv_sec * 1000);
	    /* round up so we never return before the deadline */
	    tmp2 = (int)((ts->tv_nsec + (1000 * 1000 - 1)) / (1000 * 1000));
	    if (INT_MAX - tmp < tmp2)
		timeout_ms = -1;	/* sum would overflow: infinite */
	    else
		timeout_ms = tmp + tmp2;
	}
    }
    else
	timeout_ms = -1;	/* NULL timespec: wait forever */

    return poll(fds, nfds, timeout_ms);
}
3862 #endif
3863 
/*
 * Refreshes *timeout with the time remaining until the absolute deadline
 * `limit` (seconds since the epoch, as returned by timeofday()).  A NULL
 * timeout means "wait forever" and is left untouched.  Negative remainders
 * are clamped to zero so an expired deadline polls without blocking.
 */
static inline void
update_timespec(struct timespec *timeout, double limit)
{
    double remaining;

    if (!timeout) return;

    remaining = limit - timeofday();
    timeout->tv_sec = (long)remaining;
    timeout->tv_nsec = (long)((remaining - (double)timeout->tv_sec) * 1e9);
    if (timeout->tv_sec < 0) timeout->tv_sec = 0;
    if (timeout->tv_nsec < 0) timeout->tv_nsec = 0;
}
3876 
3877 /*
3878  * returns a mask of events
3879  */
/*
 * poll(2)-based implementation: blocks until `fd` reports one of `events`
 * (RB_WAITFD_IN/OUT/PRI), or `tv` elapses.  Returns the mask of ready
 * events, 0 on timeout, or -1 with errno set.
 */
3880 int
3881 rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
3882 {
3883  struct pollfd fds;
3884  int result = 0, lerrno;
3885  double limit = 0;
3886  struct timespec ts;
3887  struct timespec *timeout = NULL;
3888  rb_thread_t *th = GET_THREAD();
3889 
 /* Refresh `ts` to the time left until the absolute deadline `limit`;
  * evaluates to TRUE so it can ride the retry condition below. */
3890 #define poll_update() \
3891  (update_timespec(timeout, limit), \
3892  TRUE)
3893 
 /* Convert the relative timeval into a timespec plus an absolute
  * deadline so retries after interruption shrink the remaining wait. */
3894  if (tv) {
3895  ts.tv_sec = tv->tv_sec;
3896  ts.tv_nsec = tv->tv_usec * 1000;
3897  limit = timeofday();
3898  limit += (double)tv->tv_sec + (double)tv->tv_usec * 1e-6;
3899  timeout = &ts;
3900  }
3901 
3902  fds.fd = fd;
3903  fds.events = (short)events;
3904 
 /* Poll inside a blocking region (GVL released, ubf_select as the
  * unblocking function); retry on retryable errno with a refreshed
  * timeout so the overall deadline is honored. */
3905  do {
3906  fds.revents = 0;
3907  lerrno = 0;
3908  BLOCKING_REGION({
3909  result = ppoll(&fds, 1, timeout, NULL);
3910  if (result < 0) lerrno = errno;
3911  }, ubf_select, th, FALSE);
3912 
 /* NOTE(review): original line 3913 was dropped by the doc extractor;
  * upstream it checks pending interrupts here
  * (RUBY_VM_CHECK_INTS_BLOCKING(th)) — confirm against the release. */
3914  } while (result < 0 && retryable(errno = lerrno) && poll_update());
3915  if (result < 0) return -1;
3916 
 /* The fd was invalid (e.g. closed while we waited). */
3917  if (fds.revents & POLLNVAL) {
3918  errno = EBADF;
3919  return -1;
3920  }
3921 
3922  /*
3923  * POLLIN, POLLOUT have a different meanings from select(2)'s read/write bit.
3924  * Therefore we need to fix it up.
3925  */
3926  result = 0;
3927  if (fds.revents & POLLIN_SET)
3928  result |= RB_WAITFD_IN;
3929  if (fds.revents & POLLOUT_SET)
3930  result |= RB_WAITFD_OUT;
3931  if (fds.revents & POLLEX_SET)
3932  result |= RB_WAITFD_PRI;
3933 
3934  return result;
3935 }
3936 #else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
3937 static rb_fdset_t *
3939 {
3940  rb_fd_init(fds);
3941  rb_fd_set(fd, fds);
3942 
3943  return fds;
3944 }
3945 
3946 struct select_args {
3947  union {
3948  int fd;
3949  int error;
3950  } as;
3954  struct timeval *tv;
3955 };
3956 
3957 static VALUE
3959 {
3960  struct select_args *args = (struct select_args *)ptr;
3961  int r;
3962 
3963  r = rb_thread_fd_select(args->as.fd + 1,
3964  args->read, args->write, args->except, args->tv);
3965  if (r == -1)
3966  args->as.error = errno;
3967  if (r > 0) {
3968  r = 0;
3969  if (args->read && rb_fd_isset(args->as.fd, args->read))
3970  r |= RB_WAITFD_IN;
3971  if (args->write && rb_fd_isset(args->as.fd, args->write))
3972  r |= RB_WAITFD_OUT;
3973  if (args->except && rb_fd_isset(args->as.fd, args->except))
3974  r |= RB_WAITFD_PRI;
3975  }
3976  return (VALUE)r;
3977 }
3978 
3979 static VALUE
3981 {
3982  struct select_args *args = (struct select_args *)ptr;
3983 
3984  if (args->read) rb_fd_term(args->read);
3985  if (args->write) rb_fd_term(args->write);
3986  if (args->except) rb_fd_term(args->except);
3987 
3988  return (VALUE)-1;
3989 }
3990 
3991 int
3992 rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
3993 {
3994  rb_fdset_t rfds, wfds, efds;
3995  struct select_args args;
3996  int r;
3997  VALUE ptr = (VALUE)&args;
3998 
3999  args.as.fd = fd;
4000  args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4001  args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4002  args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4003  args.tv = tv;
4004 
4005  r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4006  if (r == -1)
4007  errno = args.as.error;
4008 
4009  return r;
4010 }
4011 #endif /* ! USE_POLL */
4012 
4013 /*
4014  * for GC
4015  */
4016 
4017 #ifdef USE_CONSERVATIVE_STACK_END
4018 void
4020 {
4021  VALUE stack_end;
4022  *stack_end_p = &stack_end;
4023 }
4024 #endif
4025 
4026 
4027 /*
4028  *
4029  */
4030 
4031 void
4033 {
4034  /* mth must be main_thread */
4035  if (rb_signal_buff_size() > 0) {
4036  /* wakeup main thread */
4038  }
4039 }
4040 
4041 static void
4043 {
4044  rb_vm_t *vm = GET_VM(); /* TODO: fix me for Multi-VM */
4045 
4046  /*
4047  * Tricky: thread_destruct_lock doesn't close a race against
4048  * vm->running_thread switch. however it guarantees th->running_thread
4049  * point to valid pointer or NULL.
4050  */
4051  native_mutex_lock(&vm->thread_destruct_lock);
4052  /* for time slice */
4053  if (vm->running_thread)
4055  native_mutex_unlock(&vm->thread_destruct_lock);
4056 
4057  /* check signal */
4059 
4060 #if 0
4061  /* prove profiler */
4062  if (vm->prove_profile.enable) {
4063  rb_thread_t *th = vm->running_thread;
4064 
4065  if (vm->during_gc) {
4066  /* GC prove profiling */
4067  }
4068  }
4069 #endif
4070 }
4071 
4072 void
4074 {
4075  if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4076  native_reset_timer_thread();
4077  }
4078 }
4079 
4080 void
4082 {
4083  native_reset_timer_thread();
4084 }
4085 
4086 void
4088 {
4089  system_working = 1;
4090  rb_thread_create_timer_thread();
4091 }
4092 
4093 #if defined(HAVE_WORKING_FORK)
4094 static int
4095 clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4096 {
4097  int i;
4098  VALUE lines = (VALUE)val;
4099 
4100  for (i = 0; i < RARRAY_LEN(lines); i++) {
4101  if (RARRAY_AREF(lines, i) != Qnil) {
4102  RARRAY_ASET(lines, i, INT2FIX(0));
4103  }
4104  }
4105  return ST_CONTINUE;
4106 }
4107 
4108 static void
4109 clear_coverage(void)
4110 {
4111  VALUE coverages = rb_get_coverages();
4112  if (RTEST(coverages)) {
4113  st_foreach(rb_hash_tbl_raw(coverages), clear_coverage_i, 0);
4114  }
4115 }
4116 
4117 static void
4118 rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4119 {
4120  rb_thread_t *i = 0;
4121  rb_vm_t *vm = th->vm;
4122  vm->main_thread = th;
4123 
4124  gvl_atfork(th->vm);
4125 
4126  list_for_each(&vm->living_threads, i, vmlt_node) {
4127  atfork(i, th);
4128  }
4131  vm->sleeper = 0;
4132  clear_coverage();
4133 }
4134 
4135 static void
4136 terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4137 {
4138  if (th != current_th) {
4139  rb_mutex_abandon_keeping_mutexes(th);
4140  rb_mutex_abandon_locking_mutex(th);
4142  }
4143 }
4144 
4145 void
4146 rb_thread_atfork(void)
4147 {
4148  rb_thread_t *th = GET_THREAD();
4149  rb_thread_atfork_internal(th, terminate_atfork_i);
4150  th->join_list = NULL;
4151 
4152  /* We don't want reproduce CVE-2003-0900. */
4154 }
4155 
4156 static void
4157 terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4158 {
4159  if (th != current_th) {
4161  }
4162 }
4163 
4164 void
4166 {
4167  rb_thread_t *th = GET_THREAD();
4168  rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
4169 }
4170 #else
4171 void
4173 {
4174 }
4175 
4176 void
4178 {
4179 }
4180 #endif
4181 
4182 struct thgroup {
4185 };
4186 
/*
 * dsize hook of the "thgroup" typed-data type: reports the fixed heap
 * footprint of one struct thgroup.  `ptr` is intentionally unused since
 * the struct carries no out-of-line allocations.
 */
4187 static size_t
4188 thgroup_memsize(const void *ptr)
4189 {
4190  return sizeof(struct thgroup);
4191 }
4192 
4194  "thgroup",
4197 };
4198 
4199 /*
4200  * Document-class: ThreadGroup
4201  *
4202  * ThreadGroup provides a means of keeping track of a number of threads as a
4203  * group.
4204  *
4205  * A given Thread object can only belong to one ThreadGroup at a time; adding
4206  * a thread to a new group will remove it from any previous group.
4207  *
4208  * Newly created threads belong to the same group as the thread from which they
4209  * were created.
4210  */
4211 
4212 /*
4213  * Document-const: Default
4214  *
4215  * The default ThreadGroup created when Ruby starts; all Threads belong to it
4216  * by default.
4217  */
4218 static VALUE
4220 {
4221  VALUE group;
4222  struct thgroup *data;
4223 
4224  group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
4225  data->enclosed = 0;
4226  data->group = group;
4227 
4228  return group;
4229 }
4230 
4231 /*
4232  * call-seq:
4233  * thgrp.list -> array
4234  *
4235  * Returns an array of all existing Thread objects that belong to this group.
4236  *
4237  * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
4238  */
4239 
4240 static VALUE
4242 {
4243  VALUE ary = rb_ary_new();
4244  rb_vm_t *vm = GET_THREAD()->vm;
4245  rb_thread_t *th = 0;
4246 
4247  list_for_each(&vm->living_threads, th, vmlt_node) {
4248  if (th->thgroup == group) {
4249  rb_ary_push(ary, th->self);
4250  }
4251  }
4252  return ary;
4253 }
4254 
4255 
4256 /*
4257  * call-seq:
4258  * thgrp.enclose -> thgrp
4259  *
4260  * Prevents threads from being added to or removed from the receiving
4261  * ThreadGroup.
4262  *
4263  * New threads can still be started in an enclosed ThreadGroup.
4264  *
4265  * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
4266  * thr = Thread::new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
4267  * tg = ThreadGroup::new #=> #<ThreadGroup:0x402752d4>
4268  * tg.add thr
4269  * #=> ThreadError: can't move from the enclosed thread group
4270  */
4271 
4272 static VALUE
4274 {
4275  struct thgroup *data;
4276 
4277  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4278  data->enclosed = 1;
4279 
4280  return group;
4281 }
4282 
4283 
4284 /*
4285  * call-seq:
4286  * thgrp.enclosed? -> true or false
4287  *
4288  * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
4289  */
4290 
4291 static VALUE
4293 {
4294  struct thgroup *data;
4295 
4296  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4297  if (data->enclosed)
4298  return Qtrue;
4299  return Qfalse;
4300 }
4301 
4302 
4303 /*
4304  * call-seq:
4305  * thgrp.add(thread) -> thgrp
4306  *
4307  * Adds the given +thread+ to this group, removing it from any other
4308  * group to which it may have previously been a member.
4309  *
4310  * puts "Initial group is #{ThreadGroup::Default.list}"
4311  * tg = ThreadGroup.new
4312  * t1 = Thread.new { sleep }
4313  * t2 = Thread.new { sleep }
4314  * puts "t1 is #{t1}"
4315  * puts "t2 is #{t2}"
4316  * tg.add(t1)
4317  * puts "Initial group now #{ThreadGroup::Default.list}"
4318  * puts "tg group now #{tg.list}"
4319  *
4320  * This will produce:
4321  *
4322  * Initial group is #<Thread:0x401bdf4c>
4323  * t1 is #<Thread:0x401b3c90>
4324  * t2 is #<Thread:0x401b3c18>
4325  * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4326  * tg group now #<Thread:0x401b3c90>
4327  */
4328 
4329 static VALUE
4331 {
4332  rb_thread_t *th;
4333  struct thgroup *data;
4334 
4335  GetThreadPtr(thread, th);
4336 
4337  if (OBJ_FROZEN(group)) {
4338  rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4339  }
4340  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4341  if (data->enclosed) {
4342  rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4343  }
4344 
4345  if (!th->thgroup) {
4346  return Qnil;
4347  }
4348 
4349  if (OBJ_FROZEN(th->thgroup)) {
4350  rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4351  }
4352  TypedData_Get_Struct(th->thgroup, struct thgroup, &thgroup_data_type, data);
4353  if (data->enclosed) {
4355  "can't move from the enclosed thread group");
4356  }
4357 
4358  th->thgroup = group;
4359  return group;
4360 }
4361 
4362 /*
4363  * Document-class: ThreadShield
4364  */
4365 static void
4367 {
4368  rb_gc_mark((VALUE)ptr);
4369 }
4370 
4372  "thread_shield",
4373  {thread_shield_mark, 0, 0,},
4374  0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4375 };
4376 
4377 static VALUE
4379 {
4380  return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
4381 }
4382 
4383 #define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
4384 #define THREAD_SHIELD_WAITING_MASK (FL_USER0|FL_USER1|FL_USER2|FL_USER3|FL_USER4|FL_USER5|FL_USER6|FL_USER7|FL_USER8|FL_USER9|FL_USER10|FL_USER11|FL_USER12|FL_USER13|FL_USER14|FL_USER15|FL_USER16|FL_USER17|FL_USER18|FL_USER19)
4385 #define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
4386 #define rb_thread_shield_waiting(b) (int)((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT)
4387 
4388 static inline void
4390 {
4391  unsigned int w = rb_thread_shield_waiting(b);
4392  w++;
4394  rb_raise(rb_eRuntimeError, "waiting count overflow");
4395  RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4396  RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4397 }
4398 
4399 static inline void
4401 {
4402  unsigned int w = rb_thread_shield_waiting(b);
4403  if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
4404  w--;
4405  RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4406  RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4407 }
4408 
4409 VALUE
4411 {
4412  VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
4413  rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
4414  return thread_shield;
4415 }
4416 
4417 /*
4418  * Wait a thread shield.
4419  *
4420  * Returns
4421  * true: acquired the thread shield
4422  * false: the thread shield was destroyed and no other threads waiting
4423  * nil: the thread shield was destroyed but still in use
4424  */
4425 VALUE
4427 {
4428  VALUE mutex = GetThreadShieldPtr(self);
4429  rb_mutex_t *m;
4430 
4431  if (!mutex) return Qfalse;
4432  GetMutexPtr(mutex, m);
4433  if (m->th == GET_THREAD()) return Qnil;
4435  rb_mutex_lock(mutex);
4437  if (DATA_PTR(self)) return Qtrue;
4438  rb_mutex_unlock(mutex);
4439  return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
4440 }
4441 
4442 static VALUE
4444 {
4445  VALUE mutex = GetThreadShieldPtr(self);
4446  if (!mutex)
4447  rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
4448  return mutex;
4449 }
4450 
4451 /*
4452  * Release a thread shield, and return true if it has waiting threads.
4453  */
4454 VALUE
4456 {
4457  VALUE mutex = thread_shield_get_mutex(self);
4458  rb_mutex_unlock(mutex);
4459  return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
4460 }
4461 
4462 /*
4463  * Release and destroy a thread shield, and return true if it has waiting threads.
4464  */
4465 VALUE
4467 {
4468  VALUE mutex = thread_shield_get_mutex(self);
4469  DATA_PTR(self) = 0;
4470  rb_mutex_unlock(mutex);
4471  return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
4472 }
4473 
4474 static VALUE
4476 {
4477  return th->local_storage_recursive_hash;
4478 }
4479 
4480 static void
4482 {
4484 }
4485 
4486 ID rb_frame_last_func(void);
4487 
4488 /*
4489  * Returns the current "recursive list" used to detect recursion.
4490  * This list is a hash table, unique for the current thread and for
4491  * the current __callee__.
4492  */
4493 
4494 static VALUE
4496 {
4497  rb_thread_t *th = GET_THREAD();
4499  VALUE list;
4500  if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
4501  hash = rb_ident_hash_new();
4502  threadptr_recursive_hash_set(th, hash);
4503  list = Qnil;
4504  }
4505  else {
4506  list = rb_hash_aref(hash, sym);
4507  }
4508  if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
4509  list = rb_hash_new();
4510  rb_hash_aset(hash, sym, list);
4511  }
4512  return list;
4513 }
4514 
4515 /*
4516  * Returns Qtrue iff obj_id (or the pair <obj, paired_obj>) is already
4517  * in the recursion list.
4518  * Assumes the recursion list is valid.
4519  */
4520 
4521 static VALUE
4522 recursive_check(VALUE list, VALUE obj_id, VALUE paired_obj_id)
4523 {
4524 #if SIZEOF_LONG == SIZEOF_VOIDP
4525  #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
4526 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4527  #define OBJ_ID_EQL(obj_id, other) (RB_TYPE_P((obj_id), T_BIGNUM) ? \
4528  rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
4529 #endif
4530 
4531  VALUE pair_list = rb_hash_lookup2(list, obj_id, Qundef);
4532  if (pair_list == Qundef)
4533  return Qfalse;
4534  if (paired_obj_id) {
4535  if (!RB_TYPE_P(pair_list, T_HASH)) {
4536  if (!OBJ_ID_EQL(paired_obj_id, pair_list))
4537  return Qfalse;
4538  }
4539  else {
4540  if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
4541  return Qfalse;
4542  }
4543  }
4544  return Qtrue;
4545 }
4546 
4547 /*
4548  * Pushes obj_id (or the pair <obj_id, paired_obj_id>) in the recursion list.
4549  * For a single obj_id, it sets list[obj_id] to Qtrue.
4550  * For a pair, it sets list[obj_id] to paired_obj_id if possible,
4551  * otherwise list[obj_id] becomes a hash like:
4552  * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
4553  * Assumes the recursion list is valid.
4554  */
4555 
4556 static void
4558 {
4559  VALUE pair_list;
4560 
4561  if (!paired_obj) {
4562  rb_hash_aset(list, obj, Qtrue);
4563  }
4564  else if ((pair_list = rb_hash_lookup2(list, obj, Qundef)) == Qundef) {
4565  rb_hash_aset(list, obj, paired_obj);
4566  }
4567  else {
4568  if (!RB_TYPE_P(pair_list, T_HASH)){
4569  VALUE other_paired_obj = pair_list;
4570  pair_list = rb_hash_new();
4571  rb_hash_aset(pair_list, other_paired_obj, Qtrue);
4572  rb_hash_aset(list, obj, pair_list);
4573  }
4574  rb_hash_aset(pair_list, paired_obj, Qtrue);
4575  }
4576 }
4577 
4578 /*
4579  * Pops obj_id (or the pair <obj_id, paired_obj_id>) from the recursion list.
4580  * For a pair, if list[obj_id] is a hash, then paired_obj_id is
4581  * removed from the hash and no attempt is made to simplify
4582  * list[obj_id] from {only_one_paired_id => true} to only_one_paired_id
4583  * Assumes the recursion list is valid.
4584  */
4585 
4586 static int
4588 {
4589  if (paired_obj) {
4590  VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
4591  if (pair_list == Qundef) {
4592  return 0;
4593  }
4594  if (RB_TYPE_P(pair_list, T_HASH)) {
4595  rb_hash_delete_entry(pair_list, paired_obj);
4596  if (!RHASH_EMPTY_P(pair_list)) {
4597  return 1; /* keep hash until is empty */
4598  }
4599  }
4600  }
4601  rb_hash_delete_entry(list, obj);
4602  return 1;
4603 }
4604 
4606  VALUE (*func) (VALUE, VALUE, int);
4607  VALUE list;
4608  VALUE obj;
4609  VALUE objid;
4610  VALUE pairid;
4611  VALUE arg;
4612 };
4613 
4614 static VALUE
4616 {
4617  struct exec_recursive_params *p = (void *)data;
4618  return (*p->func)(p->obj, p->arg, FALSE);
4619 }
4620 
4621 /*
4622  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4623  * current method is called recursively on obj, or on the pair <obj, pairid>
4624  * If outer is 0, then the innermost func will be called with recursive set
4625  * to Qtrue, otherwise the outermost func will be called. In the latter case,
4626  * all inner func are short-circuited by throw.
4627  * Implementation details: the value thrown is the recursive list which is
4628  * proper to the current method and unlikely to be caught anywhere else.
4629  * list[recursive_key] is used as a flag for the outermost call.
4630  */
4631 
4632 static VALUE
4634 {
4635  VALUE result = Qundef;
4636  const ID mid = rb_frame_last_func();
4637  const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
4638  struct exec_recursive_params p;
4639  int outermost;
4640  p.list = recursive_list_access(sym);
4641  p.objid = rb_obj_id(obj);
4642  p.obj = obj;
4643  p.pairid = pairid;
4644  p.arg = arg;
4645  outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
4646 
4647  if (recursive_check(p.list, p.objid, pairid)) {
4648  if (outer && !outermost) {
4649  rb_throw_obj(p.list, p.list);
4650  }
4651  return (*func)(obj, arg, TRUE);
4652  }
4653  else {
4654  int state;
4655 
4656  p.func = func;
4657 
4658  if (outermost) {
4659  recursive_push(p.list, ID2SYM(recursive_key), 0);
4660  recursive_push(p.list, p.objid, p.pairid);
4661  result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
4662  if (!recursive_pop(p.list, p.objid, p.pairid)) goto invalid;
4663  if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
4664  if (state) JUMP_TAG(state);
4665  if (result == p.list) {
4666  result = (*func)(obj, arg, TRUE);
4667  }
4668  }
4669  else {
4670  volatile VALUE ret = Qundef;
4671  recursive_push(p.list, p.objid, p.pairid);
4672  PUSH_TAG();
4673  if ((state = EXEC_TAG()) == 0) {
4674  ret = (*func)(obj, arg, FALSE);
4675  }
4676  POP_TAG();
4677  if (!recursive_pop(p.list, p.objid, p.pairid)) {
4678  invalid:
4679  rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
4680  "for %+"PRIsVALUE" in %+"PRIsVALUE,
4681  sym, rb_thread_current());
4682  }
4683  if (state) JUMP_TAG(state);
4684  result = ret;
4685  }
4686  }
4687  *(volatile struct exec_recursive_params *)&p;
4688  return result;
4689 }
4690 
4691 /*
4692  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4693  * current method is called recursively on obj
4694  */
4695 
4696 VALUE
4698 {
4699  return exec_recursive(func, obj, 0, arg, 0);
4700 }
4701 
4702 /*
4703  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4704  * current method is called recursively on the ordered pair <obj, paired_obj>
4705  */
4706 
4707 VALUE
4709 {
4710  return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 0);
4711 }
4712 
4713 /*
4714  * If recursion is detected on the current method and obj, the outermost
4715  * func will be called with (obj, arg, Qtrue). All inner func will be
4716  * short-circuited using throw.
4717  */
4718 
4719 VALUE
4721 {
4722  return exec_recursive(func, obj, 0, arg, 1);
4723 }
4724 
4725 /*
4726  * If recursion is detected on the current method, obj and paired_obj,
4727  * the outermost func will be called with (obj, arg, Qtrue). All inner
4728  * func will be short-circuited using throw.
4729  */
4730 
4731 VALUE
4733 {
4734  return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 1);
4735 }
4736 
4737 /*
4738  * call-seq:
4739  * thread.backtrace -> array
4740  *
4741  * Returns the current backtrace of the target thread.
4742  *
4743  */
4744 
4745 static VALUE
4747 {
4748  return rb_vm_thread_backtrace(argc, argv, thval);
4749 }
4750 
4751 /* call-seq:
4752  * thread.backtrace_locations(*args) -> array or nil
4753  *
4754  * Returns the execution stack for the target thread---an array containing
4755  * backtrace location objects.
4756  *
4757  * See Thread::Backtrace::Location for more information.
4758  *
4759  * This method behaves similarly to Kernel#caller_locations except it applies
4760  * to a specific thread.
4761  */
4762 static VALUE
4764 {
4765  return rb_vm_thread_backtrace_locations(argc, argv, thval);
4766 }
4767 
4768 /*
4769  * Document-class: ThreadError
4770  *
4771  * Raised when an invalid operation is attempted on a thread.
4772  *
4773  * For example, when no other thread has been started:
4774  *
4775  * Thread.stop
4776  *
4777  * This will raises the following exception:
4778  *
4779  * ThreadError: stopping only thread
4780  * note: use sleep to stop forever
4781  */
4782 
4783 void
4785 {
4786 #undef rb_intern
4787 #define rb_intern(str) rb_intern_const(str)
4788 
4789  VALUE cThGroup;
4790  rb_thread_t *th = GET_THREAD();
4791 
4792  sym_never = ID2SYM(rb_intern("never"));
4793  sym_immediate = ID2SYM(rb_intern("immediate"));
4794  sym_on_blocking = ID2SYM(rb_intern("on_blocking"));
4795  id_locals = rb_intern("locals");
4796 
4807  rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
4809  rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
4811 #if THREAD_DEBUG < 0
4812  rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
4813  rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
4814 #endif
4817  rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
4818 
4819  rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
4824  rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
4835  rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
4836  rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
4837  rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
4838  rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
4841  rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
4842  rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
4843  rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
4844  rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
4848  rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
4849 
4852  rb_define_method(rb_cThread, "inspect", rb_thread_inspect, 0);
4853 
4855 
4856  cThGroup = rb_define_class("ThreadGroup", rb_cObject);
4858  rb_define_method(cThGroup, "list", thgroup_list, 0);
4859  rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
4860  rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
4861  rb_define_method(cThGroup, "add", thgroup_add, 1);
4862 
4863  {
4864  th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
4865  rb_define_const(cThGroup, "Default", th->thgroup);
4866  }
4867 
4868  recursive_key = rb_intern("__recursive_key__");
4870 
4871  /* init thread core */
4872  {
4873  /* main thread setting */
4874  {
4875  /* acquire global vm lock */
4876  gvl_init(th->vm);
4877  gvl_acquire(th->vm, th);
4878  native_mutex_initialize(&th->vm->thread_destruct_lock);
4879  native_mutex_initialize(&th->interrupt_lock);
4880  native_cond_initialize(&th->interrupt_cond,
4881  RB_CONDATTR_CLOCK_MONOTONIC);
4882 
4886 
4887  th->interrupt_mask = 0;
4888  }
4889  }
4890 
4891  rb_thread_create_timer_thread();
4892 
4893  /* suppress warnings on cygwin, mingw and mswin.*/
4894  (void)native_mutex_trylock;
4895 
4896  Init_thread_sync();
4897 }
4898 
4899 int
4901 {
4902  rb_thread_t *th = ruby_thread_from_native();
4903 
4904  return th != 0;
4905 }
4906 
4907 VALUE rb_vm_backtrace_str_ary(rb_thread_t *th, long lev, long n);
4908 static void
4910 {
4911  rb_thread_t *th = 0;
4912  VALUE sep = rb_str_new_cstr("\n ");
4913 
4914  rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
4916  list_for_each(&vm->living_threads, th, vmlt_node) {
4917  rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
4918  "native:%"PRI_THREAD_ID" int:%u",
4919  th->self, th, thread_id_str(th), th->interrupt_flag);
4920  if (th->locking_mutex) {
4921  rb_mutex_t *mutex;
4922  struct rb_thread_struct volatile *mth;
4923  int waiting;
4924  GetMutexPtr(th->locking_mutex, mutex);
4925 
4926  native_mutex_lock(&mutex->lock);
4927  mth = mutex->th;
4928  waiting = mutex->cond_waiting;
4929  native_mutex_unlock(&mutex->lock);
4930  rb_str_catf(msg, " mutex:%p cond:%d", mth, waiting);
4931  }
4932  {
4934  while (list) {
4935  rb_str_catf(msg, "\n depended by: tb_thread_id:%p", list->th);
4936  list = list->next;
4937  }
4938  }
4939  rb_str_catf(msg, "\n ");
4940  rb_str_concat(msg, rb_ary_join(rb_vm_backtrace_str_ary(th, 0, 0), sep));
4941  rb_str_catf(msg, "\n");
4942  }
4943 }
4944 
4945 static void
4947 {
4948  int found = 0;
4949  rb_thread_t *th = 0;
4950 
4951  if (vm_living_thread_num(vm) > vm->sleeper) return;
4952  if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
4953  if (patrol_thread && patrol_thread != GET_THREAD()) return;
4954 
4955  list_for_each(&vm->living_threads, th, vmlt_node) {
4957  found = 1;
4958  }
4959  else if (th->locking_mutex) {
4960  rb_mutex_t *mutex;
4961  GetMutexPtr(th->locking_mutex, mutex);
4962 
4963  native_mutex_lock(&mutex->lock);
4964  if (mutex->th == th || (!mutex->th && mutex->cond_waiting)) {
4965  found = 1;
4966  }
4967  native_mutex_unlock(&mutex->lock);
4968  }
4969  if (found)
4970  break;
4971  }
4972 
4973  if (!found) {
4974  VALUE argv[2];
4975  argv[0] = rb_eFatal;
4976  argv[1] = rb_str_new2("No live threads left. Deadlock?");
4977  debug_deadlock_check(vm, argv[1]);
4978  vm->sleeper--;
4979  rb_threadptr_raise(vm->main_thread, 2, argv);
4980  }
4981 }
4982 
4983 static void
4984 update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
4985 {
4987  if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
4988  long line = rb_sourceline() - 1;
4989  long count;
4990  VALUE num;
4991  if (line >= RARRAY_LEN(coverage)) { /* no longer tracked */
4992  return;
4993  }
4994  num = RARRAY_AREF(coverage, line);
4995  if (!FIXNUM_P(num)) return;
4996  count = FIX2LONG(num) + 1;
4997  if (POSFIXABLE(count)) {
4998  RARRAY_ASET(coverage, line, LONG2FIX(count));
4999  }
5000  }
5001 }
5002 
5003 VALUE
5005 {
5006  return GET_VM()->coverages;
5007 }
5008 
5009 void
5011 {
5012  GET_VM()->coverages = coverages;
5014 }
5015 
5016 /* Make coverage arrays empty so old covered files are no longer tracked. */
5017 static int
5019 {
5020  VALUE coverage = (VALUE)val;
5021  rb_ary_clear(coverage);
5022  return ST_CONTINUE;
5023 }
5024 
5025 void
5027 {
5028  VALUE coverages = rb_get_coverages();
5030  GET_VM()->coverages = Qfalse;
5032 }
5033 
5034 VALUE
5036 {
5038  rb_thread_t *cur_th = GET_THREAD();
5039 
5040  rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5041  OBJ_FREEZE_RAW(interrupt_mask);
5042  rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5043 
5044  return rb_ensure(b_proc, data, rb_ary_pop, cur_th->pending_interrupt_mask_stack);
5045 }
5046 
/*
 * Sends `sig` to `pid` like kill(2), but releases the GVL and then blocks
 * on the thread's interrupt condvar so that a self-directed signal is
 * observed before returning.  Raises via rb_sys_fail() if kill() failed.
 */
5047 void
5048 ruby_kill(rb_pid_t pid, int sig)
5049 {
5050  int err;
5051  rb_thread_t *th = GET_THREAD();
5052 
5053  /*
5054  * When target pid is self, many caller assume signal will be
5055  * delivered immediately and synchronously.
5056  */
5057  {
5058  GVL_UNLOCK_BEGIN();
 /* interrupt_lock is held across kill() so the wakeup broadcast cannot
  * slip in between sending the signal and entering the wait */
5059  native_mutex_lock(&th->interrupt_lock);
5060  err = kill(pid, sig);
 /* NOTE(review): when kill() fails (err < 0) nothing will ever signal
  * interrupt_cond, so this wait could block indefinitely — confirm
  * whether the failure check should precede the wait. */
5061  native_cond_wait(&th->interrupt_cond, &th->interrupt_lock);
5062  native_mutex_unlock(&th->interrupt_lock);
5063  GVL_UNLOCK_END();
5064  }
5065  if (err < 0) {
5066  rb_sys_fail(0);
5067  }
5068 }
#define GetMutexPtr(obj, tobj)
Definition: thread_sync.c:49
#define RBASIC_CLEAR_CLASS(obj)
Definition: internal.h:1312
static int vm_living_thread_num(rb_vm_t *vm)
Definition: thread.c:187
rb_control_frame_t * cfp
Definition: vm_core.h:708
VALUE rb_mutex_lock(VALUE mutex)
Definition: thread_sync.c:241
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
Definition: thread.c:455
rb_thread_list_t * join_list
Definition: vm_core.h:777
#define T_OBJECT
Definition: ruby.h:491
static VALUE sym_never
Definition: thread.c:82
static VALUE thgroup_enclose(VALUE group)
Definition: thread.c:4273
VALUE rb_eStandardError
Definition: error.c:760
static VALUE rb_thread_variable_p(VALUE thread, VALUE key)
Definition: thread.c:3360
#define eKillSignal
Definition: thread.c:94
#define RUBY_EVENT_THREAD_END
Definition: ruby.h:2073
ID rb_check_id(volatile VALUE *)
Returns ID for the given name if it is interned already, or 0.
Definition: symbol.c:923
VALUE * stack_end
Definition: vm_core.h:786
#define RUBY_VM_CHECK_INTS(th)
Definition: vm_core.h:1569
unsigned long running_time_us
Definition: vm_core.h:818
rb_vm_t * vm
Definition: vm_core.h:703
VALUE rb_exc_new(VALUE etype, const char *ptr, long len)
Definition: error.c:797
static VALUE mutex_alloc(VALUE klass)
Definition: thread_sync.c:94
static VALUE thgroup_add(VALUE group, VALUE thread)
Definition: thread.c:4330
void ruby_kill(rb_pid_t pid, int sig)
Definition: thread.c:5048
int ruby_thread_has_gvl_p(void)
Definition: thread.c:1541
VALUE rb_ary_pop(VALUE ary)
Definition: array.c:949
static VALUE rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
Definition: thread.c:1852
struct rb_mutex_struct * next_mutex
Definition: thread_sync.c:12
void ruby_thread_stack_overflow(rb_thread_t *th)
Definition: thread.c:2167
#define RARRAY_LEN(a)
Definition: ruby.h:1026
void rb_bug(const char *fmt,...)
Definition: error.c:482
static VALUE rb_thread_priority(VALUE thread)
Definition: thread.c:3395
int gettimeofday(struct timeval *, struct timezone *)
Definition: win32.c:4580
void rb_postponed_job_flush(rb_vm_t *vm)
Definition: vm_trace.c:1606
#define fd_init_copy(f)
#define FALSE
Definition: nkf.h:174
#define RUBY_TYPED_FREE_IMMEDIATELY
Definition: ruby.h:1145
static VALUE rb_thread_s_report_exc(void)
Definition: thread.c:2675
VALUE rb_obj_id(VALUE obj)
Definition: gc.c:3100
static void thread_cleanup_func_before_exec(void *th_ptr)
Definition: thread.c:518
#define INT2NUM(x)
Definition: ruby.h:1538
static VALUE trap(int sig, sighandler_t func, VALUE command)
Definition: signal.c:1210
struct rb_thread_struct * running_thread
Definition: vm_core.h:491
struct timeval * tv
Definition: thread.c:3954
Definition: st.h:79
void rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
Definition: thread.c:368
VALUE rb_make_exception(int argc, const VALUE *argv)
Definition: eval.c:763
#define RUBY_VM_SET_INTERRUPT(th)
Definition: vm_core.h:1549
static VALUE rb_thread_abort_exc_set(VALUE thread, VALUE val)
Definition: thread.c:2647
st_table * local_storage
Definition: vm_core.h:773
int pending_interrupt_queue_checked
Definition: vm_core.h:759
VALUE rb_eSignal
Definition: error.c:758
rb_fdset_t * read
Definition: thread.c:3951
VALUE rb_exec_recursive_outer(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE arg)
Definition: thread.c:4720
#define NUM2INT(x)
Definition: ruby.h:684
int count
Definition: encoding.c:56
struct rb_thread_struct::@204 machine
static int max(int a, int b)
Definition: strftime.c:142
static unsigned int hash(str, len) register const char *str
void rb_define_singleton_method(VALUE obj, const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a singleton method for obj.
Definition: class.c:1716
static VALUE thgroup_enclosed_p(VALUE group)
Definition: thread.c:4292
int rb_thread_check_trap_pending(void)
Definition: thread.c:1226
void rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
Definition: thread.c:2025
VALUE rb_thread_list(void)
Definition: thread.c:2484
#define GetProcPtr(obj, ptr)
Definition: vm_core.h:863
static VALUE thread_join_sleep(VALUE arg)
Definition: thread.c:859
VALUE rb_exec_recursive(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE arg)
Definition: thread.c:4697
#define CLASS_OF(v)
Definition: ruby.h:453
static VALUE rb_thread_variables(VALUE thread)
Definition: thread.c:3331
struct rb_thread_struct * th
Definition: vm_core.h:682
void rb_unblock_function_t(void *)
Definition: intern.h:895
VALUE rb_ary_delete_at(VALUE ary, long pos)
Definition: array.c:3045
static VALUE rb_thread_getname(VALUE thread)
Definition: thread.c:2947
#define st_foreach
Definition: regint.h:186
Definition: id.h:102
rb_unblock_function_t * func
Definition: vm_core.h:674
#define Qtrue
Definition: ruby.h:437
int rb_remove_event_hook(rb_event_hook_func_t func)
Definition: vm_trace.c:202
static void update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
Definition: thread.c:4984
static VALUE thread_s_new(int argc, VALUE *argv, VALUE klass)
Definition: thread.c:768
struct list_node vmlt_node
Definition: vm_core.h:701
static const VALUE * vm_proc_ep(VALUE procval)
Definition: vm_core.h:1329
NORETURN(void ruby_thread_stack_overflow(rb_thread_t *th))
void rb_error_frozen(const char *what)
Definition: error.c:2447
#define TypedData_Wrap_Struct(klass, data_type, sval)
Definition: ruby.h:1169
const char ruby_digitmap[]
Definition: bignum.c:37
#define CLOCK_MONOTONIC
Definition: win32.h:134
VALUE pending_interrupt_mask_stack
Definition: vm_core.h:758
VALUE rb_ary_shift(VALUE ary)
Definition: array.c:1000
static void rb_vm_living_threads_insert(rb_vm_t *vm, rb_thread_t *th)
Definition: vm_core.h:1453
static VALUE threadptr_local_aref(rb_thread_t *th, ID id)
Definition: thread.c:3031
Definition: st.h:99
#define TypedData_Get_Struct(obj, type, data_type, sval)
Definition: ruby.h:1190
VALUE rb_mod_ancestors(VALUE mod)
Definition: class.c:1085
#define OBJ_FREEZE_RAW(x)
Definition: ruby.h:1307
void rb_threadptr_signal_raise(rb_thread_t *th, int sig)
Definition: thread.c:2142
struct rb_thread_struct volatile * th
Definition: thread_sync.c:11
static struct timeval double2timeval(double d)
Definition: thread.c:1046
#define sysstack_error
Definition: vm_core.h:1486
SOCKET rb_w32_get_osfhandle(int)
Definition: win32.c:1064
VALUE rb_eTypeError
Definition: error.c:762
VALUE rb_thread_stop(void)
Definition: thread.c:2451
#define TH_JUMP_TAG(th, st)
Definition: eval_intern.h:186
VALUE rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, int argc, const VALUE *argv, VALUE passed_block_handler)
Definition: vm.c:1150
VALUE rb_ary_push(VALUE ary, VALUE item)
Definition: array.c:905
#define VM_BLOCK_HANDLER_NONE
Definition: vm_core.h:1070
void rb_thread_wait_for(struct timeval time)
Definition: thread.c:1201
SSL_METHOD *(* func)(void)
Definition: ossl_ssl.c:54
if(len<=MAX_WORD_LENGTH &&len >=MIN_WORD_LENGTH)
Definition: zonetab.h:883
VALUE rb_str_concat(VALUE, VALUE)
Definition: string.c:2890
void rb_signal_exec(rb_thread_t *th, int sig)
Definition: signal.c:1011
struct st_table * rb_hash_tbl_raw(VALUE hash)
Definition: hash.c:490
VALUE rb_ary_tmp_new(long capa)
Definition: array.c:532
unsigned int report_on_exception
Definition: vm_core.h:814
#define rb_fd_zero(f)
Definition: intern.h:352
int kill(int, int)
Definition: win32.c:4724
void rb_threadptr_setup_exception(rb_thread_t *th, VALUE mesg, VALUE cause)
Definition: eval.c:598
static VALUE rb_thread_safe_level(VALUE thread)
Definition: thread.c:2931
static VALUE rb_thread_aset(VALUE self, VALUE id, VALUE val)
Definition: thread.c:3172
VALUE rb_thread_current(void)
Definition: thread.c:2504
#define PRIxVALUE
Definition: ruby.h:133
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
Definition: thread.c:1593
#define OBJ_ID_EQL(obj_id, other)
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:2207
VALUE rb_thread_alloc(VALUE klass)
Definition: vm.c:2564
static VALUE rb_thread_abort_exc(VALUE thread)
Definition: thread.c:2625
static void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
Definition: thread.c:1293
VALUE rb_ivar_get(VALUE, ID)
Definition: variable.c:1260
void rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
Definition: thread.c:350
void * rb_thread_call_without_gvl(void *(*func)(void *data), void *data1, rb_unblock_function_t *ubf, void *data2)
Definition: thread.c:1427
VALUE rb_ary_clear(VALUE ary)
Definition: array.c:3487
int rb_thread_alone(void)
Definition: thread.c:3274
VALUE rb_convert_type(VALUE, int, const char *, const char *)
Definition: object.c:2630
#define TH_EXEC_TAG()
Definition: eval_intern.h:180
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
VALUE rb_obj_is_kind_of(VALUE, VALUE)
Definition: object.c:690
#define T_HASH
Definition: ruby.h:499
static void sleep_forever(rb_thread_t *th, int nodeadlock, int spurious_check)
Definition: thread.c:1073
VALUE rb_thread_local_aref(VALUE thread, ID id)
Definition: thread.c:3049
rb_nativethread_lock_t lock
Definition: thread_sync.c:9
#define DATA_PTR(dta)
Definition: ruby.h:1113
VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, int *stateptr)
Definition: vm_eval.c:2041
#define RUBY_VM_SET_TRAP_INTERRUPT(th)
Definition: vm_core.h:1551
static size_t thgroup_memsize(const void *ptr)
Definition: thread.c:4188
static VALUE sym_immediate
Definition: thread.c:80
static int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region, rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
Definition: thread.c:1275
static void rb_vm_living_threads_remove(rb_vm_t *vm, rb_thread_t *th)
Definition: vm_core.h:1460
void rb_gc_mark(VALUE ptr)
Definition: gc.c:4394
VALUE rb_hash_lookup(VALUE hash, VALUE key)
Definition: hash.c:867
static void thread_shield_mark(void *ptr)
Definition: thread.c:4366
#define T_ARRAY
Definition: ruby.h:498
rb_thread_t * th
Definition: thread.c:100
#define st_delete
Definition: regint.h:182
#define st_lookup
Definition: regint.h:185
#define PUSH_TAG()
Definition: eval_intern.h:146
time_t tv_sec
Definition: missing.h:54
static volatile int system_working
Definition: thread.c:96
static VALUE thread_join(rb_thread_t *target_th, double delay)
Definition: thread.c:886
static VALUE remove_from_join_list(VALUE arg)
Definition: thread.c:838
VALUE rb_thread_kill(VALUE thread)
Definition: thread.c:2283
void rb_gc_force_recycle(VALUE obj)
Definition: gc.c:6102
static int rb_threadptr_dead(rb_thread_t *th)
Definition: thread.c:2812
static int do_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
Definition: thread.c:3707
#define FIXNUM_P(f)
Definition: ruby.h:365
static VALUE rb_thread_alive_p(VALUE thread)
Definition: thread.c:2881
rb_fdset_t * write
Definition: thread.c:3952
static const rb_thread_t * patrol_thread
Definition: thread_sync.c:231
VALUE rb_exec_recursive_paired(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
Definition: thread.c:4708
#define thread_id_str(th)
Definition: thread.c:273
void rb_thread_start_timer_thread(void)
Definition: thread.c:4087
static rb_fdset_t * init_set_fd(int fd, rb_fdset_t *fds)
Definition: thread.c:3938
#define THROW_DATA_P(err)
Definition: internal.h:787
#define RB_WAITFD_OUT
Definition: io.h:49
#define GET_THREAD()
Definition: vm_core.h:1513
VALUE thgroup_default
Definition: vm_core.h:496
#define rb_fd_set(n, f)
Definition: intern.h:353
time_t tv_sec
Definition: missing.h:61
#define sym(x)
Definition: date_core.c:3721
static VALUE rb_thread_stop_p(VALUE thread)
Definition: thread.c:2906
static void thread_cleanup_func(void *th_ptr, int atfork)
Definition: thread.c:529
static double timeofday(void)
Definition: thread.c:1177
static void rb_thread_sleep_deadly_allow_spurious_wakeup(void)
Definition: thread.c:1170
RUBY_SYMBOL_EXPORT_BEGIN typedef unsigned long st_data_t
Definition: st.h:22
int ruby_native_thread_p(void)
Definition: thread.c:4900
static VALUE rb_thread_s_abort_exc_set(VALUE self, VALUE val)
Definition: thread.c:2602
static rb_atomic_t threadptr_get_interrupts(rb_thread_t *th)
Definition: thread.c:2012
#define rb_fd_isset(n, f)
Definition: intern.h:355
#define PRI_THREAD_ID
Definition: thread.c:274
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
Definition: ruby.h:1830
void rb_hash_foreach(VALUE hash, int(*func)(ANYARGS), VALUE farg)
Definition: hash.c:402
VALUE rb_thread_wakeup(VALUE thread)
Definition: thread.c:2381
static VALUE rb_thread_s_main(VALUE klass)
Definition: thread.c:2538
void rb_exc_raise(VALUE mesg)
Definition: eval.c:620
static void rb_thread_wait_fd_rw(int fd, int read)
Definition: thread.c:3762
struct timeval rb_time_timeval(VALUE time)
Definition: time.c:2292
static VALUE sym_on_blocking
Definition: thread.c:81
#define RHASH(obj)
Definition: internal.h:562
static void rb_thread_schedule_limits(unsigned long limits_us)
Definition: thread.c:1247
#define RB_TYPE_P(obj, type)
Definition: ruby.h:527
void rb_reset_random_seed(void)
Definition: random.c:1560
int rb_thread_fd_writable(int fd)
Definition: thread.c:3788
static void rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
Definition: thread.c:408
static VALUE thgroup_s_alloc(VALUE klass)
Definition: thread.c:4219
#define POSFIXABLE(f)
Definition: ruby.h:366
#define RUBY_VM_CHECK_INTS_BLOCKING(th)
Definition: thread.c:171
#define RUBY_VM_INTERRUPTED_ANY(th)
Definition: vm_core.h:1553
#define TH_POP_TAG()
Definition: eval_intern.h:137
#define MEMZERO(p, type, n)
Definition: ruby.h:1660
#define PRI_TIMET_PREFIX
Definition: ruby.h:143
static VALUE coverage(VALUE fname, int n)
Definition: ripper.c:12176
rb_thread_t * target
Definition: thread.c:833
size_t living_thread_num
Definition: vm_core.h:495
static void terminate_all(rb_vm_t *vm, const rb_thread_t *main_thread)
Definition: thread.c:438
const rb_iseq_t * iseq
Definition: vm_core.h:634
union select_args::@178 as
#define RUBY_THREAD_PRIORITY_MAX
Definition: thread.c:70
fd_set rb_fdset_t
Definition: intern.h:351
#define rb_fd_term(f)
Definition: intern.h:362
static VALUE rb_thread_priority_set(VALUE thread, VALUE prio)
Definition: thread.c:3430
double rb_num2dbl(VALUE)
Definition: object.c:3067
int rb_block_given_p(void)
Definition: eval.c:797
VALUE rb_hash_aset(VALUE hash, VALUE key, VALUE val)
Definition: hash.c:1576
#define EXEC_TAG()
Definition: eval_intern.h:183
VALUE rb_vm_thread_backtrace_locations(int argc, const VALUE *argv, VALUE thval)
Definition: vm_backtrace.c:906
VALUE locking_mutex
Definition: vm_core.h:766
static const rb_data_type_t thread_shield_data_type
Definition: thread.c:4371
#define val
long tv_usec
Definition: missing.h:55
RUBY_EXTERN VALUE rb_cObject
Definition: ruby.h:1872
VALUE rb_eRuntimeError
Definition: error.c:761
int fd
Definition: thread.c:101
static VALUE rb_thread_inspect(VALUE thread)
Definition: thread.c:3000
#define RB_WAITFD_PRI
Definition: io.h:48
static ID id_locals
Definition: thread.c:83
#define PRIdVALUE
Definition: ruby.h:130
static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *)
Definition: thread.c:2114
#define rb_fd_ptr(f)
Definition: intern.h:359
static int reset_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
Definition: thread.c:5018
VALUE rb_hash_delete_entry(VALUE hash, VALUE key)
Definition: hash.c:1112
VALUE rb_ary_new(void)
Definition: array.c:493
void * blocking_region_buffer
Definition: vm_core.h:743
static void thread_do_start(rb_thread_t *th, VALUE args)
Definition: thread.c:571
static VALUE exec_recursive(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
Definition: thread.c:4633
static VALUE thread_create_core(VALUE thval, VALUE args, VALUE(*fn)(ANYARGS))
Definition: thread.c:705
void Init_Thread(void)
Definition: thread.c:4784
#define JUMP_TAG(st)
Definition: eval_intern.h:188
#define NIL_P(v)
Definition: ruby.h:451
static int rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
Definition: thread.c:1660
long tv_nsec
Definition: missing.h:62
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition: class.c:646
static char msg[50]
Definition: strerror.c:8
static void rb_threadptr_ready(rb_thread_t *th)
Definition: thread.c:2106
int enclosed
Definition: thread.c:4183
#define rb_intern(str)
void rb_define_const(VALUE, const char *, VALUE)
Definition: variable.c:2734
void rb_thread_atfork_before_exec(void)
Definition: thread.c:4177
#define thread_debug
Definition: thread.c:267
static int rb_threadptr_pending_interrupt_empty_p(rb_thread_t *th)
Definition: thread.c:1654
#define OBJ_FROZEN(x)
Definition: ruby.h:1306
void rb_threadptr_check_signal(rb_thread_t *mth)
Definition: thread.c:4032
int argc
Definition: ruby.c:183
static void threadptr_check_pending_interrupt_queue(rb_thread_t *th)
Definition: thread.c:1600
void rb_thread_stop_timer_thread(void)
Definition: thread.c:4073
void rb_vm_register_special_exception(enum ruby_special_exceptions sp, VALUE cls, const char *mesg)
Definition: vm.c:2142
rb_thread_status
Definition: vm_core.h:649
static VALUE rb_thread_variable_get(VALUE thread, VALUE key)
Definition: thread.c:3206
#define Qfalse
Definition: ruby.h:436
static VALUE rb_thread_report_exc_set(VALUE thread, VALUE val)
Definition: thread.c:2757
const VALUE special_exceptions[ruby_special_error_count]
Definition: vm_core.h:506
void ruby_thread_init_stack(rb_thread_t *th)
Definition: thread.c:552
VALUE rb_proc_location(VALUE self)
Definition: proc.c:1147
static VALUE rb_thread_exit(void)
Definition: thread.c:2357
#define threadptr_initialized(th)
Definition: thread.c:745
RUBY_EXTERN VALUE rb_cModule
Definition: ruby.h:1895
void rb_thread_check_ints(void)
Definition: thread.c:1215
#define RUBY_UBF_PROCESS
Definition: intern.h:901
void rb_exit(int status)
Definition: process.c:3779
void rb_thread_fd_close(int fd)
Definition: thread.c:2226
VALUE rb_thread_shield_new(void)
Definition: thread.c:4410
volatile int sleeper
Definition: vm_core.h:502
void ruby_stop(int ex)
Calls ruby_cleanup() and exits the process.
Definition: eval.c:254
#define rb_str_new2
Definition: intern.h:857
VALUE rb_obj_alloc(VALUE)
Definition: object.c:1845
int err
Definition: win32.c:135
void rb_threadptr_error_print(rb_thread_t *volatile th, volatile VALUE errinfo)
Definition: eval_error.c:76
#define EXIT_FAILURE
Definition: eval_intern.h:33
VALUE rb_thread_shield_release(VALUE self)
Definition: thread.c:4455
void rb_thread_atfork(void)
Definition: thread.c:4172
#define POP_TAG()
Definition: eval_intern.h:147
struct list_node wfd_node
Definition: thread.c:99
#define GVL_UNLOCK_BEGIN()
Definition: thread.c:141
static const rb_data_type_t thgroup_data_type
Definition: thread.c:4193
VALUE rb_thread_create(VALUE(*fn)(ANYARGS), void *arg)
Definition: thread.c:823
void rb_throw_obj(VALUE tag, VALUE value)
Definition: vm_eval.c:1919
static VALUE thread_s_current(VALUE klass)
Definition: thread.c:2519
#define FD_SET(fd, set)
Definition: win32.h:593
static VALUE rb_cThreadShield
Definition: thread.c:78
static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check)
Definition: thread.c:1195
static VALUE rb_thread_s_report_exc_set(VALUE self, VALUE val)
Definition: thread.c:2712
#define ATOMIC_CAS(var, oldval, newval)
Definition: ruby_atomic.h:132
#define ALLOC(type)
Definition: ruby.h:1588
static void update_timeval(struct timeval *timeout, double limit)
Definition: thread.c:3694
VALUE read
Definition: io.c:8616
static void rb_vm_living_threads_init(rb_vm_t *vm)
Definition: vm_core.h:1445
static VALUE rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
Definition: thread.c:4763
VALUE rb_vm_thread_backtrace(int argc, const VALUE *argv, VALUE thval)
Definition: vm_backtrace.c:900
VALUE rb_yield(VALUE)
Definition: vm_eval.c:1020
static int recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
Definition: thread.c:4587
#define RARRAY_CONST_PTR(a)
Definition: ruby.h:1028
void * rb_thread_call_without_gvl2(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Definition: thread.c:1420
struct rb_unblock_callback oldubf
Definition: thread.c:116
int rb_during_gc(void)
Definition: gc.c:6664
#define rb_thread_set_current(th)
Definition: vm_core.h:1529
int errno
static const char * rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th)
Definition: thread_sync.c:331
#define TRUE
Definition: nkf.h:175
VALUE rb_uninterruptible(VALUE(*b_proc)(ANYARGS), VALUE data)
Definition: thread.c:5035
static int retryable(int e)
Definition: thread.c:3681
#define EXIT_SUCCESS
Definition: error.c:32
struct rb_mutex_struct * keeping_mutexes
Definition: vm_core.h:767
VALUE rb_thread_shield_wait(VALUE self)
Definition: thread.c:4426
VALUE rb_sprintf(const char *format,...)
Definition: sprintf.c:1440
int rb_get_next_signal(void)
Definition: signal.c:739
int rb_thread_fd_select(int max, rb_fdset_t *read, rb_fdset_t *write, rb_fdset_t *except, struct timeval *timeout)
Definition: thread.c:3795
VALUE rb_to_symbol(VALUE name)
Definition: string.c:9989
#define rb_fd_copy(d, s, n)
Definition: intern.h:356
static int set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg, struct rb_unblock_callback *old, int fail_if_interrupted)
Definition: thread.c:374
#define rb_enc_name(enc)
Definition: encoding.h:171
VALUE rb_class_path(VALUE)
Definition: variable.c:294
VALUE rb_hash_new(void)
Definition: hash.c:441
#define do_select_update()
#define DELAY_INFTY
Definition: thread.c:830
int rb_threadptr_reset_raised(rb_thread_t *th)
Definition: thread.c:2190
int rb_scan_args(int argc, const VALUE *argv, const char *fmt,...)
Definition: class.c:1919
VALUE rb_iseq_coverage(const rb_iseq_t *iseq)
Definition: iseq.c:715
struct list_head waiting_fds
Definition: vm_core.h:493
unsigned char buf[MIME_BUF_SIZE]
Definition: nkf.c:4309
#define RUBY_VM_SET_TIMER_INTERRUPT(th)
Definition: vm_core.h:1548
#define PRIsVALUE
Definition: ruby.h:135
void rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
Definition: vm_trace.c:143
unsigned long ID
Definition: ruby.h:86
static VALUE thread_initialize(VALUE thread, VALUE args)
Definition: thread.c:803
handle_interrupt_timing
Definition: thread.c:1607
#define TAG_FATAL
Definition: vm_core.h:170
static void rb_check_deadlock(rb_vm_t *vm)
Definition: thread.c:4946
#define GVL_UNLOCK_END()
Definition: thread.c:146
#define Qnil
Definition: ruby.h:438
void rb_thread_sleep_forever(void)
Definition: thread.c:1156
#define LIKELY(x)
Definition: ffi_common.h:125
static VALUE thread_shield_alloc(VALUE klass)
Definition: thread.c:4378
VALUE group
Definition: thread.c:4184
unsigned long VALUE
Definition: ruby.h:85
#define THREAD_SHIELD_WAITING_MASK
Definition: thread.c:4384
static void threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
Definition: thread.c:4481
#define SAVE_ROOT_JMPBUF(th, stmt)
Definition: eval_intern.h:121
const VALUE * rb_vm_proc_local_ep(VALUE proc)
Definition: thread.c:558
static VALUE result
Definition: nkf.c:40
#define EXEC_EVENT_HOOK(th_, flag_, self_, id_, called_id_, klass_, data_)
Definition: vm_core.h:1628
RUBY_EXTERN VALUE rb_cThread
Definition: ruby.h:1909
int rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
Definition: thread.c:3992
static int keys_i(VALUE key, VALUE value, VALUE ary)
Definition: thread.c:3307
#define RBASIC(obj)
Definition: ruby.h:1204
struct rb_thread_struct * main_thread
Definition: vm_core.h:490
int error
Definition: thread.c:3949
static VALUE rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
Definition: thread.c:1996
VALUE first_proc
Definition: vm_core.h:779
#define FIX2INT(x)
Definition: ruby.h:686
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
Definition: thread.c:1587
#define RUBY_EVENT_THREAD_BEGIN
Definition: ruby.h:2072
void rb_gc_set_stack_end(VALUE **stack_end_p)
Definition: thread.c:4019
static void rb_thread_shield_waiting_dec(VALUE b)
Definition: thread.c:4400
static const char * thread_status_name(rb_thread_t *th, int detail)
Definition: thread.c:2792
#define TH_PUSH_TAG(th)
Definition: eval_intern.h:131
int clock_gettime(clockid_t, struct timespec *)
Definition: win32.c:4592
void rb_thread_schedule(void)
Definition: thread.c:1264
#define rb_enc_asciicompat(enc)
Definition: encoding.h:239
VALUE rb_ensure(VALUE(*b_proc)(ANYARGS), VALUE data1, VALUE(*e_proc)(ANYARGS), VALUE data2)
Definition: eval.c:923
VALUE rb_str_new_cstr(const char *)
Definition: string.c:770
VALUE rb_exec_recursive_paired_outer(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
Definition: thread.c:4732
static VALUE thread_value(VALUE self)
Definition: thread.c:1012
static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old)
Definition: thread.c:400
rb_atomic_t interrupt_flag
Definition: vm_core.h:761
rb_nativethread_cond_t interrupt_cond
Definition: vm_core.h:764
static void timer_thread_function(void *)
Definition: thread.c:4042
void rb_thread_wait_fd(int fd)
Definition: thread.c:3782
static VALUE rb_thread_setname(VALUE thread, VALUE name)
Definition: thread.c:2963
VALUE rb_blocking_function_t(void *)
Definition: intern.h:896
void rb_sys_fail(const char *mesg)
Definition: error.c:2326
static VALUE threadptr_recursive_hash(rb_thread_t *th)
Definition: thread.c:4475
VALUE rb_vm_backtrace_str_ary(rb_thread_t *th, long lev, long n)
Definition: vm_backtrace.c:659
#define MAYBE_UNUSED
Definition: ffi_common.h:32
VALUE rb_thread_main(void)
Definition: thread.c:2525
static VALUE rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
Definition: thread.c:4746
#define StringValueCStr(v)
Definition: ruby.h:571
static VALUE rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
Definition: thread.c:1908
VALUE(* first_func)(ANYARGS)
Definition: vm_core.h:781
enum rb_thread_status status
Definition: vm_core.h:738
static void st_delete_wrap(st_table *table, st_data_t key)
Definition: thread.c:105
void rb_thread_sleep(int sec)
Definition: thread.c:1241
#define rb_fd_max(f)
Definition: intern.h:363
static void debug_deadlock_check(rb_vm_t *vm, VALUE msg)
Definition: thread.c:4909
static VALUE thread_s_pass(VALUE klass)
Definition: thread.c:1562
static VALUE thread_join_m(int argc, VALUE *argv, VALUE self)
Definition: thread.c:981
#define RSTRING_PTR(str)
Definition: ruby.h:982
const VALUE * root_lep
Definition: vm_core.h:730
#define thread_start_func_2(th, st, rst)
Definition: thread.c:278
void rb_thread_sleep_deadly(void)
Definition: thread.c:1163
enum rb_thread_status prev_status
Definition: thread.c:115
#define RARRAY_ASET(a, i, v)
Definition: ruby.h:1041
VALUE first_args
Definition: vm_core.h:780
void rb_thread_recycle_stack_release(VALUE *)
Definition: vm.c:2336
void rb_thread_terminate_all(void)
Definition: thread.c:472
rb_encoding * rb_enc_get(VALUE obj)
Definition: encoding.c:860
#define THREAD_SHIELD_WAITING_SHIFT
Definition: thread.c:4385
static void rb_threadptr_to_kill(rb_thread_t *th)
Definition: thread.c:2002
int size
Definition: encoding.c:57
void rb_reset_coverages(void)
Definition: thread.c:5026
VALUE rb_ident_hash_new(void)
Definition: hash.c:2808
VALUE rb_hash_lookup2(VALUE hash, VALUE key, VALUE def)
Definition: hash.c:856
#define INT2FIX(i)
Definition: ruby.h:232
void rb_thread_execute_interrupts(VALUE thval)
Definition: thread.c:2098
VALUE(* func)(VALUE, VALUE, int)
Definition: thread.c:4606
const VALUE * rb_vm_ep_local_ep(const VALUE *ep)
Definition: vm.c:55
int rb_sourceline(void)
Definition: vm.c:1261
ID rb_frame_last_func(void)
Definition: eval.c:1018
static VALUE thgroup_list(VALUE group)
Definition: thread.c:4241
VALUE root_svar
Definition: vm_core.h:731
#define RARRAY_AREF(a, i)
Definition: ruby.h:1040
VALUE * stack_start
Definition: vm_core.h:785
#define RUBY_INTERNAL_EVENT_SWITCH
Definition: ruby.h:2082
unsigned long interrupt_mask
Definition: vm_core.h:762
VALUE rb_block_proc(void)
Definition: proc.c:787
#define xmalloc
Definition: defines.h:183
#define RUBY_THREAD_PRIORITY_MIN
Definition: thread.c:71
#define st_init_numtable
Definition: regint.h:178
#define RBASIC_CLASS(obj)
Definition: ruby.h:878
#define ANYARGS
Definition: defines.h:173
unsigned int abort_on_exception
Definition: vm_core.h:813
VALUE rb_thread_group(VALUE thread)
Definition: thread.c:2778
struct rb_unblock_callback unblock
Definition: vm_core.h:765
#define BLOCKING_REGION(exec, ubf, ubfarg, fail_if_interrupted)
Definition: thread.c:160
VALUE rb_hash_aref(VALUE hash, VALUE key)
Definition: hash.c:845
static VALUE recursive_list_access(VALUE sym)
Definition: thread.c:4495
#define rb_fd_select(n, rfds, wfds, efds, timeout)
Definition: intern.h:364
#define RB_GC_SAVE_MACHINE_CONTEXT(th)
Definition: thread.c:133
VALUE rb_str_catf(VALUE str, const char *format,...)
Definition: sprintf.c:1480
void rb_thread_reset_timer_thread(void)
Definition: thread.c:4081
rb_nativethread_id_t thread_id
Definition: vm_core.h:734
static VALUE rb_thread_status(VALUE thread)
Definition: thread.c:2850
rb_nativethread_lock_t thread_destruct_lock
Definition: vm_core.h:488
int rb_signal_buff_size(void)
Definition: signal.c:709
static void rb_thread_shield_waiting_inc(VALUE b)
Definition: thread.c:4389
#define rb_fd_clr(n, f)
Definition: intern.h:354
RUBY_EXTERN char * strerror(int)
Definition: strerror.c:11
#define LONG2FIX(i)
Definition: ruby.h:234
#define RTEST(v)
Definition: ruby.h:450
#define FD_CLR(f, s)
Definition: win32.h:611
rb_thread_t * waiting
Definition: thread.c:833
#define OBJ_INFECT(x, s)
Definition: ruby.h:1304
VALUE rb_mutex_unlock(VALUE mutex)
Definition: thread_sync.c:371
struct rb_encoding_entry * list
Definition: encoding.c:55
VALUE rb_thread_shield_destroy(VALUE self)
Definition: thread.c:4466
static VALUE rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
Definition: thread.c:1673
VALUE rb_str_cat_cstr(VALUE, const char *)
Definition: string.c:2674
static void recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
Definition: thread.c:4557
static VALUE thread_start(VALUE klass, VALUE args)
Definition: thread.c:796
void rb_obj_call_init(VALUE obj, int argc, const VALUE *argv)
Definition: eval.c:1422
unsigned int thread_report_on_exception
Definition: vm_core.h:500
static VALUE threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
Definition: thread.c:3125
#define TypedData_Make_Struct(klass, type, data_type, sval)
Definition: ruby.h:1182
double delay
Definition: thread.c:834
void rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
Definition: thread.c:362
VALUE rb_ary_dup(VALUE ary)
Definition: array.c:1927
#define GetThreadPtr(obj, ptr)
Definition: vm_core.h:646
VALUE rb_ary_join(VALUE ary, VALUE sep)
Definition: array.c:2034
#define st_insert
Definition: regint.h:184
int rb_atomic_t
Definition: ruby_atomic.h:120
static VALUE thread_raise_m(int argc, VALUE *argv, VALUE self)
Definition: thread.c:2253
#define rb_fd_resize(n, f)
Definition: intern.h:358
#define rb_thread_shield_waiting(b)
Definition: thread.c:4386
static void sleep_timeval(rb_thread_t *th, struct timeval time, int spurious_check)
Definition: thread.c:1114
char rb_thread_id_string_t[sizeof(rb_nativethread_id_t) *2+3]
Definition: vm_core.h:696
#define ruby_debug
Definition: ruby.h:1793
#define RUBY_EVENT_COVERAGE
Definition: ruby.h:2079
const char * name
Definition: nkf.c:208
#define xrealloc
Definition: defines.h:186
RUBY_EXTERN VALUE rb_eIOError
Definition: ruby.h:1926
#define ID2SYM(x)
Definition: ruby.h:383
VALUE rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
Definition: thread.c:1434
void rb_threadptr_trap_interrupt(rb_thread_t *th)
Definition: thread.c:432
static VALUE exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
Definition: thread.c:4615
VALUE rb_eFatal
Definition: error.c:759
VALUE local_storage_recursive_hash
Definition: vm_core.h:774
#define rb_fd_init_copy(d, s)
Definition: intern.h:361
VALUE rb_str_new_frozen(VALUE)
Definition: string.c:1123
struct rb_thread_list_struct * next
Definition: vm_core.h:681
#define RUBY_VM_INTERRUPTED(th)
Definition: vm_core.h:1552
#define rb_fd_init(f)
Definition: intern.h:360
uint32_t rb_event_flag_t
Definition: ruby.h:2095
static VALUE rb_thread_s_abort_exc(void)
Definition: thread.c:2565
VALUE rb_thread_local_aset(VALUE thread, ID id, VALUE val)
Definition: thread.c:3146
static int handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
Definition: thread.c:1729
struct list_head living_threads
Definition: vm_core.h:494
#define rb_fd_dup(d, s)
Definition: intern.h:357
void rb_vm_gvl_destroy(rb_vm_t *vm)
Definition: thread.c:342
int rb_threadptr_pending_interrupt_active_p(rb_thread_t *th)
Definition: thread.c:1710
static VALUE rb_thread_aref(VALUE thread, VALUE key)
Definition: thread.c:3117
#define fd_term(f)
rb_fdset_t * except
Definition: thread.c:3953
#define FD_ISSET(f, s)
Definition: win32.h:614
#define RUBY_TYPED_DEFAULT_FREE
Definition: ruby.h:1141
NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start))
static VALUE rb_thread_keys(VALUE self)
Definition: thread.c:3294
#define GetThreadShieldPtr(obj)
Definition: thread.c:4383
#define memcpy(d, s, n)
Definition: ffi_common.h:55
static VALUE thread_shield_get_mutex(VALUE self)
Definition: thread.c:4443
#define vsnprintf
Definition: subst.h:7
static void vm_check_ints_blocking(rb_thread_t *th)
Definition: thread.c:173
rb_nativethread_lock_t interrupt_lock
Definition: vm_core.h:763
void void xfree(void *)
#define RB_WAITFD_IN
Definition: io.h:47
VALUE pending_interrupt_queue
Definition: vm_core.h:757
#define RHASH_EMPTY_P(h)
Definition: ruby.h:1067
VALUE write
Definition: io.c:8616
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
Definition: thread.c:1499
static void getclockofday(struct timeval *tp)
Definition: thread.c:1097
static VALUE select_single_cleanup(VALUE ptr)
Definition: thread.c:3980
static VALUE select_single(VALUE ptr)
Definition: thread.c:3958
#define eTerminateSignal
Definition: thread.c:95
VALUE rb_get_coverages(void)
Definition: thread.c:5004
VALUE except
Definition: io.c:8616
static VALUE rb_thread_report_exc(VALUE thread)
Definition: thread.c:2735
#define TAG_RAISE
Definition: vm_core.h:168
VALUE rb_eSystemExit
Definition: error.c:756
#define NULL
Definition: _sdbm.c:102
#define fill_thread_id_string(thid, buf)
Definition: thread.c:271
#define FIX2LONG(x)
Definition: ruby.h:363
#define Qundef
Definition: ruby.h:439
static int thread_keys_i(ID key, VALUE value, VALUE ary)
Definition: thread.c:3267
static void * call_without_gvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2, int fail_if_interrupted)
Definition: thread.c:1307
int rb_notify_fd_close(int fd)
Definition: thread.c:2200
static VALUE rb_thread_variable_set(VALUE thread, VALUE id, VALUE val)
Definition: thread.c:3224
void rb_threadptr_interrupt(rb_thread_t *th)
Definition: thread.c:426
VALUE rb_thread_wakeup_alive(VALUE thread)
Definition: thread.c:2390
VALUE rb_class_inherited_p(VALUE mod, VALUE arg)
Definition: object.c:1593
void rb_define_method(VALUE klass, const char *name, VALUE(*func)(ANYARGS), int argc)
Definition: class.c:1515
void rb_set_coverages(VALUE coverages)
Definition: thread.c:5010
ID rb_to_id(VALUE)
Definition: string.c:9979
int select(int num_fds, fd_set *in_fds, fd_set *out_fds, fd_set *ex_fds, struct timeval *timeout)
VALUE rb_eThreadError
Definition: eval.c:814
static VALUE rb_thread_key_p(VALUE self, VALUE key)
Definition: thread.c:3250
VALUE rb_eArgError
Definition: error.c:763
static VALUE rb_thread_s_kill(VALUE obj, VALUE th)
Definition: thread.c:2338
static VALUE recursive_check(VALUE list, VALUE obj_id, VALUE paired_obj_id)
Definition: thread.c:4522
VALUE rb_thread_run(VALUE thread)
Definition: thread.c:2427
void rb_threadptr_signal_exit(rb_thread_t *th)
Definition: thread.c:2152
#define TYPEOF_TIMEVAL_TV_SEC
Definition: timev.h:22
char ** argv
Definition: ruby.c:184
int rb_thread_to_be_killed(VALUE thread)
Definition: thread.c:2311
int rb_thread_interrupted(VALUE thval)
Definition: thread.c:1233
int rb_threadptr_set_raised(rb_thread_t *th)
Definition: thread.c:2180
static void Init_thread_sync(void)
Definition: thread_sync.c:1237
RUBY_EXTERN void rb_write_error_str(VALUE mesg)
Definition: io.c:7393
#define RUBY_UBF_IO
Definition: intern.h:900
static enum handle_interrupt_timing rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
Definition: thread.c:1615
VALUE rb_obj_class(VALUE)
Definition: object.c:229
#define GET_VM()
Definition: vm_core.h:1507
void rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
Definition: thread.c:356
static ID recursive_key
Definition: thread.c:3028