/* CRuby thread.c -- selected fragments; the original source line numbers are
 * noted in the comments. */

/* thread.c:54-56 -- fortify checks are disabled for this file */
#undef _FORTIFY_SOURCE
#undef __USE_FORTIFY_LEVEL
#define __USE_FORTIFY_LEVEL 0

/* thread.c:68-75 -- priority range used when native priorities are not in use */
#ifndef USE_NATIVE_THREAD_PRIORITY
#define USE_NATIVE_THREAD_PRIORITY 0
#define RUBY_THREAD_PRIORITY_MAX 3
#define RUBY_THREAD_PRIORITY_MIN -3
#endif

#define THREAD_DEBUG 0

/* thread.c:94-95 -- sentinel "exceptions" used to kill/terminate threads */
#define eKillSignal INT2FIX(0)
#define eTerminateSignal INT2FIX(1)

/* thread.c:112 -- guards the inclusion of thread_pthread.c / thread_win32.c */
#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

/* thread.c:128-139 -- save the machine context before releasing the GVL so the
 * GC can scan this thread's registers and stack (register stack: IA-64 only) */
#define RB_GC_SAVE_MACHINE_REGISTER_STACK(th) \
    do{(th)->machine.register_stack_end = rb_ia64_bsp();}while(0)
/* non-IA64 variant: */
#define RB_GC_SAVE_MACHINE_REGISTER_STACK(th)

#define RB_GC_SAVE_MACHINE_CONTEXT(th)                   \
    do {                                                 \
        FLUSH_REGISTER_WINDOWS;                          \
        RB_GC_SAVE_MACHINE_REGISTER_STACK(th);           \
        setjmp((th)->machine.regs);                      \
        SET_MACHINE_STACK_END(&(th)->machine.stack_end); \
    } while (0)

/* thread.c:141-149 -- bracket a region that runs without the GVL */
#define GVL_UNLOCK_BEGIN() do { \
    rb_thread_t *_th_stored = GET_THREAD(); \
    RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
    gvl_release(_th_stored->vm);

#define GVL_UNLOCK_END() \
    gvl_acquire(_th_stored->vm, _th_stored); \
    rb_thread_set_current(_th_stored); \
} while(0)

/* thread.c:152-158 -- only_if_constant(): three variants by compiler support */
#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
#else
#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
#endif
/* fallback when __builtin_constant_p is unavailable: */
#define only_if_constant(expr, notconst) notconst

/* thread.c:160-171 -- run `exec` with the GVL released; the unblock function
 * (ubf) lets other threads interrupt the blocking call */
#define BLOCKING_REGION(exec, ubf, ubfarg, fail_if_interrupted) do { \
    rb_thread_t *__th = GET_THREAD(); \
    struct rb_blocking_region_buffer __region; \
    if (blocking_region_begin(__th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
        /* always return true unless fail_if_interrupted */ \
        !only_if_constant(fail_if_interrupted, TRUE)) { \
        exec; \
        blocking_region_end(__th, &__region); \
    }; \
} while(0)

#define RUBY_VM_CHECK_INTS_BLOCKING(th) vm_check_ints_blocking(th)

/* thread.c:193-202 -- debug tracing: thread_debug() records file/line when
 * variadic macros are available */
#ifdef HAVE_VA_ARGS_MACRO
void rb_thread_debug(const char *file, int line, const char *fmt, ...);
#define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
#define POSITION_FORMAT "%s:%d:"
#define POSITION_ARGS ,file, line
#else
void rb_thread_debug(const char *fmt, ...);
#define thread_debug rb_thread_debug
#define POSITION_FORMAT
#define POSITION_ARGS
#endif

/* thread.c:205-229 -- NON_SCALAR_THREAD_ID: fill_thread_id_string() hex-encodes
 * the native thread id into a "0x..." buffer (loop body excerpted) */
# ifdef NON_SCALAR_THREAD_ID
    for (i = 0; i < sizeof(thid); i++) {
# ifdef LITTLE_ENDIAN
        size_t j = sizeof(thid) - i - 1;
# else
        size_t j = i;
# endif
        unsigned char c = (unsigned char)((char *)&thid)[j];
        buf[2 + i * 2] = ruby_digitmap[(c >> 4) & 0xf];
        buf[3 + i * 2] = ruby_digitmap[c & 0xf];
    }
# define fill_thread_id_str(th) fill_thread_id_string((th)->thread_id, (th)->thread_id_string)
# define thread_id_str(th) ((th)->thread_id_string)
# define PRI_THREAD_ID "s"

/* thread.c:232-233 -- THREAD_DEBUG < 0 makes the debug flag a runtime toggle */
# if THREAD_DEBUG < 0
static int rb_thread_debug_enabled;
/* thread.c:242-264 -- Thread.DEBUG reader/writer exist only when
 * THREAD_DEBUG < 0; otherwise the flag is a compile-time constant */
static VALUE
rb_thread_s_debug(void)
{
    return INT2NUM(rb_thread_debug_enabled);
}

/* in rb_thread_s_debug_set(self, val): */
    rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;

# else
# define rb_thread_debug_enabled THREAD_DEBUG
# endif
#else
#define thread_debug if(0)printf
#endif

/* thread.c:270-274 -- fallback thread-id helpers when the id is a scalar */
#ifndef fill_thread_id_str
# define fill_thread_id_string(thid, buf) (thid)
# define fill_thread_id_str(th) (void)0
# define thread_id_str(th) ((void *)(th)->thread_id)
# define PRI_THREAD_ID "p"
#endif

/* thread.c:278-281 -- without a register stack (non-IA64) the third argument
 * of thread_start_func_2() is dropped */
#define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)

NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
                                        VALUE *register_stack_start));
/* thread.c:285-304 -- DEBUG_OUT(): serialize debug output on the two supported
 * thread implementations (Win32 and pthreads) */
#define DEBUG_OUT() \
  WaitForSingleObject(&debug_mutex, INFINITE); \
  printf(POSITION_FORMAT"%#lx - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
  /* ... */ \
  ReleaseMutex(&debug_mutex);

#elif defined(HAVE_PTHREAD_H)

#define DEBUG_OUT() \
  pthread_mutex_lock(&debug_mutex); \
  printf(POSITION_FORMAT"%"PRI_THREAD_ID" - %s" POSITION_ARGS, \
         fill_thread_id_string(pthread_self(), thread_id_string), buf); \
  /* ... */ \
  pthread_mutex_unlock(&debug_mutex);

#else
#error "unsupported thread type"
#endif

/* thread.c:308-331 -- rb_thread_debug(): lazily initializes debug_mutex before
 * printing through DEBUG_OUT() */
static int debug_mutex_initialized = 1;
static rb_nativethread_lock_t debug_mutex;

void
rb_thread_debug(
#ifdef HAVE_VA_ARGS_MACRO
                const char *file, int line,
#endif
                const char *fmt, ...)
{
    /* ... */
#ifdef NON_SCALAR_THREAD_ID
    /* ... */
#endif
    if (!rb_thread_debug_enabled) return;

    if (debug_mutex_initialized == 1) {
        debug_mutex_initialized = 0;
        native_mutex_initialize(&debug_mutex);
    }
    /* ... */
}

/* thread.c:349-371 -- the public rb_nativethread_lock_* wrappers delegate to
 * the platform layer: */
/* rb_nativethread_lock_initialize(lock): */  native_mutex_initialize(lock);
/* rb_nativethread_lock_destroy(lock):    */  native_mutex_destroy(lock);
/* rb_nativethread_lock_lock(lock):       */  native_mutex_lock(lock);
/* rb_nativethread_lock_unlock(lock):     */  native_mutex_unlock(lock);
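/*
 * Hedged usage sketch (not from thread.c): the four wrappers above form the
 * public rb_nativethread_lock_* API from ruby/thread_native.h.  A C extension
 * protecting its own shared state could use them as below; cache_lock and the
 * cache_* functions are hypothetical names.
 */
#include <ruby/thread_native.h>

static rb_nativethread_lock_t cache_lock;

static void
cache_setup(void)
{
    rb_nativethread_lock_initialize(&cache_lock);
}

static void
cache_update(void)
{
    rb_nativethread_lock_lock(&cache_lock);
    /* ... mutate the shared cache while holding the native lock ... */
    rb_nativethread_lock_unlock(&cache_lock);
}

static void
cache_teardown(void)
{
    rb_nativethread_lock_destroy(&cache_lock);
}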
/* thread.c:373-397 -- in set_unblock_function(): with fail_if_interrupted the
 * unblock function is installed only if no interrupt is already pending */
    if (fail_if_interrupted) {

/* thread.c:438-452 -- terminate_i(): every thread except the main thread gets
 * the terminate interrupt; the main thread is only logged */
    if (th != main_thread) {
        /* ... */
    }
    else {
        thread_debug("terminate_i: main thread (%p)\n", (void *)th);
    }

/* thread.c:460-470 -- in rb_threadptr_unlock_all_locking_mutexes(): */
        if (err) rb_bug("invalid keeping_mutexes: %s", err);

/* thread.c:473-495 -- rb_thread_terminate_all(): must run on the main thread;
 * repeatedly asks the remaining threads to terminate and sleeps until they do */
    volatile int sleeping = 0;

    /* reached when called from a thread other than the main thread: */
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
               /* ... */);

    thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
/* thread.c:524 -- in thread_cleanup_func_before_exec(), __ia64 only: */
    th->machine.register_stack_start = th->machine.register_stack_end = 0;

/* thread.c:545 -- in thread_cleanup_func() (skipped at fork, where native
 * resources cannot be released safely): */
    native_thread_destroy(th);

/* thread.c:552-556 -- ruby_thread_init_stack() delegates to the platform layer: */
    native_thread_init_stack(th);

/* thread.c:573 -- in thread_do_start(): */
    native_set_thread_name(th);
/* thread.c:582-700 -- thread_start_func_2(): entry point of every thread
 * created by Ruby (see the NOINLINE prototype above).  Selected lines: */
# ifdef USE_SIGALTSTACK
    rb_register_sigaltstack(th);        /* alternate stack for stack-overflow SEGV */
# endif

    /* reached if this is ever called for the main thread: */
        rb_bug("thread_start_func_2 must not be used for main thread");

    ruby_thread_set_native(th);
    /* __ia64 only: */
    th->machine.register_stack_start = register_stack_start;

        gvl_acquire(th->vm, th);        /* a new thread starts by taking the GVL */
        thread_debug("thread start (get lock): %p\n", (void *)th);

        /* after the thread body returns or raises, decide how to handle errinfo: */
            errinfo = th->errinfo;
            /* ... */
            else if (th->vm->thread_abort_on_exception ||
                     /* ... */) {
            /* ... */
            else if (th->report_on_exception) {

    /* consistency check before tearing the thread down: */
    if (th->locking_mutex != Qfalse) {
        rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
               (void *)th, th->locking_mutex);
    }

    /* wake every thread blocked in Thread#join on this one: */
        join_list = th->join_list;
        /* ... */
            join_list = join_list->next;

    native_mutex_lock(&th->vm->thread_destruct_lock);
    /* ... */
    th->vm->running_thread = NULL;
    native_mutex_unlock(&th->vm->thread_destruct_lock);

/* thread.c:702-745 -- thread_create_core(): refuses a frozen ThreadGroup, then
 * initializes the rb_thread_t and asks the platform layer to start it */
        rb_raise(rb_eThreadError,
                 "can't start a new thread (frozen ThreadGroup)");
    /* ... */
    native_cond_initialize(&th->interrupt_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    /* ... */
    err = native_thread_create(th);
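/*
 * Hedged usage sketch (not from thread.c): thread_create_core() above is what
 * the public rb_thread_create() reaches; spawning a Ruby thread from C looks
 * roughly like this.  worker_body() and spawn_worker() are hypothetical names.
 */
static VALUE
worker_body(void *arg)              /* runs in the newly created Ruby thread */
{
    VALUE callable = (VALUE)arg;
    return rb_funcall(callable, rb_intern("call"), 0);
}

static VALUE
spawn_worker(VALUE callable)
{
    return rb_thread_create(worker_body, (void *)callable);
}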
/* thread.c:745 */
#define threadptr_initialized(th) ((th)->first_args != 0)

/* thread.c:830 -- "infinite" timeout used by Thread#join */
#define DELAY_INFTY 1E30

/* thread.c:847 -- in remove_from_join_list(): unlink the waiter for `th` */
        if ((*p)->th == th) {

/* thread.c:880-938 -- thread_join(): joining yourself or the main thread is a
 * ThreadError; a Fixnum or THROW_DATA left in errinfo indicates a VM bug */
    if (th == target_th) {
        /* raises ThreadError: target must not be the current thread */
    }
    if (GET_VM()->main_thread == target_th) {
        /* raises ThreadError: target must not be the main thread */
    }
    /* ... */
            rb_bug("thread_join: Fixnum (%d) should not reach here.",
                   FIX2INT(err));
    /* ... */
            rb_bug("thread_join: THROW_DATA should not reach here.");
    /* ... */
    return target_th->self;
/* thread.c:1037-1043 -- clamp range of timeval.tv_sec for the current time_t */
#if SIGNEDNESS_OF_TIME_T < 0    /* signed */
# define TIMEVAL_SEC_MAX SIGNED_INTEGER_MAX(TYPEOF_TIMEVAL_TV_SEC)
# define TIMEVAL_SEC_MIN SIGNED_INTEGER_MIN(TYPEOF_TIMEVAL_TV_SEC)
#elif SIGNEDNESS_OF_TIME_T > 0  /* unsigned */
# define TIMEVAL_SEC_MAX ((TYPEOF_TIMEVAL_TV_SEC)(~(unsigned_time_t)0))
# define TIMEVAL_SEC_MIN ((TYPEOF_TIMEVAL_TV_SEC)0)
#endif

/* thread.c:1046-1070 -- double2timeval(): convert seconds-as-double into a
 * struct timeval, saturating at the representable range */
    const double TIMEVAL_SEC_MAX_PLUS_ONE = (2*(double)(TIMEVAL_SEC_MAX/2+1));

    if (TIMEVAL_SEC_MAX_PLUS_ONE <= d) {
        time.tv_sec = TIMEVAL_SEC_MAX;
        /* ... */
    }
    else if (d <= TIMEVAL_SEC_MIN) {
        time.tv_sec = TIMEVAL_SEC_MIN;
        /* ... */
    }
    else {
        /* ... */
        time.tv_usec = (int)((d - (time_t)d) * 1e6);
    }

/* thread.c:1074-1096 -- sleep_forever(): sleep until the status changes,
 * rechecking unless spurious wakeups are allowed */
    while (th->status == status) {
        /* ... */
        native_sleep(th, 0);
        /* ... */
        if (!spurious_check)
            break;
    }
    th->status = prev_status;
/* thread.c:1099 -- getclockofday() prefers the monotonic clock when available */
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)

/* thread.c:1108-1155 -- sleep_timeval(): the absolute deadline saturates at
 * TIMEVAL_SEC_MAX; loop around native_sleep() until the deadline passes */
        to.tv_sec = TIMEVAL_SEC_MAX;
    /* ... */
    if (to.tv_sec == TIMEVAL_SEC_MAX)
    /* ... */
        native_sleep(th, &tv);
    /* ... */
        if (!spurious_check)
            break;
    th->status = prev_status;

/* thread.c:1172 -- in rb_thread_sleep_deadly_allow_spurious_wakeup(): */
    thread_debug("rb_thread_sleep_deadly_allow_spurious_wakeup\n");

/* thread.c:1179 -- timeofday() also prefers CLOCK_MONOTONIC */
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)

/* thread.c:1248-1258 -- in rb_thread_schedule_limits(): give up the GVL so
 * other ready threads can run */
            gvl_yield(th->vm, th);
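/*
 * Hedged usage sketch (not from thread.c): gvl_yield() above is what
 * Thread.pass reaches through rb_thread_schedule(); C code waiting on a flag
 * set by another Ruby thread can yield the GVL the same way.  spin_until() and
 * the flag are hypothetical.
 */
static void
spin_until(volatile int *flag)
{
    while (!*flag) {
        rb_thread_schedule();   /* let other runnable Ruby threads take the GVL */
    }
}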
/* thread.c:1268-1289 -- blocking_region_begin(): mark the thread stopped, save
 * the machine context for the GC, then release the GVL */
        thread_debug("enter blocking region (%p)\n", (void *)th);
        /* ... */
        gvl_release(th->vm);

/* thread.c:1292-1305 -- blocking_region_end(): reacquire the GVL and undo the
 * blocking-region bookkeeping */
    gvl_acquire(th->vm, th);
    /* ... */
    thread_debug("leave blocking region (%p)\n", (void *)th);
    unregister_ubf_list(th);
    th->blocking_region_buffer = 0;

/* thread.c:1308-1332 -- call_without_gvl(): common backend of
 * rb_thread_call_without_gvl{,2}(); runs func(data1) inside BLOCKING_REGION
 * and preserves errno across the GVL re-acquisition */
    int saved_errno = 0;
    /* ... */
    BLOCKING_REGION({
        val = func(data1);
        saved_errno = errno;
    }, ubf, data2, fail_if_interrupted);

    if (!fail_if_interrupted) {
        RUBY_VM_CHECK_INTS_BLOCKING(th);   /* may raise pending interrupts */
    }

    errno = saved_errno;
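/*
 * Hedged usage sketch (not from thread.c): how an extension typically releases
 * the GVL around a blocking syscall with rb_thread_call_without_gvl()
 * (declared in ruby/thread.h), the public entry point over call_without_gvl()
 * above.  blocking_read() and struct read_args are hypothetical.
 */
#include <ruby/thread.h>
#include <unistd.h>

struct read_args { int fd; void *buf; size_t len; ssize_t result; };

static void *
blocking_read(void *p)                   /* runs with the GVL released */
{
    struct read_args *a = p;
    a->result = read(a->fd, a->buf, a->len);
    return NULL;
}

static ssize_t
read_without_gvl(int fd, void *buf, size_t len)
{
    struct read_args a = { fd, buf, len, -1 };
    /* RUBY_UBF_IO lets Thread#kill and signals interrupt the wait */
    rb_thread_call_without_gvl(blocking_read, &a, RUBY_UBF_IO, NULL);
    return a.result;
}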
/* thread.c:1434-1470 -- rb_thread_io_blocking_region(): like call_without_gvl()
 * but keyed to a file descriptor so IO#close can interrupt it; the callee's
 * errno is preserved across the GVL re-acquisition */
    volatile int saved_errno = 0;
    /* ... */
        saved_errno = errno;
    /* ... */
    errno = saved_errno;

/* thread.c:1505-1522 -- rb_thread_call_with_gvl(): must be called from a Ruby
 * thread that is currently running without the GVL */
        fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
    /* ... */
        rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
/* thread.c:1615-1640 -- in rb_threadptr_pending_interrupt_check_mask(): walk
 * the Thread.handle_interrupt mask stack from the innermost entry outwards and
 * match the pending error against each masked class */
    for (i=0; i<mask_stack_len; i++) {
        mask = mask_stack[mask_stack_len-(i+1)];
        /* ... */
        for (j=0; j<ancestors_len; j++) {
            VALUE klass = ancestors_ptr[j];
            /* ... */
        }
    }

/* thread.c:1683 -- in rb_threadptr_pending_interrupt_deque(): */
        switch (mask_timing) {

/* thread.c:2013-2022 -- in threadptr_get_interrupts(): atomically fetch and
 * clear the unmasked interrupt bits */
    } while (old != interrupt);

/* thread.c:2025-2090 -- rb_threadptr_execute_interrupts(): dispatch postponed
 * jobs, pending interrupts, traps and the timer quantum in turn */
    int postponed_job_interrupt = 0;
    /* ... */
        int timer_interrupt;
        int pending_interrupt;
        /* ... */
        if (postponed_job_interrupt) {
            rb_postponed_job_flush(th->vm);
        }
        /* ... */
        if (timer_interrupt) {
            unsigned long limits_us = TIME_QUANTUM_USEC;
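/*
 * Hedged usage sketch (not from thread.c): long-running C code cooperates with
 * the interrupt machinery above by polling rb_thread_check_ints(), so that
 * Thread#kill, Thread#raise, signals and the timer quantum are honored.
 * crunch_chunk() is a hypothetical unit of CPU-bound work.
 */
static void
crunch_all(long nchunks)
{
    long i;
    for (i = 0; i < nchunks; i++) {
        crunch_chunk(i);          /* hypothetical */
        rb_thread_check_ints();   /* may raise a pending exception */
    }
}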
/* thread.c:2161-2170 -- an alternate signal stack handles SIGSEGV where POSIX
 * signals and sigaltstack are available */
#if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
#define USE_SIGALTSTACK
#endif
#ifdef USE_SIGALTSTACK

/* thread.c:2208 -- in rb_notify_fd_close(): find waiters blocked on fd */
        if (wfd->fd == fd) {

/* thread.c:2262 -- in thread_raise_m() (Thread#raise): raising on the current
 * thread behaves like Kernel#raise, so pending interrupts are checked */
    if (th == target_th) {

/* thread.c:2455 -- rb_thread_stop() refuses to stop the only thread: */
             "stopping only thread\n\tnote: use sleep to stop forever");

/* thread.c:2801 -- in thread_status_name(): */
        if (detail) return "sleep_forever";

/* thread.c:2965-2986 -- Thread#name=: the name is propagated to the OS thread
 * where the platform supports it (three sites guard on SET_ANOTHER_THREAD_NAME) */
#ifdef SET_ANOTHER_THREAD_NAME
/* ... */
#ifdef SET_ANOTHER_THREAD_NAME
/* ... */
#if defined(SET_ANOTHER_THREAD_NAME)
        SET_ANOTHER_THREAD_NAME(th->thread_id, s);
#endif

/* thread.c:3033 -- in threadptr_local_aref(): the recursion-guard hash hides
 * behind a reserved key */
    if (id == recursive_key) {

/* thread.c:3120 -- in one of the Thread#[] / Thread#fetch accessors: a key that
 * was never interned cannot name an existing local */
    if (!id) return Qnil;

/* thread.c:3127-3133 -- in threadptr_local_aset(): */
    if (id == recursive_key) {
        /* ... */
    }
    else if (NIL_P(val)) {
        /* ... */
    }

/* thread.c:3369 -- in rb_thread_variable_p(): the locals hash may not have a
 * backing st_table yet */
    if (!RHASH(locals)->ntbl)

/* thread.c:3437-3439 -- in rb_thread_priority_set(): */
#if USE_NATIVE_THREAD_PRIORITY
    native_thread_apply_priority(th);
/* thread.c:3455-3605 -- own rb_fdset_t implementation (select() path) */
#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)

/* rb_fd_init(): */
    fds->fdset = ALLOC(fd_set);
    FD_ZERO(fds->fdset);

/* rb_fd_init_copy(): size the destination to cover the source set */
    size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);

    if (size < sizeof(fd_set))
        size = sizeof(fd_set);
    dst->maxfd = src->maxfd;
    /* ... */
    memcpy(dst->fdset, src->fdset, size);

/* rb_fd_term(): */
    if (fds->fdset) xfree(fds->fdset);

/* rb_fd_zero(): */
        MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));

/* rb_fd_set(): grow the bitmap on demand, never below sizeof(fd_set) */
    size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
    size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);

    if (m < sizeof(fd_set)) m = sizeof(fd_set);
    if (o < sizeof(fd_set)) o = sizeof(fd_set);

    if (m > o) {
        fds->fdset = xrealloc(fds->fdset, m);
        memset((char *)fds->fdset + o, 0, m - o);
    }
    if (n >= fds->maxfd) fds->maxfd = n + 1;

/* rb_fd_clr(): */
    if (n >= fds->maxfd) return;

/* rb_fd_isset(): */
    if (n >= fds->maxfd) return 0;
    return FD_ISSET(n, fds->fdset) != 0;

/* rb_fd_copy(): copy from a plain fd_set */
    size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);

    if (size < sizeof(fd_set)) size = sizeof(fd_set);
    /* ... */
    dst->fdset = xrealloc(dst->fdset, size);
    memcpy(dst->fdset, src, size);

/* rb_fd_dup(): copy from another rb_fdset_t */
    size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);

    if (size < sizeof(fd_set))
        size = sizeof(fd_set);
    dst->maxfd = src->maxfd;
    dst->fdset = xrealloc(dst->fdset, size);
    memcpy(dst->fdset, src->fdset, size);

#ifdef __native_client__
int select(int nfds, fd_set *readfds, fd_set *writefds,
           fd_set *exceptfds, struct timeval *timeout);
#endif

/* rb_fd_select(): hand the raw fd_sets to the system select() */
    return select(n, r, w, e, timeout);
/* thread.c:3607-3619 -- with GCC >= 6, tell the optimizer that an
 * uninitialized set is empty; FD_* is redirected to the rb_fd_* wrappers */
#if defined __GNUC__ && __GNUC__ >= 6
#define rb_fd_no_init(fds) ASSUME(!(fds)->maxfd)
#endif

#define FD_ZERO(f)      rb_fd_zero(f)
#define FD_SET(i, f)    rb_fd_set((i), (f))
#define FD_CLR(i, f)    rb_fd_clr((i), (f))
#define FD_ISSET(i, f)  rb_fd_isset((i), (f))

#elif defined(_WIN32)

/* rb_fd_init() (Windows): an fd_set plus an explicit capacity */
    set->capa = FD_SETSIZE;
    set->fdset = ALLOC(fd_set);
    FD_ZERO(set->fdset);

/* rb_fd_set() (Windows): sockets live in fd_array; grow capa in FD_SETSIZE
 * steps when the array fills up */
    for (i = 0; i < set->fdset->fd_count; i++) {
        if (set->fdset->fd_array[i] == s) {
            return;     /* already present */
        }
    }
    if (set->fdset->fd_count >= (unsigned)set->capa) {
        set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
        set->fdset = xrealloc(set->fdset, sizeof(unsigned int) + sizeof(SOCKET) * set->capa);
    }
    set->fdset->fd_array[set->fdset->fd_count++] = s;

#define FD_ZERO(f)      rb_fd_zero(f)
#define FD_SET(i, f)    rb_fd_set((i), (f))
#define FD_CLR(i, f)    rb_fd_clr((i), (f))
#define FD_ISSET(i, f)  rb_fd_isset((i), (f))

#endif

#ifndef rb_fd_no_init
#define rb_fd_no_init(fds) (void)(fds)
#endif

/* thread.c:3680-3688 -- retryable(): EINTR (and ERESTART where defined) means
 * the wait should simply be retried */
    if (e == EINTR) return TRUE;
#ifdef ERESTART
    if (e == ERESTART) return TRUE;
#endif
/* thread.c:3690-3691 -- restore an fd set from its saved copy before a retry */
#define restore_fdset(fds1, fds2) \
    ((fds1) ? rb_fd_dup(fds1, fds2) : (void)0)

/* thread.c:3695-3703 -- in update_timeval(): refresh the remaining wait time
 * from the absolute limit */
    timeout->tv_sec = (time_t)d;

/* thread.c:3705-3760 -- do_select(): retries around native_fd_select() restore
 * the original sets and shrink the timeout toward the deadline */
#define do_select_update() \
    (restore_fdset(readfds, &orig_read), \
     restore_fdset(writefds, &orig_write), \
     restore_fdset(exceptfds, &orig_except), \
     update_timeval(timeout, limit), \
     TRUE)

    if (timeout) {
        limit += (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
        wait_rest = *timeout;
        timeout = &wait_rest;
    }

#define fd_init_copy(f) \
    (f##fds) ? rb_fd_init_copy(&orig_##f, f##fds) : rb_fd_no_init(&orig_##f)

    /* inside the retry loop, the actual wait runs without the GVL: */
        result = native_fd_select(n, readfds, writefds, exceptfds,
                                  /* ... */);

#define fd_term(f) if (f##fds) rb_fd_term(&orig_##f)

/* thread.c:3763-3782 -- rb_thread_wait_fd_rw(): */
    thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
    /* ... */
    thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");

/* thread.c:3793-3817 -- rb_thread_fd_select(): with no sets a plain sleep is
 * enough; otherwise defer to do_select() */
    if (!read && !write && !except) {
        /* ... */
    }
    return do_select(max, read, write, except, timeout);
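/*
 * Hedged usage sketch (not from thread.c): waiting for a descriptor to become
 * readable through the rb_fdset_t wrappers and rb_thread_fd_select() shown
 * above, instead of a raw fd_set.  wait_readable() is a hypothetical name.
 */
#include <ruby/intern.h>

static int
wait_readable(int fd, struct timeval *tv)
{
    rb_fdset_t set;
    int n;

    rb_fd_init(&set);
    rb_fd_set(fd, &set);
    n = rb_thread_fd_select(fd + 1, &set, NULL, NULL, tv);
    rb_fd_term(&set);
    return n > 0;   /* > 0: ready, 0: timed out, < 0: error */
}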
/* thread.c:3824-3833 -- on Linux the single-fd wait uses poll(); these masks
 * map poll revents back onto select(2)-style read/write/priority results */
#if defined(HAVE_POLL) && defined(__linux__)

#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

/* thread.c:3836-3861 -- ppoll() shim for systems without it: convert the
 * timespec to milliseconds (overflow falls back to an unlimited wait) and
 * call poll() */
static int
ppoll(struct pollfd *fds, nfds_t nfds,
      const struct timespec *ts, const sigset_t *sigmask)
{
        if (ts->tv_sec > INT_MAX/1000)
            /* ... */
        tmp = (int)(ts->tv_sec * 1000);
        tmp2 = (int)(ts->tv_nsec / (1000 * 1000));
        if (INT_MAX - tmp < tmp2)
            /* ... */
        timeout_ms = (int)(tmp + tmp2);
    /* ... */
    return poll(fds, nfds, timeout_ms);
}

/* thread.c:3864-3872 -- update_timespec(): refresh the remaining wait time */
static void
update_timespec(struct timespec *timeout, double limit)
{
    /* ... */
        timeout->tv_sec = (long)d;
    /* ... */
}

/* thread.c:3884-3932 -- rb_wait_for_single_fd(), poll path: retry on EINTR,
 * report POLLNVAL as an error and translate the revents bits */
#define poll_update() \
    (update_timespec(timeout, limit), \
     TRUE)

    fds.events = (short)events;

    do {
        /* ... */
            result = ppoll(&fds, 1, timeout, NULL);
            if (result < 0) lerrno = errno;
        /* ... */
    } while (result < 0 && retryable(errno = lerrno) && poll_update());
    if (result < 0) return -1;

    if (fds.revents & POLLNVAL) {
        /* ... */
    }
    if (fds.revents & POLLIN_SET)
        result |= RB_WAITFD_IN;
    if (fds.revents & POLLOUT_SET)
        result |= RB_WAITFD_OUT;
    if (fds.revents & POLLEX_SET)
        result |= RB_WAITFD_PRI;
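/*
 * Hedged usage sketch (not from thread.c): the poll()-backed path above is
 * what rb_wait_for_single_fd() (ruby/io.h) executes; a caller passes the
 * RB_WAITFD_* events it cares about and inspects the returned mask.
 * writable_within() is a hypothetical name.
 */
#include <ruby/io.h>

static int
writable_within(int fd, long seconds)
{
    struct timeval tv;
    int revents;

    tv.tv_sec = seconds;
    tv.tv_usec = 0;
    revents = rb_wait_for_single_fd(fd, RB_WAITFD_OUT, &tv);
    return revents > 0 && (revents & RB_WAITFD_OUT);
}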
/* thread.c:4017-4022 -- rb_gc_set_stack_end(): publish a conservative stack
 * end for the GC */
#ifdef USE_CONSERVATIVE_STACK_END
    *stack_end_p = &stack_end;

/* thread.c:4055-4070 -- in timer_thread_function(), inside a disabled
 * profiling hook: */
    if (vm->prove_profile.enable) {
        /* ... */
        if (vm->during_gc) {

/* thread.c:4072-4091 -- timer-thread lifecycle helpers */
/* rb_thread_stop_timer_thread(): */
    if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
        native_reset_timer_thread();
    }
/* rb_thread_reset_timer_thread(): */
    native_reset_timer_thread();
/* rb_thread_start_timer_thread(): */
    rb_thread_create_timer_thread();

/* thread.c:4093-4170 -- fork handling */
#if defined(HAVE_WORKING_FORK)
/* clear_coverage(): drop coverage data in the child */
static void
clear_coverage(void)
{
    /* ... */
    if (RTEST(coverages)) {

/* terminate_atfork_i(): every thread except the forking one abandons its
 * mutexes and is marked killed */
    if (th != current_th) {
        rb_mutex_abandon_keeping_mutexes(th);
        rb_mutex_abandon_locking_mutex(th);

/* rb_thread_atfork(): */
    rb_thread_atfork_internal(th, terminate_atfork_i);

/* terminate_atfork_before_exec_i(): */
    if (th != current_th) {
/* rb_thread_atfork_before_exec(): */
    rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);

/* thread.c:4188-4191 -- thgroup_memsize(): */
    return sizeof(struct thgroup);

/* thread.c:4355 -- ThreadGroup#add refuses to move a thread out of an
 * enclosed group: */
             "can't move from the enclosed thread group");
/* thread.c:4368-4386 -- ThreadShield: the waiter count is packed into FL_USER*
 * bits of the shield object's flags */
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY   /* tail of thread_shield_data_type */

#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
#define THREAD_SHIELD_WAITING_MASK (FL_USER0|FL_USER1|FL_USER2|FL_USER3|FL_USER4|FL_USER5|FL_USER6|FL_USER7|FL_USER8|FL_USER9|FL_USER10|FL_USER11|FL_USER12|FL_USER13|FL_USER14|FL_USER15|FL_USER16|FL_USER17|FL_USER18|FL_USER19)
#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
#define rb_thread_shield_waiting(b) (int)((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT)

/* rb_thread_shield_new(): */
    return thread_shield;

/* rb_thread_shield_wait(): a destroyed shield has no mutex left */
    if (!mutex) return Qfalse;

/* thread.c:4524-4528 -- object ids are compared as Bignums when they do not
 * fit in a long */
#if SIZEOF_LONG == SIZEOF_VOIDP
 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
 #define OBJ_ID_EQL(obj_id, other) (RB_TYPE_P((obj_id), T_BIGNUM) ? \
   rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
#endif

/* thread.c:4530-4600 -- recursion guard used by rb_exec_recursive*(): the list
 * maps obj_id to a paired obj_id (or a hash of them) */
/* recursive_check(): */
        if (paired_obj_id) {
/* recursive_push(): */
        VALUE other_paired_obj = pair_list;
/* recursive_pop(): */
        if (pair_list == Qundef) {

/* thread.c:4640-4670 -- in exec_recursive(): outermost calls tag the list with
 * the recursive_key marker and unwind it again afterwards */
    if (outer && !outermost) {
        /* ... */
    }
    /* ... */
        if (!recursive_pop(p.list, p.objid, p.pairid)) goto invalid;
        if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
        /* ... */
        if (result == p.list) {
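/*
 * Hedged usage sketch (not from thread.c): the recursive_* helpers above back
 * rb_exec_recursive(); a typical caller guards a user-visible recursion such
 * as an #inspect implementation.  inspect_body(), my_inspect() and the
 * "#<...>" placeholder text are hypothetical.
 */
static VALUE
inspect_body(VALUE obj, VALUE arg, int recur)
{
    if (recur) return rb_str_new_cstr("#<...>");      /* cycle detected */
    return rb_sprintf("#<obj %"PRIsVALUE">", obj);    /* non-recursive path */
}

static VALUE
my_inspect(VALUE obj)
{
    return rb_exec_recursive(inspect_body, obj, 0);
}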
/* thread.c:4787 -- within Init_Thread() every rb_intern() uses a constant string */
#define rb_intern(str) rb_intern_const(str)

/* thread.c:4811-4894 -- Init_Thread(): Thread.DEBUG only exists when
 * THREAD_DEBUG < 0; the main thread takes the GVL and the timer thread is
 * created here */
#if THREAD_DEBUG < 0
    /* ... */
#endif
    /* ... */
    recursive_key = rb_intern("__recursive_key__");
    /* ... */
        gvl_acquire(th->vm, th);
        native_cond_initialize(&th->interrupt_cond,
                               RB_CONDATTR_CLOCK_MONOTONIC);
        /* ... */
    rb_thread_create_timer_thread();

    (void)native_mutex_trylock;   /* suppress "defined but not used" warnings */

/* thread.c:4905-4940 -- debug_deadlock_check(): build a report of every thread
 * and the mutex it is blocked on */
    rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
                /* ... */);
    /* ... */
            native_mutex_lock(&mutex->lock);
            /* ... */
            native_mutex_unlock(&mutex->lock);
            rb_str_catf(msg, " mutex:%p cond:%d", mth, waiting);
            /* ... */
                rb_str_catf(msg, "\n depended by: tb_thread_id:%p", list->th);

/* thread.c:4945-4980 -- rb_check_deadlock(): if every living thread sleeps
 * forever, raise the fatal deadlock error on the main thread */
            native_mutex_lock(&mutex->lock);
            /* ... */
            native_mutex_unlock(&mutex->lock);
    /* ... */
        argv[1] = rb_str_new2("No live threads left. Deadlock?");

/* thread.c:5003-5013 -- coverage storage lives on the VM */
/* rb_get_coverages(): */
    return GET_VM()->coverages;
/* rb_set_coverages(): */
    GET_VM()->coverages = coverages;

/* thread.c:5055-5070 -- in ruby_kill(): */
    err = kill(pid, sig);