Ruby  2.4.2p198(2017-09-14revision59899)
cont.c
Go to the documentation of this file.
1 /**********************************************************************
2 
3  cont.c -
4 
5  $Author: nagachika $
6  created at: Thu May 23 09:03:43 2007
7 
8  Copyright (C) 2007 Koichi Sasada
9 
10 **********************************************************************/
11 
12 #include "internal.h"
13 #include "vm_core.h"
14 #include "gc.h"
15 #include "eval_intern.h"
16 
17 /* FIBER_USE_NATIVE enables Fiber performance improvement using system
18  * dependent method such as make/setcontext on POSIX system or
19  * CreateFiber() API on Windows.
20  * This hack makes Fiber context switching faster (x2 or more).
21  * However, it decreases the maximum number of Fibers. For example, on a
22  * 32bit POSIX OS, only tens of thousands of Fibers can be created.
23  *
24  * Details are reported in the paper "A Fast Fiber Implementation for Ruby 1.9"
25  * in Proc. of 51st Programming Symposium, pp.21--28 (2010) (in Japanese).
26  */
27 
28 #if !defined(FIBER_USE_NATIVE)
29 # if defined(HAVE_GETCONTEXT) && defined(HAVE_SETCONTEXT)
30 # if 0
31 # elif defined(__NetBSD__)
32 /* In our experience, NetBSD doesn't support using setcontext() and pthread
33  * simultaneously. This is because pthread_self(), TLS and other information
34  * are represented by stack pointer (higher bits of stack pointer).
35  * TODO: check such constraint on configure.
36  */
37 # define FIBER_USE_NATIVE 0
38 # elif defined(__sun)
39 /* Disabled on Solaris because resuming any Fiber caused SEGV, for some
40  * unknown reason.
41 # define FIBER_USE_NATIVE 0
42 # elif defined(__ia64)
43 /* At least, Linux/ia64's getcontext(3) doesn't save register window.
44  */
45 # define FIBER_USE_NATIVE 0
46 # elif defined(__GNU__)
47 /* GNU/Hurd doesn't fully support getcontext, setcontext, makecontext
48  * and swapcontext functions. Disabling their usage till support is
49  * implemented. More info at
50  * http://darnassus.sceen.net/~hurd-web/open_issues/glibc/#getcontext
51  */
52 # define FIBER_USE_NATIVE 0
53 # else
54 # define FIBER_USE_NATIVE 1
55 # endif
56 # elif defined(_WIN32)
57 # define FIBER_USE_NATIVE 1
58 # endif
59 #endif
60 #if !defined(FIBER_USE_NATIVE)
61 #define FIBER_USE_NATIVE 0
62 #endif
63 
64 #if FIBER_USE_NATIVE
65 #ifndef _WIN32
66 #include <unistd.h>
67 #include <sys/mman.h>
68 #include <ucontext.h>
69 #endif
70 #define RB_PAGE_SIZE (pagesize)
71 #define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
72 static long pagesize;
73 #endif /*FIBER_USE_NATIVE*/
74 
75 #define CAPTURE_JUST_VALID_VM_STACK 1
76 
81 };
82 
83 typedef struct rb_context_struct {
85  int argc;
86  VALUE self;
89 #ifdef CAPTURE_JUST_VALID_VM_STACK
90  size_t vm_stack_slen; /* length of stack (head of th->stack) */
91  size_t vm_stack_clen; /* length of control frames (tail of th->stack) */
92 #endif
93  struct {
96  size_t stack_size;
97 #ifdef __ia64
98  VALUE *register_stack;
99  VALUE *register_stack_src;
100  int register_stack_size;
101 #endif
102  } machine;
103  rb_thread_t saved_thread; /* selected properties of GET_THREAD() (see cont_save_thread) */
107 } rb_context_t;
108 
113 };
114 
115 #if FIBER_USE_NATIVE && !defined(_WIN32)
116 #define MAX_MACHINE_STACK_CACHE 10
117 static int machine_stack_cache_index = 0;
118 typedef struct machine_stack_cache_struct {
119  void *ptr;
120  size_t size;
121 } machine_stack_cache_t;
122 static machine_stack_cache_t machine_stack_cache[MAX_MACHINE_STACK_CACHE];
123 static machine_stack_cache_t terminated_machine_stack;
124 #endif
125 
130  /* If a fiber invokes "transfer",
131  * then this fiber can't "resume" any more after that.
132  * You shouldn't mix "transfer" and "resume".
133  */
135 
136 #if FIBER_USE_NATIVE
137 #ifdef _WIN32
138  void *fib_handle;
139 #else
140  ucontext_t context;
141  /* Because context.uc_stack.ss_sp and context.uc_stack.ss_size
142  * are not necessarily valid after makecontext() or swapcontext(),
143  * they are saved in these variables for later use.
144  */
145  void *ss_sp;
146  size_t ss_size;
147 #endif
148 #endif
149 };
150 
155 
156 #define GetContPtr(obj, ptr) \
157  TypedData_Get_Struct((obj), rb_context_t, &cont_data_type, (ptr))
158 
159 #define GetFiberPtr(obj, ptr) do {\
160  TypedData_Get_Struct((obj), rb_fiber_t, &fiber_data_type, (ptr)); \
161  if (!(ptr)) rb_raise(rb_eFiberError, "uninitialized fiber"); \
162 } while (0)
163 
164 NOINLINE(static VALUE cont_capture(volatile int *volatile stat));
165 
166 #define THREAD_MUST_BE_RUNNING(th) do { \
167  if (!(th)->tag) rb_raise(rb_eThreadError, "not running thread"); \
168  } while (0)
169 
170 static void
171 cont_mark(void *ptr)
172 {
173  RUBY_MARK_ENTER("cont");
174  if (ptr) {
175  rb_context_t *cont = ptr;
176  rb_gc_mark(cont->value);
177 
180 
181  if (cont->vm_stack) {
182 #ifdef CAPTURE_JUST_VALID_VM_STACK
184  cont->vm_stack + cont->vm_stack_slen + cont->vm_stack_clen);
185 #else
187  cont->vm_stack, cont->saved_thread.stack_size);
188 #endif
189  }
190 
191  if (cont->machine.stack) {
192  if (cont->type == CONTINUATION_CONTEXT) {
193  /* cont */
195  cont->machine.stack + cont->machine.stack_size);
196  }
197  else {
198  /* fiber */
199  rb_thread_t *th;
200  rb_fiber_t *fib = (rb_fiber_t*)cont;
201  GetThreadPtr(cont->saved_thread.self, th);
202  if ((th->fiber != fib) && fib->status == RUNNING) {
204  cont->machine.stack + cont->machine.stack_size);
205  }
206  }
207  }
208 #ifdef __ia64
209  if (cont->machine.register_stack) {
210  rb_gc_mark_locations(cont->machine.register_stack,
211  cont->machine.register_stack + cont->machine.register_stack_size);
212  }
213 #endif
214  }
215  RUBY_MARK_LEAVE("cont");
216 }
217 
218 static void
219 cont_free(void *ptr)
220 {
221  RUBY_FREE_ENTER("cont");
222  if (ptr) {
223  rb_context_t *cont = ptr;
225 #if FIBER_USE_NATIVE
226  if (cont->type == CONTINUATION_CONTEXT) {
227  /* cont */
228  ruby_xfree(cont->ensure_array);
230  }
231  else {
232  /* fiber */
233  rb_fiber_t *fib = (rb_fiber_t*)cont;
234  const rb_thread_t *const th = GET_THREAD();
235 #ifdef _WIN32
236  if (th && th->fiber != fib && cont->type != ROOT_FIBER_CONTEXT) {
237  /* don't delete root fiber handle */
238  if (fib->fib_handle) {
239  DeleteFiber(fib->fib_handle);
240  }
241  }
242 #else /* not WIN32 */
243  if (th && th->fiber != fib) {
244  if (fib->ss_sp) {
245  if (cont->type == ROOT_FIBER_CONTEXT) {
246  rb_bug("Illegal root fiber parameter");
247  }
248  munmap((void*)fib->ss_sp, fib->ss_size);
249  }
250  }
251  else {
252  /* It may reached here when finalize */
253  /* TODO examine whether it is a bug */
254  /* rb_bug("cont_free: release self"); */
255  }
256 #endif
257  }
258 #else /* not FIBER_USE_NATIVE */
259  ruby_xfree(cont->ensure_array);
261 #endif
262 #ifdef __ia64
263  RUBY_FREE_UNLESS_NULL(cont->machine.register_stack);
264 #endif
266 
267  /* free rb_cont_t or rb_fiber_t */
268  ruby_xfree(ptr);
269  }
270  RUBY_FREE_LEAVE("cont");
271 }
272 
273 static size_t
274 cont_memsize(const void *ptr)
275 {
276  const rb_context_t *cont = ptr;
277  size_t size = 0;
278 
279  size = sizeof(*cont);
280  if (cont->vm_stack) {
281 #ifdef CAPTURE_JUST_VALID_VM_STACK
282  size_t n = (cont->vm_stack_slen + cont->vm_stack_clen);
283 #else
284  size_t n = cont->saved_thread.stack_size;
285 #endif
286  size += n * sizeof(*cont->vm_stack);
287  }
288 
289  if (cont->machine.stack) {
290  size += cont->machine.stack_size * sizeof(*cont->machine.stack);
291  }
292 #ifdef __ia64
293  if (cont->machine.register_stack) {
294  size += cont->machine.register_stack_size * sizeof(*cont->machine.register_stack);
295  }
296 #endif
297  return size;
298 }
299 
300 void
302 {
303  if (fib)
304  rb_gc_mark(fib->cont.self);
305 }
306 
307 static void
308 fiber_mark(void *ptr)
309 {
310  RUBY_MARK_ENTER("cont");
311  if (ptr) {
312  rb_fiber_t *fib = ptr;
313  rb_fiber_mark_self(fib->prev);
314  cont_mark(&fib->cont);
315  }
316  RUBY_MARK_LEAVE("cont");
317 }
318 
319 static void
320 fiber_free(void *ptr)
321 {
322  RUBY_FREE_ENTER("fiber");
323  if (ptr) {
324  rb_fiber_t *fib = ptr;
325  if (fib->cont.type != ROOT_FIBER_CONTEXT &&
328  }
329 
330  cont_free(&fib->cont);
331  }
332  RUBY_FREE_LEAVE("fiber");
333 }
334 
335 static size_t
336 fiber_memsize(const void *ptr)
337 {
338  const rb_fiber_t *fib = ptr;
339  size_t size = 0;
340 
341  size = sizeof(*fib);
342  if (fib->cont.type != ROOT_FIBER_CONTEXT &&
345  }
346  size += cont_memsize(&fib->cont);
347  return size;
348 }
349 
350 VALUE
352 {
353  if (rb_typeddata_is_kind_of(obj, &fiber_data_type)) {
354  return Qtrue;
355  }
356  else {
357  return Qfalse;
358  }
359 }
360 
361 static void
363 {
364  size_t size;
365 
367 #ifdef __ia64
368  th->machine.register_stack_end = rb_ia64_bsp();
369 #endif
370 
371  if (th->machine.stack_start > th->machine.stack_end) {
372  size = cont->machine.stack_size = th->machine.stack_start - th->machine.stack_end;
373  cont->machine.stack_src = th->machine.stack_end;
374  }
375  else {
376  size = cont->machine.stack_size = th->machine.stack_end - th->machine.stack_start;
377  cont->machine.stack_src = th->machine.stack_start;
378  }
379 
380  if (cont->machine.stack) {
381  REALLOC_N(cont->machine.stack, VALUE, size);
382  }
383  else {
384  cont->machine.stack = ALLOC_N(VALUE, size);
385  }
386 
388  MEMCPY(cont->machine.stack, cont->machine.stack_src, VALUE, size);
389 
390 #ifdef __ia64
391  rb_ia64_flushrs();
392  size = cont->machine.register_stack_size = th->machine.register_stack_end - th->machine.register_stack_start;
393  cont->machine.register_stack_src = th->machine.register_stack_start;
394  if (cont->machine.register_stack) {
395  REALLOC_N(cont->machine.register_stack, VALUE, size);
396  }
397  else {
398  cont->machine.register_stack = ALLOC_N(VALUE, size);
399  }
400 
401  MEMCPY(cont->machine.register_stack, cont->machine.register_stack_src, VALUE, size);
402 #endif
403 }
404 
405 static const rb_data_type_t cont_data_type = {
406  "continuation",
409 };
410 
411 static inline void
413 {
414  rb_thread_t *sth = &cont->saved_thread;
415 
416  /* save thread context */
417  sth->stack = th->stack;
418  sth->stack_size = th->stack_size;
419  sth->local_storage = th->local_storage;
420  sth->cfp = th->cfp;
421  sth->safe_level = th->safe_level;
422  sth->raised_flag = th->raised_flag;
423  sth->state = th->state;
424  sth->status = th->status;
425  sth->tag = th->tag;
426  sth->protect_tag = th->protect_tag;
427  sth->errinfo = th->errinfo;
428  sth->first_proc = th->first_proc;
429  sth->root_lep = th->root_lep;
430  sth->root_svar = th->root_svar;
431  sth->ensure_list = th->ensure_list;
432 
433  sth->trace_arg = th->trace_arg;
434 
435  /* saved_thread->machine.stack_(start|end) should be NULL */
436  /* because it may happen GC afterward */
437  sth->machine.stack_start = 0;
438  sth->machine.stack_end = 0;
439 #ifdef __ia64
440  sth->machine.register_stack_start = 0;
441  sth->machine.register_stack_end = 0;
442 #endif
443 }
444 
445 static void
447 {
448  /* save thread context */
449  cont_save_thread(cont, th);
450  cont->saved_thread.self = th->self;
452  cont->saved_thread.fiber = th->fiber;
453  cont->saved_thread.local_storage = 0;
456 }
457 
458 static rb_context_t *
460 {
462  volatile VALUE contval;
463  rb_thread_t *th = GET_THREAD();
464 
466  contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont);
467  cont->self = contval;
468  cont_init(cont, th);
469  return cont;
470 }
471 
472 static VALUE
473 cont_capture(volatile int *volatile stat)
474 {
475  rb_context_t *volatile cont;
476  rb_thread_t *th = GET_THREAD();
477  volatile VALUE contval;
478 
481  cont = cont_new(rb_cContinuation);
482  contval = cont->self;
483 
484 #ifdef CAPTURE_JUST_VALID_VM_STACK
485  cont->vm_stack_slen = th->cfp->sp - th->stack;
486  cont->vm_stack_clen = th->stack + th->stack_size - (VALUE*)th->cfp;
487  cont->vm_stack = ALLOC_N(VALUE, cont->vm_stack_slen + cont->vm_stack_clen);
488  MEMCPY(cont->vm_stack, th->stack, VALUE, cont->vm_stack_slen);
489  MEMCPY(cont->vm_stack + cont->vm_stack_slen, (VALUE*)th->cfp, VALUE, cont->vm_stack_clen);
490 #else
491  cont->vm_stack = ALLOC_N(VALUE, th->stack_size);
492  MEMCPY(cont->vm_stack, th->stack, VALUE, th->stack_size);
493 #endif
494  cont->saved_thread.stack = NULL;
495 
496  cont_save_machine_stack(th, cont);
497 
498  /* backup ensure_list to array for search in another context */
499  {
500  rb_ensure_list_t *p;
501  int size = 0;
502  rb_ensure_entry_t *entry;
503  for (p=th->ensure_list; p; p=p->next)
504  size++;
505  entry = cont->ensure_array = ALLOC_N(rb_ensure_entry_t,size+1);
506  for (p=th->ensure_list; p; p=p->next) {
507  if (!p->entry.marker)
508  p->entry.marker = rb_ary_tmp_new(0); /* dummy object */
509  *entry++ = p->entry;
510  }
511  entry->marker = 0;
512  }
513 
514  if (ruby_setjmp(cont->jmpbuf)) {
515  VALUE value;
516 
517  VAR_INITIALIZED(cont);
518  value = cont->value;
519  if (cont->argc == -1) rb_exc_raise(value);
520  cont->value = Qnil;
521  *stat = 1;
522  return value;
523  }
524  else {
525  *stat = 0;
526  return contval;
527  }
528 }
529 
530 static inline void
532 {
533  rb_thread_t *th = GET_THREAD(), *sth = &cont->saved_thread;
534 
535  /* restore thread context */
536  if (cont->type == CONTINUATION_CONTEXT) {
537  /* continuation */
538  rb_fiber_t *fib;
539 
540  th->fiber = sth->fiber;
541  fib = th->fiber ? th->fiber : th->root_fiber;
542 
543  if (fib && fib->cont.saved_thread.stack) {
545  th->stack = fib->cont.saved_thread.stack;
546  }
547 #ifdef CAPTURE_JUST_VALID_VM_STACK
548  MEMCPY(th->stack, cont->vm_stack, VALUE, cont->vm_stack_slen);
549  MEMCPY(th->stack + sth->stack_size - cont->vm_stack_clen,
550  cont->vm_stack + cont->vm_stack_slen, VALUE, cont->vm_stack_clen);
551 #else
552  MEMCPY(th->stack, cont->vm_stack, VALUE, sth->stack_size);
553 #endif
554  }
555  else {
556  /* fiber */
557  th->stack = sth->stack;
558  sth->stack = NULL;
559  th->stack_size = sth->stack_size;
560  th->local_storage = sth->local_storage;
561  th->local_storage_recursive_hash = sth->local_storage_recursive_hash;
562  th->local_storage_recursive_hash_for_trace = sth->local_storage_recursive_hash_for_trace;
563  th->fiber = (rb_fiber_t*)cont;
564  }
565 
566  th->cfp = sth->cfp;
567  th->safe_level = sth->safe_level;
568  th->raised_flag = sth->raised_flag;
569  th->state = sth->state;
570  th->status = sth->status;
571  th->tag = sth->tag;
572  th->protect_tag = sth->protect_tag;
573  th->errinfo = sth->errinfo;
574  th->first_proc = sth->first_proc;
575  th->root_lep = sth->root_lep;
576  th->root_svar = sth->root_svar;
577  th->ensure_list = sth->ensure_list;
578  VM_ASSERT(th->stack != NULL);
579  VM_ASSERT(sth->status == THREAD_RUNNABLE);
580 }
581 
582 #if FIBER_USE_NATIVE
583 #ifdef _WIN32
584 static void
585 fiber_set_stack_location(void)
586 {
587  rb_thread_t *th = GET_THREAD();
588  VALUE *ptr;
589 
590  SET_MACHINE_STACK_END(&ptr);
591  th->machine.stack_start = (void*)(((VALUE)ptr & RB_PAGE_MASK) + STACK_UPPER((void *)&ptr, 0, RB_PAGE_SIZE));
592 }
593 
594 static VOID CALLBACK
595 fiber_entry(void *arg)
596 {
597  fiber_set_stack_location();
598  rb_fiber_start();
599 }
600 #else /* _WIN32 */
601 
602 /*
603  * FreeBSD require a first (i.e. addr) argument of mmap(2) is not NULL
604  * if MAP_STACK is passed.
605  * http://www.FreeBSD.org/cgi/query-pr.cgi?pr=158755
606  */
607 #if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
608 #define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK)
609 #else
610 #define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON)
611 #endif
612 
613 static char*
614 fiber_machine_stack_alloc(size_t size)
615 {
616  char *ptr;
617 
618  if (machine_stack_cache_index > 0) {
619  if (machine_stack_cache[machine_stack_cache_index - 1].size == (size / sizeof(VALUE))) {
620  ptr = machine_stack_cache[machine_stack_cache_index - 1].ptr;
621  machine_stack_cache_index--;
622  machine_stack_cache[machine_stack_cache_index].ptr = NULL;
623  machine_stack_cache[machine_stack_cache_index].size = 0;
624  }
625  else{
626  /* TODO handle multiple machine stack size */
627  rb_bug("machine_stack_cache size is not canonicalized");
628  }
629  }
630  else {
631  void *page;
633 
634  errno = 0;
635  ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);
636  if (ptr == MAP_FAILED) {
637  rb_raise(rb_eFiberError, "can't alloc machine stack to fiber: %s", strerror(errno));
638  }
639 
640  /* guard page setup */
641  page = ptr + STACK_DIR_UPPER(size - RB_PAGE_SIZE, 0);
642  if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
643  rb_raise(rb_eFiberError, "mprotect failed");
644  }
645  }
646 
647  return ptr;
648 }
649 #endif
650 
651 static void
652 fiber_initialize_machine_stack_context(rb_fiber_t *fib, size_t size)
653 {
654  rb_thread_t *sth = &fib->cont.saved_thread;
655 
656 #ifdef _WIN32
657 # if defined(_MSC_VER) && _MSC_VER <= 1200
658 # define CreateFiberEx(cs, stacksize, flags, entry, param) \
659  CreateFiber((stacksize), (entry), (param))
660 # endif
661  fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
662  if (!fib->fib_handle) {
663  /* try to release unnecessary fibers & retry to create */
664  rb_gc();
665  fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
666  if (!fib->fib_handle) {
667  rb_raise(rb_eFiberError, "can't create fiber");
668  }
669  }
670  sth->machine.stack_maxsize = size;
671 #else /* not WIN32 */
672  ucontext_t *context = &fib->context;
673  char *ptr;
675 
676  getcontext(context);
677  ptr = fiber_machine_stack_alloc(size);
678  context->uc_link = NULL;
679  context->uc_stack.ss_sp = ptr;
680  context->uc_stack.ss_size = size;
681  fib->ss_sp = ptr;
682  fib->ss_size = size;
683  makecontext(context, rb_fiber_start, 0);
684  sth->machine.stack_start = (VALUE*)(ptr + STACK_DIR_UPPER(0, size));
685  sth->machine.stack_maxsize = size - RB_PAGE_SIZE;
686 #endif
687 #ifdef __ia64
688  sth->machine.register_stack_maxsize = sth->machine.stack_maxsize;
689 #endif
690 }
691 
692 NOINLINE(static void fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib));
693 
694 static void
695 fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib)
696 {
697  rb_thread_t *th = GET_THREAD(), *sth = &newfib->cont.saved_thread;
698 
699  if (newfib->status != RUNNING) {
700  fiber_initialize_machine_stack_context(newfib, th->vm->default_params.fiber_machine_stack_size);
701  }
702 
703  /* restore thread context */
704  cont_restore_thread(&newfib->cont);
705  th->machine.stack_maxsize = sth->machine.stack_maxsize;
706  if (sth->machine.stack_end && (newfib != oldfib)) {
707  rb_bug("fiber_setcontext: sth->machine.stack_end has non zero value");
708  }
709 
710  /* save oldfib's machine stack */
711  if (oldfib->status != TERMINATED) {
714  if (STACK_DIR_UPPER(0, 1)) {
716  oldfib->cont.machine.stack = th->machine.stack_end;
717  }
718  else {
720  oldfib->cont.machine.stack = th->machine.stack_start;
721  }
722  }
723  /* exchange machine_stack_start between oldfib and newfib */
725  th->machine.stack_start = sth->machine.stack_start;
726  /* oldfib->machine.stack_end should be NULL */
727  oldfib->cont.saved_thread.machine.stack_end = 0;
728 #ifndef _WIN32
729  if (!newfib->context.uc_stack.ss_sp && th->root_fiber != newfib) {
730  rb_bug("non_root_fiber->context.uc_stac.ss_sp should not be NULL");
731  }
732 #endif
733  /* swap machine context */
734 #ifdef _WIN32
735  SwitchToFiber(newfib->fib_handle);
736 #else
737  swapcontext(&oldfib->context, &newfib->context);
738 #endif
739 }
740 #endif
741 
743 
744 static void
746 {
747  cont_restore_thread(cont);
748 
749  /* restore machine stack */
750 #ifdef _M_AMD64
751  {
752  /* workaround for x64 SEH */
753  jmp_buf buf;
754  setjmp(buf);
755  ((_JUMP_BUFFER*)(&cont->jmpbuf))->Frame =
756  ((_JUMP_BUFFER*)(&buf))->Frame;
757  }
758 #endif
759  if (cont->machine.stack_src) {
761  MEMCPY(cont->machine.stack_src, cont->machine.stack,
762  VALUE, cont->machine.stack_size);
763  }
764 
765 #ifdef __ia64
766  if (cont->machine.register_stack_src) {
767  MEMCPY(cont->machine.register_stack_src, cont->machine.register_stack,
768  VALUE, cont->machine.register_stack_size);
769  }
770 #endif
771 
772  ruby_longjmp(cont->jmpbuf, 1);
773 }
774 
776 
777 #ifdef __ia64
778 #define C(a) rse_##a##0, rse_##a##1, rse_##a##2, rse_##a##3, rse_##a##4
779 #define E(a) rse_##a##0= rse_##a##1= rse_##a##2= rse_##a##3= rse_##a##4
780 static volatile int C(a), C(b), C(c), C(d), C(e);
781 static volatile int C(f), C(g), C(h), C(i), C(j);
782 static volatile int C(k), C(l), C(m), C(n), C(o);
783 static volatile int C(p), C(q), C(r), C(s), C(t);
784 #if 0
785 {/* the above lines make cc-mode.el confused so much */}
786 #endif
787 int rb_dummy_false = 0;
788 NORETURN(NOINLINE(static void register_stack_extend(rb_context_t *, VALUE *, VALUE *)));
789 static void
790 register_stack_extend(rb_context_t *cont, VALUE *vp, VALUE *curr_bsp)
791 {
792  if (rb_dummy_false) {
793  /* use registers as much as possible */
794  E(a) = E(b) = E(c) = E(d) = E(e) =
795  E(f) = E(g) = E(h) = E(i) = E(j) =
796  E(k) = E(l) = E(m) = E(n) = E(o) =
797  E(p) = E(q) = E(r) = E(s) = E(t) = 0;
798  E(a) = E(b) = E(c) = E(d) = E(e) =
799  E(f) = E(g) = E(h) = E(i) = E(j) =
800  E(k) = E(l) = E(m) = E(n) = E(o) =
801  E(p) = E(q) = E(r) = E(s) = E(t) = 0;
802  }
803  if (curr_bsp < cont->machine.register_stack_src+cont->machine.register_stack_size) {
804  register_stack_extend(cont, vp, (VALUE*)rb_ia64_bsp());
805  }
806  cont_restore_0(cont, vp);
807 }
808 #undef C
809 #undef E
810 #endif
811 
812 static void
813 cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
814 {
815  if (cont->machine.stack_src) {
816 #ifdef HAVE_ALLOCA
817 #define STACK_PAD_SIZE 1
818 #else
819 #define STACK_PAD_SIZE 1024
820 #endif
821  VALUE space[STACK_PAD_SIZE];
822 
823 #if !STACK_GROW_DIRECTION
824  if (addr_in_prev_frame > &space[0]) {
825  /* Stack grows downward */
826 #endif
827 #if STACK_GROW_DIRECTION <= 0
828  volatile VALUE *const end = cont->machine.stack_src;
829  if (&space[0] > end) {
830 # ifdef HAVE_ALLOCA
831  volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end);
832  space[0] = *sp;
833 # else
834  cont_restore_0(cont, &space[0]);
835 # endif
836  }
837 #endif
838 #if !STACK_GROW_DIRECTION
839  }
840  else {
841  /* Stack grows upward */
842 #endif
843 #if STACK_GROW_DIRECTION >= 0
844  volatile VALUE *const end = cont->machine.stack_src + cont->machine.stack_size;
845  if (&space[STACK_PAD_SIZE] < end) {
846 # ifdef HAVE_ALLOCA
847  volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]);
848  space[0] = *sp;
849 # else
850  cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
851 # endif
852  }
853 #endif
854 #if !STACK_GROW_DIRECTION
855  }
856 #endif
857  }
858  cont_restore_1(cont);
859 }
860 #ifdef __ia64
861 #define cont_restore_0(cont, vp) register_stack_extend((cont), (vp), (VALUE*)rb_ia64_bsp())
862 #endif
863 
864 /*
865  * Document-class: Continuation
866  *
867  * Continuation objects are generated by Kernel#callcc,
868  * after having +require+d <i>continuation</i>. They hold
869  * a return address and execution context, allowing a nonlocal return
870  * to the end of the <code>callcc</code> block from anywhere within a
871  * program. Continuations are somewhat analogous to a structured
872  * version of C's <code>setjmp/longjmp</code> (although they contain
873  * more state, so you might consider them closer to threads).
874  *
875  * For instance:
876  *
877  * require "continuation"
878  * arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
879  * callcc{|cc| $cc = cc}
880  * puts(message = arr.shift)
881  * $cc.call unless message =~ /Max/
882  *
883  * <em>produces:</em>
884  *
885  * Freddie
886  * Herbie
887  * Ron
888  * Max
889  *
890  * Also you can call callcc in other methods:
891  *
892  * require "continuation"
893  *
894  * def g
895  * arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
896  * cc = callcc { |cc| cc }
897  * puts arr.shift
898  * return cc, arr.size
899  * end
900  *
901  * def f
902  * c, size = g
903  * c.call(c) if size > 1
904  * end
905  *
906  * f
907  *
908  * This (somewhat contrived) example allows the inner loop to abandon
909  * processing early:
910  *
911  * require "continuation"
912  * callcc {|cont|
913  * for i in 0..4
914  * print "\n#{i}: "
915  * for j in i*5...(i+1)*5
916  * cont.call() if j == 17
917  * printf "%3d", j
918  * end
919  * end
920  * }
921  * puts
922  *
923  * <em>produces:</em>
924  *
925  * 0: 0 1 2 3 4
926  * 1: 5 6 7 8 9
927  * 2: 10 11 12 13 14
928  * 3: 15 16
929  */
930 
931 /*
932  * call-seq:
933  * callcc {|cont| block } -> obj
934  *
935  * Generates a Continuation object, which it passes to
936  * the associated block. You need to <code>require
937  * 'continuation'</code> before using this method. Performing a
938  * <em>cont</em><code>.call</code> will cause the #callcc
939  * to return (as will falling through the end of the block). The
940  * value returned by the #callcc is the value of the
941  * block, or the value passed to <em>cont</em><code>.call</code>. See
942  * class Continuation for more details. Also see
943  * Kernel#throw for an alternative mechanism for
944  * unwinding a call stack.
945  */
946 
947 static VALUE
949 {
950  volatile int called;
951  volatile VALUE val = cont_capture(&called);
952 
953  if (called) {
954  return val;
955  }
956  else {
957  return rb_yield(val);
958  }
959 }
960 
961 static VALUE
963 {
964  switch (argc) {
965  case 0:
966  return Qnil;
967  case 1:
968  return argv[0];
969  default:
970  return rb_ary_new4(argc, argv);
971  }
972 }
973 
974 /* CAUTION!! : Currently, error in rollback_func is not supported */
975 /* same as rb_protect if set rollback_func to NULL */
976 void
978 {
979  st_table **table_p = &GET_VM()->ensure_rollback_table;
980  if (UNLIKELY(*table_p == NULL)) {
981  *table_p = st_init_numtable();
982  }
983  st_insert(*table_p, (st_data_t)ensure_func, (st_data_t)rollback_func);
984 }
985 
986 static inline VALUE
988 {
989  st_table *table = GET_VM()->ensure_rollback_table;
990  st_data_t val;
991  if (table && st_lookup(table, (st_data_t)ensure_func, &val))
992  return (VALUE) val;
993  return Qundef;
994 }
995 
996 
997 static inline void
999 {
1000  rb_ensure_list_t *p;
1001  rb_ensure_entry_t *entry;
1002  size_t i;
1003  size_t cur_size;
1004  size_t target_size;
1005  size_t base_point;
1006  VALUE (*func)(ANYARGS);
1007 
1008  cur_size = 0;
1009  for (p=current; p; p=p->next)
1010  cur_size++;
1011  target_size = 0;
1012  for (entry=target; entry->marker; entry++)
1013  target_size++;
1014 
1015  /* search common stack point */
1016  p = current;
1017  base_point = cur_size;
1018  while (base_point) {
1019  if (target_size >= base_point &&
1020  p->entry.marker == target[target_size - base_point].marker)
1021  break;
1022  base_point --;
1023  p = p->next;
1024  }
1025 
1026  /* rollback function check */
1027  for (i=0; i < target_size - base_point; i++) {
1028  if (!lookup_rollback_func(target[i].e_proc)) {
1029  rb_raise(rb_eRuntimeError, "continuation called from out of critical rb_ensure scope");
1030  }
1031  }
1032  /* pop ensure stack */
1033  while (cur_size > base_point) {
1034  /* escape from ensure block */
1035  (*current->entry.e_proc)(current->entry.data2);
1036  current = current->next;
1037  cur_size--;
1038  }
1039  /* push ensure stack */
1040  while (i--) {
1041  func = (VALUE (*)(ANYARGS)) lookup_rollback_func(target[i].e_proc);
1042  if ((VALUE)func != Qundef) {
1043  (*func)(target[i].data2);
1044  }
1045  }
1046 }
1047 
1048 /*
1049  * call-seq:
1050  * cont.call(args, ...)
1051  * cont[args, ...]
1052  *
1053  * Invokes the continuation. The program continues from the end of the
1054  * <code>callcc</code> block. If no arguments are given, the original
1055  * <code>callcc</code> returns <code>nil</code>. If one argument is
1056  * given, <code>callcc</code> returns it. Otherwise, an array
1057  * containing <i>args</i> is returned.
1058  *
1059  * callcc {|cont| cont.call } #=> nil
1060  * callcc {|cont| cont.call 1 } #=> 1
1061  * callcc {|cont| cont.call 1, 2, 3 } #=> [1, 2, 3]
1062  */
1063 
1064 static VALUE
1066 {
1067  rb_context_t *cont;
1068  rb_thread_t *th = GET_THREAD();
1069  GetContPtr(contval, cont);
1070 
1071  if (cont->saved_thread.self != th->self) {
1072  rb_raise(rb_eRuntimeError, "continuation called across threads");
1073  }
1074  if (cont->saved_thread.protect_tag != th->protect_tag) {
1075  rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier");
1076  }
1077  if (cont->saved_thread.fiber) {
1078  if (th->fiber != cont->saved_thread.fiber) {
1079  rb_raise(rb_eRuntimeError, "continuation called across fiber");
1080  }
1081  }
1082  rollback_ensure_stack(contval, th->ensure_list, cont->ensure_array);
1083 
1084  cont->argc = argc;
1085  cont->value = make_passing_arg(argc, argv);
1086 
1087  /* restore `tracing' context. see [Feature #4347] */
1088  th->trace_arg = cont->saved_thread.trace_arg;
1089  cont_restore_0(cont, &contval);
1090  return Qnil; /* unreachable */
1091 }
1092 
1093 /*********/
1094 /* fiber */
1095 /*********/
1096 
1097 /*
1098  * Document-class: Fiber
1099  *
1100  * Fibers are primitives for implementing light weight cooperative
1101  * concurrency in Ruby. Basically they are a means of creating code blocks
1102  * that can be paused and resumed, much like threads. The main difference
1103  * is that they are never preempted and that the scheduling must be done by
1104  * the programmer and not the VM.
1105  *
1106  * As opposed to other stackless light weight concurrency models, each fiber
1107  * comes with a stack. This enables the fiber to be paused from deeply
1108  * nested function calls within the fiber block. See the ruby(1)
1109  * manpage to configure the size of the fiber stack(s).
1110  *
1111  * When a fiber is created it will not run automatically. Rather it must
1112  * be explicitly asked to run using the <code>Fiber#resume</code> method.
1113  * The code running inside the fiber can give up control by calling
1114  * <code>Fiber.yield</code> in which case it yields control back to caller
1115  * (the caller of the <code>Fiber#resume</code>).
1116  *
1117  * Upon yielding or termination the Fiber returns the value of the last
1118  * executed expression
1119  *
1120  * For instance:
1121  *
1122  * fiber = Fiber.new do
1123  * Fiber.yield 1
1124  * 2
1125  * end
1126  *
1127  * puts fiber.resume
1128  * puts fiber.resume
1129  * puts fiber.resume
1130  *
1131  * <em>produces</em>
1132  *
1133  * 1
1134  * 2
1135  * FiberError: dead fiber called
1136  *
1137  * The <code>Fiber#resume</code> method accepts an arbitrary number of
1138  * parameters, if it is the first call to <code>resume</code> then they
1139  * will be passed as block arguments. Otherwise they will be the return
1140  * value of the call to <code>Fiber.yield</code>
1141  *
1142  * Example:
1143  *
1144  * fiber = Fiber.new do |first|
1145  * second = Fiber.yield first + 2
1146  * end
1147  *
1148  * puts fiber.resume 10
1149  * puts fiber.resume 14
1150  * puts fiber.resume 18
1151  *
1152  * <em>produces</em>
1153  *
1154  * 12
1155  * 14
1156  * FiberError: dead fiber called
1157  *
1158  */
1159 
1160 static const rb_data_type_t fiber_data_type = {
1161  "fiber",
1163  0, 0, RUBY_TYPED_FREE_IMMEDIATELY
1164 };
1165 
1166 static VALUE
1168 {
1169  return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
1170 }
1171 
1172 static rb_fiber_t*
1174 {
1175  rb_fiber_t *fib;
1176  rb_thread_t *th = GET_THREAD();
1177 
1178  if (DATA_PTR(fibval) != 0) {
1179  rb_raise(rb_eRuntimeError, "cannot initialize twice");
1180  }
1181 
1183  fib = ZALLOC(rb_fiber_t);
1184  fib->cont.self = fibval;
1185  fib->cont.type = FIBER_CONTEXT;
1186  cont_init(&fib->cont, th);
1187  fib->prev = NULL;
1188  fib->status = CREATED;
1189 
1190  DATA_PTR(fibval) = fib;
1191 
1192  return fib;
1193 }
1194 
1197  const rb_iseq_t *iseq,
1198  VALUE type,
1199  VALUE self,
1200  VALUE specval,
1201  VALUE cref_or_me,
1202  const VALUE *pc,
1203  VALUE *sp,
1204  int local_size,
1205  int stack_max);
1206 
1207 static VALUE
1208 fiber_init(VALUE fibval, VALUE proc)
1209 {
1210  rb_fiber_t *fib = fiber_t_alloc(fibval);
1211  rb_context_t *cont = &fib->cont;
1212  rb_thread_t *th = &cont->saved_thread;
1213  rb_thread_t *cth = GET_THREAD();
1214 
1215  /* initialize cont */
1216  cont->vm_stack = 0;
1217 
1218  th->stack = NULL;
1219  th->stack_size = 0;
1220 
1221  th->stack_size = cth->vm->default_params.fiber_vm_stack_size / sizeof(VALUE);
1222  th->stack = ALLOC_N(VALUE, th->stack_size);
1223  th->cfp = (void *)(th->stack + th->stack_size);
1224 
1225  rb_vm_push_frame(th,
1226  NULL,
1228  Qnil, /* self */
1230  0, /* specval */
1231  NULL, /* pc */
1232  th->stack, /* sp */
1233  0, /* local_size */
1234  0);
1235 
1236  th->tag = 0;
1240 
1241  th->first_proc = proc;
1242 
1243 #if !FIBER_USE_NATIVE
1244  MEMCPY(&cont->jmpbuf, &cth->root_jmpbuf, rb_jmpbuf_t, 1);
1245 #endif
1246 
1247  return fibval;
1248 }
1249 
1250 /* :nodoc: */
1251 static VALUE
1253 {
1254  return fiber_init(fibval, rb_block_proc());
1255 }
1256 
1257 VALUE
1259 {
1261 }
1262 
1263 static void rb_fiber_terminate(rb_fiber_t *fib);
1264 
1265 void
1267 {
1268  rb_thread_t *th = GET_THREAD();
1269  rb_fiber_t *fib = th->fiber;
1270  rb_proc_t *proc;
1271  int state;
1272 
1273  TH_PUSH_TAG(th);
1274  if ((state = EXEC_TAG()) == 0) {
1275  rb_context_t *cont = &VAR_FROM_MEMORY(fib)->cont;
1276  int argc;
1277  const VALUE *argv, args = cont->value;
1278  GetProcPtr(cont->saved_thread.first_proc, proc);
1279  argv = (argc = cont->argc) > 1 ? RARRAY_CONST_PTR(args) : &args;
1280  cont->value = Qnil;
1281  th->errinfo = Qnil;
1283  th->root_svar = Qfalse;
1284  fib->status = RUNNING;
1285 
1286  EXEC_EVENT_HOOK(th, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, 0, Qnil);
1287  cont->value = rb_vm_invoke_proc(th, proc, argc, argv, VM_BLOCK_HANDLER_NONE);
1288  }
1289  TH_POP_TAG();
1290 
1291  if (state) {
1292  if (state == TAG_RAISE || state == TAG_FATAL) {
1294  }
1295  else {
1297  if (!NIL_P(err))
1299  }
1301  }
1302 
1303  rb_fiber_terminate(fib);
1304  rb_bug("rb_fiber_start: unreachable");
1305 }
1306 
1307 static rb_fiber_t *
1309 {
1310  rb_fiber_t *fib;
1311  /* no need to allocate vm stack */
1313  fib->cont.type = ROOT_FIBER_CONTEXT;
1314 #if FIBER_USE_NATIVE
1315 #ifdef _WIN32
1316  fib->fib_handle = ConvertThreadToFiber(0);
1317 #endif
1318 #endif
1319  fib->status = RUNNING;
1320 
1321  th->root_fiber = th->fiber = fib;
1322  return fib;
1323 }
1324 
1325 static inline rb_fiber_t*
1327 {
1328  rb_thread_t *th = GET_THREAD();
1329  if (th->fiber == 0) {
1330  rb_fiber_t *fib = root_fiber_alloc(th);
1331  /* Running thread object has stack management responsibility */
1332  fib->cont.saved_thread.stack = NULL;
1333  }
1334  return th->fiber;
1335 }
1336 
1337 static inline rb_fiber_t*
1339 {
1340  rb_fiber_t *fib = fiber_current();
1341  rb_fiber_t *prev = fib->prev;
1342 
1343  if (!prev) {
1344  rb_fiber_t *root_fiber = GET_THREAD()->root_fiber;
1345 
1346  if (root_fiber == fib) {
1347  rb_raise(rb_eFiberError, "can't yield from root fiber");
1348  }
1349  return root_fiber;
1350  }
1351  else {
1352  fib->prev = NULL;
1353  return prev;
1354  }
1355 }
1356 
1357 VALUE
1359 {
1360  return fiber_current()->cont.self;
1361 }
1362 
1363 static inline VALUE
1365 {
1366  rb_fiber_t *fib;
1367 
1368  if (th->fiber) {
1369  fib = th->fiber;
1370  cont_save_thread(&fib->cont, th);
1371  }
1372  else {
1373  /* create root fiber */
1374  fib = root_fiber_alloc(th);
1375  }
1376 
1377 #if FIBER_USE_NATIVE
1378  fiber_setcontext(next_fib, fib);
1379  /* restored */
1380 #ifndef _WIN32
1381  if (terminated_machine_stack.ptr) {
1382  if (machine_stack_cache_index < MAX_MACHINE_STACK_CACHE) {
1383  machine_stack_cache[machine_stack_cache_index].ptr = terminated_machine_stack.ptr;
1384  machine_stack_cache[machine_stack_cache_index].size = terminated_machine_stack.size;
1385  machine_stack_cache_index++;
1386  }
1387  else {
1388  if (terminated_machine_stack.ptr != fib->cont.machine.stack) {
1389  munmap((void*)terminated_machine_stack.ptr, terminated_machine_stack.size * sizeof(VALUE));
1390  }
1391  else {
1392  rb_bug("terminated fiber resumed");
1393  }
1394  }
1395  terminated_machine_stack.ptr = NULL;
1396  terminated_machine_stack.size = 0;
1397  }
1398 #endif /* not _WIN32 */
1399  fib = th->fiber;
1400  if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value);
1401  return fib->cont.value;
1402 
1403 #else /* FIBER_USE_NATIVE */
1404  cont_save_machine_stack(th, &fib->cont);
1405  if (ruby_setjmp(fib->cont.jmpbuf)) {
1406  /* restored */
1407  fib = th->fiber;
1408  if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value);
1409  if (next_fib->cont.value == Qundef) {
1410  cont_restore_0(&next_fib->cont, &next_fib->cont.value);
1411  rb_bug("rb_fiber_resume: unreachable");
1412  }
1413  return fib->cont.value;
1414  }
1415  else {
1416  VALUE undef = Qundef;
1417  cont_restore_0(&next_fib->cont, &undef);
1418  rb_bug("rb_fiber_resume: unreachable");
1419  }
1420 #endif /* FIBER_USE_NATIVE */
1421 }
1422 
1423 static inline VALUE
1424 fiber_switch(rb_fiber_t *fib, int argc, const VALUE *argv, int is_resume)
1425 {
1426  VALUE value;
1427  rb_context_t *cont = &fib->cont;
1428  rb_thread_t *th = GET_THREAD();
1429 
1430  if (th->fiber == fib) {
1431  /* ignore fiber context switch
1432  * because destination fiber is same as current fiber
1433  */
1434  return make_passing_arg(argc, argv);
1435  }
1436 
1437  if (cont->saved_thread.self != th->self) {
1438  rb_raise(rb_eFiberError, "fiber called across threads");
1439  }
1440  else if (cont->saved_thread.protect_tag != th->protect_tag) {
1441  rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
1442  }
1443  else if (fib->status == TERMINATED) {
1444  value = rb_exc_new2(rb_eFiberError, "dead fiber called");
1445 
1446  if (th->fiber->status != TERMINATED) rb_exc_raise(value);
1447 
1448  /* th->fiber is also dead => switch to root fiber */
1449  /* (this means we're being called from rb_fiber_terminate, */
1450  /* and the terminated fiber's return_fiber() is already dead) */
1451  cont = &th->root_fiber->cont;
1452  cont->argc = -1;
1453  cont->value = value;
1454 #if FIBER_USE_NATIVE
1455  fiber_setcontext(th->root_fiber, th->fiber);
1456 #else
1457  cont_restore_0(cont, &value);
1458 #endif
1459  /* unreachable */
1460  }
1461 
1462  if (is_resume) {
1463  fib->prev = fiber_current();
1464  }
1465  else {
1466  /* restore `tracing' context. see [Feature #4347] */
1467  th->trace_arg = cont->saved_thread.trace_arg;
1468  }
1469 
1470  cont->argc = argc;
1471  cont->value = make_passing_arg(argc, argv);
1472 
1473  value = fiber_store(fib, th);
1474  RUBY_VM_CHECK_INTS(th);
1475 
1476  EXEC_EVENT_HOOK(th, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, 0, Qnil);
1477 
1478  return value;
1479 }
1480 
1481 VALUE
1482 rb_fiber_transfer(VALUE fibval, int argc, const VALUE *argv)
1483 {
1484  rb_fiber_t *fib;
1485  GetFiberPtr(fibval, fib);
1486  return fiber_switch(fib, argc, argv, 0);
1487 }
1488 
1489 static void
1491 {
1492  VALUE value = fib->cont.value;
1493  fib->status = TERMINATED;
1494 #if FIBER_USE_NATIVE && !defined(_WIN32)
1495  /* Ruby must not switch to other thread until storing terminated_machine_stack */
1496  terminated_machine_stack.ptr = fib->ss_sp;
1497  terminated_machine_stack.size = fib->ss_size / sizeof(VALUE);
1498  fib->ss_sp = NULL;
1499  fib->context.uc_stack.ss_sp = NULL;
1500  fib->cont.machine.stack = NULL;
1501  fib->cont.machine.stack_size = 0;
1502 #endif
1503  fiber_switch(return_fiber(), 1, &value, 0);
1504 }
1505 
1506 VALUE
1507 rb_fiber_resume(VALUE fibval, int argc, const VALUE *argv)
1508 {
1509  rb_fiber_t *fib;
1510  GetFiberPtr(fibval, fib);
1511 
1512  if (fib->prev != 0 || fib->cont.type == ROOT_FIBER_CONTEXT) {
1513  rb_raise(rb_eFiberError, "double resume");
1514  }
1515  if (fib->transferred != 0) {
1516  rb_raise(rb_eFiberError, "cannot resume transferred Fiber");
1517  }
1518 
1519  return fiber_switch(fib, argc, argv, 1);
1520 }
1521 
1522 VALUE
1524 {
1525  return fiber_switch(return_fiber(), argc, argv, 0);
1526 }
1527 
1528 void
1530 {
1531  rb_thread_t *th;
1532 
1533  GetThreadPtr(thval, th);
1534  if (th->root_fiber && th->root_fiber != th->fiber) {
1536  }
1537 }
1538 
1539 /*
1540  * call-seq:
1541  * fiber.alive? -> true or false
1542  *
1543  * Returns true if the fiber can still be resumed (or transferred
1544  * to). After finishing execution of the fiber block this method will
1545  * always return false. You need to <code>require 'fiber'</code>
1546  * before using this method.
1547  */
1548 VALUE
1550 {
1551  rb_fiber_t *fib;
1552  GetFiberPtr(fibval, fib);
1553  return fib->status != TERMINATED ? Qtrue : Qfalse;
1554 }
1555 
1556 /*
1557  * call-seq:
1558  * fiber.resume(args, ...) -> obj
1559  *
1560  * Resumes the fiber from the point at which the last <code>Fiber.yield</code>
1561  * was called, or starts running it if it is the first call to
1562  * <code>resume</code>. Arguments passed to resume will be the value of
1563  * the <code>Fiber.yield</code> expression or will be passed as block
1564  * parameters to the fiber's block if this is the first <code>resume</code>.
1565  *
1566  * Alternatively, when resume is called it evaluates to the arguments passed
1567  * to the next <code>Fiber.yield</code> statement inside the fiber's block
1568  * or to the block value if it runs to completion without any
1569  * <code>Fiber.yield</code>
1570  */
1571 static VALUE
1573 {
1574  return rb_fiber_resume(fib, argc, argv);
1575 }
1576 
1577 /*
1578  * call-seq:
1579  * fiber.transfer(args, ...) -> obj
1580  *
1581  * Transfer control to another fiber, resuming it from where it last
1582  * stopped or starting it if it was not resumed before. The calling
1583  * fiber will be suspended much like in a call to
1584  * <code>Fiber.yield</code>. You need to <code>require 'fiber'</code>
1585  * before using this method.
1586  *
1587  * The fiber which receives the transfer call treats it much like
1588  * a resume call. Arguments passed to transfer are treated like those
1589  * passed to resume.
1590  *
1591  * You cannot resume a fiber that transferred control to another one.
1592  * This will cause a double resume error. You need to transfer control
1593  * back to this fiber before it can yield and resume.
1594  *
1595  * Example:
1596  *
1597  * fiber1 = Fiber.new do
1598  * puts "In Fiber 1"
1599  * Fiber.yield
1600  * end
1601  *
1602  * fiber2 = Fiber.new do
1603  * puts "In Fiber 2"
1604  * fiber1.transfer
1605  * puts "Never see this message"
1606  * end
1607  *
1608  * fiber3 = Fiber.new do
1609  * puts "In Fiber 3"
1610  * end
1611  *
1612  * fiber2.resume
1613  * fiber3.resume
1614  *
1615  * <em>produces</em>
1616  *
1617  * In Fiber 2
1618  * In Fiber 1
1619  * In Fiber 3
1620  *
1621  */
1622 static VALUE
1624 {
1625  rb_fiber_t *fib;
1626  GetFiberPtr(fibval, fib);
1627  fib->transferred = 1;
1628  return fiber_switch(fib, argc, argv, 0);
1629 }
1630 
1631 /*
1632  * call-seq:
1633  * Fiber.yield(args, ...) -> obj
1634  *
1635  * Yields control back to the context that resumed the fiber, passing
1636  * along any arguments that were passed to it. The fiber will resume
1637  * processing at this point when <code>resume</code> is called next.
1638  * Any arguments passed to the next <code>resume</code> will be the
1639  * value that this <code>Fiber.yield</code> expression evaluates to.
1640  */
1641 static VALUE
1643 {
1644  return rb_fiber_yield(argc, argv);
1645 }
1646 
1647 /*
1648  * call-seq:
1649  * Fiber.current() -> fiber
1650  *
1651  * Returns the current fiber. You need to <code>require 'fiber'</code>
1652  * before using this method. If you are not running in the context of
1653  * a fiber this method will return the root fiber.
1654  */
1655 static VALUE
1657 {
1658  return rb_fiber_current();
1659 }
1660 
1661 
1662 
1663 /*
1664  * Document-class: FiberError
1665  *
1666  * Raised when an invalid operation is attempted on a Fiber, in
1667  * particular when attempting to call/resume a dead fiber,
1668  * attempting to yield from the root fiber, or calling a fiber across
1669  * threads.
1670  *
1671  * fiber = Fiber.new{}
1672  * fiber.resume #=> nil
1673  * fiber.resume #=> FiberError: dead fiber called
1674  */
1675 
1676 void
1678 {
1679 #if FIBER_USE_NATIVE
1680  rb_thread_t *th = GET_THREAD();
1681 
1682 #ifdef _WIN32
1683  SYSTEM_INFO info;
1684  GetSystemInfo(&info);
1685  pagesize = info.dwPageSize;
1686 #else /* not WIN32 */
1687  pagesize = sysconf(_SC_PAGESIZE);
1688 #endif
1690 #endif
1691 
1692  rb_cFiber = rb_define_class("Fiber", rb_cObject);
1696  rb_define_method(rb_cFiber, "initialize", rb_fiber_init, 0);
1698 }
1699 
1701 
1702 void
1704 {
1705  rb_cContinuation = rb_define_class("Continuation", rb_cObject);
1710  rb_define_global_function("callcc", rb_callcc, 0);
1711 }
1712 
1713 void
1715 {
1719 }
1720 
void rb_gc(void)
Definition: gc.c:6656
rb_control_frame_t * cfp
Definition: vm_core.h:708
VALUE * vm_stack
Definition: cont.c:88
VALUE rb_eStandardError
Definition: error.c:760
VALUE * stack_end
Definition: vm_core.h:786
#define RUBY_VM_CHECK_INTS(th)
Definition: vm_core.h:1569
rb_vm_t * vm
Definition: vm_core.h:703
struct rb_ensure_entry entry
Definition: vm_core.h:693
#define rb_exc_new2
Definition: intern.h:245
#define THREAD_MUST_BE_RUNNING(th)
Definition: cont.c:166
#define GetContPtr(obj, ptr)
Definition: cont.c:156
void rb_bug(const char *fmt,...)
Definition: error.c:482
#define ruby_longjmp(env, val)
Definition: eval_intern.h:60
#define RUBY_TYPED_FREE_IMMEDIATELY
Definition: ruby.h:1145
struct rb_vm_protect_tag * protect_tag
Definition: vm_core.h:770
static VALUE rb_cContinuation
Definition: cont.c:152
Definition: st.h:79
#define RUBY_VM_SET_INTERRUPT(th)
Definition: vm_core.h:1549
st_table * local_storage
Definition: vm_core.h:773
struct rb_thread_struct::@204 machine
void rb_undef_alloc_func(VALUE)
Definition: vm_method.c:681
void rb_define_singleton_method(VALUE obj, const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a singleton method for obj.
Definition: class.c:1716
#define GetProcPtr(obj, ptr)
Definition: vm_core.h:863
VALUE self
Definition: cont.c:86
#define FLUSH_REGISTER_WINDOWS
Definition: defines.h:287
#define CLASS_OF(v)
Definition: ruby.h:453
static VALUE lookup_rollback_func(VALUE(*ensure_func)(ANYARGS))
Definition: cont.c:987
#define Qtrue
Definition: ruby.h:437
VALUE rb_fiber_resume(VALUE fibval, int argc, const VALUE *argv)
Definition: cont.c:1507
#define TypedData_Wrap_Struct(klass, data_type, sval)
Definition: ruby.h:1169
void rb_fiber_reset_root_local_storage(VALUE thval)
Definition: cont.c:1529
static VALUE rb_fiber_m_resume(int argc, VALUE *argv, VALUE fib)
Definition: cont.c:1572
static const rb_data_type_t cont_data_type
Definition: cont.c:151
#define GetFiberPtr(obj, ptr)
Definition: cont.c:159
VALUE data2
Definition: vm_core.h:688
static rb_fiber_t * root_fiber_alloc(rb_thread_t *th)
Definition: cont.c:1308
VALUE rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, int argc, const VALUE *argv, VALUE passed_block_handler)
Definition: vm.c:1150
enum context_type type
Definition: cont.c:84
static void rb_fiber_terminate(rb_fiber_t *fib)
Definition: cont.c:1490
#define VM_BLOCK_HANDLER_NONE
Definition: vm_core.h:1070
SSL_METHOD *(* func)(void)
Definition: ossl_ssl.c:54
size_t fiber_machine_stack_size
Definition: vm_core.h:554
VALUE rb_ary_tmp_new(long capa)
Definition: array.c:532
#define STACK_UPPER(x, a, b)
Definition: gc.h:77
static VALUE fiber_switch(rb_fiber_t *fib, int argc, const VALUE *argv, int is_resume)
Definition: cont.c:1424
VALUE rb_fiber_alive_p(VALUE fibval)
Definition: cont.c:1549
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
Definition: thread.c:1593
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:2207
static void cont_save_thread(rb_context_t *cont, rb_thread_t *th)
Definition: cont.c:412
#define RUBY_MARK_LEAVE(msg)
Definition: gc.h:54
VALUE rb_fiber_current(void)
Definition: cont.c:1358
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
static rb_fiber_t * fiber_current(void)
Definition: cont.c:1326
static VALUE fiber_init(VALUE fibval, VALUE proc)
Definition: cont.c:1208
rb_fiber_t * root_fiber
Definition: vm_core.h:805
#define DATA_PTR(dta)
Definition: ruby.h:1113
void rb_gc_mark(VALUE ptr)
Definition: gc.c:4394
#define st_lookup
Definition: regint.h:185
VALUE local_storage_recursive_hash_for_trace
Definition: vm_core.h:775
rb_jmpbuf_t jmpbuf
Definition: cont.c:104
void rb_define_global_function(const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a global function.
Definition: class.c:1745
Definition: vm_core.h:685
static const rb_data_type_t fiber_data_type
Definition: cont.c:151
static VALUE cont_capture(volatile int *volatile stat)
Definition: cont.c:473
VALUE value
Definition: cont.c:87
void rb_undef_method(VALUE klass, const char *name)
Definition: class.c:1533
void rb_gc_mark_locations(const VALUE *start, const VALUE *end)
Definition: gc.c:4008
static void cont_free(void *ptr)
Definition: cont.c:219
struct rb_context_struct::@2 machine
rb_thread_t saved_thread
Definition: cont.c:103
struct rb_context_struct rb_context_t
#define GET_THREAD()
Definition: vm_core.h:1513
static VALUE rb_eFiberError
Definition: cont.c:154
VALUE * stack
Definition: cont.c:94
RUBY_SYMBOL_EXPORT_BEGIN typedef unsigned long st_data_t
Definition: st.h:22
void rb_exc_raise(VALUE mesg)
Definition: eval.c:620
size_t vm_stack_clen
Definition: cont.c:91
VALUE * stack
Definition: vm_core.h:706
rb_fiber_t * fiber
Definition: vm_core.h:804
static size_t fiber_memsize(const void *ptr)
Definition: cont.c:336
#define TH_POP_TAG()
Definition: eval_intern.h:137
enum fiber_status status
Definition: cont.c:129
#define ALLOC_N(type, n)
Definition: ruby.h:1587
#define EXEC_TAG()
Definition: eval_intern.h:183
#define val
RUBY_EXTERN VALUE rb_cObject
Definition: ruby.h:1872
VALUE rb_eRuntimeError
Definition: error.c:761
rb_ensure_entry_t * ensure_array
Definition: cont.c:105
size_t st_memsize(const st_table *tab)
Definition: st.c:674
size_t fiber_vm_stack_size
Definition: vm_core.h:553
int rb_typeddata_is_kind_of(VALUE obj, const rb_data_type_t *data_type)
Definition: error.c:720
VALUE(* e_proc)(ANYARGS)
Definition: vm_core.h:687
Definition: cont.c:110
static VALUE rb_fiber_init(VALUE fibval)
Definition: cont.c:1252
static void cont_restore_1(rb_context_t *cont)
Definition: cont.c:745
#define NIL_P(v)
Definition: ruby.h:451
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition: class.c:646
static VALUE fiber_alloc(VALUE klass)
Definition: cont.c:1167
VALUE tag
Definition: vm_core.h:663
#define RUBY_MARK_ENTER(msg)
Definition: gc.h:53
VALUE rb_fiber_new(VALUE(*func)(ANYARGS), VALUE obj)
Definition: cont.c:1258
#define Qfalse
Definition: ruby.h:436
void ruby_Init_Fiber_as_Coroutine(void)
Definition: cont.c:1714
void rb_fiber_start(void)
Definition: cont.c:1266
#define ALLOCA_N(type, n)
Definition: ruby.h:1593
static void cont_mark(void *ptr)
Definition: cont.c:171
static VALUE rb_callcc(VALUE self)
Definition: cont.c:948
#define MEMCPY(p1, p2, type, n)
Definition: ruby.h:1661
#define rb_ary_new4
Definition: intern.h:92
static VALUE rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fibval)
Definition: cont.c:1623
int err
Definition: win32.c:135
static VALUE rb_fiber_s_current(VALUE klass)
Definition: cont.c:1656
Definition: cont.c:111
size_t vm_stack_slen
Definition: cont.c:90
#define ZALLOC(type)
Definition: ruby.h:1590
VALUE rb_yield(VALUE)
Definition: vm_eval.c:1020
int transferred
Definition: cont.c:134
#define RARRAY_CONST_PTR(a)
Definition: ruby.h:1028
#define REALLOC_N(var, type, n)
Definition: ruby.h:1591
int errno
#define STACK_DIR_UPPER(a, b)
Definition: gc.h:85
void rb_vm_stack_to_heap(rb_thread_t *th)
Definition: vm.c:729
#define VM_ASSERT(expr)
Definition: vm_core.h:54
RUBY_SYMBOL_EXPORT_BEGIN void ruby_Init_Continuation_body(void)
Definition: cont.c:1703
static rb_context_t * cont_new(VALUE klass)
Definition: cont.c:459
VALUE rb_fiber_yield(int argc, const VALUE *argv)
Definition: cont.c:1523
#define RUBY_SYMBOL_EXPORT_END
Definition: missing.h:49
void ruby_xfree(void *x)
Definition: gc.c:8017
static VALUE rb_cont_call(int argc, VALUE *argv, VALUE contval)
Definition: cont.c:1065
void Init_Cont(void)
Definition: cont.c:1677
unsigned char buf[MIME_BUF_SIZE]
Definition: nkf.c:4309
static VALUE rb_cFiber
Definition: cont.c:153
#define TAG_FATAL
Definition: vm_core.h:170
#define Qnil
Definition: ruby.h:438
static VALUE fiber_store(rb_fiber_t *next_fib, rb_thread_t *th)
Definition: cont.c:1364
unsigned long VALUE
Definition: ruby.h:85
#define STACK_GROW_DIR_DETECTION
Definition: gc.h:84
const VALUE * rb_vm_proc_local_ep(VALUE proc)
Definition: thread.c:558
#define EXEC_EVENT_HOOK(th_, flag_, self_, id_, called_id_, klass_, data_)
Definition: vm_core.h:1628
rb_context_t cont
Definition: cont.c:127
RUBY_JMP_BUF rb_jmpbuf_t
Definition: vm_core.h:656
VALUE first_proc
Definition: vm_core.h:779
#define RUBY_SYMBOL_EXPORT_BEGIN
Definition: missing.h:48
static void fiber_mark(void *ptr)
Definition: cont.c:308
void rb_thread_mark(void *th)
Definition: vm.c:2352
#define TH_PUSH_TAG(th)
Definition: eval_intern.h:131
#define SET_MACHINE_STACK_END(p)
Definition: gc.h:11
struct rb_ensure_list * next
Definition: vm_core.h:692
static void cont_init(rb_context_t *cont, rb_thread_t *th)
Definition: cont.c:446
static VALUE rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
Definition: cont.c:1642
#define ruby_setjmp(env)
Definition: eval_intern.h:59
#define C(c, s)
enum rb_thread_status status
Definition: vm_core.h:738
#define RUBY_FREE_UNLESS_NULL(ptr)
Definition: gc.h:64
const VALUE * root_lep
Definition: vm_core.h:730
NOINLINE(static VALUE cont_capture(volatile int *volatile stat))
static VALUE make_passing_arg(int argc, const VALUE *argv)
Definition: cont.c:962
VALUE * stack_src
Definition: cont.c:95
static void cont_restore_thread(rb_context_t *cont)
Definition: cont.c:531
int size
Definition: encoding.c:57
#define f
#define VAR_FROM_MEMORY(var)
Definition: eval_intern.h:154
VALUE root_svar
Definition: vm_core.h:731
VALUE * stack_start
Definition: vm_core.h:785
VALUE rb_block_proc(void)
Definition: proc.c:787
#define st_init_numtable
Definition: regint.h:178
#define ANYARGS
Definition: defines.h:173
#define RUBY_FREE_LEAVE(msg)
Definition: gc.h:56
VALUE marker
Definition: vm_core.h:686
#define RUBY_FREE_ENTER(msg)
Definition: gc.h:55
#define VAR_INITIALIZED(var)
Definition: eval_intern.h:155
RUBY_EXTERN char * strerror(int)
Definition: strerror.c:11
VALUE rb_obj_is_fiber(VALUE obj)
Definition: cont.c:351
#define STACK_PAD_SIZE
VALUE rb_proc_new(VALUE(*)(ANYARGS), VALUE)
Definition: proc.c:2661
static rb_fiber_t * fiber_t_alloc(VALUE fibval)
Definition: cont.c:1173
void rb_fiber_mark_self(rb_fiber_t *fib)
Definition: cont.c:301
#define TypedData_Make_Struct(klass, type, data_type, sval)
Definition: ruby.h:1182
struct rb_vm_struct::@201 default_params
#define UNLIKELY(x)
Definition: ffi_common.h:126
#define GetThreadPtr(obj, ptr)
Definition: vm_core.h:646
rb_ensure_list_t * ensure_list
Definition: vm_core.h:809
rb_jmpbuf_t root_jmpbuf
Definition: vm_core.h:806
#define st_insert
Definition: regint.h:184
static void cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
Definition: cont.c:813
size_t stack_size
Definition: vm_core.h:707
VALUE local_storage_recursive_hash
Definition: vm_core.h:774
#define RUBY_EVENT_FIBER_SWITCH
Definition: ruby.h:2074
struct rb_vm_tag * tag
Definition: vm_core.h:769
#define st_free_table
Definition: regint.h:188
rb_control_frame_t * rb_vm_push_frame(rb_thread_t *th, const rb_iseq_t *iseq, VALUE type, VALUE self, VALUE specval, VALUE cref_or_me, const VALUE *pc, VALUE *sp, int local_size, int stack_max)
void ruby_register_rollback_func_for_ensure(VALUE(*ensure_func)(ANYARGS), VALUE(*rollback_func)(ANYARGS))
Definition: cont.c:977
context_type
Definition: cont.c:77
size_t stack_size
Definition: cont.c:96
static void fiber_free(void *ptr)
Definition: cont.c:320
VALUE rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
Definition: vm.c:1399
static size_t cont_memsize(const void *ptr)
Definition: cont.c:274
size_t stack_maxsize
Definition: vm_core.h:787
#define stat(path, st)
Definition: win32.h:183
static void cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
Definition: cont.c:362
fiber_status
Definition: cont.c:109
#define TAG_RAISE
Definition: vm_core.h:168
NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)))
static void rollback_ensure_stack(VALUE self, rb_ensure_list_t *current, rb_ensure_entry_t *target)
Definition: cont.c:998
#define NULL
Definition: _sdbm.c:102
#define Qundef
Definition: ruby.h:439
struct rb_fiber_struct * prev
Definition: cont.c:128
void rb_define_method(VALUE klass, const char *name, VALUE(*func)(ANYARGS), int argc)
Definition: class.c:1515
static rb_fiber_t * return_fiber(void)
Definition: cont.c:1338
struct rb_trace_arg_struct * trace_arg
Definition: vm_core.h:801
char ** argv
Definition: ruby.c:184
rb_ensure_list_t * ensure_list
Definition: cont.c:106
VALUE rb_fiber_transfer(VALUE fibval, int argc, const VALUE *argv)
Definition: cont.c:1482
#define GET_VM()
Definition: vm_core.h:1507