Ruby 2.7.0p0 (2019-12-25 revision 647ee6f091eafcce70ffb75ddf7e121e192ab217)
cont.c
1 /**********************************************************************
2 
3  cont.c -
4 
5  $Author$
6  created at: Thu May 23 09:03:43 2007
7 
8  Copyright (C) 2007 Koichi Sasada
9 
10 **********************************************************************/
11 
12 #include "internal.h"
13 #include "vm_core.h"
14 #include "gc.h"
15 #include "eval_intern.h"
16 #include "mjit.h"
17 
18 #include COROUTINE_H
19 
20 #ifndef _WIN32
21 #include <unistd.h>
22 #include <sys/mman.h>
23 #endif
24 
25 static const int DEBUG = 0;
26 
27 #define RB_PAGE_SIZE (pagesize)
28 #define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
29 static long pagesize;
30 
31 static const rb_data_type_t cont_data_type, fiber_data_type;
32 static VALUE rb_cContinuation;
33 static VALUE rb_cFiber;
34 static VALUE rb_eFiberError;
35 #ifdef RB_EXPERIMENTAL_FIBER_POOL
36 static VALUE rb_cFiberPool;
37 #endif
38 
39 #define CAPTURE_JUST_VALID_VM_STACK 1
40 
41 // Defined in `coroutine/$arch/Context.h`:
42 #ifdef COROUTINE_LIMITED_ADDRESS_SPACE
43 #define FIBER_POOL_ALLOCATION_FREE
44 #define FIBER_POOL_INITIAL_SIZE 8
45 #define FIBER_POOL_ALLOCATION_MAXIMUM_SIZE 32
46 #else
47 #define FIBER_POOL_INITIAL_SIZE 32
48 #define FIBER_POOL_ALLOCATION_MAXIMUM_SIZE 1024
49 #endif
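// Illustrative note: COROUTINE_LIMITED_ADDRESS_SPACE is set by the per-arch
// coroutine implementation, typically on 32-bit targets. There the pool starts
// small (8 stacks) and caps each mapped batch at 32 stacks, presumably so a
// single pool never reserves large stretches of the limited address space;
// 64-bit targets can afford batches of up to 1024 stacks per allocation.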
50 
51 enum context_type {
52     CONTINUATION_CONTEXT = 0,
53     FIBER_CONTEXT = 1
54 };
55 
56 struct cont_saved_vm_stack {
57     VALUE *ptr;
58 #ifdef CAPTURE_JUST_VALID_VM_STACK
59  size_t slen; /* length of stack (head of ec->vm_stack) */
60  size_t clen; /* length of control frames (tail of ec->vm_stack) */
61 #endif
62 };
63 
64 struct fiber_pool;
65 
66 // Represents a single stack.
67 struct fiber_pool_stack {
68  // A pointer to the memory allocation (lowest address) for the stack.
69  void * base;
70 
71  // The current stack pointer, taking into account the direction of the stack.
72  void * current;
73 
74  // The size of the stack excluding any guard pages.
75  size_t size;
76 
77  // The available stack capacity w.r.t. the current stack offset.
78  size_t available;
79 
80  // The pool this stack should be allocated from.
81  struct fiber_pool * pool;
82 
83  // If the stack is allocated, the allocation it came from.
84     struct fiber_pool_allocation * allocation;
85 };
86 
87 // A linked list of vacant (unused) stacks.
88 // This structure is stored in the first page of a stack if it is not in use.
89 // @sa fiber_pool_vacancy_pointer
90 struct fiber_pool_vacancy {
91     // Details about the vacant stack:
92     struct fiber_pool_stack stack;
93 
94  // The vacancy linked list.
95 #ifdef FIBER_POOL_ALLOCATION_FREE
96  struct fiber_pool_vacancy * previous;
97 #endif
98     struct fiber_pool_vacancy * next;
99 };
100 
101 // Manages a singly-linked list of mapped regions of memory which contain one or more stacks:
102 //
103 // base = +-------------------------------+-----------------------+  +
104 //        |VM Stack                       |VM Stack               |  |
105 //        |                               |                       |  |
106 //        |                               |                       |  |
107 //        +-------------------------------+                       |  |
108 //        |Machine Stack                  |Machine Stack          |  |
109 //        |                               |                       |  |
110 //        |                               |                       |  |
111 //        |                               |  . . . .              |  | size
112 //        |                               |                       |  |
113 //        |                               |                       |  |
114 //        |                               |                       |  |
115 //        |                               |                       |  |
116 //        |                               |                       |  |
117 //        +-------------------------------+                       |  |
118 //        |Guard Page                     |Guard Page             |  |
119 //        +-------------------------------+-----------------------+  v
120 //
121 //        +------------------------------------------------------->
122 //
123 //                                 count
124 //
125 struct fiber_pool_allocation {
126  // A pointer to the memory mapped region.
127  void * base;
128 
129  // The size of the individual stacks.
130  size_t size;
131 
132  // The stride of individual stacks (including any guard pages or other accounting details).
133  size_t stride;
134 
135  // The number of stacks that were allocated.
136  size_t count;
137 
138 #ifdef FIBER_POOL_ALLOCATION_FREE
139  // The number of stacks used in this allocation.
140  size_t used;
141 #endif
142 
143  struct fiber_pool * pool;
144 
145  // The allocation linked list.
146 #ifdef FIBER_POOL_ALLOCATION_FREE
147  struct fiber_pool_allocation * previous;
148 #endif
149     struct fiber_pool_allocation * next;
150 };
151 
152 // A fiber pool manages vacant stacks to reduce the overhead of creating fibers.
153 struct fiber_pool {
154     // A singly-linked list of allocations which contain 1 or more stacks each.
155     struct fiber_pool_allocation * allocations;
156 
157     // Provides O(1) stack "allocation":
158     struct fiber_pool_vacancy * vacancies;
159 
160  // The size of the stack allocations (excluding any guard page).
161  size_t size;
162 
163  // The total number of stacks that have been allocated in this pool.
164  size_t count;
165 
166     // The initial number of stacks to allocate.
167     size_t initial_count;
168 
169     // Whether to madvise(free) the stack or not:
170     int free_stacks;
171 
172  // The number of stacks that have been used in this pool.
173  size_t used;
174 
175     // The amount to allocate for the vm_stack:
176     size_t vm_stack_size;
177 };
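// Rough invariants, as maintained by the code below: `count` grows by the
// number of stacks in each successful fiber_pool_expand() call, `used` is
// incremented by fiber_pool_stack_acquire() and decremented by
// fiber_pool_stack_release(), and every stack is either in use or reachable
// from the `vacancies` free list.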
178 
179 typedef struct rb_context_struct {
180     enum context_type type;
181  int argc;
182  int kw_splat;
183     VALUE self;
184     VALUE value;
185 
186     struct cont_saved_vm_stack saved_vm_stack;
187 
188     struct {
189         VALUE *stack;
190         VALUE *stack_src;
191  size_t stack_size;
192     } machine;
193     rb_execution_context_t saved_ec;
194     rb_jmpbuf_t jmpbuf;
195     rb_ensure_entry_t *ensure_array;
196     /* Pointer to MJIT info about the continuation. */
197     struct mjit_cont *mjit_cont;
198 } rb_context_t;
199 
200 
201 /*
202  * Fiber status:
203  *    [Fiber.new] ------> FIBER_CREATED
204  *                        | [Fiber#resume]
205  *                        v
206  *                   +--> FIBER_RESUMED ----+
207  *    [Fiber#resume] |    | [Fiber.yield]   |
208  *                   |    v                 |
209  *                   +-- FIBER_SUSPENDED    | [Terminate]
210  *                                          |
211  *                       FIBER_TERMINATED <-+
212  */
213 enum fiber_status {
214     FIBER_CREATED,
215     FIBER_RESUMED,
216     FIBER_SUSPENDED,
217     FIBER_TERMINATED
218 };
219 
220 #define FIBER_CREATED_P(fiber) ((fiber)->status == FIBER_CREATED)
221 #define FIBER_RESUMED_P(fiber) ((fiber)->status == FIBER_RESUMED)
222 #define FIBER_SUSPENDED_P(fiber) ((fiber)->status == FIBER_SUSPENDED)
223 #define FIBER_TERMINATED_P(fiber) ((fiber)->status == FIBER_TERMINATED)
224 #define FIBER_RUNNABLE_P(fiber) (FIBER_CREATED_P(fiber) || FIBER_SUSPENDED_P(fiber))
225 
226 struct rb_fiber_struct {
227     rb_context_t cont;
228     VALUE first_proc;
229     struct rb_fiber_struct *prev;
230     BITFIELD(enum fiber_status, status, 2);
231     /* If a fiber is invoked by "transfer",
232      * then it can no longer be invoked by "resume" after that.
233      * You shouldn't mix "transfer" and "resume".
234      */
235  unsigned int transferred : 1;
236 
237     struct coroutine_context context;
238     struct fiber_pool_stack stack;
239 };
240 
241 static struct fiber_pool shared_fiber_pool = {NULL, NULL, 0, 0, 0, 0};
242 
243 /*
244  * FreeBSD requires that the first (i.e. addr) argument of mmap(2) be
245  * non-NULL when MAP_STACK is passed.
246  * http://www.FreeBSD.org/cgi/query-pr.cgi?pr=158755
247  */
248 #if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
249 #define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK)
250 #else
251 #define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON)
252 #endif
253 
254 #define ERRNOMSG strerror(errno)
255 
256 // Locates the stack vacancy details for the given stack.
257 // Requires that fiber_pool_vacancy fits within one page.
258 inline static struct fiber_pool_vacancy *
259 fiber_pool_vacancy_pointer(void * base, size_t size)
260 {
261     STACK_GROW_DIR_DETECTION;
262 
263  return (struct fiber_pool_vacancy *)(
264  (char*)base + STACK_DIR_UPPER(0, size - RB_PAGE_SIZE)
265  );
266 }
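// Illustrative example (downward-growing stack, 4 KiB pages): for a stack
// with base = 0x1000 and size = 0x4000, the vacancy header is written at
// base + size - RB_PAGE_SIZE = 0x4000, i.e. the page at the top of the
// allocation where the stack begins, which is the first page the stack
// touches anyway.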
267 
268 // Reset the current stack pointer and available size of the given stack.
269 inline static void
270 fiber_pool_stack_reset(struct fiber_pool_stack * stack)
271 {
272     STACK_GROW_DIR_DETECTION;
273 
274     stack->current = (char*)stack->base + STACK_DIR_UPPER(0, stack->size);
275     stack->available = stack->size;
276 }
277 
278 // A pointer to the base of the current unused portion of the stack.
279 inline static void *
280 fiber_pool_stack_base(struct fiber_pool_stack * stack)
281 {
282     STACK_GROW_DIR_DETECTION;
283 
284     VM_ASSERT(stack->current);
285 
286     return STACK_DIR_UPPER(stack->current, (char*)stack->current - stack->available);
287 }
288 
289 // Allocate some memory from the stack. Used to allocate vm_stack inline with machine stack.
290 // @sa fiber_initialize_coroutine
291 inline static void *
292 fiber_pool_stack_alloca(struct fiber_pool_stack * stack, size_t offset)
293 {
294     STACK_GROW_DIR_DETECTION;
295 
296  if (DEBUG) fprintf(stderr, "fiber_pool_stack_alloca(%p): %"PRIuSIZE"/%"PRIuSIZE"\n", (void*)stack, offset, stack->available);
297  VM_ASSERT(stack->available >= offset);
298 
299  // The pointer to the memory being allocated:
300  void * pointer = STACK_DIR_UPPER(stack->current, (char*)stack->current - offset);
301 
302  // Move the stack pointer:
303  stack->current = STACK_DIR_UPPER((char*)stack->current + offset, (char*)stack->current - offset);
304  stack->available -= offset;
305 
306  return pointer;
307 }
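// Illustrative example (downward-growing stack): with current = 0x5000 and
// available = 0x4000, fiber_pool_stack_alloca(stack, 0x1000) returns 0x4000
// and leaves current = 0x4000, available = 0x3000. The VM stack carved out
// this way sits at the top of the machine stack region.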
308 
309 // Reset the current stack pointer and available size of the given stack.
310 inline static void
311 fiber_pool_vacancy_reset(struct fiber_pool_vacancy * vacancy)
312 {
313  fiber_pool_stack_reset(&vacancy->stack);
314 
315  // Consume one page of the stack because it's used for the vacancy list:
316  fiber_pool_stack_alloca(&vacancy->stack, RB_PAGE_SIZE);
317 }
318 
319 inline static struct fiber_pool_vacancy *
320 fiber_pool_vacancy_push(struct fiber_pool_vacancy * vacancy, struct fiber_pool_vacancy * head)
321 {
322  vacancy->next = head;
323 
324 #ifdef FIBER_POOL_ALLOCATION_FREE
325  if (head) {
326  head->previous = vacancy;
327  }
328 #endif
329 
330  return vacancy;
331 }
332 
333 #ifdef FIBER_POOL_ALLOCATION_FREE
334 static void
335 fiber_pool_vacancy_remove(struct fiber_pool_vacancy * vacancy)
336 {
337  if (vacancy->next) {
338  vacancy->next->previous = vacancy->previous;
339  }
340 
341  if (vacancy->previous) {
342  vacancy->previous->next = vacancy->next;
343  }
344  else {
345  // It's the head of the list:
346  vacancy->stack.pool->vacancies = vacancy->next;
347  }
348 }
349 
350 inline static struct fiber_pool_vacancy *
351 fiber_pool_vacancy_pop(struct fiber_pool * pool)
352 {
353  struct fiber_pool_vacancy * vacancy = pool->vacancies;
354 
355  if (vacancy) {
356  fiber_pool_vacancy_remove(vacancy);
357  }
358 
359  return vacancy;
360 }
361 #else
362 inline static struct fiber_pool_vacancy *
363 fiber_pool_vacancy_pop(struct fiber_pool * pool)
364 {
365  struct fiber_pool_vacancy * vacancy = pool->vacancies;
366 
367  if (vacancy) {
368  pool->vacancies = vacancy->next;
369  }
370 
371  return vacancy;
372 }
373 #endif
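// The two fiber_pool_vacancy_pop() variants above differ because
// FIBER_POOL_ALLOCATION_FREE must be able to unlink a vacancy from the
// middle of the free list (when an entire allocation is released), which
// requires the doubly-linked `previous` pointer; without that feature a
// singly-linked LIFO pop from the head is sufficient.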
374 
375 // Initialize the vacant stack. The [base, size] allocation should not include the guard page.
376 // @param base The pointer to the lowest address of the allocated memory.
377 // @param size The size of the allocated memory.
378 inline static struct fiber_pool_vacancy *
379 fiber_pool_vacancy_initialize(struct fiber_pool * fiber_pool, struct fiber_pool_vacancy * vacancies, void * base, size_t size)
380 {
381  struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(base, size);
382 
383  vacancy->stack.base = base;
384  vacancy->stack.size = size;
385 
386  fiber_pool_vacancy_reset(vacancy);
387 
388  vacancy->stack.pool = fiber_pool;
389 
390  return fiber_pool_vacancy_push(vacancy, vacancies);
391 }
392 
393 // Allocate a maximum of count stacks, size given by stride.
394 // @param count the number of stacks to allocate / were allocated.
395 // @param stride the size of the individual stacks.
396 // @return [void *] the allocated memory or NULL if allocation failed.
397 inline static void *
398 fiber_pool_allocate_memory(size_t * count, size_t stride)
399 {
400     // We use a divide-by-2 strategy to try to allocate memory. We are trying
401     // to allocate `count` stacks. In the normal situation, this won't fail. But
402     // if we run out of address space, or we are allocating more memory than
403     // the system would allow (e.g. overcommit * physical memory + swap), we
404     // divide count by two and try again. This condition should only be
405     // encountered in edge cases, but we handle it here gracefully.
406  while (*count > 1) {
407 #if defined(_WIN32)
408  void * base = VirtualAlloc(0, (*count)*stride, MEM_COMMIT, PAGE_READWRITE);
409 
410  if (!base) {
411  *count = (*count) >> 1;
412  }
413  else {
414  return base;
415  }
416 #else
417  errno = 0;
418  void * base = mmap(NULL, (*count)*stride, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);
419 
420  if (base == MAP_FAILED) {
421  // If the allocation fails, count = count / 2, and try again.
422  *count = (*count) >> 1;
423  }
424  else {
425  return base;
426  }
427 #endif
428  }
429 
430  return NULL;
431 }
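// Illustrative example: a request for 1024 stacks with a stride of roughly
// 1 MiB plus a guard page asks the kernel for about 1 GiB in one mapping; if
// that fails, the request is retried with 512, 256, ... stacks until it
// succeeds or *count reaches 1, in which case NULL is returned and the
// caller raises FiberError.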
432 
433 // Given an existing fiber pool, expand it by the specified number of stacks.
434 // @param count the maximum number of stacks to allocate.
435 // @return the allocated fiber pool.
436 // @sa fiber_pool_allocation_free
437 static struct fiber_pool_allocation *
438 fiber_pool_expand(struct fiber_pool * fiber_pool, size_t count)
439 {
440     STACK_GROW_DIR_DETECTION;
441 
442  size_t size = fiber_pool->size;
443  size_t stride = size + RB_PAGE_SIZE;
444 
445  // Allocate the memory required for the stacks:
446  void * base = fiber_pool_allocate_memory(&count, stride);
447 
448  if (base == NULL) {
449  rb_raise(rb_eFiberError, "can't alloc machine stack to fiber (%"PRIuSIZE" x %"PRIuSIZE" bytes): %s", count, size, ERRNOMSG);
450  }
451 
452  struct fiber_pool_vacancy * vacancies = fiber_pool->vacancies;
453  struct fiber_pool_allocation * allocation = RB_ALLOC(struct fiber_pool_allocation);
454 
455  // Initialize fiber pool allocation:
456  allocation->base = base;
457  allocation->size = size;
458  allocation->stride = stride;
459  allocation->count = count;
460 #ifdef FIBER_POOL_ALLOCATION_FREE
461  allocation->used = 0;
462 #endif
463  allocation->pool = fiber_pool;
464 
465  if (DEBUG) {
466         fprintf(stderr, "fiber_pool_expand(%"PRIuSIZE"): %p, %"PRIuSIZE"/%"PRIuSIZE" x [%"PRIuSIZE":%"PRIuSIZE"]\n",
467             count, (void*)fiber_pool, fiber_pool->used, fiber_pool->count, size, fiber_pool->vm_stack_size);
468  }
469 
470  // Iterate over all stacks, initializing the vacancy list:
471  for (size_t i = 0; i < count; i += 1) {
472  void * base = (char*)allocation->base + (stride * i);
473  void * page = (char*)base + STACK_DIR_UPPER(size, 0);
474 
475 #if defined(_WIN32)
476  DWORD old_protect;
477 
478  if (!VirtualProtect(page, RB_PAGE_SIZE, PAGE_READWRITE | PAGE_GUARD, &old_protect)) {
479  VirtualFree(allocation->base, 0, MEM_RELEASE);
480  rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG);
481  }
482 #else
483  if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
484  munmap(allocation->base, count*stride);
485  rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG);
486  }
487 #endif
488 
489  vacancies = fiber_pool_vacancy_initialize(
490  fiber_pool, vacancies,
491  (char*)base + STACK_DIR_UPPER(0, RB_PAGE_SIZE),
492  size
493  );
494 
495 #ifdef FIBER_POOL_ALLOCATION_FREE
496  vacancies->stack.allocation = allocation;
497 #endif
498  }
499 
500  // Insert the allocation into the head of the pool:
501  allocation->next = fiber_pool->allocations;
502 
503 #ifdef FIBER_POOL_ALLOCATION_FREE
504  if (allocation->next) {
505  allocation->next->previous = allocation;
506  }
507 
508  allocation->previous = NULL;
509 #endif
510 
511  fiber_pool->allocations = allocation;
512  fiber_pool->vacancies = vacancies;
513  fiber_pool->count += count;
514 
515  return allocation;
516 }
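// Layout recap: each of the `count` stacks occupies `stride` bytes (`size`
// usable bytes plus one guard page). The guard page is placed at the end the
// stack grows towards, so an overflow faults immediately instead of silently
// corrupting the neighbouring stack.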
517 
518 // Initialize the specified fiber pool with the given number of stacks.
519 // @param vm_stack_size The size of the vm stack to allocate.
520 static void
521 fiber_pool_initialize(struct fiber_pool * fiber_pool, size_t size, size_t count, size_t vm_stack_size)
522 {
523  VM_ASSERT(vm_stack_size < size);
524 
525     fiber_pool->allocations = NULL;
526     fiber_pool->vacancies = NULL;
527     fiber_pool->size = ((size / RB_PAGE_SIZE) + 1) * RB_PAGE_SIZE;
528     fiber_pool->count = 0;
529     fiber_pool->initial_count = count;
530     fiber_pool->free_stacks = 1;
531  fiber_pool->used = 0;
532 
533  fiber_pool->vm_stack_size = vm_stack_size;
534 
535  fiber_pool_expand(fiber_pool, count);
536 }
537 
538 #ifdef FIBER_POOL_ALLOCATION_FREE
539 // Free the list of fiber pool allocations.
540 static void
541 fiber_pool_allocation_free(struct fiber_pool_allocation * allocation)
542 {
543     STACK_GROW_DIR_DETECTION;
544 
545  VM_ASSERT(allocation->used == 0);
546 
547  if (DEBUG) fprintf(stderr, "fiber_pool_allocation_free: %p base=%p count=%"PRIuSIZE"\n", allocation, allocation->base, allocation->count);
548 
549  size_t i;
550  for (i = 0; i < allocation->count; i += 1) {
551  void * base = (char*)allocation->base + (allocation->stride * i) + STACK_DIR_UPPER(0, RB_PAGE_SIZE);
552 
553  struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(base, allocation->size);
554 
555  // Pop the vacant stack off the free list:
556  fiber_pool_vacancy_remove(vacancy);
557  }
558 
559 #ifdef _WIN32
560  VirtualFree(allocation->base, 0, MEM_RELEASE);
561 #else
562  munmap(allocation->base, allocation->stride * allocation->count);
563 #endif
564 
565  if (allocation->previous) {
566  allocation->previous->next = allocation->next;
567  }
568  else {
569  // We are the head of the list, so update the pool:
570  allocation->pool->allocations = allocation->next;
571  }
572 
573  if (allocation->next) {
574  allocation->next->previous = allocation->previous;
575  }
576 
577  allocation->pool->count -= allocation->count;
578 
579  ruby_xfree(allocation);
580 }
581 #endif
582 
583 // Acquire a stack from the given fiber pool. If none are available, allocate more.
584 static struct fiber_pool_stack
585 fiber_pool_stack_acquire(struct fiber_pool * fiber_pool) {
586  struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pop(fiber_pool);
587 
588  if (DEBUG) fprintf(stderr, "fiber_pool_stack_acquire: %p used=%"PRIuSIZE"\n", (void*)fiber_pool->vacancies, fiber_pool->used);
589 
590  if (!vacancy) {
591  const size_t maximum = FIBER_POOL_ALLOCATION_MAXIMUM_SIZE;
592  const size_t minimum = fiber_pool->initial_count;
593 
594  size_t count = fiber_pool->count;
595  if (count > maximum) count = maximum;
596  if (count < minimum) count = minimum;
597 
598  fiber_pool_expand(fiber_pool, count);
599 
600     // The free list should now contain some stacks:
601     VM_ASSERT(fiber_pool->vacancies);
602 
603  vacancy = fiber_pool_vacancy_pop(fiber_pool);
604  }
605 
606  VM_ASSERT(vacancy);
607  VM_ASSERT(vacancy->stack.base);
608 
609  // Take the top item from the free list:
610  fiber_pool->used += 1;
611 
612 #ifdef FIBER_POOL_ALLOCATION_FREE
613  vacancy->stack.allocation->used += 1;
614 #endif
615 
616  fiber_pool_stack_reset(&vacancy->stack);
617 
618  return vacancy->stack;
619 }
620 
621 // We advise the operating system that the stack memory pages are no longer being used.
622 // This introduces some performance overhead but allows the system to reclaim memory when there is pressure.
623 static inline void
624 fiber_pool_stack_free(struct fiber_pool_stack * stack)
625 {
626  void * base = fiber_pool_stack_base(stack);
627  size_t size = stack->available;
628 
629     // If this is not true, the vacancy information will almost certainly be destroyed:
630     VM_ASSERT(size <= (stack->size - RB_PAGE_SIZE));
631 
632  if (DEBUG) fprintf(stderr, "fiber_pool_stack_free: %p+%"PRIuSIZE" [base=%p, size=%"PRIuSIZE"]\n", base, size, stack->base, stack->size);
633 
634 #if VM_CHECK_MODE > 0 && defined(MADV_DONTNEED)
635  // This immediately discards the pages and the memory is reset to zero.
636  madvise(base, size, MADV_DONTNEED);
637 #elif defined(MADV_FREE_REUSABLE)
638  madvise(base, size, MADV_FREE_REUSABLE);
639 #elif defined(MADV_FREE)
640  madvise(base, size, MADV_FREE);
641 #elif defined(MADV_DONTNEED)
642  madvise(base, size, MADV_DONTNEED);
643 #elif defined(_WIN32)
644  VirtualAlloc(base, size, MEM_RESET, PAGE_READWRITE);
645  // Not available in all versions of Windows.
646  //DiscardVirtualMemory(base, size);
647 #endif
648 }
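// The madvise() variants above are tried from most to least specific:
// MADV_FREE_REUSABLE (Darwin) and MADV_FREE (Linux 4.5+, BSDs) let the kernel
// reclaim the pages lazily, while the MADV_DONTNEED fallback drops them
// eagerly. Under VM_CHECK_MODE, MADV_DONTNEED is forced so that stale stack
// contents are zeroed and cannot mask bugs.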
649 
650 // Release and return a stack to the vacancy list.
651 static void
652 fiber_pool_stack_release(struct fiber_pool_stack * stack)
653 {
654  struct fiber_pool * pool = stack->pool;
655  struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(stack->base, stack->size);
656 
657  if (DEBUG) fprintf(stderr, "fiber_pool_stack_release: %p used=%"PRIuSIZE"\n", stack->base, stack->pool->used);
658 
659  // Copy the stack details into the vacancy area:
660  vacancy->stack = *stack;
661  // After this point, be careful about updating/using state in stack, since it's copied to the vacancy area.
662 
663  // Reset the stack pointers and reserve space for the vacancy data:
664  fiber_pool_vacancy_reset(vacancy);
665 
666     // Push the vacancy into the vacancies list:
667  pool->vacancies = fiber_pool_vacancy_push(vacancy, stack->pool->vacancies);
668  pool->used -= 1;
669 
670 #ifdef FIBER_POOL_ALLOCATION_FREE
671  struct fiber_pool_allocation * allocation = stack->allocation;
672 
673  allocation->used -= 1;
674 
675  // Release address space and/or dirty memory:
676  if (allocation->used == 0) {
677  fiber_pool_allocation_free(allocation);
678  }
679  else if (stack->pool->free_stacks) {
680  fiber_pool_stack_free(&vacancy->stack);
681  }
682 #else
683  // This is entirely optional, but clears the dirty flag from the stack memory, so it won't get swapped to disk when there is memory pressure:
684  if (stack->pool->free_stacks) {
685  fiber_pool_stack_free(&vacancy->stack);
686  }
687 #endif
688 }
689 
690 static COROUTINE
691 fiber_entry(struct coroutine_context * from, struct coroutine_context * to)
692 {
693  rb_fiber_start();
694 }
695 
696 // Initialize a fiber's coroutine's machine stack and vm stack.
697 static VALUE *
698 fiber_initialize_coroutine(rb_fiber_t *fiber, size_t * vm_stack_size)
699 {
700  struct fiber_pool * fiber_pool = fiber->stack.pool;
701  rb_execution_context_t *sec = &fiber->cont.saved_ec;
702     void * vm_stack = NULL;
703 
704     VM_ASSERT(fiber_pool != NULL);
705 
706  fiber->stack = fiber_pool_stack_acquire(fiber_pool);
707     vm_stack = fiber_pool_stack_alloca(&fiber->stack, fiber_pool->vm_stack_size);
708     *vm_stack_size = fiber_pool->vm_stack_size;
709 
710 #ifdef COROUTINE_PRIVATE_STACK
711  coroutine_initialize(&fiber->context, fiber_entry, fiber_pool_stack_base(&fiber->stack), fiber->stack.available, sec->machine.stack_start);
712  // The stack for this execution context is still the main machine stack, so don't adjust it.
713  // If this is not managed correctly, you will fail in `rb_ec_stack_check`.
714 
715  // We limit the machine stack usage to the fiber stack size.
716  if (sec->machine.stack_maxsize > fiber->stack.available) {
717  sec->machine.stack_maxsize = fiber->stack.available;
718  }
719 #else
720  coroutine_initialize(&fiber->context, fiber_entry, fiber_pool_stack_base(&fiber->stack), fiber->stack.available);
721 
722  // The stack for this execution context is the one we allocated:
723  sec->machine.stack_start = fiber->stack.current;
724  sec->machine.stack_maxsize = fiber->stack.available;
725 #endif
726 
727  return vm_stack;
728 }
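// Resulting layout of an acquired stack (downward-growing machine stack,
// illustrative): the VM stack occupies fiber_pool->vm_stack_size bytes at the
// top of the region (carved out by fiber_pool_stack_alloca above), the
// coroutine's machine stack grows downward through the remainder, and the
// guard page sits below it all.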
729 
730 // Release the stack from the fiber, its execution context, and return it to the fiber pool.
731 static void
732 fiber_stack_release(rb_fiber_t * fiber)
733 {
734  rb_execution_context_t *ec = &fiber->cont.saved_ec;
735 
736  if (DEBUG) fprintf(stderr, "fiber_stack_release: %p, stack.base=%p\n", (void*)fiber, fiber->stack.base);
737 
738  // Return the stack back to the fiber pool if it wasn't already:
739  if (fiber->stack.base) {
740  fiber_pool_stack_release(&fiber->stack);
741  fiber->stack.base = NULL;
742  }
743 
744     // The stack is no longer associated with this execution context:
745     rb_ec_clear_vm_stack(ec);
746 }
747 
748 static const char *
749 fiber_status_name(enum fiber_status s)
750 {
751  switch (s) {
752  case FIBER_CREATED: return "created";
753  case FIBER_RESUMED: return "resumed";
754  case FIBER_SUSPENDED: return "suspended";
755  case FIBER_TERMINATED: return "terminated";
756  }
757  VM_UNREACHABLE(fiber_status_name);
758  return NULL;
759 }
760 
761 static void
762 fiber_verify(const rb_fiber_t *fiber)
763 {
764 #if VM_CHECK_MODE > 0
765  VM_ASSERT(fiber->cont.saved_ec.fiber_ptr == fiber);
766 
767  switch (fiber->status) {
768  case FIBER_RESUMED:
769  VM_ASSERT(fiber->cont.saved_ec.vm_stack != NULL);
770  break;
771  case FIBER_SUSPENDED:
772  VM_ASSERT(fiber->cont.saved_ec.vm_stack != NULL);
773  break;
774  case FIBER_CREATED:
775  case FIBER_TERMINATED:
776  /* TODO */
777  break;
778  default:
779  VM_UNREACHABLE(fiber_verify);
780  }
781 #endif
782 }
783 
784 inline static void
785 fiber_status_set(rb_fiber_t *fiber, enum fiber_status s)
786 {
787  // if (DEBUG) fprintf(stderr, "fiber: %p, status: %s -> %s\n", (void *)fiber, fiber_status_name(fiber->status), fiber_status_name(s));
788  VM_ASSERT(!FIBER_TERMINATED_P(fiber));
789  VM_ASSERT(fiber->status != s);
790  fiber_verify(fiber);
791  fiber->status = s;
792 }
793 
794 static inline void
795 ec_switch(rb_thread_t *th, rb_fiber_t *fiber)
796 {
797     rb_execution_context_t *ec = &fiber->cont.saved_ec;
798 
799     ruby_current_execution_context_ptr = th->ec = ec;
800 
801  /*
802  * timer-thread may set trap interrupt on previous th->ec at any time;
803  * ensure we do not delay (or lose) the trap interrupt handling.
804  */
805     if (th->vm->main_thread == th && rb_signal_buff_size() > 0) {
806         RUBY_VM_SET_TRAP_INTERRUPT(ec);
807  }
808 
809  VM_ASSERT(ec->fiber_ptr->cont.self == 0 || ec->vm_stack != NULL);
810 }
811 
812 static rb_context_t *
813 cont_ptr(VALUE obj)
814 {
815  rb_context_t *cont;
816 
817  TypedData_Get_Struct(obj, rb_context_t, &cont_data_type, cont);
818 
819  return cont;
820 }
821 
822 static rb_fiber_t *
823 fiber_ptr(VALUE obj)
824 {
825  rb_fiber_t *fiber;
826 
827  TypedData_Get_Struct(obj, rb_fiber_t, &fiber_data_type, fiber);
828  if (!fiber) rb_raise(rb_eFiberError, "uninitialized fiber");
829 
830  return fiber;
831 }
832 
833 NOINLINE(static VALUE cont_capture(volatile int *volatile stat));
834 
835 #define THREAD_MUST_BE_RUNNING(th) do { \
836  if (!(th)->ec->tag) rb_raise(rb_eThreadError, "not running thread"); \
837  } while (0)
838 
839 static VALUE
840 cont_thread_value(const rb_context_t *cont)
841 {
842  return cont->saved_ec.thread_ptr->self;
843 }
844 
845 static void
846 cont_compact(void *ptr)
847 {
848  rb_context_t *cont = ptr;
849 
850  if (cont->self) {
851  cont->self = rb_gc_location(cont->self);
852  }
853     cont->value = rb_gc_location(cont->value);
854     rb_execution_context_update(&cont->saved_ec);
855 }
856 
857 static void
858 cont_mark(void *ptr)
859 {
860  rb_context_t *cont = ptr;
861 
862  RUBY_MARK_ENTER("cont");
863  if (cont->self) {
864  rb_gc_mark_movable(cont->self);
865  }
866     rb_gc_mark_movable(cont->value);
867 
868     rb_execution_context_mark(&cont->saved_ec);
869  rb_gc_mark(cont_thread_value(cont));
870 
871  if (cont->saved_vm_stack.ptr) {
872 #ifdef CAPTURE_JUST_VALID_VM_STACK
873         rb_gc_mark_locations(cont->saved_vm_stack.ptr,
874  cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
875 #else
876         rb_gc_mark_locations(cont->saved_vm_stack.ptr,
877  cont->saved_vm_stack.ptr, cont->saved_ec.stack_size);
878 #endif
879  }
880 
881  if (cont->machine.stack) {
882  if (cont->type == CONTINUATION_CONTEXT) {
883             /* cont */
884             rb_gc_mark_locations(cont->machine.stack,
885  cont->machine.stack + cont->machine.stack_size);
886  }
887  else {
888  /* fiber */
889  const rb_fiber_t *fiber = (rb_fiber_t*)cont;
890 
891             if (!FIBER_TERMINATED_P(fiber)) {
892                 rb_gc_mark_locations(cont->machine.stack,
893  cont->machine.stack + cont->machine.stack_size);
894  }
895  }
896  }
897 
898  RUBY_MARK_LEAVE("cont");
899 }
900 
901 static int
902 fiber_is_root_p(const rb_fiber_t *fiber)
903 {
904  return fiber == fiber->cont.saved_ec.thread_ptr->root_fiber;
905 }
906 
907 static void
908 cont_free(void *ptr)
909 {
910  rb_context_t *cont = ptr;
911 
912  RUBY_FREE_ENTER("cont");
913 
914     if (cont->type == CONTINUATION_CONTEXT) {
915         ruby_xfree(cont->saved_ec.vm_stack);
916         ruby_xfree(cont->ensure_array);
917         RUBY_FREE_UNLESS_NULL(cont->machine.stack);
918  }
919  else {
920  rb_fiber_t *fiber = (rb_fiber_t*)cont;
921  coroutine_destroy(&fiber->context);
922  if (!fiber_is_root_p(fiber)) {
923  fiber_stack_release(fiber);
924  }
925     }
926 
927     RUBY_FREE_UNLESS_NULL(cont->saved_vm_stack.ptr);
928 
929  if (mjit_enabled && cont->mjit_cont != NULL) {
930  mjit_cont_free(cont->mjit_cont);
931  }
932  /* free rb_cont_t or rb_fiber_t */
933  ruby_xfree(ptr);
934  RUBY_FREE_LEAVE("cont");
935 }
936 
937 static size_t
938 cont_memsize(const void *ptr)
939 {
940  const rb_context_t *cont = ptr;
941  size_t size = 0;
942 
943  size = sizeof(*cont);
944  if (cont->saved_vm_stack.ptr) {
945 #ifdef CAPTURE_JUST_VALID_VM_STACK
946  size_t n = (cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
947 #else
948  size_t n = cont->saved_ec.vm_stack_size;
949 #endif
950  size += n * sizeof(*cont->saved_vm_stack.ptr);
951  }
952 
953  if (cont->machine.stack) {
954  size += cont->machine.stack_size * sizeof(*cont->machine.stack);
955  }
956 
957  return size;
958 }
959 
960 void
961 rb_fiber_update_self(rb_fiber_t *fiber)
962 {
963  if (fiber->cont.self) {
964  fiber->cont.self = rb_gc_location(fiber->cont.self);
965  }
966     else {
967         rb_execution_context_update(&fiber->cont.saved_ec);
968  }
969 }
970 
971 void
972 rb_fiber_mark_self(const rb_fiber_t *fiber)
973 {
974  if (fiber->cont.self) {
975  rb_gc_mark_movable(fiber->cont.self);
976  }
977     else {
978         rb_execution_context_mark(&fiber->cont.saved_ec);
979  }
980 }
981 
982 static void
983 fiber_compact(void *ptr)
984 {
985  rb_fiber_t *fiber = ptr;
986  fiber->first_proc = rb_gc_location(fiber->first_proc);
987 
988  if (fiber->prev) rb_fiber_update_self(fiber->prev);
989 
990  cont_compact(&fiber->cont);
991  fiber_verify(fiber);
992 }
993 
994 static void
995 fiber_mark(void *ptr)
996 {
997  rb_fiber_t *fiber = ptr;
998  RUBY_MARK_ENTER("cont");
999     fiber_verify(fiber);
1000     rb_gc_mark_movable(fiber->first_proc);
1001  if (fiber->prev) rb_fiber_mark_self(fiber->prev);
1002  cont_mark(&fiber->cont);
1003  RUBY_MARK_LEAVE("cont");
1004 }
1005 
1006 static void
1007 fiber_free(void *ptr)
1008 {
1009  rb_fiber_t *fiber = ptr;
1010  RUBY_FREE_ENTER("fiber");
1011 
1012  //if (DEBUG) fprintf(stderr, "fiber_free: %p[%p]\n", fiber, fiber->stack.base);
1013 
1014     if (fiber->cont.saved_ec.local_storage) {
1015         st_free_table(fiber->cont.saved_ec.local_storage);
1016  }
1017 
1018  cont_free(&fiber->cont);
1019  RUBY_FREE_LEAVE("fiber");
1020 }
1021 
1022 static size_t
1023 fiber_memsize(const void *ptr)
1024 {
1025  const rb_fiber_t *fiber = ptr;
1026  size_t size = sizeof(*fiber);
1027  const rb_execution_context_t *saved_ec = &fiber->cont.saved_ec;
1028  const rb_thread_t *th = rb_ec_thread_ptr(saved_ec);
1029 
1030  /*
1031  * vm.c::thread_memsize already counts th->ec->local_storage
1032  */
1033  if (saved_ec->local_storage && fiber != th->root_fiber) {
1034  size += st_memsize(saved_ec->local_storage);
1035  }
1036  size += cont_memsize(&fiber->cont);
1037  return size;
1038 }
1039 
1040 VALUE
1041 rb_obj_is_fiber(VALUE obj)
1042 {
1043  if (rb_typeddata_is_kind_of(obj, &fiber_data_type)) {
1044  return Qtrue;
1045  }
1046  else {
1047  return Qfalse;
1048  }
1049 }
1050 
1051 static void
1052 cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
1053 {
1054     size_t size;
1055 
1056     SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
1057 
1058     if (th->ec->machine.stack_start > th->ec->machine.stack_end) {
1059         size = cont->machine.stack_size = th->ec->machine.stack_start - th->ec->machine.stack_end;
1060  cont->machine.stack_src = th->ec->machine.stack_end;
1061  }
1062     else {
1063         size = cont->machine.stack_size = th->ec->machine.stack_end - th->ec->machine.stack_start;
1064  cont->machine.stack_src = th->ec->machine.stack_start;
1065  }
1066 
1067  if (cont->machine.stack) {
1068  REALLOC_N(cont->machine.stack, VALUE, size);
1069  }
1070  else {
1071  cont->machine.stack = ALLOC_N(VALUE, size);
1072     }
1073 
1074     FLUSH_REGISTER_WINDOWS;
1075  MEMCPY(cont->machine.stack, cont->machine.stack_src, VALUE, size);
1076 }
1077 
1078 static const rb_data_type_t cont_data_type = {
1079  "continuation",
1080     {cont_mark, cont_free, cont_memsize, cont_compact},
1081     0, 0, RUBY_TYPED_FREE_IMMEDIATELY
1082 };
1083 
1084 static inline void
1085 cont_save_thread(rb_context_t *cont, rb_thread_t *th)
1086 {
1087     rb_execution_context_t *sec = &cont->saved_ec;
1088 
1089     VM_ASSERT(th->status == THREAD_RUNNABLE);
1090 
1091  /* save thread context */
1092  *sec = *th->ec;
1093 
1094  /* saved_ec->machine.stack_end should be NULL */
1095  /* because it may happen GC afterward */
1096  sec->machine.stack_end = NULL;
1097 }
1098 
1099 static void
1100 cont_init(rb_context_t *cont, rb_thread_t *th)
1101 {
1102  /* save thread context */
1103  cont_save_thread(cont, th);
1104  cont->saved_ec.thread_ptr = th;
1105     cont->saved_ec.local_storage = NULL;
1106     cont->saved_ec.local_storage_recursive_hash = Qnil;
1107     cont->saved_ec.local_storage_recursive_hash_for_trace = Qnil;
1108  if (mjit_enabled) {
1109  cont->mjit_cont = mjit_cont_new(&cont->saved_ec);
1110  }
1111 }
1112 
1113 static rb_context_t *
1114 cont_new(VALUE klass)
1115 {
1116  rb_context_t *cont;
1117  volatile VALUE contval;
1118     rb_thread_t *th = GET_THREAD();
1119 
1120     THREAD_MUST_BE_RUNNING(th);
1121  contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont);
1122  cont->self = contval;
1123  cont_init(cont, th);
1124  return cont;
1125 }
1126 
1127 #if 0
1128 void
1129 show_vm_stack(const rb_execution_context_t *ec)
1130 {
1131  VALUE *p = ec->vm_stack;
1132  while (p < ec->cfp->sp) {
1133  fprintf(stderr, "%3d ", (int)(p - ec->vm_stack));
1134  rb_obj_info_dump(*p);
1135  p++;
1136  }
1137 }
1138 
1139 void
1140 show_vm_pcs(const rb_control_frame_t *cfp,
1141  const rb_control_frame_t *end_of_cfp)
1142 {
1143  int i=0;
1144  while (cfp != end_of_cfp) {
1145  int pc = 0;
1146  if (cfp->iseq) {
1147  pc = cfp->pc - cfp->iseq->body->iseq_encoded;
1148  }
1149     fprintf(stderr, "%2d pc: %d\n", i++, pc);
1150     cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1151  }
1152 }
1153 #endif
1154 COMPILER_WARNING_PUSH
1155 #ifdef __clang__
1156 COMPILER_WARNING_IGNORED(-Wduplicate-decl-specifier)
1157 #endif
1158 static VALUE
1159 cont_capture(volatile int *volatile stat)
1160 {
1161  rb_context_t *volatile cont;
1162  rb_thread_t *th = GET_THREAD();
1163  volatile VALUE contval;
1164  const rb_execution_context_t *ec = th->ec;
1165 
1166     THREAD_MUST_BE_RUNNING(th);
1167     rb_vm_stack_to_heap(th->ec);
1168  cont = cont_new(rb_cContinuation);
1169  contval = cont->self;
1170 
1171 #ifdef CAPTURE_JUST_VALID_VM_STACK
1172  cont->saved_vm_stack.slen = ec->cfp->sp - ec->vm_stack;
1173  cont->saved_vm_stack.clen = ec->vm_stack + ec->vm_stack_size - (VALUE*)ec->cfp;
1174     cont->saved_vm_stack.ptr = ALLOC_N(VALUE, cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
1175     MEMCPY(cont->saved_vm_stack.ptr,
1176  ec->vm_stack,
1177  VALUE, cont->saved_vm_stack.slen);
1178     MEMCPY(cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen,
1179            (VALUE*)ec->cfp,
1180  VALUE,
1181  cont->saved_vm_stack.clen);
1182 #else
1183     cont->saved_vm_stack.ptr = ALLOC_N(VALUE, ec->vm_stack_size);
1184     MEMCPY(cont->saved_vm_stack.ptr, ec->vm_stack, VALUE, ec->vm_stack_size);
1185 #endif
1186  // At this point, `cfp` is valid but `vm_stack` should be cleared:
1187  rb_ec_set_vm_stack(&cont->saved_ec, NULL, 0);
1188  VM_ASSERT(cont->saved_ec.cfp != NULL);
1189  cont_save_machine_stack(th, cont);
1190 
1191  /* backup ensure_list to array for search in another context */
1192  {
1193  rb_ensure_list_t *p;
1194  int size = 0;
1195  rb_ensure_entry_t *entry;
1196  for (p=th->ec->ensure_list; p; p=p->next)
1197  size++;
1198  entry = cont->ensure_array = ALLOC_N(rb_ensure_entry_t,size+1);
1199  for (p=th->ec->ensure_list; p; p=p->next) {
1200  if (!p->entry.marker)
1201  p->entry.marker = rb_ary_tmp_new(0); /* dummy object */
1202  *entry++ = p->entry;
1203  }
1204  entry->marker = 0;
1205  }
1206 
1207  if (ruby_setjmp(cont->jmpbuf)) {
1208  VALUE value;
1209 
1210  VAR_INITIALIZED(cont);
1211  value = cont->value;
1212  if (cont->argc == -1) rb_exc_raise(value);
1213  cont->value = Qnil;
1214  *stat = 1;
1215  return value;
1216  }
1217  else {
1218  *stat = 0;
1219  return contval;
1220  }
1221 }
1222 COMPILER_WARNING_POP
1223 
1224 static inline void
1225 fiber_restore_thread(rb_thread_t *th, rb_fiber_t *fiber)
1226 {
1227  ec_switch(th, fiber);
1228  VM_ASSERT(th->ec->fiber_ptr == fiber);
1229 }
1230 
1231 static inline void
1232 cont_restore_thread(rb_context_t *cont)
1233 {
1234  rb_thread_t *th = GET_THREAD();
1235 
1236  /* restore thread context */
1237  if (cont->type == CONTINUATION_CONTEXT) {
1238  /* continuation */
1239  rb_execution_context_t *sec = &cont->saved_ec;
1240  rb_fiber_t *fiber = NULL;
1241 
1242  if (sec->fiber_ptr != NULL) {
1243  fiber = sec->fiber_ptr;
1244  }
1245  else if (th->root_fiber) {
1246  fiber = th->root_fiber;
1247  }
1248 
1249  if (fiber && th->ec != &fiber->cont.saved_ec) {
1250  ec_switch(th, fiber);
1251  }
1252 
1253  if (th->ec->trace_arg != sec->trace_arg) {
1254  rb_raise(rb_eRuntimeError, "can't call across trace_func");
1255  }
1256 
1257  /* copy vm stack */
1258 #ifdef CAPTURE_JUST_VALID_VM_STACK
1259  MEMCPY(th->ec->vm_stack,
1260  cont->saved_vm_stack.ptr,
1261  VALUE, cont->saved_vm_stack.slen);
1262  MEMCPY(th->ec->vm_stack + th->ec->vm_stack_size - cont->saved_vm_stack.clen,
1263  cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen,
1264  VALUE, cont->saved_vm_stack.clen);
1265 #else
1266  MEMCPY(th->ec->vm_stack, cont->saved_vm_stack.ptr, VALUE, sec->vm_stack_size);
1267 #endif
1268  /* other members of ec */
1269 
1270  th->ec->cfp = sec->cfp;
1271  th->ec->raised_flag = sec->raised_flag;
1272  th->ec->tag = sec->tag;
1273  th->ec->protect_tag = sec->protect_tag;
1274  th->ec->root_lep = sec->root_lep;
1275  th->ec->root_svar = sec->root_svar;
1276  th->ec->ensure_list = sec->ensure_list;
1277  th->ec->errinfo = sec->errinfo;
1278 
1279  VM_ASSERT(th->ec->vm_stack != NULL);
1280  }
1281  else {
1282  /* fiber */
1283  fiber_restore_thread(th, (rb_fiber_t*)cont);
1284  }
1285 }
1286 
1287 NOINLINE(static void fiber_setcontext(rb_fiber_t *new_fiber, rb_fiber_t *old_fiber));
1288 
1289 static void
1290 fiber_setcontext(rb_fiber_t *new_fiber, rb_fiber_t *old_fiber)
1291 {
1292  rb_thread_t *th = GET_THREAD();
1293 
1294  /* save old_fiber's machine stack - to ensure efficient garbage collection */
1295     if (!FIBER_TERMINATED_P(old_fiber)) {
1296         STACK_GROW_DIR_DETECTION;
1297         SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
1298  if (STACK_DIR_UPPER(0, 1)) {
1299  old_fiber->cont.machine.stack_size = th->ec->machine.stack_start - th->ec->machine.stack_end;
1300  old_fiber->cont.machine.stack = th->ec->machine.stack_end;
1301  }
1302  else {
1303  old_fiber->cont.machine.stack_size = th->ec->machine.stack_end - th->ec->machine.stack_start;
1304  old_fiber->cont.machine.stack = th->ec->machine.stack_start;
1305  }
1306  }
1307 
1308     /* exchange machine_stack_start between old_fiber and new_fiber */
1309     old_fiber->cont.saved_ec.machine.stack_start = th->ec->machine.stack_start;
1310 
1311  /* old_fiber->machine.stack_end should be NULL */
1312  old_fiber->cont.saved_ec.machine.stack_end = NULL;
1313 
1314  /* restore thread context */
1315  fiber_restore_thread(th, new_fiber);
1316 
1317  // if (DEBUG) fprintf(stderr, "fiber_setcontext: %p[%p] -> %p[%p]\n", old_fiber, old_fiber->stack.base, new_fiber, new_fiber->stack.base);
1318 
1319  /* swap machine context */
1320  coroutine_transfer(&old_fiber->context, &new_fiber->context);
1321 
1322  // It's possible to get here, and new_fiber is already freed.
1323  // if (DEBUG) fprintf(stderr, "fiber_setcontext: %p[%p] <- %p[%p]\n", old_fiber, old_fiber->stack.base, new_fiber, new_fiber->stack.base);
1324 }
1325 
1326 NOINLINE(NORETURN(static void cont_restore_1(rb_context_t *)));
1327 
1328 static void
1329 cont_restore_1(rb_context_t *cont)
1330 {
1331  cont_restore_thread(cont);
1332 
1333  /* restore machine stack */
1334 #ifdef _M_AMD64
1335  {
1336  /* workaround for x64 SEH */
1337  jmp_buf buf;
1338  setjmp(buf);
1339  _JUMP_BUFFER *bp = (void*)&cont->jmpbuf;
1340  bp->Frame = ((_JUMP_BUFFER*)((void*)&buf))->Frame;
1341  }
1342 #endif
1343     if (cont->machine.stack_src) {
1344         FLUSH_REGISTER_WINDOWS;
1345  MEMCPY(cont->machine.stack_src, cont->machine.stack,
1346  VALUE, cont->machine.stack_size);
1347  }
1348 
1349  ruby_longjmp(cont->jmpbuf, 1);
1350 }
1351 
1352 NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)));
1353 
1354 static void
1355 cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
1356 {
1357  if (cont->machine.stack_src) {
1358 #ifdef HAVE_ALLOCA
1359 #define STACK_PAD_SIZE 1
1360 #else
1361 #define STACK_PAD_SIZE 1024
1362 #endif
1363  VALUE space[STACK_PAD_SIZE];
1364 
1365 #if !STACK_GROW_DIRECTION
1366  if (addr_in_prev_frame > &space[0]) {
1367  /* Stack grows downward */
1368 #endif
1369 #if STACK_GROW_DIRECTION <= 0
1370  volatile VALUE *const end = cont->machine.stack_src;
1371  if (&space[0] > end) {
1372 # ifdef HAVE_ALLOCA
1373  volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end);
1374  space[0] = *sp;
1375 # else
1376  cont_restore_0(cont, &space[0]);
1377 # endif
1378  }
1379 #endif
1380 #if !STACK_GROW_DIRECTION
1381  }
1382  else {
1383  /* Stack grows upward */
1384 #endif
1385 #if STACK_GROW_DIRECTION >= 0
1386  volatile VALUE *const end = cont->machine.stack_src + cont->machine.stack_size;
1387  if (&space[STACK_PAD_SIZE] < end) {
1388 # ifdef HAVE_ALLOCA
1389  volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]);
1390  space[0] = *sp;
1391 # else
1392  cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
1393 # endif
1394  }
1395 #endif
1396 #if !STACK_GROW_DIRECTION
1397  }
1398 #endif
1399  }
1400  cont_restore_1(cont);
1401 }
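/*
 * cont_restore_0 must ensure the C stack currently in use does not overlap
 * the region about to be overwritten by the MEMCPY in cont_restore_1. It
 * pads the stack either with a single alloca, or, when alloca is
 * unavailable, by recursing with a 1024-VALUE buffer per frame, until the
 * live frame is safely past cont->machine.stack_src; only then does it
 * longjmp back into the saved context.
 */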
1402 
1403 /*
1404  * Document-class: Continuation
1405  *
1406  * Continuation objects are generated by Kernel#callcc,
1407  * after having +require+d <i>continuation</i>. They hold
1408  * a return address and execution context, allowing a nonlocal return
1409  * to the end of the #callcc block from anywhere within a
1410  * program. Continuations are somewhat analogous to a structured
1411  * version of C's <code>setjmp/longjmp</code> (although they contain
1412  * more state, so you might consider them closer to threads).
1413  *
1414  * For instance:
1415  *
1416  * require "continuation"
1417  * arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
1418  * callcc{|cc| $cc = cc}
1419  * puts(message = arr.shift)
1420  * $cc.call unless message =~ /Max/
1421  *
1422  * <em>produces:</em>
1423  *
1424  * Freddie
1425  * Herbie
1426  * Ron
1427  * Max
1428  *
1429  * Also you can call callcc in other methods:
1430  *
1431  * require "continuation"
1432  *
1433  * def g
1434  * arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
1435  * cc = callcc { |cc| cc }
1436  * puts arr.shift
1437  * return cc, arr.size
1438  * end
1439  *
1440  * def f
1441  * c, size = g
1442  * c.call(c) if size > 1
1443  * end
1444  *
1445  * f
1446  *
1447  * This (somewhat contrived) example allows the inner loop to abandon
1448  * processing early:
1449  *
1450  * require "continuation"
1451  * callcc {|cont|
1452  * for i in 0..4
1453  * print "#{i}: "
1454  * for j in i*5...(i+1)*5
1455  * cont.call() if j == 17
1456  * printf "%3d", j
1457  * end
1458  * end
1459  * }
1460  * puts
1461  *
1462  * <em>produces:</em>
1463  *
1464  *     0:  0  1  2  3  4
1465  *     1:  5  6  7  8  9
1466  *     2: 10 11 12 13 14
1467  *     3: 15 16
1468  */
1469 
1470 /*
1471  * call-seq:
1472  * callcc {|cont| block } -> obj
1473  *
1474  * Generates a Continuation object, which it passes to
1475  * the associated block. You need to <code>require
1476  * 'continuation'</code> before using this method. Performing a
1477  * <em>cont</em><code>.call</code> will cause the #callcc
1478  * to return (as will falling through the end of the block). The
1479  * value returned by the #callcc is the value of the
1480  * block, or the value passed to <em>cont</em><code>.call</code>. See
1481  * class Continuation for more details. Also see
1482  * Kernel#throw for an alternative mechanism for
1483  * unwinding a call stack.
1484  */
1485 
1486 static VALUE
1487 rb_callcc(VALUE self)
1488 {
1489  volatile int called;
1490  volatile VALUE val = cont_capture(&called);
1491 
1492  if (called) {
1493  return val;
1494  }
1495  else {
1496  return rb_yield(val);
1497  }
1498 }
1499 
1500 static VALUE
1501 make_passing_arg(int argc, const VALUE *argv)
1502 {
1503  switch (argc) {
1504  case -1:
1505  return argv[0];
1506  case 0:
1507  return Qnil;
1508  case 1:
1509  return argv[0];
1510  default:
1511  return rb_ary_new4(argc, argv);
1512  }
1513 }
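// Mapping used by both Continuation#call and fiber switching:
//   argc == -1 -> argv[0] is passed through unchanged (used internally to
//                 deliver an exception object; see fiber_switch);
//   argc ==  0 -> Qnil;
//   argc ==  1 -> argv[0];
//   otherwise  -> a new Array of the argc values.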
1514 
1515 typedef VALUE e_proc(VALUE);
1516 
1517 /* CAUTION!! : Currently, raising an error inside a rollback_func is not supported. */
1518 /* Behaves the same as rb_protect if rollback_func is set to NULL. */
1519 void
1520 ruby_register_rollback_func_for_ensure(e_proc *ensure_func, e_proc *rollback_func)
1521 {
1522  st_table **table_p = &GET_VM()->ensure_rollback_table;
1523  if (UNLIKELY(*table_p == NULL)) {
1524  *table_p = st_init_numtable();
1525  }
1526  st_insert(*table_p, (st_data_t)ensure_func, (st_data_t)rollback_func);
1527 }
1528 
1529 static inline e_proc *
1530 lookup_rollback_func(e_proc *ensure_func)
1531 {
1532  st_table *table = GET_VM()->ensure_rollback_table;
1533  st_data_t val;
1534  if (table && st_lookup(table, (st_data_t)ensure_func, &val))
1535  return (e_proc *) val;
1536  return (e_proc *) Qundef;
1537 }
1538 
1539 
1540 static inline void
1541 rollback_ensure_stack(VALUE self,rb_ensure_list_t *current,rb_ensure_entry_t *target)
1542 {
1543  rb_ensure_list_t *p;
1544  rb_ensure_entry_t *entry;
1545  size_t i, j;
1546  size_t cur_size;
1547  size_t target_size;
1548  size_t base_point;
1549  e_proc *func;
1550 
1551  cur_size = 0;
1552  for (p=current; p; p=p->next)
1553  cur_size++;
1554  target_size = 0;
1555  for (entry=target; entry->marker; entry++)
1556  target_size++;
1557 
1558  /* search common stack point */
1559  p = current;
1560  base_point = cur_size;
1561  while (base_point) {
1562  if (target_size >= base_point &&
1563  p->entry.marker == target[target_size - base_point].marker)
1564  break;
1565  base_point --;
1566  p = p->next;
1567  }
1568 
1569  /* rollback function check */
1570  for (i=0; i < target_size - base_point; i++) {
1571  if (!lookup_rollback_func(target[i].e_proc)) {
1572  rb_raise(rb_eRuntimeError, "continuation called from out of critical rb_ensure scope");
1573  }
1574  }
1575  /* pop ensure stack */
1576  while (cur_size > base_point) {
1577  /* escape from ensure block */
1578  (*current->entry.e_proc)(current->entry.data2);
1579  current = current->next;
1580  cur_size--;
1581  }
1582  /* push ensure stack */
1583  for (j = 0; j < i; j++) {
1584  func = lookup_rollback_func(target[i - j - 1].e_proc);
1585  if ((VALUE)func != Qundef) {
1586  (*func)(target[i - j - 1].data2);
1587  }
1588  }
1589 }
1590 
1591 /*
1592  * call-seq:
1593  * cont.call(args, ...)
1594  * cont[args, ...]
1595  *
1596  * Invokes the continuation. The program continues from the end of
1597  * the #callcc block. If no arguments are given, the original #callcc
1598  * returns +nil+. If one argument is given, #callcc returns
1599  * it. Otherwise, an array containing <i>args</i> is returned.
1600  *
1601  * callcc {|cont| cont.call } #=> nil
1602  * callcc {|cont| cont.call 1 } #=> 1
1603  * callcc {|cont| cont.call 1, 2, 3 } #=> [1, 2, 3]
1604  */
1605 
1606 static VALUE
1607 rb_cont_call(int argc, VALUE *argv, VALUE contval)
1608 {
1609  rb_context_t *cont = cont_ptr(contval);
1610  rb_thread_t *th = GET_THREAD();
1611 
1612  if (cont_thread_value(cont) != th->self) {
1613  rb_raise(rb_eRuntimeError, "continuation called across threads");
1614  }
1615  if (cont->saved_ec.protect_tag != th->ec->protect_tag) {
1616  rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier");
1617  }
1618  if (cont->saved_ec.fiber_ptr) {
1619  if (th->ec->fiber_ptr != cont->saved_ec.fiber_ptr) {
1620  rb_raise(rb_eRuntimeError, "continuation called across fiber");
1621  }
1622  }
1623  rollback_ensure_stack(contval, th->ec->ensure_list, cont->ensure_array);
1624 
1625  cont->argc = argc;
1626  cont->value = make_passing_arg(argc, argv);
1627 
1628  cont_restore_0(cont, &contval);
1629  return Qnil; /* unreachable */
1630 }
1631 
1632 /*********/
1633 /* fiber */
1634 /*********/
1635 
1636 /*
1637  * Document-class: Fiber
1638  *
1639  * Fibers are primitives for implementing lightweight cooperative
1640  * concurrency in Ruby. Basically they are a means of creating code blocks
1641  * that can be paused and resumed, much like threads. The main difference
1642  * is that they are never preempted and that the scheduling must be done by
1643  * the programmer and not the VM.
1644  *
1645  * As opposed to other stackless lightweight concurrency models, each fiber
1646  * comes with a stack. This enables the fiber to be paused from deeply
1647  * nested function calls within the fiber block. See the ruby(1)
1648  * manpage to configure the size of the fiber stack(s).
1649  *
1650  * When a fiber is created it will not run automatically. Rather it must
1651  * be explicitly asked to run using the Fiber#resume method.
1652  * The code running inside the fiber can give up control by calling
1653  * Fiber.yield, in which case it yields control back to the caller (the
1654  * caller of the Fiber#resume).
1655  *
1656  * Upon yielding or termination the Fiber returns the value of the last
1657  * executed expression.
1658  *
1659  * For instance:
1660  *
1661  * fiber = Fiber.new do
1662  * Fiber.yield 1
1663  * 2
1664  * end
1665  *
1666  * puts fiber.resume
1667  * puts fiber.resume
1668  * puts fiber.resume
1669  *
1670  * <em>produces</em>
1671  *
1672  * 1
1673  * 2
1674  * FiberError: dead fiber called
1675  *
1676  * The Fiber#resume method accepts an arbitrary number of parameters.
1677  * If it is the first call to #resume then they will be passed as
1678  * block arguments. Otherwise they will be the return value of the
1679  * call to Fiber.yield.
1680  *
1681  * Example:
1682  *
1683  * fiber = Fiber.new do |first|
1684  * second = Fiber.yield first + 2
1685  * end
1686  *
1687  * puts fiber.resume 10
1688  * puts fiber.resume 1_000_000
1689  * puts fiber.resume "The fiber will be dead before I can cause trouble"
1690  *
1691  * <em>produces</em>
1692  *
1693  * 12
1694  * 1000000
1695  * FiberError: dead fiber called
1696  *
1697  */
1698 
1699 static const rb_data_type_t fiber_data_type = {
1700  "fiber",
1701     {fiber_mark, fiber_free, fiber_memsize, fiber_compact,},
1702     0, 0, RUBY_TYPED_FREE_IMMEDIATELY
1703 };
1704 
1705 static VALUE
1706 fiber_alloc(VALUE klass)
1707 {
1708  return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
1709 }
1710 
1711 static rb_fiber_t*
1712 fiber_t_alloc(VALUE fiber_value)
1713 {
1714  rb_fiber_t *fiber;
1715  rb_thread_t *th = GET_THREAD();
1716 
1717  if (DATA_PTR(fiber_value) != 0) {
1718  rb_raise(rb_eRuntimeError, "cannot initialize twice");
1719  }
1720 
1721     THREAD_MUST_BE_RUNNING(th);
1722     fiber = ZALLOC(rb_fiber_t);
1723  fiber->cont.self = fiber_value;
1724  fiber->cont.type = FIBER_CONTEXT;
1725  cont_init(&fiber->cont, th);
1726 
1727     fiber->cont.saved_ec.fiber_ptr = fiber;
1728     rb_ec_clear_vm_stack(&fiber->cont.saved_ec);
1729 
1730  fiber->prev = NULL;
1731 
1732  /* fiber->status == 0 == CREATED
1733  * So that we don't need to set status: fiber_status_set(fiber, FIBER_CREATED); */
1734  VM_ASSERT(FIBER_CREATED_P(fiber));
1735 
1736  DATA_PTR(fiber_value) = fiber;
1737 
1738  return fiber;
1739 }
1740 
1741 static VALUE
1742 fiber_initialize(VALUE self, VALUE proc, struct fiber_pool * fiber_pool)
1743 {
1744  rb_fiber_t *fiber = fiber_t_alloc(self);
1745 
1746  fiber->first_proc = proc;
1747  fiber->stack.base = NULL;
1748  fiber->stack.pool = fiber_pool;
1749 
1750  return self;
1751 }
1752 
1753 static void
1754 fiber_prepare_stack(rb_fiber_t *fiber)
1755 {
1756  rb_context_t *cont = &fiber->cont;
1757  rb_execution_context_t *sec = &cont->saved_ec;
1758 
1759  size_t vm_stack_size = 0;
1760  VALUE *vm_stack = fiber_initialize_coroutine(fiber, &vm_stack_size);
1761 
1762  /* initialize cont */
1763  cont->saved_vm_stack.ptr = NULL;
1764  rb_ec_initialize_vm_stack(sec, vm_stack, vm_stack_size / sizeof(VALUE));
1765 
1766  sec->tag = NULL;
1767     sec->local_storage = NULL;
1768     sec->local_storage_recursive_hash = Qnil;
1769     sec->local_storage_recursive_hash_for_trace = Qnil;
1770 }
1771 
1772 /* :nodoc: */
1773 static VALUE
1774 rb_fiber_initialize(int argc, VALUE* argv, VALUE self)
1775 {
1776  return fiber_initialize(self, rb_block_proc(), &shared_fiber_pool);
1777 }
1778 
1779 VALUE
1780 rb_fiber_new(rb_block_call_func_t func, VALUE obj)
1781 {
1782  return fiber_initialize(fiber_alloc(rb_cFiber), rb_proc_new(func, obj), &shared_fiber_pool);
1783 }
1784 
1785 static void rb_fiber_terminate(rb_fiber_t *fiber, int need_interrupt);
1786 
1787 #define PASS_KW_SPLAT (rb_empty_keyword_given_p() ? RB_PASS_EMPTY_KEYWORDS : rb_keyword_given_p())
1788 
1789 void
1790 rb_fiber_start(void)
1791 {
1792  rb_thread_t * volatile th = GET_THREAD();
1793  rb_fiber_t *fiber = th->ec->fiber_ptr;
1794  rb_proc_t *proc;
1795  enum ruby_tag_type state;
1796  int need_interrupt = TRUE;
1797 
1798     VM_ASSERT(th->ec == GET_EC());
1799  VM_ASSERT(FIBER_RESUMED_P(fiber));
1800 
1801  EC_PUSH_TAG(th->ec);
1802  if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1803  rb_context_t *cont = &VAR_FROM_MEMORY(fiber)->cont;
1804  int argc;
1805  const VALUE *argv, args = cont->value;
1806  int kw_splat = cont->kw_splat;
1807  GetProcPtr(fiber->first_proc, proc);
1808  argv = (argc = cont->argc) > 1 ? RARRAY_CONST_PTR(args) : &args;
1809  cont->value = Qnil;
1810  th->ec->errinfo = Qnil;
1811  th->ec->root_lep = rb_vm_proc_local_ep(fiber->first_proc);
1812  th->ec->root_svar = Qfalse;
1813 
1814  EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, 0, Qnil);
1815  rb_adjust_argv_kw_splat(&argc, &argv, &kw_splat);
1816  cont->value = rb_vm_invoke_proc(th->ec, proc, argc, argv, kw_splat, VM_BLOCK_HANDLER_NONE);
1817  }
1818  EC_POP_TAG();
1819 
1820  if (state) {
1821  VALUE err = th->ec->errinfo;
1822  VM_ASSERT(FIBER_RESUMED_P(fiber));
1823 
1824         if (state == TAG_RAISE || state == TAG_FATAL) {
1825             rb_threadptr_pending_interrupt_enque(th, err);
1826         }
1827         else {
1828             err = rb_vm_make_jump_tag_but_local_jump(state, err);
1829             if (!NIL_P(err)) {
1830                 rb_threadptr_pending_interrupt_enque(th, err);
1831             }
1832  }
1833  need_interrupt = TRUE;
1834  }
1835 
1836     rb_fiber_terminate(fiber, need_interrupt);
1837     VM_UNREACHABLE(rb_fiber_start);
1838 }
1839 
1840 static rb_fiber_t *
1841 root_fiber_alloc(rb_thread_t *th)
1842 {
1843  VALUE fiber_value = fiber_alloc(rb_cFiber);
1844  rb_fiber_t *fiber = th->ec->fiber_ptr;
1845 
1846  VM_ASSERT(DATA_PTR(fiber_value) == NULL);
1847  VM_ASSERT(fiber->cont.type == FIBER_CONTEXT);
1848  VM_ASSERT(fiber->status == FIBER_RESUMED);
1849 
1850  th->root_fiber = fiber;
1851  DATA_PTR(fiber_value) = fiber;
1852  fiber->cont.self = fiber_value;
1853 
1854 #ifdef COROUTINE_PRIVATE_STACK
1855  fiber->stack = fiber_pool_stack_acquire(&shared_fiber_pool);
1856  coroutine_initialize_main(&fiber->context, fiber_pool_stack_base(&fiber->stack), fiber->stack.available, th->ec->machine.stack_start);
1857 #else
1858  coroutine_initialize_main(&fiber->context);
1859 #endif
1860 
1861  return fiber;
1862 }
1863 
1864 void
1865 rb_threadptr_root_fiber_setup(rb_thread_t *th)
1866 {
1867  rb_fiber_t *fiber = ruby_mimmalloc(sizeof(rb_fiber_t));
1868  if (!fiber) {
1869  rb_bug("%s", strerror(errno)); /* ... is it possible to call rb_bug here? */
1870  }
1871  MEMZERO(fiber, rb_fiber_t, 1);
1872  fiber->cont.type = FIBER_CONTEXT;
1873  fiber->cont.saved_ec.fiber_ptr = fiber;
1874  fiber->cont.saved_ec.thread_ptr = th;
1875  fiber_status_set(fiber, FIBER_RESUMED); /* skip CREATED */
1876  th->ec = &fiber->cont.saved_ec;
1877 }
1878 
1879 void
1880 rb_threadptr_root_fiber_release(rb_thread_t *th)
1881 {
1882  if (th->root_fiber) {
1883  /* ignore. A root fiber object will free th->ec */
1884  }
1885     else {
1886         rb_execution_context_t *ec = GET_EC();
1887         VM_ASSERT(th->ec->fiber_ptr->cont.self == 0);
1888         fiber_free(th->ec->fiber_ptr);
1889 
1890         if (th->ec == ec) {
1891             ruby_current_execution_context_ptr = NULL;
1892         }
1893  th->ec = NULL;
1894  }
1895 }
1896 
1897 void
1898 rb_threadptr_root_fiber_terminate(rb_thread_t *th)
1899 {
1900  rb_fiber_t *fiber = th->ec->fiber_ptr;
1901 
1902  fiber->status = FIBER_TERMINATED;
1903 
1904  // The vm_stack is `alloca`ed on the thread stack, so it's gone too:
1905  rb_ec_clear_vm_stack(th->ec);
1906 }
1907 
1908 static inline rb_fiber_t*
1909 fiber_current(void)
1910 {
1911     rb_execution_context_t *ec = GET_EC();
1912  if (ec->fiber_ptr->cont.self == 0) {
1913  root_fiber_alloc(rb_ec_thread_ptr(ec));
1914  }
1915  return ec->fiber_ptr;
1916 }
1917 
1918 static inline rb_fiber_t*
1919 return_fiber(void)
1920 {
1921  rb_fiber_t *fiber = fiber_current();
1922  rb_fiber_t *prev = fiber->prev;
1923 
1924  if (!prev) {
1925  rb_thread_t *th = GET_THREAD();
1926  rb_fiber_t *root_fiber = th->root_fiber;
1927 
1928  VM_ASSERT(root_fiber != NULL);
1929 
1930  if (root_fiber == fiber) {
1931  rb_raise(rb_eFiberError, "can't yield from root fiber");
1932  }
1933  return root_fiber;
1934  }
1935  else {
1936  fiber->prev = NULL;
1937  return prev;
1938  }
1939 }
1940 
1941 VALUE
1942 rb_fiber_current(void)
1943 {
1944  return fiber_current()->cont.self;
1945 }
1946 
1947 // Prepare to execute next_fiber on the given thread.
1948 static inline VALUE
1949 fiber_store(rb_fiber_t *next_fiber, rb_thread_t *th)
1950 {
1951  rb_fiber_t *fiber;
1952 
1953  if (th->ec->fiber_ptr != NULL) {
1954  fiber = th->ec->fiber_ptr;
1955  }
1956  else {
1957  /* create root fiber */
1958  fiber = root_fiber_alloc(th);
1959  }
1960 
1961  if (FIBER_CREATED_P(next_fiber)) {
1962  fiber_prepare_stack(next_fiber);
1963  }
1964 
1965  VM_ASSERT(FIBER_RESUMED_P(fiber) || FIBER_TERMINATED_P(fiber));
1966  VM_ASSERT(FIBER_RUNNABLE_P(next_fiber));
1967 
1968  if (FIBER_RESUMED_P(fiber)) fiber_status_set(fiber, FIBER_SUSPENDED);
1969 
1970  fiber_status_set(next_fiber, FIBER_RESUMED);
1971  fiber_setcontext(next_fiber, fiber);
1972 
1973  fiber = th->ec->fiber_ptr;
1974 
1975  /* Raise an exception if that was the result of executing the fiber */
1976  if (fiber->cont.argc == -1) rb_exc_raise(fiber->cont.value);
1977 
1978  return fiber->cont.value;
1979 }
1980 
1981 static inline VALUE
1982 fiber_switch(rb_fiber_t *fiber, int argc, const VALUE *argv, int is_resume, int kw_splat)
1983 {
1984  VALUE value;
1985  rb_context_t *cont = &fiber->cont;
1986  rb_thread_t *th = GET_THREAD();
1987 
1988  /* make sure the root_fiber object is available */
1989  if (th->root_fiber == NULL) root_fiber_alloc(th);
1990 
1991  if (th->ec->fiber_ptr == fiber) {
1992  /* ignore fiber context switch
1993  * because destination fiber is same as current fiber
1994  */
1995  return make_passing_arg(argc, argv);
1996  }
1997 
1998  if (cont_thread_value(cont) != th->self) {
1999  rb_raise(rb_eFiberError, "fiber called across threads");
2000  }
2001  else if (cont->saved_ec.protect_tag != th->ec->protect_tag) {
2002  rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
2003  }
2004  else if (FIBER_TERMINATED_P(fiber)) {
2005  value = rb_exc_new2(rb_eFiberError, "dead fiber called");
2006 
2007  if (!FIBER_TERMINATED_P(th->ec->fiber_ptr)) {
2008  rb_exc_raise(value);
2009  VM_UNREACHABLE(fiber_switch);
2010  }
2011  else {
2012  /* th->ec->fiber_ptr is also dead => switch to root fiber */
2013  /* (this means we're being called from rb_fiber_terminate, */
2014             /* and the terminated fiber's return_fiber() is already dead) */
2015             VM_ASSERT(FIBER_SUSPENDED_P(th->root_fiber));
2016 
2017  cont = &th->root_fiber->cont;
2018  cont->argc = -1;
2019  cont->value = value;
2020 
2021  fiber_setcontext(th->root_fiber, th->ec->fiber_ptr);
2022 
2023  VM_UNREACHABLE(fiber_switch);
2024  }
2025  }
2026 
2027  if (is_resume) {
2028  fiber->prev = fiber_current();
2029  }
2030 
2031  VM_ASSERT(FIBER_RUNNABLE_P(fiber));
2032 
2033  cont->argc = argc;
2034  cont->kw_splat = kw_splat;
2035  cont->value = make_passing_arg(argc, argv);
2036 
2037  value = fiber_store(fiber, th);
2038 
2039  if (is_resume && FIBER_TERMINATED_P(fiber)) {
2040  fiber_stack_release(fiber);
2041  }
2042 
2043  RUBY_VM_CHECK_INTS(th->ec);
2044 
2045  EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, 0, Qnil);
2046 
2047  return value;
2048 }
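// Note on resume vs. transfer: fiber_switch() with is_resume=1 records the
// current fiber in fiber->prev, so Fiber.yield (via return_fiber) knows where
// to return to. Fiber#transfer goes through rb_fiber_transfer below with
// is_resume=0 and leaves no return path, which is why a transferred fiber may
// no longer be resumed (see the `transferred` flag above).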
2049 
2050 VALUE
2051 rb_fiber_transfer(VALUE fiber_value, int argc, const VALUE *argv)
2052 {
2053  return fiber_switch(fiber_ptr(fiber_value), argc, argv, 0, RB_NO_KEYWORDS);
2054 }
2055 
2056 void
2057 rb_fiber_close(rb_fiber_t *fiber)
2058 {
2059  fiber_status_set(fiber, FIBER_TERMINATED);
2060 }
2061 
2062 static void
2063 rb_fiber_terminate(rb_fiber_t *fiber, int need_interrupt)
2064 {
2065  VALUE value = fiber->cont.value;
2066  rb_fiber_t *next_fiber;
2067 
2068  VM_ASSERT(FIBER_RESUMED_P(fiber));
2069  rb_fiber_close(fiber);
2070 
2071  coroutine_destroy(&fiber->context);
2072 
2073  fiber->cont.machine.stack = NULL;
2074  fiber->cont.machine.stack_size = 0;
2075 
2076  next_fiber = return_fiber();
2077  if (need_interrupt) RUBY_VM_SET_INTERRUPT(&next_fiber->cont.saved_ec);
2078  fiber_switch(next_fiber, 1, &value, 0, RB_NO_KEYWORDS);
2079 }
2080 
2081 VALUE
2082 rb_fiber_resume_kw(VALUE fiber_value, int argc, const VALUE *argv, int kw_splat)
2083 {
2084  rb_fiber_t *fiber = fiber_ptr(fiber_value);
2085 
2086  if (argc == -1 && FIBER_CREATED_P(fiber)) {
2087  rb_raise(rb_eFiberError, "cannot raise exception on unborn fiber");
2088  }
2089 
2090  if (fiber->prev != 0 || fiber_is_root_p(fiber)) {
2091  rb_raise(rb_eFiberError, "double resume");
2092  }
2093 
2094  if (fiber->transferred != 0) {
2095  rb_raise(rb_eFiberError, "cannot resume transferred Fiber");
2096  }
2097 
2098  return fiber_switch(fiber, argc, argv, 1, kw_splat);
2099 }
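// Not part of cont.c: a sketch of the "double resume" guard above. While a
// fiber is being resumed, fiber->prev is set, so resuming it again from
// inside itself raises FiberError.
//
//   f = Fiber.new { f.resume }  # the block resumes the fiber that is running it
//   f.resume                    #=> raises FiberError, "double resume"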
2100 
2101 VALUE
2102 rb_fiber_resume(VALUE fiber_value, int argc, const VALUE *argv)
2103 {
2104  return rb_fiber_resume_kw(fiber_value, argc, argv, RB_NO_KEYWORDS);
2105 }
2106 
2107 VALUE
2108 rb_fiber_yield_kw(int argc, const VALUE *argv, int kw_splat)
2109 {
2110  return fiber_switch(return_fiber(), argc, argv, 0, kw_splat);
2111 }
2112 
2113 VALUE
2114 rb_fiber_yield(int argc, const VALUE *argv)
2115 {
2116  return fiber_switch(return_fiber(), argc, argv, 0, RB_NO_KEYWORDS);
2117 }
2118 
2119 void
2120 rb_fiber_reset_root_local_storage(rb_thread_t *th)
2121 {
2122  if (th->root_fiber && th->root_fiber != th->ec->fiber_ptr) {
2123  th->ec->local_storage = th->root_fiber->cont.saved_ec.local_storage;
2124  }
2125 }
2126 
2127 /*
2128  * call-seq:
2129  * fiber.alive? -> true or false
2130  *
2131  * Returns true if the fiber can still be resumed (or transferred
2132  * to). After finishing execution of the fiber block this method will
2133  * always return false. You need to <code>require 'fiber'</code>
2134  * before using this method.
2135  */
2136 VALUE
2137 rb_fiber_alive_p(VALUE fiber_value)
2138 {
2139  return FIBER_TERMINATED_P(fiber_ptr(fiber_value)) ? Qfalse : Qtrue;
2140 }
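// Not part of cont.c: the documented behaviour of Fiber#alive?, which simply
// tests FIBER_TERMINATED_P as above.
//
//   require 'fiber'
//   f = Fiber.new { Fiber.yield }
//   f.alive?  #=> true  (created)
//   f.resume            # suspends at Fiber.yield
//   f.alive?  #=> true  (suspended)
//   f.resume            # block finishes
//   f.alive?  #=> false (terminated)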
2141 
2142 /*
2143  * call-seq:
2144  * fiber.resume(args, ...) -> obj
2145  *
2146  * Resumes the fiber from the point at which the last Fiber.yield was
2147  * called, or starts running it if it is the first call to
2148  * #resume. Arguments passed to resume will be the value of the
2149  * Fiber.yield expression or will be passed as block parameters to
2150  * the fiber's block if this is the first #resume.
2151  *
2152  * Alternatively, when resume is called it evaluates to the arguments passed
2153  * to the next Fiber.yield statement inside the fiber's block
2154  * or to the block value if it runs to completion without any
2155  * Fiber.yield.
2156  */
2157 static VALUE
2158 rb_fiber_m_resume(int argc, VALUE *argv, VALUE fiber)
2159 {
2160  return rb_fiber_resume_kw(fiber, argc, argv, PASS_KW_SPLAT);
2161 }
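// Not part of cont.c: a sketch of the resume/yield value round-trip that the
// documentation above describes.
//
//   f = Fiber.new do |first|
//     second = Fiber.yield(first + 1)  # the next resume's argument lands here
//     "done: #{second}"
//   end
//
//   f.resume(1)   #=> 2            (argument of Fiber.yield)
//   f.resume(10)  #=> "done: 10"   (block's return value)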
2162 
2163 /*
2164  * call-seq:
2165  * fiber.raise -> obj
2166  * fiber.raise(string) -> obj
2167  * fiber.raise(exception [, string [, array]]) -> obj
2168  *
2169  * Raises an exception in the fiber at the point at which the last
2170  * Fiber.yield was called, or at the start if neither +resume+
2171  * nor +raise+ has been called on it before.
2172  *
2173  * With no arguments, raises a +RuntimeError+. With a single +String+
2174  * argument, raises a +RuntimeError+ with the string as a message. Otherwise,
2175  * the first parameter should be the name of an +Exception+ class (or an
2176  * object that returns an +Exception+ object when sent an +exception+
2177  * message). The optional second parameter sets the message associated with
2178  * the exception, and the third parameter is an array of callback information.
2179  * Exceptions are caught by the +rescue+ clause of <code>begin...end</code>
2180  * blocks.
2181  */
2182 static VALUE
2183 rb_fiber_raise(int argc, VALUE *argv, VALUE fiber)
2184 {
2185  VALUE exc = rb_make_exception(argc, argv);
2186  return rb_fiber_resume_kw(fiber, -1, &exc, RB_NO_KEYWORDS);
2187 }
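// Not part of cont.c: Fiber#raise delivers the exception at the suspension
// point, where it can be rescued; the call then returns a value like #resume.
//
//   f = Fiber.new do
//     begin
//       Fiber.yield
//     rescue => e
//       "rescued: #{e.message}"
//     end
//   end
//
//   f.resume          # suspend at Fiber.yield
//   f.raise("boom")   #=> "rescued: boom"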
2188 
2189 /*
2190  * call-seq:
2191  * fiber.transfer(args, ...) -> obj
2192  *
2193  * Transfer control to another fiber, resuming it from where it last
2194  * stopped or starting it if it was not resumed before. The calling
2195  * fiber will be suspended much like in a call to
2196  * Fiber.yield. You need to <code>require 'fiber'</code>
2197  * before using this method.
2198  *
2199  * The fiber which receives the transfer call treats it much like
2200  * a resume call. Arguments passed to transfer are treated like those
2201  * passed to resume.
2202  *
2203  * You cannot call +resume+ on a fiber that has been transferred to.
2204  * If you call +transfer+ on a fiber, and later call +resume+ on
2205  * the fiber, a +FiberError+ will be raised. Once you call +transfer+ on
2206  * a fiber, the only way to resume processing the fiber is to
2207  * call +transfer+ on it again.
2208  *
2209  * Example:
2210  *
2211  * fiber1 = Fiber.new do
2212  * puts "In Fiber 1"
2213  * Fiber.yield
2214  * puts "In Fiber 1 again"
2215  * end
2216  *
2217  * fiber2 = Fiber.new do
2218  * puts "In Fiber 2"
2219  * fiber1.transfer
2220  * puts "Never see this message"
2221  * end
2222  *
2223  * fiber3 = Fiber.new do
2224  * puts "In Fiber 3"
2225  * end
2226  *
2227  * fiber2.resume
2228  * fiber3.resume
2229  * fiber1.resume rescue (p $!)
2230  * fiber1.transfer
2231  *
2232  * <em>produces</em>
2233  *
2234  * In Fiber 2
2235  * In Fiber 1
2236  * In Fiber 3
2237  * #<FiberError: cannot resume transferred Fiber>
2238  * In Fiber 1 again
2239  *
2240  */
2241 static VALUE
2242 rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fiber_value)
2243 {
2244  rb_fiber_t *fiber = fiber_ptr(fiber_value);
2245  fiber->transferred = 1;
2246  return fiber_switch(fiber, argc, argv, 0, PASS_KW_SPLAT);
2247 }
2248 
2249 /*
2250  * call-seq:
2251  * Fiber.yield(args, ...) -> obj
2252  *
2253  * Yields control back to the context that resumed the fiber, passing
2254  * along any arguments that were passed to it. The fiber will resume
2255  * processing at this point when #resume is called next.
2256  * Any arguments passed to the next #resume will be the value that
2257  * this Fiber.yield expression evaluates to.
2258  */
2259 static VALUE
2260 rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
2261 {
2262  return rb_fiber_yield_kw(argc, argv, PASS_KW_SPLAT);
2263 }
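// Not part of cont.c: successive yields as documented above, each value
// becoming the result of the corresponding #resume.
//
//   f = Fiber.new do
//     Fiber.yield :first
//     Fiber.yield :second
//     :last
//   end
//
//   f.resume  #=> :first
//   f.resume  #=> :second
//   f.resume  #=> :last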
2264 
2265 /*
2266  * call-seq:
2267  * Fiber.current() -> fiber
2268  *
2269  * Returns the current fiber. You need to <code>require 'fiber'</code>
2270  * before using this method. If you are not running in the context of
2271  * a fiber this method will return the root fiber.
2272  */
2273 static VALUE
2274 rb_fiber_s_current(VALUE klass)
2275 {
2276  return rb_fiber_current();
2277 }
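// Not part of cont.c: Fiber.current inside a fiber is that fiber's self;
// outside any fiber it is the root fiber, as documented above.
//
//   require 'fiber'
//   root = Fiber.current
//   f = Fiber.new { Fiber.yield(Fiber.current) }
//   f.resume.equal?(f)          #=> true
//   Fiber.current.equal?(root)  #=> true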
2278 
2279 /*
2280  * call-seq:
2281  * fiber.to_s -> string
2282  *
2283  * Returns a string describing the fiber, including its current status.
2284  *
2285  */
2286 
2287 static VALUE
2288 fiber_to_s(VALUE fiber_value)
2289 {
2290  const rb_fiber_t *fiber = fiber_ptr(fiber_value);
2291  const rb_proc_t *proc;
2292  char status_info[0x20];
2293 
2294  if (fiber->transferred) {
2295  snprintf(status_info, 0x20, " (%s, transferred)", fiber_status_name(fiber->status));
2296  }
2297  else {
2298  snprintf(status_info, 0x20, " (%s)", fiber_status_name(fiber->status));
2299  }
2300 
2301  if (!rb_obj_is_proc(fiber->first_proc)) {
2302  VALUE str = rb_any_to_s(fiber_value);
2303  strlcat(status_info, ">", sizeof(status_info));
2304  rb_str_set_len(str, RSTRING_LEN(str)-1);
2305  rb_str_cat_cstr(str, status_info);
2306  return str;
2307  }
2308  GetProcPtr(fiber->first_proc, proc);
2309  return rb_block_to_s(fiber_value, &proc->block, status_info);
2310 }
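// Not part of cont.c: the status suffix assembled by the snprintf calls above
// shows up in Fiber#to_s / #inspect (exact output varies by build and object
// address).
//
//   f = Fiber.new { Fiber.yield }
//   f.to_s   # e.g. "#<Fiber:0x... (irb):1 (created)>"
//   f.resume
//   f.to_s   # e.g. "#<Fiber:0x... (irb):1 (suspended)>"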
2311 
2312 #ifdef HAVE_WORKING_FORK
2313 void
2314 rb_fiber_atfork(rb_thread_t *th)
2315 {
2316  if (th->root_fiber) {
2317  if (&th->root_fiber->cont.saved_ec != th->ec) {
2318  th->root_fiber = th->ec->fiber_ptr;
2319  }
2320  th->root_fiber->prev = 0;
2321  }
2322 }
2323 #endif
2324 
2325 #ifdef RB_EXPERIMENTAL_FIBER_POOL
2326 static void
2327 fiber_pool_free(void *ptr)
2328 {
2329  struct fiber_pool * fiber_pool = ptr;
2330  RUBY_FREE_ENTER("fiber_pool");
2331 
2332  fiber_pool_free_allocations(fiber_pool->allocations);
2333  ruby_xfree(fiber_pool);
2334 
2335  RUBY_FREE_LEAVE("fiber_pool");
2336 }
2337 
2338 static size_t
2339 fiber_pool_memsize(const void *ptr)
2340 {
2341  const struct fiber_pool * fiber_pool = ptr;
2342  size_t size = sizeof(*fiber_pool);
2343 
2344  size += fiber_pool->count * fiber_pool->size;
2345 
2346  return size;
2347 }
2348 
2349 static const rb_data_type_t FiberPoolDataType = {
2350  "fiber_pool",
2351  {NULL, fiber_pool_free, fiber_pool_memsize,},
2352  0, 0, RUBY_TYPED_FREE_IMMEDIATELY
2353 };
2354 
2355 static VALUE
2356 fiber_pool_alloc(VALUE klass)
2357 {
2358  struct fiber_pool * fiber_pool = RB_ALLOC(struct fiber_pool);
2359 
2360  return TypedData_Wrap_Struct(klass, &FiberPoolDataType, fiber_pool);
2361 }
2362 
2363 static VALUE
2364 rb_fiber_pool_initialize(int argc, VALUE* argv, VALUE self)
2365 {
2366  rb_thread_t *th = GET_THREAD();
2367  VALUE size = Qnil, count = Qnil, vm_stack_size = Qnil;
2368  struct fiber_pool * fiber_pool = NULL;
2369 
2370  // Maybe these should be keyword arguments.
2371  rb_scan_args(argc, argv, "03", &size, &count, &vm_stack_size);
2372 
2373  if (NIL_P(size)) {
2374  size = INT2NUM(th->vm->default_params.fiber_machine_stack_size);
2375  }
2376 
2377  if (NIL_P(count)) {
2378  count = INT2NUM(128);
2379  }
2380 
2381  if (NIL_P(vm_stack_size)) {
2382  vm_stack_size = INT2NUM(th->vm->default_params.fiber_vm_stack_size);
2383  }
2384 
2385  TypedData_Get_Struct(self, struct fiber_pool, &FiberPoolDataType, fiber_pool);
2386 
2387  fiber_pool_initialize(fiber_pool, NUM2SIZET(size), NUM2SIZET(count), NUM2SIZET(vm_stack_size));
2388 
2389  return self;
2390 }
2391 #endif
2392 
2393 /*
2394  * Document-class: FiberError
2395  *
2396  * Raised when an invalid operation is attempted on a Fiber, in
2397  * particular when attempting to call/resume a dead fiber,
2398  * attempting to yield from the root fiber, or calling a fiber across
2399  * threads.
2400  *
2401  * fiber = Fiber.new{}
2402  * fiber.resume #=> nil
2403  * fiber.resume #=> FiberError: dead fiber called
2404  */
2405 
2406 void
2407 Init_Cont(void)
2408 {
2409  rb_thread_t *th = GET_THREAD();
2410  size_t vm_stack_size = th->vm->default_params.fiber_vm_stack_size;
2411  size_t machine_stack_size = th->vm->default_params.fiber_machine_stack_size;
2412  size_t stack_size = machine_stack_size + vm_stack_size;
2413 
2414 #ifdef _WIN32
2415  SYSTEM_INFO info;
2416  GetSystemInfo(&info);
2417  pagesize = info.dwPageSize;
2418 #else /* not WIN32 */
2419  pagesize = sysconf(_SC_PAGESIZE);
2420 #endif
2421  SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
2422 
2423  fiber_pool_initialize(&shared_fiber_pool, stack_size, FIBER_POOL_INITIAL_SIZE, vm_stack_size);
2424 
2425  char * fiber_shared_fiber_pool_free_stacks = getenv("RUBY_SHARED_FIBER_POOL_FREE_STACKS");
2426  if (fiber_shared_fiber_pool_free_stacks) {
2427  shared_fiber_pool.free_stacks = atoi(fiber_shared_fiber_pool_free_stacks);
2428  }
2429 
2430  rb_cFiber = rb_define_class("Fiber", rb_cObject);
2431  rb_define_alloc_func(rb_cFiber, fiber_alloc);
2432  rb_eFiberError = rb_define_class("FiberError", rb_eStandardError);
2433  rb_define_singleton_method(rb_cFiber, "yield", rb_fiber_s_yield, -1);
2434  rb_define_method(rb_cFiber, "initialize", rb_fiber_initialize, -1);
2435  rb_define_method(rb_cFiber, "resume", rb_fiber_m_resume, -1);
2436  rb_define_method(rb_cFiber, "raise", rb_fiber_raise, -1);
2437  rb_define_method(rb_cFiber, "to_s", fiber_to_s, 0);
2438  rb_define_alias(rb_cFiber, "inspect", "to_s");
2439 
2440 #ifdef RB_EXPERIMENTAL_FIBER_POOL
2441  rb_cFiberPool = rb_define_class("Pool", rb_cFiber);
2442  rb_define_alloc_func(rb_cFiberPool, fiber_pool_alloc);
2443  rb_define_method(rb_cFiberPool, "initialize", rb_fiber_pool_initialize, -1);
2444 #endif
2445 }
2446 
2447 RUBY_SYMBOL_EXPORT_BEGIN
2448 
2449 void
2450 ruby_Init_Continuation_body(void)
2451 {
2452  rb_cContinuation = rb_define_class("Continuation", rb_cObject);
2453  rb_undef_alloc_func(rb_cContinuation);
2454  rb_undef_method(CLASS_OF(rb_cContinuation), "new");
2455  rb_define_method(rb_cContinuation, "call", rb_cont_call, -1);
2456  rb_define_method(rb_cContinuation, "[]", rb_cont_call, -1);
2457  rb_define_global_function("callcc", rb_callcc, 0);
2458 }
2459 
2460 void
2461 ruby_Init_Fiber_as_Coroutine(void)
2462 {
2463  rb_define_method(rb_cFiber, "transfer", rb_fiber_m_transfer, -1);
2464  rb_define_method(rb_cFiber, "alive?", rb_fiber_alive_p, 0);
2465  rb_define_singleton_method(rb_cFiber, "current", rb_fiber_s_current, 0);
2466 }
2467 
2468 RUBY_SYMBOL_EXPORT_END