Ruby 2.7.0p0 (2019-12-25 revision 647ee6f091eafcce70ffb75ddf7e121e192ab217)
thread.c
Go to the documentation of this file.
1 /**********************************************************************
2 
3  thread.c -
4 
5  $Author$
6 
7  Copyright (C) 2004-2007 Koichi Sasada
8 
9 **********************************************************************/
10 
11 /*
12  YARV Thread Design
13 
14  model 1: Userlevel Thread
15  Same as traditional ruby thread.
16 
17  model 2: Native Thread with Global VM lock
18  Using pthread (or Windows thread) and Ruby threads run concurrent.
19 
20  model 3: Native Thread with fine grain lock
21  Using pthread and Ruby threads run concurrent or parallel.
22 
23  model 4: M:N User:Native threads with Global VM lock
24  Combination of model 1 and 2
25 
26  model 5: M:N User:Native thread with fine grain lock
27  Combination of model 1 and 3
28 
29 ------------------------------------------------------------------------
30 
31  model 2:
32  A thread has mutex (GVL: Global VM Lock or Giant VM Lock) can run.
33  When thread scheduling, running thread release GVL. If running thread
34  try blocking operation, this thread must release GVL and another
35  thread can continue this flow. After blocking operation, thread
36  must check interrupt (RUBY_VM_CHECK_INTS).
37 
38  Every VM can run parallel.
39 
40  Ruby threads are scheduled by OS thread scheduler.
41 
42 ------------------------------------------------------------------------
43 
44  model 3:
45  Every threads run concurrent or parallel and to access shared object
46  exclusive access control is needed. For example, to access String
47  object or Array object, fine grain lock must be locked every time.
48  */
49 
50 
51 /*
52  * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
53  * 2.15 or later and set _FORTIFY_SOURCE > 0.
54  * However, the implementation is wrong. Even though Linux's select(2)
55  * supports large fd size (>FD_SETSIZE), it wrongly assumes fd is always
56  * less than FD_SETSIZE (i.e. 1024). And then when enabling HAVE_RB_FD_INIT,
57  * it doesn't work correctly and makes program abort. Therefore we need to
58  * disable FORTIFY_SOURCE until glibc fixes it.
59  */
60 #undef _FORTIFY_SOURCE
61 #undef __USE_FORTIFY_LEVEL
62 #define __USE_FORTIFY_LEVEL 0
63 
64 /* for model 2 */
65 
66 #include "ruby/config.h"
67 #include "ruby/io.h"
68 #include "eval_intern.h"
69 #include "timev.h"
70 #include "ruby/thread.h"
71 #include "ruby/thread_native.h"
72 #include "ruby/debug.h"
73 #include "gc.h"
74 #include "internal.h"
75 #include "iseq.h"
76 #include "vm_core.h"
77 #include "mjit.h"
78 #include "hrtime.h"
79 
80 #ifdef __linux__
81 // Normally, gcc(1) translates calls to alloca() with inlined code. This is not done when either the -ansi, -std=c89, -std=c99, or the -std=c11 option is given and the header <alloca.h> is not included.
82 #include <alloca.h>
83 #endif
84 
85 #ifndef USE_NATIVE_THREAD_PRIORITY
86 #define USE_NATIVE_THREAD_PRIORITY 0
87 #define RUBY_THREAD_PRIORITY_MAX 3
88 #define RUBY_THREAD_PRIORITY_MIN -3
89 #endif
90 
91 #ifndef THREAD_DEBUG
92 #define THREAD_DEBUG 0
93 #endif
94 
95 static VALUE rb_cThreadShield;
96 
97 static VALUE sym_immediate;
98 static VALUE sym_on_blocking;
99 static VALUE sym_never;
100 
104 };
105 
106 #define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
107 #define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)
108 
/*
 * Return the Hash stored in THREAD's idLocals instance variable, creating
 * it on first use via rb_ivar_set.
 * NOTE(review): this rendering is missing original lines 112/114 —
 * presumably a "not yet initialised" guard around the rb_ivar_set call;
 * confirm against the upstream thread.c.
 */
109 static inline VALUE
110 rb_thread_local_storage(VALUE thread)
111 {
113  rb_ivar_set(thread, idLocals, rb_hash_new());
115  }
116  return rb_ivar_get(thread, idLocals);
117 }
118 
119 static void sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
120 static void sleep_forever(rb_thread_t *th, unsigned int fl);
121 static void rb_thread_sleep_deadly_allow_spurious_wakeup(void);
122 static int rb_threadptr_dead(rb_thread_t *th);
123 static void rb_check_deadlock(rb_vm_t *vm);
124 static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
125 static const char *thread_status_name(rb_thread_t *th, int detail);
126 static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
127 NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
128 static int consume_communication_pipe(int fd);
129 static int check_signals_nogvl(rb_thread_t *, int sigwait_fd);
130 void rb_sigwait_fd_migrate(rb_vm_t *); /* process.c */
131 
132 #define eKillSignal INT2FIX(0)
133 #define eTerminateSignal INT2FIX(1)
134 static volatile int system_working = 1;
135 
/*
 * One entry per thread blocked on an fd; linked into vm.waiting_fds.
 * NOTE(review): original line 138 is missing from this rendering
 * (presumably the owning thread pointer field) — confirm upstream.
 */
136 struct waiting_fd {
137  struct list_node wfd_node; /* <=> vm.waiting_fds */
139  int fd;
140 };
141 
142 inline static void
143 st_delete_wrap(st_table *table, st_data_t key)
144 {
145  st_delete(table, &key, 0);
146 }
147 
148 /********************************************************************************/
149 
150 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
151 
154 };
155 
156 static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
157 static void unblock_function_clear(rb_thread_t *th);
158 
159 static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
160  rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
161 static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
162 
163 #define RB_GC_SAVE_MACHINE_CONTEXT(th) \
164  do { \
165  FLUSH_REGISTER_WINDOWS; \
166  setjmp((th)->ec->machine.regs); \
167  SET_MACHINE_STACK_END(&(th)->ec->machine.stack_end); \
168  } while (0)
169 
170 #define GVL_UNLOCK_BEGIN(th) do { \
171  RB_GC_SAVE_MACHINE_CONTEXT(th); \
172  gvl_release(th->vm);
173 
174 #define GVL_UNLOCK_END(th) \
175  gvl_acquire(th->vm, th); \
176  rb_thread_set_current(th); \
177 } while(0)
178 
179 #ifdef __GNUC__
180 #ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
181 #define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
182 #else
183 #define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
184 #endif
185 #else
186 #define only_if_constant(expr, notconst) notconst
187 #endif
188 #define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
189  struct rb_blocking_region_buffer __region; \
190  if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
191  /* always return true unless fail_if_interrupted */ \
192  !only_if_constant(fail_if_interrupted, TRUE)) { \
193  exec; \
194  blocking_region_end(th, &__region); \
195  }; \
196 } while(0)
197 
198 /*
199  * returns true if this thread was spuriously interrupted, false otherwise
200  * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler)
201  */
/*
 * Check and run pending interrupts for the thread owning EC after a
 * blocking operation.  Returns FALSE on the fast path (no pending
 * interrupts of any kind); otherwise runs interrupt handlers via
 * rb_threadptr_execute_interrupts and returns its result.
 * NOTE(review): original lines 212-213 (the else-branch body) are missing
 * from this rendering — confirm upstream before relying on this text.
 */
202 #define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
203 static inline int
204 vm_check_ints_blocking(rb_execution_context_t *ec)
205 {
206  rb_thread_t *th = rb_ec_thread_ptr(ec);
207 
208  if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
209  if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
210  }
211  else {
214  }
215  return rb_threadptr_execute_interrupts(th, 1);
216 }
217 
/* Number of threads currently registered as living with the VM. */
218 static int
219 vm_living_thread_num(const rb_vm_t *vm)
220 {
221  return vm->living_thread_num;
222 }
223 
224 /*
225  * poll() is supported by many OSes, but so far Linux is the only
226  * one we know of that supports using poll() in all places select()
227  * would work.
228  */
229 #if defined(HAVE_POLL)
230 # if defined(__linux__)
231 # define USE_POLL
232 # endif
233 # if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
234 # define USE_POLL
235  /* FreeBSD does not set POLLOUT when POLLHUP happens */
236 # define POLLERR_SET (POLLHUP | POLLERR)
237 # endif
238 #endif
239 
240 static void
241 timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
242  const struct timeval *timeout)
243 {
244  if (timeout) {
245  *rel = rb_timeval2hrtime(timeout);
246  *end = rb_hrtime_add(rb_hrtime_now(), *rel);
247  *to = rel;
248  }
249  else {
250  *to = 0;
251  }
252 }
253 
254 #if THREAD_DEBUG
255 #ifdef HAVE_VA_ARGS_MACRO
256 void rb_thread_debug(const char *file, int line, const char *fmt, ...);
257 #define thread_debug(...) rb_thread_debug(__FILE__, __LINE__, __VA_ARGS__)
258 #define POSITION_FORMAT "%s:%d:"
259 #define POSITION_ARGS ,file, line
260 #else
261 void rb_thread_debug(const char *fmt, ...);
262 #define thread_debug rb_thread_debug
263 #define POSITION_FORMAT
264 #define POSITION_ARGS
265 #endif
266 
267 # ifdef NON_SCALAR_THREAD_ID
268 #define fill_thread_id_string ruby_fill_thread_id_string
269 const char *
270 ruby_fill_thread_id_string(rb_nativethread_id_t thid, rb_thread_id_string_t buf)
271 {
272  extern const char ruby_digitmap[];
273  size_t i;
274 
275  buf[0] = '0';
276  buf[1] = 'x';
277  for (i = 0; i < sizeof(thid); i++) {
278 # ifdef LITTLE_ENDIAN
279  size_t j = sizeof(thid) - i - 1;
280 # else
281  size_t j = i;
282 # endif
283  unsigned char c = (unsigned char)((char *)&thid)[j];
284  buf[2 + i * 2] = ruby_digitmap[(c >> 4) & 0xf];
285  buf[3 + i * 2] = ruby_digitmap[c & 0xf];
286  }
287  buf[sizeof(rb_thread_id_string_t)-1] = '\0';
288  return buf;
289 }
290 # define fill_thread_id_str(th) fill_thread_id_string((th)->thread_id, (th)->thread_id_string)
291 # define thread_id_str(th) ((th)->thread_id_string)
292 # define PRI_THREAD_ID "s"
293 # endif
294 
295 # if THREAD_DEBUG < 0
296 static int rb_thread_debug_enabled;
297 
298 /*
299  * call-seq:
300  * Thread.DEBUG -> num
301  *
302  * Returns the thread debug level. Available only if compiled with
303  * THREAD_DEBUG=-1.
304  */
305 
/* Thread.DEBUG — current thread debug level as an Integer. */
306 static VALUE
307 rb_thread_s_debug(void)
308 {
309  return INT2NUM(rb_thread_debug_enabled);
310 }
311 
312 /*
313  * call-seq:
314  * Thread.DEBUG = num
315  *
316  * Sets the thread debug level. Available only if compiled with
317  * THREAD_DEBUG=-1.
318  */
319 
320 static VALUE
321 rb_thread_s_debug_set(VALUE self, VALUE val)
322 {
323  rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;
324  return val;
325 }
326 # else
327 # define rb_thread_debug_enabled THREAD_DEBUG
328 # endif
329 #else
330 #define thread_debug if(0)printf
331 #endif
332 
333 #ifndef fill_thread_id_str
334 # define fill_thread_id_string(thid, buf) ((void *)(uintptr_t)(thid))
335 # define fill_thread_id_str(th) (void)0
336 # define thread_id_str(th) ((void *)(uintptr_t)(th)->thread_id)
337 # define PRI_THREAD_ID "p"
338 #endif
339 
340 NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start));
341 static void timer_thread_function(void);
342 void ruby_sigchld_handler(rb_vm_t *); /* signal.c */
343 
/*
 * Unblocking function used while a thread is sigwait-ing.
 * NOTE(review): the body line (original 347) is missing from this
 * rendering — presumably a timer-thread wakeup call; confirm upstream.
 */
344 static void
345 ubf_sigwait(void *ignore)
346 {
348 }
349 
350 #if defined(_WIN32)
351 #include "thread_win32.c"
352 
353 #define DEBUG_OUT() \
354  WaitForSingleObject(&debug_mutex, INFINITE); \
355  printf(POSITION_FORMAT"%#lx - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
356  fflush(stdout); \
357  ReleaseMutex(&debug_mutex);
358 
359 #elif defined(HAVE_PTHREAD_H)
360 #include "thread_pthread.c"
361 
362 #define DEBUG_OUT() \
363  pthread_mutex_lock(&debug_mutex); \
364  printf(POSITION_FORMAT"%"PRI_THREAD_ID" - %s" POSITION_ARGS, \
365  fill_thread_id_string(pthread_self(), thread_id_string), buf); \
366  fflush(stdout); \
367  pthread_mutex_unlock(&debug_mutex);
368 
369 #else
370 #error "unsupported thread type"
371 #endif
372 
373 /*
374  * TODO: somebody with win32 knowledge should be able to get rid of
375  * timer-thread by busy-waiting on signals. And it should be possible
376  * to make the GVL in thread_pthread.c be platform-independent.
377  */
378 #ifndef BUSY_WAIT_SIGNALS
379 # define BUSY_WAIT_SIGNALS (0)
380 #endif
381 
382 #ifndef USE_EVENTFD
383 # define USE_EVENTFD (0)
384 #endif
385 
386 #if THREAD_DEBUG
387 static int debug_mutex_initialized = 1;
388 static rb_nativethread_lock_t debug_mutex;
389 
/*
 * printf-style debug logger used by the thread_debug() macro; formats into
 * a local buffer and emits it via the platform DEBUG_OUT() macro under
 * debug_mutex.  No-op unless rb_thread_debug_enabled is set.
 */
390 void
391 rb_thread_debug(
392 #ifdef HAVE_VA_ARGS_MACRO
393  const char *file, int line,
394 #endif
395  const char *fmt, ...)
396 {
397  va_list args;
398  char buf[BUFSIZ];
399 #ifdef NON_SCALAR_THREAD_ID
400  rb_thread_id_string_t thread_id_string;
401 #endif
402 
403  if (!rb_thread_debug_enabled) return;
404 
     /* NOTE(review): this check-then-set initialization is not synchronized;
      * two early racing callers could both initialize the mutex. */
405  if (debug_mutex_initialized == 1) {
406  debug_mutex_initialized = 0;
407  rb_native_mutex_initialize(&debug_mutex);
408  }
409 
410  va_start(args, fmt);
411  vsnprintf(buf, BUFSIZ, fmt, args);
412  va_end(args);
413 
414  DEBUG_OUT();
415 }
416 #endif
417 
418 #include "thread_sync.c"
419 
420 void
422 {
423  gvl_release(vm);
424  gvl_destroy(vm);
425  if (0) {
426  /* may be held by running threads */
429  }
430 }
431 
432 void
434 {
436 }
437 
438 void
440 {
442 }
443 
444 void
446 {
447  rb_native_mutex_lock(lock);
448 }
449 
450 void
452 {
454 }
455 
/*
 * Install FUNC/ARG as TH's unblock function.  Loops until it can do so
 * with no interrupt pending; if FAIL_IF_INTERRUPTED, gives up and returns
 * FALSE when an interrupt is already flagged, otherwise services pending
 * interrupts first.  Returns TRUE on success.
 * NOTE(review): original lines 469, 471 and 477 are missing from this
 * rendering — presumably the interrupt_lock acquire/release around the
 * assignment; confirm upstream.
 */
456 static int
457 unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
458 {
459  do {
460  if (fail_if_interrupted) {
461  if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
462  return FALSE;
463  }
464  }
465  else {
466  RUBY_VM_CHECK_INTS(th->ec);
467  }
468 
470  } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
472 
473  VM_ASSERT(th->unblock.func == NULL);
474 
475  th->unblock.func = func;
476  th->unblock.arg = arg;
478 
479  return TRUE;
480 }
481 
/*
 * Clear TH's unblock function after a blocking region ends.
 * NOTE(review): original lines 485/487 (presumably the interrupt_lock
 * lock/unlock pair) are missing from this rendering — confirm upstream.
 */
482 static void
483 unblock_function_clear(rb_thread_t *th)
484 {
486  th->unblock.func = NULL;
488 }
489 
/*
 * Flag an interrupt on TH (TRAP selects trap-vs-ordinary interrupt) and,
 * if TH is inside a blocking region, invoke its unblock function to wake
 * it.  NOTE(review): several lines (493-495, 498, 506) are missing from
 * this rendering — presumably the interrupt-flag setting and the
 * interrupt_lock pair; confirm upstream.
 */
490 static void
491 rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
492 {
494  if (trap) {
496  }
497  else {
499  }
500  if (th->unblock.func != NULL) {
501  (th->unblock.func)(th->unblock.arg);
502  }
503  else {
504  /* none */
505  }
507 }
508 
509 void
511 {
512  rb_threadptr_interrupt_common(th, 0);
513 }
514 
/* Interrupt TH for signal (trap) handling; trap variant of
 * rb_threadptr_interrupt_common. */
515 static void
516 threadptr_trap_interrupt(rb_thread_t *th)
517 {
518  rb_threadptr_interrupt_common(th, 1);
519 }
520 
/*
 * Ask every living thread except MAIN_THREAD to terminate.
 * NOTE(review): original lines 530-531 (the actual termination request
 * between the two debug messages — presumably enqueueing a kill event and
 * interrupting the thread) are missing from this rendering; confirm
 * upstream.
 */
521 static void
522 terminate_all(rb_vm_t *vm, const rb_thread_t *main_thread)
523 {
524  rb_thread_t *th = 0;
525 
526  list_for_each(&vm->living_threads, th, vmlt_node) {
527  if (th != main_thread) {
528  thread_debug("terminate_all: begin (thid: %"PRI_THREAD_ID", status: %s)\n",
529  thread_id_str(th), thread_status_name(th, TRUE));
532  thread_debug("terminate_all: end (thid: %"PRI_THREAD_ID", status: %s)\n",
533  thread_id_str(th), thread_status_name(th, TRUE));
534  }
535  else {
536  thread_debug("terminate_all: main thread (%p)\n", (void *)th);
537  }
538  }
539 }
540 
541 void
543 {
544  const char *err;
545  rb_mutex_t *mutex;
546  rb_mutex_t *mutexes = th->keeping_mutexes;
547 
548  while (mutexes) {
549  mutex = mutexes;
550  /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
551  (void *)mutexes); */
552  mutexes = mutex->next_mutex;
553  err = rb_mutex_unlock_th(mutex, th);
554  if (err) rb_bug("invalid keeping_mutexes: %s", err);
555  }
556 }
557 
558 void
560 {
561  rb_thread_t *volatile th = GET_THREAD(); /* main thread */
562  rb_execution_context_t * volatile ec = th->ec;
563  rb_vm_t *volatile vm = th->vm;
564  volatile int sleeping = 0;
565 
566  if (vm->main_thread != th) {
567  rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
568  (void *)vm->main_thread, (void *)th);
569  }
570 
571  /* unlock all locking mutexes */
573 
574  EC_PUSH_TAG(ec);
575  if (EC_EXEC_TAG() == TAG_NONE) {
576  retry:
577  thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
578  terminate_all(vm, th);
579 
580  while (vm_living_thread_num(vm) > 1) {
582  /*
583  * Thread exiting routine in thread_start_func_2 notify
584  * me when the last sub-thread exit.
585  */
586  sleeping = 1;
587  native_sleep(th, &rel);
589  sleeping = 0;
590  }
591  }
592  else {
593  /*
594  * When caught an exception (e.g. Ctrl+C), let's broadcast
595  * kill request again to ensure killing all threads even
596  * if they are blocked on sleep, mutex, etc.
597  */
598  if (sleeping) {
599  sleeping = 0;
600  goto retry;
601  }
602  }
603  EC_POP_TAG();
604 }
605 
607 
/*
 * Minimal cleanup safe to run before exec/fork: mark the thread killed and
 * forget its machine stack bounds.
 * NOTE(review): original line 616 is missing from this rendering —
 * confirm upstream whether another field is reset here.
 */
608 static void
609 thread_cleanup_func_before_exec(void *th_ptr)
610 {
611  rb_thread_t *th = th_ptr;
612  th->status = THREAD_KILLED;
613  // The thread stack doesn't exist in the forked process:
614  th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;
615 
617 }
618 
/*
 * Full per-thread cleanup; when ATFORK, skip destroying native resources
 * (see comment below).  NOTE(review): original line 639 is missing from
 * this rendering — confirm upstream what precedes native_thread_destroy.
 */
619 static void
620 thread_cleanup_func(void *th_ptr, int atfork)
621 {
622  rb_thread_t *th = th_ptr;
623 
624  th->locking_mutex = Qfalse;
625  thread_cleanup_func_before_exec(th_ptr);
626 
627  /*
628  * Unfortunately, we can't release native threading resource at fork
629  * because libc may have unstable locking state therefore touching
630  * a threading resource may cause a deadlock.
631  *
632  * FIXME: Skipping native_mutex_destroy(pthread_mutex_destroy) is safe
633  * with NPTL, but native_thread_destroy calls pthread_cond_destroy
634  * which calls free(3), so there is a small memory leak atfork, here.
635  */
636  if (atfork)
637  return;
638 
640  native_thread_destroy(th);
641 }
642 
643 static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
644 static VALUE rb_thread_to_s(VALUE thread);
645 
646 void
648 {
649  native_thread_init_stack(th);
650 }
651 
652 const VALUE *
654 {
655  const VALUE *ep = vm_proc_ep(proc);
656 
657  if (ep) {
658  return rb_vm_ep_local_ep(ep);
659  }
660  else {
661  return NULL;
662  }
663 }
664 
/*
 * Body of a newly started thread: run either the stored proc (Thread.new)
 * or the stored C function (rb_thread_create), storing the result in
 * th->value.  NOTE(review): original line 683 is missing from this
 * rendering (presumably the THREAD_BEGIN event hook pairing the
 * THREAD_END hook below) — confirm upstream.
 */
665 static void
666 thread_do_start(rb_thread_t *th)
667 {
668  native_set_thread_name(th);
669 
670  if (th->invoke_type == thread_invoke_type_proc) {
671  VALUE args = th->invoke_arg.proc.args;
672  int args_len = (int)RARRAY_LEN(args);
673  int kw_splat = th->invoke_arg.proc.kw_splat;
674  const VALUE *args_ptr;
675  VALUE procval = th->invoke_arg.proc.proc;
676  rb_proc_t *proc;
677  GetProcPtr(procval, proc);
678 
679  th->ec->errinfo = Qnil;
680  th->ec->root_lep = rb_vm_proc_local_ep(procval);
681  th->ec->root_svar = Qfalse;
682 
684  vm_check_ints_blocking(th->ec);
685 
686  if (args_len < 8) {
687  /* free proc.args if the length is enough small */
688  args_ptr = ALLOCA_N(VALUE, args_len);
689  MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR_TRANSIENT(args), VALUE, args_len);
690  th->invoke_arg.proc.args = Qnil;
691  }
692  else {
693  args_ptr = RARRAY_CONST_PTR(args);
694  }
695 
696  rb_adjust_argv_kw_splat(&args_len, &args_ptr, &kw_splat);
697  th->value = rb_vm_invoke_proc(th->ec, proc,
698  args_len, args_ptr,
699  kw_splat, VM_BLOCK_HANDLER_NONE);
700 
701  EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
702  }
703  else {
704  th->value = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
705  }
706 }
707 
709 
/*
 * Native-thread entry point for every non-main Ruby thread: sets up the VM
 * stack (alloca'd on the native stack), acquires the GVL, runs
 * thread_do_start under a tag for exception handling, then performs
 * end-of-life bookkeeping (error propagation to the main thread, removal
 * from living_threads, waking joiners, cleanup, GVL release).
 * NOTE(review): this rendering is missing several original lines (713,
 * 763, 786, 806, 813, 816) — including part of the abort_on_exception
 * condition and the joiner-status switch cases — confirm upstream before
 * relying on this text.
 */
710 static int
711 thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
712 {
714  enum ruby_tag_type state;
715  rb_thread_list_t *join_list;
716  rb_thread_t *main_th;
717  VALUE errinfo = Qnil;
718  size_t size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
719  VALUE * vm_stack = NULL;
720 
721  if (th == th->vm->main_thread) {
722  rb_bug("thread_start_func_2 must not be used for main thread");
723  }
724 
725  thread_debug("thread start: %p\n", (void *)th);
726  VM_ASSERT((size * sizeof(VALUE)) <= th->ec->machine.stack_maxsize);
727 
728  vm_stack = alloca(size * sizeof(VALUE));
729  VM_ASSERT(vm_stack);
730 
731  gvl_acquire(th->vm, th);
732 
733  rb_ec_initialize_vm_stack(th->ec, vm_stack, size);
     /* machine stack shrinks by the space the VM stack borrowed from it */
734  th->ec->machine.stack_start = STACK_DIR_UPPER(vm_stack + size, vm_stack);
735  th->ec->machine.stack_maxsize -= size * sizeof(VALUE);
736 
737  ruby_thread_set_native(th);
738 
739  {
740  thread_debug("thread start (get lock): %p\n", (void *)th);
741  rb_thread_set_current(th);
742 
743  EC_PUSH_TAG(th->ec);
744  if ((state = EC_EXEC_TAG()) == TAG_NONE) {
745  SAVE_ROOT_JMPBUF(th, thread_do_start(th));
746  }
747  else {
748  errinfo = th->ec->errinfo;
749  if (state == TAG_FATAL) {
750  /* fatal error within this thread, need to stop whole script */
751  }
752  else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
753  /* exit on main_thread. */
754  }
755  else {
756  if (th->report_on_exception) {
757  VALUE mesg = rb_thread_to_s(th->self);
758  rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
759  rb_write_error_str(mesg);
760  rb_ec_error_print(th->ec, errinfo);
761  }
762  if (th->vm->thread_abort_on_exception ||
764  /* exit on main_thread */
765  }
766  else {
767  errinfo = Qnil;
768  }
769  }
770  th->value = Qnil;
771  }
772 
773  th->status = THREAD_KILLED;
774  thread_debug("thread end: %p\n", (void *)th);
775 
776  main_th = th->vm->main_thread;
777  if (main_th == th) {
778  ruby_stop(0);
779  }
780  if (RB_TYPE_P(errinfo, T_OBJECT)) {
781  /* treat with normal error object */
782  rb_threadptr_raise(main_th, 1, &errinfo);
783  }
784  EC_POP_TAG();
785 
787 
788  /* locking_mutex must be Qfalse */
789  if (th->locking_mutex != Qfalse) {
790  rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
791  (void *)th, th->locking_mutex);
792  }
793 
794  /* delete self other than main thread from living_threads */
795  rb_vm_living_threads_remove(th->vm, th);
796  if (main_th->status == THREAD_KILLED && rb_thread_alone()) {
797  /* I'm last thread. wake up main thread from rb_thread_terminate_all */
798  rb_threadptr_interrupt(main_th);
799  }
800 
801  /* wake up joining threads */
802  join_list = th->join_list;
803  while (join_list) {
804  rb_threadptr_interrupt(join_list->th);
805  switch (join_list->th->status) {
807  join_list->th->status = THREAD_RUNNABLE;
808  default: break;
809  }
810  join_list = join_list->next;
811  }
812 
814  rb_check_deadlock(th->vm);
815 
817  }
818 
819  thread_cleanup_func(th, FALSE);
820  VM_ASSERT(th->ec->vm_stack == NULL);
821 
822  gvl_release(th->vm);
823 
824  return 0;
825 }
826 
/*
 * Shared implementation behind Thread.new/Thread.start/rb_thread_create:
 * record what the new thread should run (C function FN or the given
 * block), inherit priority/thread-group/pending-interrupt mask from the
 * creating thread, then spawn the native thread and register it as
 * living.  NOTE(review): several original lines (834, 849-850, 856-857,
 * 859, 861) are missing from this rendering — including the rb_raise for
 * the frozen-ThreadGroup case and part of the kw_splat ternary — confirm
 * upstream.
 */
827 static VALUE
828 thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(void *))
829 {
830  rb_thread_t *th = rb_thread_ptr(thval), *current_th = GET_THREAD();
831  int err;
832 
833  if (OBJ_FROZEN(current_th->thgroup)) {
835  "can't start a new thread (frozen ThreadGroup)");
836  }
837 
838  if (fn) {
839  th->invoke_type = thread_invoke_type_func;
840  th->invoke_arg.func.func = fn;
841  th->invoke_arg.func.arg = (void *)args;
842  }
843  else {
844  (void)RARRAY_LENINT(args);
845  th->invoke_type = thread_invoke_type_proc;
846  th->invoke_arg.proc.proc = rb_block_proc();
847  th->invoke_arg.proc.args = args;
848  th->invoke_arg.proc.kw_splat = rb_empty_keyword_given_p() ?
851  }
852 
853  th->priority = current_th->priority;
854  th->thgroup = current_th->thgroup;
855 
858  th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
860 
862 
863  /* kick thread */
864  err = native_thread_create(th);
865  if (err) {
866  th->status = THREAD_KILLED;
867  rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
868  }
869  rb_vm_living_threads_insert(th->vm, th);
870  return thval;
871 }
872 
873 #define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)
874 
875 /*
876  * call-seq:
877  * Thread.new { ... } -> thread
878  * Thread.new(*args, &proc) -> thread
879  * Thread.new(*args) { |args| ... } -> thread
880  *
881  * Creates a new thread executing the given block.
882  *
883  * Any +args+ given to ::new will be passed to the block:
884  *
885  * arr = []
886  * a, b, c = 1, 2, 3
887  * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
888  * arr #=> [1, 2, 3]
889  *
890  * A ThreadError exception is raised if ::new is called without a block.
891  *
892  * If you're going to subclass Thread, be sure to call super in your
893  * +initialize+ method, otherwise a ThreadError will be raised.
894  */
/*
 * Thread.new implementation: allocate the thread object and verify that a
 * subclass's #initialize called super (which marks the thread
 * initialized).  NOTE(review): original line 904 is missing from this
 * rendering — presumably the rb_obj_call_init invocation that runs
 * #initialize; confirm upstream.
 */
895 static VALUE
896 thread_s_new(int argc, VALUE *argv, VALUE klass)
897 {
898  rb_thread_t *th;
899  VALUE thread = rb_thread_alloc(klass);
900 
901  if (GET_VM()->main_thread->status == THREAD_KILLED)
902  rb_raise(rb_eThreadError, "can't alloc thread");
903 
905  th = rb_thread_ptr(thread);
906  if (!threadptr_initialized(th)) {
907  rb_raise(rb_eThreadError, "uninitialized thread - check `%"PRIsVALUE"#initialize'",
908  klass);
909  }
910  return thread;
911 }
912 
913 /*
914  * call-seq:
915  * Thread.start([args]*) {|args| block } -> thread
916  * Thread.fork([args]*) {|args| block } -> thread
917  *
918  * Basically the same as ::new. However, if class Thread is subclassed, then
919  * calling +start+ in that subclass will not invoke the subclass's
920  * +initialize+ method.
921  */
922 
923 static VALUE
924 thread_start(VALUE klass, VALUE args)
925 {
926  return thread_create_core(rb_thread_alloc(klass), args, 0);
927 }
928 
929 static VALUE
930 threadptr_invoke_proc_location(rb_thread_t *th)
931 {
932  if (th->invoke_type == thread_invoke_type_proc) {
933  return rb_proc_location(th->invoke_arg.proc.proc);
934  }
935  else {
936  return Qnil;
937  }
938 }
939 
940 /* :nodoc: */
/*
 * Thread#initialize: reject re-initialization (reporting the original
 * proc's location when known), require a block, and delegate thread
 * creation to thread_create_core.  NOTE(review): original line 952 is
 * missing from this rendering — presumably the start of the rb_raise call
 * whose format string follows on line 953; confirm upstream.
 */
941 static VALUE
942 thread_initialize(VALUE thread, VALUE args)
943 {
944  rb_thread_t *th = rb_thread_ptr(thread);
945 
946  if (!rb_block_given_p()) {
947  rb_raise(rb_eThreadError, "must be called with a block");
948  }
949  else if (th->invoke_type != thread_invoke_type_none) {
950  VALUE loc = threadptr_invoke_proc_location(th);
951  if (!NIL_P(loc)) {
953  "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
954  RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
955  }
956  else {
957  rb_raise(rb_eThreadError, "already initialized thread");
958  }
959  }
960  else {
961  return thread_create_core(thread, args, NULL);
962  }
963 }
964 
/* Public C API: spawn a new Ruby thread that runs fn(arg); returns the
 * Thread object. */
965 VALUE
966 rb_thread_create(VALUE (*fn)(void *), void *arg)
967 {
968  return thread_create_core(rb_thread_alloc(rb_cThread), (VALUE)arg, fn);
969 }
970 
971 
972 struct join_arg {
975 };
976 
977 static VALUE
978 remove_from_join_list(VALUE arg)
979 {
980  struct join_arg *p = (struct join_arg *)arg;
981  rb_thread_t *target_th = p->target, *th = p->waiting;
982 
983  if (target_th->status != THREAD_KILLED) {
984  rb_thread_list_t **p = &target_th->join_list;
985 
986  while (*p) {
987  if ((*p)->th == th) {
988  *p = (*p)->next;
989  break;
990  }
991  p = &(*p)->next;
992  }
993  }
994 
995  return Qnil;
996 }
997 
/*
 * rb_ensure body for thread_join: sleep until the target thread dies or
 * the optional time limit expires.  Returns Qtrue when the target died,
 * Qfalse on timeout.  NOTE(review): original lines 1011 and 1026 are
 * missing from this rendering — presumably th->status = THREAD_STOPPED_FOREVER
 * before the unlimited sleep and an interrupt check after waking; confirm
 * upstream.
 */
998 static VALUE
999 thread_join_sleep(VALUE arg)
1000 {
1001  struct join_arg *p = (struct join_arg *)arg;
1002  rb_thread_t *target_th = p->target, *th = p->waiting;
1003  rb_hrtime_t end = 0;
1004 
1005  if (p->limit) {
1006  end = rb_hrtime_add(*p->limit, rb_hrtime_now());
1007  }
1008 
1009  while (target_th->status != THREAD_KILLED) {
1010  if (!p->limit) {
1012  th->vm->sleeper++;
1013  rb_check_deadlock(th->vm);
1014  native_sleep(th, 0);
1015  th->vm->sleeper--;
1016  }
1017  else {
1018  if (hrtime_update_expire(p->limit, end)) {
1019  thread_debug("thread_join: timeout (thid: %"PRI_THREAD_ID")\n",
1020  thread_id_str(target_th));
1021  return Qfalse;
1022  }
1023  th->status = THREAD_STOPPED;
1024  native_sleep(th, p->limit);
1025  }
1027  th->status = THREAD_RUNNABLE;
1028  thread_debug("thread_join: interrupted (thid: %"PRI_THREAD_ID", status: %s)\n",
1029  thread_id_str(target_th), thread_status_name(target_th, TRUE));
1030  }
1031  return Qtrue;
1032 }
1033 
/*
 * Core of Thread#join / Thread#value: wait (optionally bounded by REL)
 * for TARGET_TH to finish.  Returns Qnil on timeout, otherwise re-raises
 * the target's terminating exception or returns the target's self.
 * NOTE(review): original line 1055 is missing from this rendering —
 * presumably the declaration of the stack-allocated `list` node used on
 * the following lines; confirm upstream.
 */
1034 static VALUE
1035 thread_join(rb_thread_t *target_th, rb_hrtime_t *rel)
1036 {
1037  rb_thread_t *th = GET_THREAD();
1038  struct join_arg arg;
1039 
1040  if (th == target_th) {
1041  rb_raise(rb_eThreadError, "Target thread must not be current thread");
1042  }
1043  if (GET_VM()->main_thread == target_th) {
1044  rb_raise(rb_eThreadError, "Target thread must not be main thread");
1045  }
1046 
1047  arg.target = target_th;
1048  arg.waiting = th;
1049  arg.limit = rel;
1050 
1051  thread_debug("thread_join (thid: %"PRI_THREAD_ID", status: %s)\n",
1052  thread_id_str(target_th), thread_status_name(target_th, TRUE));
1053 
1054  if (target_th->status != THREAD_KILLED) {
1056  list.next = target_th->join_list;
1057  list.th = th;
1058  target_th->join_list = &list;
      /* rb_ensure guarantees remove_from_join_list runs even if the
       * sleep is interrupted by an exception */
1059  if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
1060  remove_from_join_list, (VALUE)&arg)) {
1061  return Qnil;
1062  }
1063  }
1064 
1065  thread_debug("thread_join: success (thid: %"PRI_THREAD_ID", status: %s)\n",
1066  thread_id_str(target_th), thread_status_name(target_th, TRUE));
1067 
1068  if (target_th->ec->errinfo != Qnil) {
1069  VALUE err = target_th->ec->errinfo;
1070 
1071  if (FIXNUM_P(err)) {
1072  switch (err) {
1073  case INT2FIX(TAG_FATAL):
1074  thread_debug("thread_join: terminated (thid: %"PRI_THREAD_ID", status: %s)\n",
1075  thread_id_str(target_th), thread_status_name(target_th, TRUE));
1076 
1077  /* OK. killed. */
1078  break;
1079  default:
1080  rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
1081  }
1082  }
1083  else if (THROW_DATA_P(target_th->ec->errinfo)) {
1084  rb_bug("thread_join: THROW_DATA should not reach here.");
1085  }
1086  else {
1087  /* normal exception */
1088  rb_exc_raise(err);
1089  }
1090  }
1091  return target_th->self;
1092 }
1093 
1094 static rb_hrtime_t *double2hrtime(rb_hrtime_t *, double);
1095 
1096 /*
1097  * call-seq:
1098  * thr.join -> thr
1099  * thr.join(limit) -> thr
1100  *
1101  * The calling thread will suspend execution and run this +thr+.
1102  *
1103  * Does not return until +thr+ exits or until the given +limit+ seconds have
1104  * passed.
1105  *
1106  * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
1107  * returned.
1108  *
1109  * Any threads not joined will be killed when the main program exits.
1110  *
1111  * If +thr+ had previously raised an exception and the ::abort_on_exception or
1112  * $DEBUG flags are not set, (so the exception has not yet been processed), it
1113  * will be processed at this time.
1114  *
1115  * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
1116  * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
1117  * x.join # Let thread x finish, thread a will be killed on exit.
1118  * #=> "axyz"
1119  *
1120  * The following example illustrates the +limit+ parameter.
1121  *
1122  * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
1123  * puts "Waiting" until y.join(0.15)
1124  *
1125  * This will produce:
1126  *
1127  * tick...
1128  * Waiting
1129  * tick...
1130  * Waiting
1131  * tick...
1132  * tick...
1133  */
1134 
1135 static VALUE
1136 thread_join_m(int argc, VALUE *argv, VALUE self)
1137 {
1138  VALUE limit;
1139  rb_hrtime_t rel, *to = 0;
1140 
1141  /*
1142  * This supports INFINITY and negative values, so we can't use
1143  * rb_time_interval right now...
1144  */
1145  if (!rb_check_arity(argc, 0, 1) || NIL_P(argv[0])) {
1146  /* unlimited */
1147  }
1148  else if (FIXNUM_P(limit = argv[0])) {
1149  rel = rb_sec2hrtime(NUM2TIMET(limit));
1150  to = &rel;
1151  }
1152  else {
1153  to = double2hrtime(&rel, rb_num2dbl(limit));
1154  }
1155 
1156  return thread_join(rb_thread_ptr(self), to);
1157 }
1158 
1159 /*
1160  * call-seq:
1161  * thr.value -> obj
1162  *
1163  * Waits for +thr+ to complete, using #join, and returns its value or raises
1164  * the exception which terminated the thread.
1165  *
1166  * a = Thread.new { 2 + 2 }
1167  * a.value #=> 4
1168  *
1169  * b = Thread.new { raise 'something went wrong' }
1170  * b.value #=> RuntimeError: something went wrong
1171  */
1172 
1173 static VALUE
1174 thread_value(VALUE self)
1175 {
1176  rb_thread_t *th = rb_thread_ptr(self);
1177  thread_join(th, 0);
1178  return th->value;
1179 }
1180 
1181 /*
1182  * Thread Scheduling
1183  */
1184 
1185 /*
1186  * Back when we used "struct timeval", not all platforms implemented
1187  * tv_sec as time_t. Nowadays we use "struct timespec" and tv_sec
1188  * seems to be implemented more consistently across platforms.
1189  * At least other parts of our code hasn't had to deal with non-time_t
1190  * tv_sec in timespec...
1191  */
1192 #define TIMESPEC_SEC_MAX TIMET_MAX
1193 #define TIMESPEC_SEC_MIN TIMET_MIN
1194 
1195 static rb_hrtime_t *
1196 double2hrtime(rb_hrtime_t *hrt, double d)
1197 {
1198  /* assume timespec.tv_sec has same signedness as time_t */
1199  const double TIMESPEC_SEC_MAX_PLUS_ONE = TIMET_MAX_PLUS_ONE;
1200 
1201  if (TIMESPEC_SEC_MAX_PLUS_ONE <= d) {
1202  return NULL;
1203  }
1204  else if (d <= 0) {
1205  *hrt = 0;
1206  }
1207  else {
1208  *hrt = (rb_hrtime_t)(d * (double)RB_HRTIME_PER_SEC);
1209  }
1210  return hrt;
1211 }
1212 
/*
 * Fill *ts with the current time for timeout bookkeeping.
 * Prefers CLOCK_MONOTONIC (immune to wall-clock adjustments) and falls
 * back to rb_timespec_now() where it is unavailable or fails.
 */
static void
getclockofday(struct timespec *ts)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    /* on failure fall through to the wall-clock path below */
    if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
        return;
#endif
    rb_timespec_now(ts);
}
1222 
1223 /*
1224  * Don't inline this, since library call is already time consuming
1225  * and we don't want "struct timespec" on stack too long for GC
1226  */
1230 {
1231  struct timespec ts;
1232 
1233  getclockofday(&ts);
1234  return rb_timespec2hrtime(&ts);
1235 }
1236 
1237 static void
1238 sleep_forever(rb_thread_t *th, unsigned int fl)
1239 {
1240  enum rb_thread_status prev_status = th->status;
1241  enum rb_thread_status status;
1242  int woke;
1243 
1245  th->status = status;
1247  while (th->status == status) {
1248  if (fl & SLEEP_DEADLOCKABLE) {
1249  th->vm->sleeper++;
1250  rb_check_deadlock(th->vm);
1251  }
1252  native_sleep(th, 0);
1253  if (fl & SLEEP_DEADLOCKABLE) {
1254  th->vm->sleeper--;
1255  }
1256  woke = vm_check_ints_blocking(th->ec);
1257  if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1258  break;
1259  }
1260  th->status = prev_status;
1261 }
1262 
1263 /*
1264  * at least gcc 7.2 and 7.3 complains about "rb_hrtime_t end"
1265  * being uninitialized, maybe other versions, too.
1266  */
1268 #if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
1269 COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
1270 #endif
1271 #ifndef PRIu64
1272 #define PRIu64 PRI_64_PREFIX "u"
1273 #endif
1274 /*
1275  * @end is the absolute time when @ts is set to expire
 * Returns true if @end has passed
1277  * Updates @ts and returns false otherwise
1278  */
1279 static int
1280 hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
1281 {
1282  rb_hrtime_t now = rb_hrtime_now();
1283 
1284  if (now > end) return 1;
1285  thread_debug("hrtime_update_expire: "
1286  "%"PRIu64" > %"PRIu64"\n",
1287  (uint64_t)end, (uint64_t)now);
1288  *timeout = end - now;
1289  return 0;
1290 }
1292 
1293 static void
1294 sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
1295 {
1296  enum rb_thread_status prev_status = th->status;
1297  int woke;
1298  rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);
1299 
1300  th->status = THREAD_STOPPED;
1302  while (th->status == THREAD_STOPPED) {
1303  native_sleep(th, &rel);
1304  woke = vm_check_ints_blocking(th->ec);
1305  if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1306  break;
1307  if (hrtime_update_expire(&rel, end))
1308  break;
1309  }
1310  th->status = prev_status;
1311 }
1312 
1313 void
1315 {
1316  thread_debug("rb_thread_sleep_forever\n");
1317  sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
1318 }
1319 
1320 void
1322 {
1323  thread_debug("rb_thread_sleep_deadly\n");
1325 }
1326 
1327 void
1329 {
1330  rb_thread_t *th = GET_THREAD();
1331  enum rb_thread_status prev_status = th->status;
1332 
1333  th->status = THREAD_STOPPED;
1334  native_sleep(th, 0);
1336  th->status = prev_status;
1337 }
1338 
/*
 * Sleep the current thread indefinitely with deadlock detection
 * (SLEEP_DEADLOCKABLE).  SLEEP_SPURIOUS_CHECK is intentionally not
 * passed, so sleep_forever() returns to the caller on any wakeup,
 * including spurious ones.
 */
static void
rb_thread_sleep_deadly_allow_spurious_wakeup(void)
{
    thread_debug("rb_thread_sleep_deadly_allow_spurious_wakeup\n");
    sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
}
1345 
1346 void
1348 {
1349  rb_thread_t *th = GET_THREAD();
1350 
1351  sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
1352 }
1353 
1354 /*
1355  * CAUTION: This function causes thread switching.
1356  * rb_thread_check_ints() check ruby's interrupts.
1357  * some interrupt needs thread switching/invoke handlers,
1358  * and so on.
1359  */
1360 
1361 void
1363 {
1365 }
1366 
1367 /*
1368  * Hidden API for tcl/tk wrapper.
1369  * There is no guarantee to perpetuate it.
1370  */
1371 int
1373 {
1374  return rb_signal_buff_size() != 0;
1375 }
1376 
1377 /* This function can be called in blocking region. */
1378 int
1380 {
1381  return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
1382 }
1383 
1384 void
1386 {
1388 }
1389 
1390 static void
1391 rb_thread_schedule_limits(uint32_t limits_us)
1392 {
1393  thread_debug("rb_thread_schedule\n");
1394  if (!rb_thread_alone()) {
1395  rb_thread_t *th = GET_THREAD();
1396 
1397  if (th->running_time_us >= limits_us) {
1398  thread_debug("rb_thread_schedule/switch start\n");
1400  gvl_yield(th->vm, th);
1401  rb_thread_set_current(th);
1402  thread_debug("rb_thread_schedule/switch done\n");
1403  }
1404  }
1405 }
1406 
1407 void
1409 {
1410  rb_thread_schedule_limits(0);
1412 }
1413 
1414 /* blocking region */
1415 
1416 static inline int
1417 blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
1418  rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1419 {
1420  region->prev_status = th->status;
1421  if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
1422  th->blocking_region_buffer = region;
1423  th->status = THREAD_STOPPED;
1424  thread_debug("enter blocking region (%p)\n", (void *)th);
1426  gvl_release(th->vm);
1427  return TRUE;
1428  }
1429  else {
1430  return FALSE;
1431  }
1432 }
1433 
/*
 * Leave a blocking region entered by blocking_region_begin(): disarm
 * the unblock function, re-acquire the GVL, and restore the thread
 * status saved in region->prev_status.  The first two steps below are
 * order-sensitive, as the inline comments explain.
 */
static inline void
blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
{
    /* entry to ubf_list still permitted at this point, make it impossible: */
    unblock_function_clear(th);
    /* entry to ubf_list impossible at this point, so unregister is safe: */
    unregister_ubf_list(th);

    gvl_acquire(th->vm, th);
    rb_thread_set_current(th);
    thread_debug("leave blocking region (%p)\n", (void *)th);
    th->blocking_region_buffer = 0;
    /* restore only if nothing changed the status while we were blocked */
    if (th->status == THREAD_STOPPED) {
        th->status = region->prev_status;
    }
}
1450 
1451 void *
1452 rb_nogvl(void *(*func)(void *), void *data1,
1453  rb_unblock_function_t *ubf, void *data2,
1454  int flags)
1455 {
1456  void *val = 0;
1458  rb_thread_t *th = rb_ec_thread_ptr(ec);
1459  int saved_errno = 0;
1460  VALUE ubf_th = Qfalse;
1461 
1462  if (ubf == RUBY_UBF_IO || ubf == RUBY_UBF_PROCESS) {
1463  ubf = ubf_select;
1464  data2 = th;
1465  }
1466  else if (ubf && vm_living_thread_num(th->vm) == 1) {
1468  th->vm->ubf_async_safe = 1;
1469  }
1470  else {
1471  ubf_th = rb_thread_start_unblock_thread();
1472  }
1473  }
1474 
1475  BLOCKING_REGION(th, {
1476  val = func(data1);
1477  saved_errno = errno;
1478  }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
1479 
1480  th->vm->ubf_async_safe = 0;
1481 
1482  if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
1484  }
1485 
1486  if (ubf_th != Qfalse) {
1487  thread_value(rb_thread_kill(ubf_th));
1488  }
1489 
1490  errno = saved_errno;
1491 
1492  return val;
1493 }
1494 
1495 /*
1496  * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1497  * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1498  * without interrupt process.
1499  *
1500  * rb_thread_call_without_gvl() does:
1501  * (1) Check interrupts.
1502  * (2) release GVL.
1503  * Other Ruby threads may run in parallel.
1504  * (3) call func with data1
1505  * (4) acquire GVL.
1506  * Other Ruby threads can not run in parallel any more.
1507  * (5) Check interrupts.
1508  *
1509  * rb_thread_call_without_gvl2() does:
1510  * (1) Check interrupt and return if interrupted.
1511  * (2) release GVL.
1512  * (3) call func with data1 and a pointer to the flags.
1513  * (4) acquire GVL.
1514  *
1515  * If another thread interrupts this thread (Thread#kill, signal delivery,
1516  * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1517  * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1518  * toggling a cancellation flag, canceling the invocation of a call inside
1519  * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1520  *
1521  * There are built-in ubfs and you can specify these ubfs:
1522  *
1523  * * RUBY_UBF_IO: ubf for IO operation
1524  * * RUBY_UBF_PROCESS: ubf for process operation
1525  *
1526  * However, we can not guarantee our built-in ubfs interrupt your `func()'
1527  * correctly. Be careful to use rb_thread_call_without_gvl(). If you don't
1528  * provide proper ubf(), your program will not stop for Control+C or other
1529  * shutdown events.
1530  *
1531  * "Check interrupts" on above list means checking asynchronous
1532  * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1533  * request, and so on) and calling corresponding procedures
1534  * (such as `trap' for signals, raise an exception for Thread#raise).
1535  * If `func()' finished and received interrupts, you may skip interrupt
 * checking. For example, assume the following func() reads data from a file.
1537  *
1538  * read_func(...) {
1539  * // (a) before read
1540  * read(buffer); // (b) reading
1541  * // (c) after read
1542  * }
1543  *
1544  * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1545  * `read_func()' and interrupts are checked. However, if an interrupt occurs
1546  * at (c), after *read* operation is completed, checking interrupts is harmful
1547  * because it causes irrevocable side-effect, the read data will vanish. To
1548  * avoid such problem, the `read_func()' should be used with
1549  * `rb_thread_call_without_gvl2()'.
1550  *
1551  * If `rb_thread_call_without_gvl2()' detects interrupt, it returns
1552  * immediately. This function does not show when the execution was interrupted.
1553  * For example, there are 4 possible timing (a), (b), (c) and before calling
1554  * read_func(). You need to record progress of a read_func() and check
1555  * the progress after `rb_thread_call_without_gvl2()'. You may need to call
 * `rb_thread_check_ints()' correctly, or your program will not be able to
 * perform proper processing, such as `trap' handling and so on.
1558  *
1559  * NOTE: You can not execute most of Ruby C API and touch Ruby
1560  * objects in `func()' and `ubf()', including raising an
1561  * exception, because current thread doesn't acquire GVL
1562  * (it causes synchronization problems). If you need to
1563  * call ruby functions either use rb_thread_call_with_gvl()
1564  * or read source code of C APIs and confirm safety by
1565  * yourself.
1566  *
1567  * NOTE: In short, this API is difficult to use safely. I recommend you
1568  * use other ways if you have. We lack experiences to use this API.
1569  * Please report your problem related on it.
1570  *
1571  * NOTE: Releasing GVL and re-acquiring GVL may be expensive operations
1572  * for a short running `func()'. Be sure to benchmark and use this
1573  * mechanism when `func()' consumes enough time.
1574  *
1575  * Safe C API:
1576  * * rb_thread_interrupted() - check interrupt flag
1577  * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1578  * they will work without GVL, and may acquire GVL when GC is needed.
1579  */
/*
 * Variant of rb_thread_call_without_gvl() that fails fast on interrupt:
 * RB_NOGVL_INTR_FAIL makes rb_nogvl() return without processing a
 * detected interrupt.  See the long commentary above for the contract.
 */
void *
rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
                            rb_unblock_function_t *ubf, void *data2)
{
    return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
}
1586 
/*
 * Release the GVL while running func(data1); ubf(data2) may be invoked
 * to interrupt func.  Flags = 0 means interrupts are checked/processed
 * after re-acquiring the GVL.  See the long commentary above.
 */
void *
rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
                           rb_unblock_function_t *ubf, void *data2)
{
    return rb_nogvl(func, data1, ubf, data2, 0);
}
1593 
1594 VALUE
1596 {
1597  volatile VALUE val = Qundef; /* shouldn't be used */
1598  rb_execution_context_t * volatile ec = GET_EC();
1599  volatile int saved_errno = 0;
1600  enum ruby_tag_type state;
1601  struct waiting_fd wfd;
1602 
1603  wfd.fd = fd;
1604  wfd.th = rb_ec_thread_ptr(ec);
1605  list_add(&rb_ec_vm_ptr(ec)->waiting_fds, &wfd.wfd_node);
1606 
1607  EC_PUSH_TAG(ec);
1608  if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1609  BLOCKING_REGION(wfd.th, {
1610  val = func(data1);
1611  saved_errno = errno;
1612  }, ubf_select, wfd.th, FALSE);
1613  }
1614  EC_POP_TAG();
1615 
1616  /*
1617  * must be deleted before jump
1618  * this will delete either from waiting_fds or on-stack LIST_HEAD(busy)
1619  */
1620  list_del(&wfd.wfd_node);
1621 
1622  if (state) {
1623  EC_JUMP_TAG(ec, state);
1624  }
1625  /* TODO: check func() */
1627 
1628  errno = saved_errno;
1629 
1630  return val;
1631 }
1632 
1633 /*
1634  * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
1635  *
1636  * After releasing GVL using
1637  * rb_thread_call_without_gvl() you can not access Ruby values or invoke
1638  * methods. If you need to access Ruby you must use this function
1639  * rb_thread_call_with_gvl().
1640  *
1641  * This function rb_thread_call_with_gvl() does:
1642  * (1) acquire GVL.
1643  * (2) call passed function `func'.
1644  * (3) release GVL.
1645  * (4) return a value which is returned at (2).
1646  *
1647  * NOTE: You should not return Ruby object at (2) because such Object
1648  * will not be marked.
1649  *
1650  * NOTE: If an exception is raised in `func', this function DOES NOT
1651  * protect (catch) the exception. If you have any resources
1652  * which should free before throwing exception, you need use
1653  * rb_protect() in `func' and return a value which represents
1654  * exception was raised.
1655  *
1656  * NOTE: This function should not be called by a thread which was not
1657  * created as Ruby thread (created by Thread.new or so). In other
1658  * words, this function *DOES NOT* associate or convert a NON-Ruby
1659  * thread to a Ruby thread.
1660  */
1661 void *
1662 rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
1663 {
1664  rb_thread_t *th = ruby_thread_from_native();
1665  struct rb_blocking_region_buffer *brb;
1666  struct rb_unblock_callback prev_unblock;
1667  void *r;
1668 
1669  if (th == 0) {
1670  /* Error has occurred, but we can't use rb_bug()
1671  * because this thread is not Ruby's thread.
1672  * What should we do?
1673  */
1674 
1675  fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1676  exit(EXIT_FAILURE);
1677  }
1678 
1680  prev_unblock = th->unblock;
1681 
1682  if (brb == 0) {
1683  rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
1684  }
1685 
1686  blocking_region_end(th, brb);
1687  /* enter to Ruby world: You can access Ruby values, methods and so on. */
1688  r = (*func)(data1);
1689  /* leave from Ruby world: You can not access Ruby values, etc. */
1690  int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
1691  RUBY_ASSERT_ALWAYS(released);
1692  return r;
1693 }
1694 
1695 /*
1696  * ruby_thread_has_gvl_p - check if current native thread has GVL.
1697  *
1698  ***
1699  *** This API is EXPERIMENTAL!
1700  *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1701  ***
1702  */
1703 
1704 int
1706 {
1707  rb_thread_t *th = ruby_thread_from_native();
1708 
1709  if (th && th->blocking_region_buffer == 0) {
1710  return 1;
1711  }
1712  else {
1713  return 0;
1714  }
1715 }
1716 
1717 /*
1718  * call-seq:
1719  * Thread.pass -> nil
1720  *
1721  * Give the thread scheduler a hint to pass execution to another thread.
1722  * A running thread may or may not switch, it depends on OS and processor.
1723  */
1724 
1725 static VALUE
1726 thread_s_pass(VALUE klass)
1727 {
1729  return Qnil;
1730 }
1731 
1732 /*****************************************************/
1733 
1734 /*
1735  * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
1736  *
1737  * Async events such as an exception thrown by Thread#raise,
1738  * Thread#kill and thread termination (after main thread termination)
1739  * will be queued to th->pending_interrupt_queue.
1740  * - clear: clear the queue.
1741  * - enque: enqueue err object into queue.
1742  * - deque: dequeue err object from queue.
1743  * - active_p: return 1 if the queue should be checked.
1744  *
1745  * All rb_threadptr_pending_interrupt_* functions are called by
1746  * a GVL acquired thread, of course.
1747  * Note that all "rb_" prefix APIs need GVL to call.
1748  */
1749 
1750 void
1752 {
1754 }
1755 
1756 void
1758 {
1761 }
1762 
1763 static void
1764 threadptr_check_pending_interrupt_queue(rb_thread_t *th)
1765 {
1766  if (!th->pending_interrupt_queue) {
1767  rb_raise(rb_eThreadError, "uninitialized thread");
1768  }
1769 }
1770 
1776 };
1777 
/*
 * Decide how a pending interrupt of class +err+ should be delivered by
 * consulting the thread's Thread.handle_interrupt mask stack.  The
 * stack is scanned innermost (most recently pushed) mask first; within
 * each mask, err's ancestor chain is walked so entries keyed on a
 * superclass or included module also match.  Returns INTERRUPT_NONE
 * when no mask mentions the class (caller treats that as immediate).
 */
static enum handle_interrupt_timing
rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
{
    VALUE mask;
    long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
    const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
    VALUE mod;
    long i;

    for (i=0; i<mask_stack_len; i++) {
        mask = mask_stack[mask_stack_len-(i+1)]; /* innermost mask first */

        /* walk err's ancestry so superclass entries also match */
        for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
            VALUE klass = mod;
            VALUE sym;

            if (BUILTIN_TYPE(mod) == T_ICLASS) {
                /* iclass: hash keys use the module object itself */
                klass = RBASIC(mod)->klass;
            }
            else if (mod != RCLASS_ORIGIN(mod)) {
                /* NOTE(review): presumably skips origin placeholders
                 * introduced by Module#prepend — confirm */
                continue;
            }

            if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
                if (sym == sym_immediate) {
                    return INTERRUPT_IMMEDIATE;
                }
                else if (sym == sym_on_blocking) {
                    return INTERRUPT_ON_BLOCKING;
                }
                else if (sym == sym_never) {
                    return INTERRUPT_NEVER;
                }
                else {
                    rb_raise(rb_eThreadError, "unknown mask signature");
                }
            }
        }
        /* try to next mask */
    }
    return INTERRUPT_NONE;
}
1820 
1821 static int
1822 rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
1823 {
1824  return RARRAY_LEN(th->pending_interrupt_queue) == 0;
1825 }
1826 
1827 static int
1828 rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
1829 {
1830  int i;
1831  for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
1833  if (rb_class_inherited_p(e, err)) {
1834  return TRUE;
1835  }
1836  }
1837  return FALSE;
1838 }
1839 
1840 static VALUE
1841 rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
1842 {
1843 #if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
1844  int i;
1845 
1846  for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
1848 
1849  enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
1850 
1851  switch (mask_timing) {
1852  case INTERRUPT_ON_BLOCKING:
1853  if (timing != INTERRUPT_ON_BLOCKING) {
1854  break;
1855  }
1856  /* fall through */
1857  case INTERRUPT_NONE: /* default: IMMEDIATE */
1858  case INTERRUPT_IMMEDIATE:
1860  return err;
1861  case INTERRUPT_NEVER:
1862  break;
1863  }
1864  }
1865 
1867  return Qundef;
1868 #else
1870  if (rb_threadptr_pending_interrupt_empty_p(th)) {
1872  }
1873  return err;
1874 #endif
1875 }
1876 
1877 static int
1878 threadptr_pending_interrupt_active_p(rb_thread_t *th)
1879 {
1880  /*
1881  * For optimization, we don't check async errinfo queue
1882  * if the queue and the thread interrupt mask were not changed
1883  * since last check.
1884  */
1886  return 0;
1887  }
1888 
1889  if (rb_threadptr_pending_interrupt_empty_p(th)) {
1890  return 0;
1891  }
1892 
1893  return 1;
1894 }
1895 
1896 static int
1897 handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
1898 {
1899  VALUE *maskp = (VALUE *)args;
1900 
1901  if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
1902  rb_raise(rb_eArgError, "unknown mask signature");
1903  }
1904 
1905  if (!*maskp) {
1906  *maskp = rb_ident_hash_new();
1907  }
1908  rb_hash_aset(*maskp, key, val);
1909 
1910  return ST_CONTINUE;
1911 }
1912 
1913 /*
1914  * call-seq:
1915  * Thread.handle_interrupt(hash) { ... } -> result of the block
1916  *
1917  * Changes asynchronous interrupt timing.
1918  *
1919  * _interrupt_ means asynchronous event and corresponding procedure
1920  * by Thread#raise, Thread#kill, signal trap (not supported yet)
1921  * and main thread termination (if main thread terminates, then all
1922  * other thread will be killed).
1923  *
1924  * The given +hash+ has pairs like <code>ExceptionClass =>
1925  * :TimingSymbol</code>. Where the ExceptionClass is the interrupt handled by
1926  * the given block. The TimingSymbol can be one of the following symbols:
1927  *
1928  * [+:immediate+] Invoke interrupts immediately.
1929  * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
1930  * [+:never+] Never invoke all interrupts.
1931  *
1932  * _BlockingOperation_ means that the operation will block the calling thread,
1933  * such as read and write. On CRuby implementation, _BlockingOperation_ is any
1934  * operation executed without GVL.
1935  *
1936  * Masked asynchronous interrupts are delayed until they are enabled.
1937  * This method is similar to sigprocmask(3).
1938  *
1939  * === NOTE
1940  *
1941  * Asynchronous interrupts are difficult to use.
1942  *
1943  * If you need to communicate between threads, please consider to use another way such as Queue.
1944  *
1945  * Or use them with deep understanding about this method.
1946  *
1947  * === Usage
1948  *
1949  * In this example, we can guard from Thread#raise exceptions.
1950  *
1951  * Using the +:never+ TimingSymbol the RuntimeError exception will always be
1952  * ignored in the first block of the main thread. In the second
1953  * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
1954  *
1955  * th = Thread.new do
1956  * Thread.handle_interrupt(RuntimeError => :never) {
1957  * begin
1958  * # You can write resource allocation code safely.
1959  * Thread.handle_interrupt(RuntimeError => :immediate) {
1960  * # ...
1961  * }
1962  * ensure
1963  * # You can write resource deallocation code safely.
1964  * end
1965  * }
1966  * end
1967  * Thread.pass
1968  * # ...
1969  * th.raise "stop"
1970  *
1971  * While we are ignoring the RuntimeError exception, it's safe to write our
1972  * resource allocation code. Then, the ensure block is where we can safely
1973  * deallocate your resources.
1974  *
1975  * ==== Guarding from Timeout::Error
1976  *
1977  * In the next example, we will guard from the Timeout::Error exception. This
1978  * will help prevent from leaking resources when Timeout::Error exceptions occur
1979  * during normal ensure clause. For this example we use the help of the
1980  * standard library Timeout, from lib/timeout.rb
1981  *
1982  * require 'timeout'
1983  * Thread.handle_interrupt(Timeout::Error => :never) {
1984  * timeout(10){
1985  * # Timeout::Error doesn't occur here
1986  * Thread.handle_interrupt(Timeout::Error => :on_blocking) {
1987  * # possible to be killed by Timeout::Error
1988  * # while blocking operation
1989  * }
1990  * # Timeout::Error doesn't occur here
1991  * }
1992  * }
1993  *
1994  * In the first part of the +timeout+ block, we can rely on Timeout::Error being
1995  * ignored. Then in the <code>Timeout::Error => :on_blocking</code> block, any
1996  * operation that will block the calling thread is susceptible to a
1997  * Timeout::Error exception being raised.
1998  *
1999  * ==== Stack control settings
2000  *
2001  * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2002  * to control more than one ExceptionClass and TimingSymbol at a time.
2003  *
2004  * Thread.handle_interrupt(FooError => :never) {
2005  * Thread.handle_interrupt(BarError => :never) {
2006  * # FooError and BarError are prohibited.
2007  * }
2008  * }
2009  *
2010  * ==== Inheritance with ExceptionClass
2011  *
2012  * All exceptions inherited from the ExceptionClass parameter will be considered.
2013  *
2014  * Thread.handle_interrupt(Exception => :never) {
2015  * # all exceptions inherited from Exception are prohibited.
2016  * }
2017  *
2018  */
2019 static VALUE
2020 rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2021 {
2022  VALUE mask;
2023  rb_execution_context_t * volatile ec = GET_EC();
2024  rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2025  volatile VALUE r = Qnil;
2026  enum ruby_tag_type state;
2027 
2028  if (!rb_block_given_p()) {
2029  rb_raise(rb_eArgError, "block is needed.");
2030  }
2031 
2032  mask = 0;
2033  mask_arg = rb_to_hash_type(mask_arg);
2034  rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2035  if (!mask) {
2036  return rb_yield(Qnil);
2037  }
2040  if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2043  }
2044 
2045  EC_PUSH_TAG(th->ec);
2046  if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2047  r = rb_yield(Qnil);
2048  }
2049  EC_POP_TAG();
2050 
2052  if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2055  }
2056 
2057  RUBY_VM_CHECK_INTS(th->ec);
2058 
2059  if (state) {
2060  EC_JUMP_TAG(th->ec, state);
2061  }
2062 
2063  return r;
2064 }
2065 
2066 /*
2067  * call-seq:
2068  * target_thread.pending_interrupt?(error = nil) -> true/false
2069  *
2070  * Returns whether or not the asynchronous queue is empty for the target thread.
2071  *
2072  * If +error+ is given, then check only for +error+ type deferred events.
2073  *
2074  * See ::pending_interrupt? for more information.
2075  */
2076 static VALUE
2077 rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2078 {
2079  rb_thread_t *target_th = rb_thread_ptr(target_thread);
2080 
2081  if (!target_th->pending_interrupt_queue) {
2082  return Qfalse;
2083  }
2084  if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2085  return Qfalse;
2086  }
2087  if (rb_check_arity(argc, 0, 1)) {
2088  VALUE err = argv[0];
2089  if (!rb_obj_is_kind_of(err, rb_cModule)) {
2090  rb_raise(rb_eTypeError, "class or module required for rescue clause");
2091  }
2092  if (rb_threadptr_pending_interrupt_include_p(target_th, err)) {
2093  return Qtrue;
2094  }
2095  else {
2096  return Qfalse;
2097  }
2098  }
2099  else {
2100  return Qtrue;
2101  }
2102 }
2103 
2104 /*
2105  * call-seq:
2106  * Thread.pending_interrupt?(error = nil) -> true/false
2107  *
2108  * Returns whether or not the asynchronous queue is empty.
2109  *
2110  * Since Thread::handle_interrupt can be used to defer asynchronous events,
2111  * this method can be used to determine if there are any deferred events.
2112  *
2113  * If you find this method returns true, then you may finish +:never+ blocks.
2114  *
2115  * For example, the following method processes deferred asynchronous events
2116  * immediately.
2117  *
2118  * def Thread.kick_interrupt_immediately
2119  * Thread.handle_interrupt(Object => :immediate) {
2120  * Thread.pass
2121  * }
2122  * end
2123  *
2124  * If +error+ is given, then check only for +error+ type deferred events.
2125  *
2126  * === Usage
2127  *
2128  * th = Thread.new{
2129  * Thread.handle_interrupt(RuntimeError => :on_blocking){
2130  * while true
2131  * ...
2132  * # reach safe point to invoke interrupt
2133  * if Thread.pending_interrupt?
2134  * Thread.handle_interrupt(Object => :immediate){}
2135  * end
2136  * ...
2137  * end
2138  * }
2139  * }
2140  * ...
2141  * th.raise # stop thread
2142  *
2143  * This example can also be written as the following, which you should use to
2144  * avoid asynchronous interrupts.
2145  *
2146  * flag = true
2147  * th = Thread.new{
2148  * Thread.handle_interrupt(RuntimeError => :on_blocking){
2149  * while true
2150  * ...
2151  * # reach safe point to invoke interrupt
2152  * break if flag == false
2153  * ...
2154  * end
2155  * }
2156  * }
2157  * ...
2158  * flag = false # stop thread
2159  */
2160 
/* Class-level Thread.pending_interrupt?: delegates to the instance
 * check on the currently running thread. */
static VALUE
rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
{
    return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
}
2166 
2167 NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2168 
2169 static void
2170 rb_threadptr_to_kill(rb_thread_t *th)
2171 {
2173  th->status = THREAD_RUNNABLE;
2174  th->to_kill = 1;
2175  th->ec->errinfo = INT2FIX(TAG_FATAL);
2176  EC_JUMP_TAG(th->ec, TAG_FATAL);
2177 }
2178 
2179 static inline rb_atomic_t
2180 threadptr_get_interrupts(rb_thread_t *th)
2181 {
2182  rb_execution_context_t *ec = th->ec;
2183  rb_atomic_t interrupt;
2184  rb_atomic_t old;
2185 
2186  do {
2187  interrupt = ec->interrupt_flag;
2188  old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2189  } while (old != interrupt);
2190  return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2191 }
2192 
2195 {
2196  rb_atomic_t interrupt;
2197  int postponed_job_interrupt = 0;
2198  int ret = FALSE;
2199 
2200  if (th->ec->raised_flag) return ret;
2201 
2202  while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2203  int sig;
2204  int timer_interrupt;
2205  int pending_interrupt;
2206  int trap_interrupt;
2207 
2208  timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2209  pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2210  postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2211  trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2212 
2213  if (postponed_job_interrupt) {
2215  }
2216 
2217  /* signal handling */
2218  if (trap_interrupt && (th == th->vm->main_thread)) {
2219  enum rb_thread_status prev_status = th->status;
2220  int sigwait_fd = rb_sigwait_fd_get(th);
2221 
2222  if (sigwait_fd >= 0) {
2223  (void)consume_communication_pipe(sigwait_fd);
2224  ruby_sigchld_handler(th->vm);
2225  rb_sigwait_fd_put(th, sigwait_fd);
2227  }
2228  th->status = THREAD_RUNNABLE;
2229  while ((sig = rb_get_next_signal()) != 0) {
2230  ret |= rb_signal_exec(th, sig);
2231  }
2232  th->status = prev_status;
2233  }
2234 
2235  /* exception from another thread */
2236  if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2237  VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2238  thread_debug("rb_thread_execute_interrupts: %"PRIdVALUE"\n", err);
2239  ret = TRUE;
2240 
2241  if (err == Qundef) {
2242  /* no error */
2243  }
2244  else if (err == eKillSignal /* Thread#kill received */ ||
2245  err == eTerminateSignal /* Terminate thread */ ||
2246  err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2247  rb_threadptr_to_kill(th);
2248  }
2249  else {
2251  /* the only special exception to be queued across thread */
2253  }
2254  /* set runnable if th was slept. */
2255  if (th->status == THREAD_STOPPED ||
2257  th->status = THREAD_RUNNABLE;
2258  rb_exc_raise(err);
2259  }
2260  }
2261 
2262  if (timer_interrupt) {
2263  uint32_t limits_us = TIME_QUANTUM_USEC;
2264 
2265  if (th->priority > 0)
2266  limits_us <<= th->priority;
2267  else
2268  limits_us >>= -th->priority;
2269 
2270  if (th->status == THREAD_RUNNABLE)
2271  th->running_time_us += TIME_QUANTUM_USEC;
2272 
2273  VM_ASSERT(th->ec->cfp);
2275  0, 0, 0, Qundef);
2276 
2277  rb_thread_schedule_limits(limits_us);
2278  }
2279  }
2280  return ret;
2281 }
2282 
2283 void
2285 {
2286  rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2287 }
2288 
2289 static void
2290 rb_threadptr_ready(rb_thread_t *th)
2291 {
2293 }
2294 
2295 static VALUE
2296 rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2297 {
2298  VALUE exc;
2299 
2300  if (rb_threadptr_dead(target_th)) {
2301  return Qnil;
2302  }
2303 
2304  if (argc == 0) {
2305  exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2306  }
2307  else {
2309  }
2310 
2311  /* making an exception object can switch thread,
2312  so we need to check thread deadness again */
2313  if (rb_threadptr_dead(target_th)) {
2314  return Qnil;
2315  }
2316 
2317  rb_ec_setup_exception(GET_EC(), exc, Qundef);
2319  rb_threadptr_interrupt(target_th);
2320  return Qnil;
2321 }
2322 
2323 void
2325 {
2326  VALUE argv[2];
2327 
2328  argv[0] = rb_eSignal;
2329  argv[1] = INT2FIX(sig);
2330  rb_threadptr_raise(th->vm->main_thread, 2, argv);
2331 }
2332 
2333 void
2335 {
2336  VALUE argv[2];
2337 
2338  argv[0] = rb_eSystemExit;
2339  argv[1] = rb_str_new2("exit");
2340  rb_threadptr_raise(th->vm->main_thread, 2, argv);
2341 }
2342 
2343 int
2345 {
2346  if (ec->raised_flag & RAISED_EXCEPTION) {
2347  return 1;
2348  }
2350  return 0;
2351 }
2352 
2353 int
2355 {
2356  if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2357  return 0;
2358  }
2359  ec->raised_flag &= ~RAISED_EXCEPTION;
2360  return 1;
2361 }
2362 
2363 int
2364 rb_notify_fd_close(int fd, struct list_head *busy)
2365 {
2366  rb_vm_t *vm = GET_THREAD()->vm;
2367  struct waiting_fd *wfd = 0, *next;
2368 
2369  list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
2370  if (wfd->fd == fd) {
2371  rb_thread_t *th = wfd->th;
2372  VALUE err;
2373 
2374  list_del(&wfd->wfd_node);
2375  list_add(busy, &wfd->wfd_node);
2376 
2380  }
2381  }
2382  return !list_empty(busy);
2383 }
2384 
2385 void
2387 {
2388  struct list_head busy;
2389 
2390  list_head_init(&busy);
2391  if (rb_notify_fd_close(fd, &busy)) {
2392  do rb_thread_schedule(); while (!list_empty(&busy));
2393  }
2394 }
2395 
2396 /*
2397  * call-seq:
2398  * thr.raise
2399  * thr.raise(string)
2400  * thr.raise(exception [, string [, array]])
2401  *
2402  * Raises an exception from the given thread. The caller does not have to be
2403  * +thr+. See Kernel#raise for more information.
2404  *
2405  * Thread.abort_on_exception = true
2406  * a = Thread.new { sleep(200) }
2407  * a.raise("Gotcha")
2408  *
2409  * This will produce:
2410  *
2411  * prog.rb:3: Gotcha (RuntimeError)
2412  * from prog.rb:2:in `initialize'
2413  * from prog.rb:2:in `new'
2414  * from prog.rb:2
2415  */
2416 
2417 static VALUE
2418 thread_raise_m(int argc, VALUE *argv, VALUE self)
2419 {
2420  rb_thread_t *target_th = rb_thread_ptr(self);
2421  const rb_thread_t *current_th = GET_THREAD();
2422 
2423  threadptr_check_pending_interrupt_queue(target_th);
2424  rb_threadptr_raise(target_th, argc, argv);
2425 
2426  /* To perform Thread.current.raise as Kernel.raise */
2427  if (current_th == target_th) {
2428  RUBY_VM_CHECK_INTS(target_th->ec);
2429  }
2430  return Qnil;
2431 }
2432 
2433 
2434 /*
2435  * call-seq:
2436  * thr.exit -> thr
2437  * thr.kill -> thr
2438  * thr.terminate -> thr
2439  *
2440  * Terminates +thr+ and schedules another thread to be run, returning
2441  * the terminated Thread. If this is the main thread, or the last
2442  * thread, exits the process.
2443  */
2444 
2445 VALUE
2447 {
2448  rb_thread_t *th = rb_thread_ptr(thread);
2449 
2450  if (th->to_kill || th->status == THREAD_KILLED) {
2451  return thread;
2452  }
2453  if (th == th->vm->main_thread) {
2455  }
2456 
2457  thread_debug("rb_thread_kill: %p (%"PRI_THREAD_ID")\n", (void *)th, thread_id_str(th));
2458 
2459  if (th == GET_THREAD()) {
2460  /* kill myself immediately */
2461  rb_threadptr_to_kill(th);
2462  }
2463  else {
2464  threadptr_check_pending_interrupt_queue(th);
2467  }
2468  return thread;
2469 }
2470 
2471 int
2473 {
2474  rb_thread_t *th = rb_thread_ptr(thread);
2475 
2476  if (th->to_kill || th->status == THREAD_KILLED) {
2477  return TRUE;
2478  }
2479  return FALSE;
2480 }
2481 
2482 /*
2483  * call-seq:
2484  * Thread.kill(thread) -> thread
2485  *
2486  * Causes the given +thread+ to exit, see also Thread::exit.
2487  *
2488  * count = 0
2489  * a = Thread.new { loop { count += 1 } }
2490  * sleep(0.1) #=> 0
2491  * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2492  * count #=> 93947
2493  * a.alive? #=> false
2494  */
2495 
2496 static VALUE
2497 rb_thread_s_kill(VALUE obj, VALUE th)
2498 {
2499  return rb_thread_kill(th);
2500 }
2501 
2502 
2503 /*
2504  * call-seq:
2505  * Thread.exit -> thread
2506  *
2507  * Terminates the currently running thread and schedules another thread to be
2508  * run.
2509  *
2510  * If this thread is already marked to be killed, ::exit returns the Thread.
2511  *
2512  * If this is the main thread, or the last thread, exit the process.
2513  */
2514 
2515 static VALUE
2516 rb_thread_exit(VALUE _)
2517 {
2518  rb_thread_t *th = GET_THREAD();
2519  return rb_thread_kill(th->self);
2520 }
2521 
2522 
2523 /*
2524  * call-seq:
2525  * thr.wakeup -> thr
2526  *
2527  * Marks a given thread as eligible for scheduling, however it may still
2528  * remain blocked on I/O.
2529  *
2530  * *Note:* This does not invoke the scheduler, see #run for more information.
2531  *
2532  * c = Thread.new { Thread.stop; puts "hey!" }
2533  * sleep 0.1 while c.status!='sleep'
2534  * c.wakeup
2535  * c.join
2536  * #=> "hey!"
2537  */
2538 
2539 VALUE
2541 {
2542  if (!RTEST(rb_thread_wakeup_alive(thread))) {
2543  rb_raise(rb_eThreadError, "killed thread");
2544  }
2545  return thread;
2546 }
2547 
2548 VALUE
2550 {
2551  rb_thread_t *target_th = rb_thread_ptr(thread);
2552  if (target_th->status == THREAD_KILLED) return Qnil;
2553 
2554  rb_threadptr_ready(target_th);
2555 
2556  if (target_th->status == THREAD_STOPPED ||
2557  target_th->status == THREAD_STOPPED_FOREVER) {
2558  target_th->status = THREAD_RUNNABLE;
2559  }
2560 
2561  return thread;
2562 }
2563 
2564 
2565 /*
2566  * call-seq:
2567  * thr.run -> thr
2568  *
2569  * Wakes up +thr+, making it eligible for scheduling.
2570  *
2571  * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2572  * sleep 0.1 while a.status!='sleep'
2573  * puts "Got here"
2574  * a.run
2575  * a.join
2576  *
2577  * This will produce:
2578  *
2579  * a
2580  * Got here
2581  * c
2582  *
2583  * See also the instance method #wakeup.
2584  */
2585 
2586 VALUE
2588 {
2589  rb_thread_wakeup(thread);
2591  return thread;
2592 }
2593 
2594 
2595 VALUE
2597 {
2598  if (rb_thread_alone()) {
2600  "stopping only thread\n\tnote: use sleep to stop forever");
2601  }
2603  return Qnil;
2604 }
2605 
2606 /*
2607  * call-seq:
2608  * Thread.stop -> nil
2609  *
2610  * Stops execution of the current thread, putting it into a ``sleep'' state,
2611  * and schedules execution of another thread.
2612  *
2613  * a = Thread.new { print "a"; Thread.stop; print "c" }
2614  * sleep 0.1 while a.status!='sleep'
2615  * print "b"
2616  * a.run
2617  * a.join
2618  * #=> "abc"
2619  */
2620 
2621 static VALUE
2622 thread_stop(VALUE _)
2623 {
2624  return rb_thread_stop();
2625 }
2626 
2627 /********************************************************************/
2628 
2629 VALUE
2631 {
2632  VALUE ary = rb_ary_new();
2633  rb_vm_t *vm = GET_THREAD()->vm;
2634  rb_thread_t *th = 0;
2635 
2636  list_for_each(&vm->living_threads, th, vmlt_node) {
2637  switch (th->status) {
2638  case THREAD_RUNNABLE:
2639  case THREAD_STOPPED:
2641  rb_ary_push(ary, th->self);
2642  default:
2643  break;
2644  }
2645  }
2646  return ary;
2647 }
2648 
2649 /*
2650  * call-seq:
2651  * Thread.list -> array
2652  *
2653  * Returns an array of Thread objects for all threads that are either runnable
2654  * or stopped.
2655  *
2656  * Thread.new { sleep(200) }
2657  * Thread.new { 1000000.times {|i| i*i } }
2658  * Thread.new { Thread.stop }
2659  * Thread.list.each {|t| p t}
2660  *
2661  * This will produce:
2662  *
2663  * #<Thread:0x401b3e84 sleep>
2664  * #<Thread:0x401b3f38 run>
2665  * #<Thread:0x401b3fb0 sleep>
2666  * #<Thread:0x401bdf4c run>
2667  */
2668 
2669 static VALUE
2670 thread_list(VALUE _)
2671 {
2672  return rb_thread_list();
2673 }
2674 
2675 VALUE
2677 {
2678  return GET_THREAD()->self;
2679 }
2680 
2681 /*
2682  * call-seq:
2683  * Thread.current -> thread
2684  *
2685  * Returns the currently executing thread.
2686  *
2687  * Thread.current #=> #<Thread:0x401bdf4c run>
2688  */
2689 
2690 static VALUE
2691 thread_s_current(VALUE klass)
2692 {
2693  return rb_thread_current();
2694 }
2695 
2696 VALUE
2698 {
2699  return GET_THREAD()->vm->main_thread->self;
2700 }
2701 
2702 /*
2703  * call-seq:
2704  * Thread.main -> thread
2705  *
2706  * Returns the main thread.
2707  */
2708 
2709 static VALUE
2710 rb_thread_s_main(VALUE klass)
2711 {
2712  return rb_thread_main();
2713 }
2714 
2715 
2716 /*
2717  * call-seq:
2718  * Thread.abort_on_exception -> true or false
2719  *
2720  * Returns the status of the global ``abort on exception'' condition.
2721  *
2722  * The default is +false+.
2723  *
2724  * When set to +true+, if any thread is aborted by an exception, the
2725  * raised exception will be re-raised in the main thread.
2726  *
2727  * Can also be specified by the global $DEBUG flag or command line option
2728  * +-d+.
2729  *
2730  * See also ::abort_on_exception=.
2731  *
2732  * There is also an instance level method to set this for a specific thread,
2733  * see #abort_on_exception.
2734  */
2735 
2736 static VALUE
2737 rb_thread_s_abort_exc(VALUE _)
2738 {
2739  return GET_THREAD()->vm->thread_abort_on_exception ? Qtrue : Qfalse;
2740 }
2741 
2742 
2743 /*
2744  * call-seq:
2745  * Thread.abort_on_exception= boolean -> true or false
2746  *
2747  * When set to +true+, if any thread is aborted by an exception, the
2748  * raised exception will be re-raised in the main thread.
2749  * Returns the new state.
2750  *
2751  * Thread.abort_on_exception = true
2752  * t1 = Thread.new do
2753  * puts "In new thread"
2754  * raise "Exception from thread"
2755  * end
2756  * sleep(1)
2757  * puts "not reached"
2758  *
2759  * This will produce:
2760  *
2761  * In new thread
2762  * prog.rb:4: Exception from thread (RuntimeError)
2763  * from prog.rb:2:in `initialize'
2764  * from prog.rb:2:in `new'
2765  * from prog.rb:2
2766  *
2767  * See also ::abort_on_exception.
2768  *
2769  * There is also an instance level method to set this for a specific thread,
2770  * see #abort_on_exception=.
2771  */
2772 
2773 static VALUE
2774 rb_thread_s_abort_exc_set(VALUE self, VALUE val)
2775 {
2776  GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
2777  return val;
2778 }
2779 
2780 
2781 /*
2782  * call-seq:
2783  * thr.abort_on_exception -> true or false
2784  *
2785  * Returns the status of the thread-local ``abort on exception'' condition for
2786  * this +thr+.
2787  *
2788  * The default is +false+.
2789  *
2790  * See also #abort_on_exception=.
2791  *
2792  * There is also a class level method to set this for all threads, see
2793  * ::abort_on_exception.
2794  */
2795 
2796 static VALUE
2797 rb_thread_abort_exc(VALUE thread)
2798 {
2799  return rb_thread_ptr(thread)->abort_on_exception ? Qtrue : Qfalse;
2800 }
2801 
2802 
2803 /*
2804  * call-seq:
2805  * thr.abort_on_exception= boolean -> true or false
2806  *
2807  * When set to +true+, if this +thr+ is aborted by an exception, the
2808  * raised exception will be re-raised in the main thread.
2809  *
2810  * See also #abort_on_exception.
2811  *
2812  * There is also a class level method to set this for all threads, see
2813  * ::abort_on_exception=.
2814  */
2815 
2816 static VALUE
2817 rb_thread_abort_exc_set(VALUE thread, VALUE val)
2818 {
2819  rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
2820  return val;
2821 }
2822 
2823 
2824 /*
2825  * call-seq:
2826  * Thread.report_on_exception -> true or false
2827  *
2828  * Returns the status of the global ``report on exception'' condition.
2829  *
2830  * The default is +true+ since Ruby 2.5.
2831  *
2832  * All threads created when this flag is true will report
2833  * a message on $stderr if an exception kills the thread.
2834  *
2835  * Thread.new { 1.times { raise } }
2836  *
2837  * will produce this output on $stderr:
2838  *
2839  * #<Thread:...> terminated with exception (report_on_exception is true):
2840  * Traceback (most recent call last):
2841  * 2: from -e:1:in `block in <main>'
2842  * 1: from -e:1:in `times'
2843  *
2844  * This is done to catch errors in threads early.
2845  * In some cases, you might not want this output.
2846  * There are multiple ways to avoid the extra output:
2847  *
2848  * * If the exception is not intended, the best is to fix the cause of
2849  * the exception so it does not happen anymore.
2850  * * If the exception is intended, it might be better to rescue it closer to
2851  * where it is raised rather then let it kill the Thread.
2852  * * If it is guaranteed the Thread will be joined with Thread#join or
2853  * Thread#value, then it is safe to disable this report with
2854  * <code>Thread.current.report_on_exception = false</code>
2855  * when starting the Thread.
2856  * However, this might handle the exception much later, or not at all
2857  * if the Thread is never joined due to the parent thread being blocked, etc.
2858  *
2859  * See also ::report_on_exception=.
2860  *
2861  * There is also an instance level method to set this for a specific thread,
2862  * see #report_on_exception=.
2863  *
2864  */
2865 
2866 static VALUE
2867 rb_thread_s_report_exc(VALUE _)
2868 {
2869  return GET_THREAD()->vm->thread_report_on_exception ? Qtrue : Qfalse;
2870 }
2871 
2872 
2873 /*
2874  * call-seq:
2875  * Thread.report_on_exception= boolean -> true or false
2876  *
2877  * Returns the new state.
2878  * When set to +true+, all threads created afterwards will inherit the
2879  * condition and report a message on $stderr if an exception kills a thread:
2880  *
2881  * Thread.report_on_exception = true
2882  * t1 = Thread.new do
2883  * puts "In new thread"
2884  * raise "Exception from thread"
2885  * end
2886  * sleep(1)
2887  * puts "In the main thread"
2888  *
2889  * This will produce:
2890  *
2891  * In new thread
2892  * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
2893  * Traceback (most recent call last):
2894  * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
2895  * In the main thread
2896  *
2897  * See also ::report_on_exception.
2898  *
2899  * There is also an instance level method to set this for a specific thread,
2900  * see #report_on_exception=.
2901  */
2902 
2903 static VALUE
2904 rb_thread_s_report_exc_set(VALUE self, VALUE val)
2905 {
2906  GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
2907  return val;
2908 }
2909 
2910 
2911 /*
2912  * call-seq:
2913  * thr.report_on_exception -> true or false
2914  *
2915  * Returns the status of the thread-local ``report on exception'' condition for
2916  * this +thr+.
2917  *
2918  * The default value when creating a Thread is the value of
2919  * the global flag Thread.report_on_exception.
2920  *
2921  * See also #report_on_exception=.
2922  *
2923  * There is also a class level method to set this for all new threads, see
2924  * ::report_on_exception=.
2925  */
2926 
2927 static VALUE
2928 rb_thread_report_exc(VALUE thread)
2929 {
2930  return rb_thread_ptr(thread)->report_on_exception ? Qtrue : Qfalse;
2931 }
2932 
2933 
2934 /*
2935  * call-seq:
2936  * thr.report_on_exception= boolean -> true or false
2937  *
2938  * When set to +true+, a message is printed on $stderr if an exception
2939  * kills this +thr+. See ::report_on_exception for details.
2940  *
2941  * See also #report_on_exception.
2942  *
2943  * There is also a class level method to set this for all new threads, see
2944  * ::report_on_exception=.
2945  */
2946 
2947 static VALUE
2948 rb_thread_report_exc_set(VALUE thread, VALUE val)
2949 {
2950  rb_thread_ptr(thread)->report_on_exception = RTEST(val);
2951  return val;
2952 }
2953 
2954 
2955 /*
2956  * call-seq:
2957  * thr.group -> thgrp or nil
2958  *
2959  * Returns the ThreadGroup which contains the given thread, or returns +nil+
2960  * if +thr+ is not a member of any group.
2961  *
2962  * Thread.main.group #=> #<ThreadGroup:0x4029d914>
2963  */
2964 
2965 VALUE
2967 {
2968  VALUE group = rb_thread_ptr(thread)->thgroup;
2969  return group == 0 ? Qnil : group;
2970 }
2971 
2972 static const char *
2973 thread_status_name(rb_thread_t *th, int detail)
2974 {
2975  switch (th->status) {
2976  case THREAD_RUNNABLE:
2977  return th->to_kill ? "aborting" : "run";
2979  if (detail) return "sleep_forever";
2980  case THREAD_STOPPED:
2981  return "sleep";
2982  case THREAD_KILLED:
2983  return "dead";
2984  default:
2985  return "unknown";
2986  }
2987 }
2988 
2989 static int
2990 rb_threadptr_dead(rb_thread_t *th)
2991 {
2992  return th->status == THREAD_KILLED;
2993 }
2994 
2995 
2996 /*
2997  * call-seq:
2998  * thr.status -> string, false or nil
2999  *
3000  * Returns the status of +thr+.
3001  *
3002  * [<tt>"sleep"</tt>]
3003  * Returned if this thread is sleeping or waiting on I/O
3004  * [<tt>"run"</tt>]
3005  * When this thread is executing
3006  * [<tt>"aborting"</tt>]
3007  * If this thread is aborting
3008  * [+false+]
3009  * When this thread is terminated normally
3010  * [+nil+]
3011  * If terminated with an exception.
3012  *
3013  * a = Thread.new { raise("die now") }
3014  * b = Thread.new { Thread.stop }
3015  * c = Thread.new { Thread.exit }
3016  * d = Thread.new { sleep }
3017  * d.kill #=> #<Thread:0x401b3678 aborting>
3018  * a.status #=> nil
3019  * b.status #=> "sleep"
3020  * c.status #=> false
3021  * d.status #=> "aborting"
3022  * Thread.current.status #=> "run"
3023  *
3024  * See also the instance methods #alive? and #stop?
3025  */
3026 
3027 static VALUE
3028 rb_thread_status(VALUE thread)
3029 {
3030  rb_thread_t *target_th = rb_thread_ptr(thread);
3031 
3032  if (rb_threadptr_dead(target_th)) {
3033  if (!NIL_P(target_th->ec->errinfo) &&
3034  !FIXNUM_P(target_th->ec->errinfo)) {
3035  return Qnil;
3036  }
3037  else {
3038  return Qfalse;
3039  }
3040  }
3041  else {
3042  return rb_str_new2(thread_status_name(target_th, FALSE));
3043  }
3044 }
3045 
3046 
3047 /*
3048  * call-seq:
3049  * thr.alive? -> true or false
3050  *
3051  * Returns +true+ if +thr+ is running or sleeping.
3052  *
3053  * thr = Thread.new { }
3054  * thr.join #=> #<Thread:0x401b3fb0 dead>
3055  * Thread.current.alive? #=> true
3056  * thr.alive? #=> false
3057  *
3058  * See also #stop? and #status.
3059  */
3060 
3061 static VALUE
3062 rb_thread_alive_p(VALUE thread)
3063 {
3064  if (rb_threadptr_dead(rb_thread_ptr(thread))) {
3065  return Qfalse;
3066  }
3067  else {
3068  return Qtrue;
3069  }
3070 }
3071 
3072 /*
3073  * call-seq:
3074  * thr.stop? -> true or false
3075  *
3076  * Returns +true+ if +thr+ is dead or sleeping.
3077  *
3078  * a = Thread.new { Thread.stop }
3079  * b = Thread.current
3080  * a.stop? #=> true
3081  * b.stop? #=> false
3082  *
3083  * See also #alive? and #status.
3084  */
3085 
3086 static VALUE
3087 rb_thread_stop_p(VALUE thread)
3088 {
3089  rb_thread_t *th = rb_thread_ptr(thread);
3090 
3091  if (rb_threadptr_dead(th)) {
3092  return Qtrue;
3093  }
3094  else if (th->status == THREAD_STOPPED ||
3095  th->status == THREAD_STOPPED_FOREVER) {
3096  return Qtrue;
3097  }
3098  else {
3099  return Qfalse;
3100  }
3101 }
3102 
3103 /*
3104  * call-seq:
3105  * thr.safe_level -> integer
3106  *
3107  * Returns the safe level.
3108  *
3109  * This method is obsolete because $SAFE is a process global state.
3110  * Simply check $SAFE.
3111  */
3112 
3113 static VALUE
3114 rb_thread_safe_level(VALUE thread)
3115 {
3116  rb_warn("Thread#safe_level will be removed in Ruby 3.0");
3117  return UINT2NUM(GET_VM()->safe_level_);
3118 }
3119 
3120 /*
3121  * call-seq:
3122  * thr.name -> string
3123  *
3124  * show the name of the thread.
3125  */
3126 
3127 static VALUE
3128 rb_thread_getname(VALUE thread)
3129 {
3130  return rb_thread_ptr(thread)->name;
3131 }
3132 
3133 /*
3134  * call-seq:
3135  * thr.name=(name) -> string
3136  *
3137  * set given name to the ruby thread.
3138  * On some platform, it may set the name to pthread and/or kernel.
3139  */
3140 
3141 static VALUE
3142 rb_thread_setname(VALUE thread, VALUE name)
3143 {
3144  rb_thread_t *target_th = rb_thread_ptr(thread);
3145 
3146  if (!NIL_P(name)) {
3147  rb_encoding *enc;
3149  enc = rb_enc_get(name);
3150  if (!rb_enc_asciicompat(enc)) {
3151  rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3152  rb_enc_name(enc));
3153  }
3155  }
3156  target_th->name = name;
3157  if (threadptr_initialized(target_th)) {
3158  native_set_another_thread_name(target_th->thread_id, name);
3159  }
3160  return name;
3161 }
3162 
3163 /*
3164  * call-seq:
3165  * thr.to_s -> string
3166  *
3167  * Dump the name, id, and status of _thr_ to a string.
3168  */
3169 
3170 static VALUE
3171 rb_thread_to_s(VALUE thread)
3172 {
3173  VALUE cname = rb_class_path(rb_obj_class(thread));
3174  rb_thread_t *target_th = rb_thread_ptr(thread);
3175  const char *status;
3176  VALUE str, loc;
3177 
3178  status = thread_status_name(target_th, TRUE);
3179  str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3180  if (!NIL_P(target_th->name)) {
3181  rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3182  }
3183  if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3185  RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3186  rb_gc_force_recycle(loc);
3187  }
3188  rb_str_catf(str, " %s>", status);
3189 
3190  return str;
3191 }
3192 
/* variables for recursive traversals */
/* ID of the reserved key whose value is the recursion-guard hash used by
 * Thread#[] / #[]= and recursion detection; initialised outside this chunk. */
static ID recursive_key;
3195 
3196 static VALUE
3197 threadptr_local_aref(rb_thread_t *th, ID id)
3198 {
3199  if (id == recursive_key) {
3200  return th->ec->local_storage_recursive_hash;
3201  }
3202  else {
3203  st_data_t val;
3204  st_table *local_storage = th->ec->local_storage;
3205 
3206  if (local_storage != NULL && st_lookup(local_storage, id, &val)) {
3207  return (VALUE)val;
3208  }
3209  else {
3210  return Qnil;
3211  }
3212  }
3213 }
3214 
3215 VALUE
3217 {
3218  return threadptr_local_aref(rb_thread_ptr(thread), id);
3219 }
3220 
3221 /*
3222  * call-seq:
3223  * thr[sym] -> obj or nil
3224  *
3225  * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3226  * if not explicitly inside a Fiber), using either a symbol or a string name.
3227  * If the specified variable does not exist, returns +nil+.
3228  *
3229  * [
3230  * Thread.new { Thread.current["name"] = "A" },
3231  * Thread.new { Thread.current[:name] = "B" },
3232  * Thread.new { Thread.current["name"] = "C" }
3233  * ].each do |th|
3234  * th.join
3235  * puts "#{th.inspect}: #{th[:name]}"
3236  * end
3237  *
3238  * This will produce:
3239  *
3240  * #<Thread:0x00000002a54220 dead>: A
3241  * #<Thread:0x00000002a541a8 dead>: B
3242  * #<Thread:0x00000002a54130 dead>: C
3243  *
3244  * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3245  * This confusion did not exist in Ruby 1.8 because
3246  * fibers are only available since Ruby 1.9.
3247  * Ruby 1.9 chooses that the methods behaves fiber-local to save
3248  * following idiom for dynamic scope.
3249  *
3250  * def meth(newvalue)
3251  * begin
3252  * oldvalue = Thread.current[:name]
3253  * Thread.current[:name] = newvalue
3254  * yield
3255  * ensure
3256  * Thread.current[:name] = oldvalue
3257  * end
3258  * end
3259  *
3260  * The idiom may not work as dynamic scope if the methods are thread-local
3261  * and a given block switches fiber.
3262  *
3263  * f = Fiber.new {
3264  * meth(1) {
3265  * Fiber.yield
3266  * }
3267  * }
3268  * meth(2) {
3269  * f.resume
3270  * }
3271  * f.resume
3272  * p Thread.current[:name]
3273  * #=> nil if fiber-local
3274  * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3275  *
3276  * For thread-local variables, please see #thread_variable_get and
3277  * #thread_variable_set.
3278  *
3279  */
3280 
3281 static VALUE
3282 rb_thread_aref(VALUE thread, VALUE key)
3283 {
3284  ID id = rb_check_id(&key);
3285  if (!id) return Qnil;
3286  return rb_thread_local_aref(thread, id);
3287 }
3288 
3289 /*
3290  * call-seq:
3291  * thr.fetch(sym) -> obj
3292  * thr.fetch(sym) { } -> obj
3293  * thr.fetch(sym, default) -> obj
3294  *
3295  * Returns a fiber-local for the given key. If the key can't be
3296  * found, there are several options: With no other arguments, it will
3297  * raise a KeyError exception; if <i>default</i> is given, then that
3298  * will be returned; if the optional code block is specified, then
3299  * that will be run and its result returned. See Thread#[] and
3300  * Hash#fetch.
3301  */
3302 static VALUE
3303 rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3304 {
3305  VALUE key, val;
3306  ID id;
3307  rb_thread_t *target_th = rb_thread_ptr(self);
3308  int block_given;
3309 
3310  rb_check_arity(argc, 1, 2);
3311  key = argv[0];
3312 
3313  block_given = rb_block_given_p();
3314  if (block_given && argc == 2) {
3315  rb_warn("block supersedes default value argument");
3316  }
3317 
3318  id = rb_check_id(&key);
3319 
3320  if (id == recursive_key) {
3321  return target_th->ec->local_storage_recursive_hash;
3322  }
3323  else if (id && target_th->ec->local_storage &&
3324  st_lookup(target_th->ec->local_storage, id, &val)) {
3325  return val;
3326  }
3327  else if (block_given) {
3328  return rb_yield(key);
3329  }
3330  else if (argc == 1) {
3331  rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3332  }
3333  else {
3334  return argv[1];
3335  }
3336 }
3337 
3338 static VALUE
3339 threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3340 {
3341  if (id == recursive_key) {
3342  th->ec->local_storage_recursive_hash = val;
3343  return val;
3344  }
3345  else {
3346  st_table *local_storage = th->ec->local_storage;
3347 
3348  if (NIL_P(val)) {
3349  if (!local_storage) return Qnil;
3350  st_delete_wrap(local_storage, id);
3351  return Qnil;
3352  }
3353  else {
3354  if (local_storage == NULL) {
3355  th->ec->local_storage = local_storage = st_init_numtable();
3356  }
3357  st_insert(local_storage, id, val);
3358  return val;
3359  }
3360  }
3361 }
3362 
3363 VALUE
3365 {
3366  if (OBJ_FROZEN(thread)) {
3367  rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3368  }
3369 
3370  return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3371 }
3372 
3373 /*
3374  * call-seq:
3375  * thr[sym] = obj -> obj
3376  *
3377  * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3378  * using either a symbol or a string.
3379  *
3380  * See also Thread#[].
3381  *
3382  * For thread-local variables, please see #thread_variable_set and
3383  * #thread_variable_get.
3384  */
3385 
3386 static VALUE
3387 rb_thread_aset(VALUE self, VALUE id, VALUE val)
3388 {
3389  return rb_thread_local_aset(self, rb_to_id(id), val);
3390 }
3391 
3392 /*
3393  * call-seq:
3394  * thr.thread_variable_get(key) -> obj or nil
3395  *
3396  * Returns the value of a thread local variable that has been set. Note that
3397  * these are different than fiber local values. For fiber local values,
3398  * please see Thread#[] and Thread#[]=.
3399  *
3400  * Thread local values are carried along with threads, and do not respect
3401  * fibers. For example:
3402  *
3403  * Thread.new {
3404  * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3405  * Thread.current["foo"] = "bar" # set a fiber local
3406  *
3407  * Fiber.new {
3408  * Fiber.yield [
3409  * Thread.current.thread_variable_get("foo"), # get the thread local
3410  * Thread.current["foo"], # get the fiber local
3411  * ]
3412  * }.resume
3413  * }.join.value # => ['bar', nil]
3414  *
3415  * The value "bar" is returned for the thread local, where nil is returned
3416  * for the fiber local. The fiber is executed in the same thread, so the
3417  * thread local values are available.
3418  */
3419 
3420 static VALUE
3421 rb_thread_variable_get(VALUE thread, VALUE key)
3422 {
3423  VALUE locals;
3424 
3426  return Qnil;
3427  }
3428  locals = rb_thread_local_storage(thread);
3429  return rb_hash_aref(locals, rb_to_symbol(key));
3430 }
3431 
3432 /*
3433  * call-seq:
3434  * thr.thread_variable_set(key, value)
3435  *
3436  * Sets a thread local with +key+ to +value+. Note that these are local to
3437  * threads, and not to fibers. Please see Thread#thread_variable_get and
3438  * Thread#[] for more information.
3439  */
3440 
3441 static VALUE
3442 rb_thread_variable_set(VALUE thread, VALUE id, VALUE val)
3443 {
3444  VALUE locals;
3445 
3446  if (OBJ_FROZEN(thread)) {
3447  rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3448  }
3449 
3450  locals = rb_thread_local_storage(thread);
3451  return rb_hash_aset(locals, rb_to_symbol(id), val);
3452 }
3453 
3454 /*
3455  * call-seq:
3456  * thr.key?(sym) -> true or false
3457  *
3458  * Returns +true+ if the given string (or symbol) exists as a fiber-local
3459  * variable.
3460  *
3461  * me = Thread.current
3462  * me[:oliver] = "a"
3463  * me.key?(:oliver) #=> true
3464  * me.key?(:stanley) #=> false
3465  */
3466 
3467 static VALUE
3468 rb_thread_key_p(VALUE self, VALUE key)
3469 {
3470  ID id = rb_check_id(&key);
3471  st_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3472 
3473  if (!id || local_storage == NULL) {
3474  return Qfalse;
3475  }
3476  else if (st_is_member(local_storage, id)) {
3477  return Qtrue;
3478  }
3479  else {
3480  return Qfalse;
3481  }
3482 }
3483 
3484 static int
3485 thread_keys_i(ID key, VALUE value, VALUE ary)
3486 {
3487  rb_ary_push(ary, ID2SYM(key));
3488  return ST_CONTINUE;
3489 }
3490 
/* Returns nonzero when exactly one thread is living in the current VM.
 * NOTE(review): signature line elided in this listing; restored from
 * upstream Ruby 2.7 thread.c. */
int
rb_thread_alone(void)
{
    return vm_living_thread_num(GET_VM()) == 1;
}
3496 
3497 /*
3498  * call-seq:
3499  * thr.keys -> array
3500  *
3501  * Returns an array of the names of the fiber-local variables (as Symbols).
3502  *
3503  * thr = Thread.new do
3504  * Thread.current[:cat] = 'meow'
3505  * Thread.current["dog"] = 'woof'
3506  * end
3507  * thr.join #=> #<Thread:0x401b3f10 dead>
3508  * thr.keys #=> [:dog, :cat]
3509  */
3510 
3511 static VALUE
3512 rb_thread_keys(VALUE self)
3513 {
3514  st_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3515  VALUE ary = rb_ary_new();
3516 
3517  if (local_storage) {
3518  st_foreach(local_storage, thread_keys_i, ary);
3519  }
3520  return ary;
3521 }
3522 
3523 static int
3524 keys_i(VALUE key, VALUE value, VALUE ary)
3525 {
3526  rb_ary_push(ary, key);
3527  return ST_CONTINUE;
3528 }
3529 
3530 /*
3531  * call-seq:
3532  * thr.thread_variables -> array
3533  *
3534  * Returns an array of the names of the thread-local variables (as Symbols).
3535  *
3536  * thr = Thread.new do
3537  * Thread.current.thread_variable_set(:cat, 'meow')
3538  * Thread.current.thread_variable_set("dog", 'woof')
3539  * end
3540  * thr.join #=> #<Thread:0x401b3f10 dead>
3541  * thr.thread_variables #=> [:dog, :cat]
3542  *
3543  * Note that these are not fiber local variables. Please see Thread#[] and
3544  * Thread#thread_variable_get for more details.
3545  */
3546 
3547 static VALUE
3548 rb_thread_variables(VALUE thread)
3549 {
3550  VALUE locals;
3551  VALUE ary;
3552 
3553  ary = rb_ary_new();
3555  return ary;
3556  }
3557  locals = rb_thread_local_storage(thread);
3558  rb_hash_foreach(locals, keys_i, ary);
3559 
3560  return ary;
3561 }
3562 
3563 /*
3564  * call-seq:
3565  * thr.thread_variable?(key) -> true or false
3566  *
3567  * Returns +true+ if the given string (or symbol) exists as a thread-local
3568  * variable.
3569  *
3570  * me = Thread.current
3571  * me.thread_variable_set(:oliver, "a")
3572  * me.thread_variable?(:oliver) #=> true
3573  * me.thread_variable?(:stanley) #=> false
3574  *
3575  * Note that these are not fiber local variables. Please see Thread#[] and
3576  * Thread#thread_variable_get for more details.
3577  */
3578 
3579 static VALUE
3580 rb_thread_variable_p(VALUE thread, VALUE key)
3581 {
3582  VALUE locals;
3583  ID id = rb_check_id(&key);
3584 
3585  if (!id) return Qfalse;
3586 
3588  return Qfalse;
3589  }
3590  locals = rb_thread_local_storage(thread);
3591 
3592  if (rb_hash_lookup(locals, ID2SYM(id)) != Qnil) {
3593  return Qtrue;
3594  }
3595  else {
3596  return Qfalse;
3597  }
3598 
3599  return Qfalse;
3600 }
3601 
3602 /*
3603  * call-seq:
3604  * thr.priority -> integer
3605  *
3606  * Returns the priority of <i>thr</i>. Default is inherited from the
3607  * current thread which creating the new thread, or zero for the
3608  * initial main thread; higher-priority thread will run more frequently
3609  * than lower-priority threads (but lower-priority threads can also run).
3610  *
3611  * This is just hint for Ruby thread scheduler. It may be ignored on some
3612  * platform.
3613  *
3614  * Thread.current.priority #=> 0
3615  */
3616 
3617 static VALUE
3618 rb_thread_priority(VALUE thread)
3619 {
3620  return INT2NUM(rb_thread_ptr(thread)->priority);
3621 }
3622 
3623 
3624 /*
3625  * call-seq:
3626  * thr.priority= integer -> thr
3627  *
3628  * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3629  * will run more frequently than lower-priority threads (but lower-priority
3630  * threads can also run).
3631  *
3632  * This is just hint for Ruby thread scheduler. It may be ignored on some
3633  * platform.
3634  *
3635  * count1 = count2 = 0
3636  * a = Thread.new do
3637  * loop { count1 += 1 }
3638  * end
3639  * a.priority = -1
3640  *
3641  * b = Thread.new do
3642  * loop { count2 += 1 }
3643  * end
3644  * b.priority = -2
3645  * sleep 1 #=> 1
3646  * count1 #=> 622504
3647  * count2 #=> 5832
3648  */
3649 
3650 static VALUE
3651 rb_thread_priority_set(VALUE thread, VALUE prio)
3652 {
3653  rb_thread_t *target_th = rb_thread_ptr(thread);
3654  int priority;
3655 
3656 #if USE_NATIVE_THREAD_PRIORITY
3657  target_th->priority = NUM2INT(prio);
3658  native_thread_apply_priority(th);
3659 #else
3660  priority = NUM2INT(prio);
3661  if (priority > RUBY_THREAD_PRIORITY_MAX) {
3662  priority = RUBY_THREAD_PRIORITY_MAX;
3663  }
3664  else if (priority < RUBY_THREAD_PRIORITY_MIN) {
3665  priority = RUBY_THREAD_PRIORITY_MIN;
3666  }
3667  target_th->priority = (int8_t)priority;
3668 #endif
3669  return INT2NUM(target_th->priority);
3670 }
3671 
3672 /* for IO */
3673 
3674 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
3675 
3676 /*
3677  * several Unix platforms support file descriptors bigger than FD_SETSIZE
3678  * in select(2) system call.
3679  *
3680  * - Linux 2.2.12 (?)
3681  * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
3682  * select(2) documents how to allocate fd_set dynamically.
3683  * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
3684  * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
3685  * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
3686  * select(2) documents how to allocate fd_set dynamically.
3687  * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
3688  * - HP-UX documents how to allocate fd_set dynamically.
3689  * http://docs.hp.com/en/B2355-60105/select.2.html
3690  * - Solaris 8 has select_large_fdset
3691  * - Mac OS X 10.7 (Lion)
3692  * select(2) returns EINVAL if nfds is greater than FD_SET_SIZE and
3693  * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
3694  * http://developer.apple.com/library/mac/#releasenotes/Darwin/SymbolVariantsRelNotes/_index.html
3695  *
3696  * When fd_set is not big enough to hold big file descriptors,
3697  * it should be allocated dynamically.
3698  * Note that this assumes fd_set is structured as bitmap.
3699  *
3700  * rb_fd_init allocates the memory.
3701  * rb_fd_term free the memory.
3702  * rb_fd_set may re-allocates bitmap.
3703  *
3704  * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
3705  */
3706 
3707 void
3708 rb_fd_init(rb_fdset_t *fds)
3709 {
3710  fds->maxfd = 0;
3711  fds->fdset = ALLOC(fd_set);
3712  FD_ZERO(fds->fdset);
3713 }
3714 
3715 void
3717 {
3718  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3719 
3720  if (size < sizeof(fd_set))
3721  size = sizeof(fd_set);
3722  dst->maxfd = src->maxfd;
3723  dst->fdset = xmalloc(size);
3724  memcpy(dst->fdset, src->fdset, size);
3725 }
3726 
3727 void
3728 rb_fd_term(rb_fdset_t *fds)
3729 {
3730  if (fds->fdset) xfree(fds->fdset);
3731  fds->maxfd = 0;
3732  fds->fdset = 0;
3733 }
3734 
3735 void
3736 rb_fd_zero(rb_fdset_t *fds)
3737 {
3738  if (fds->fdset)
3739  MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
3740 }
3741 
/* Grow fds so it can represent descriptor n; never shrinks. */
static void
rb_fd_resize(int n, rb_fdset_t *fds)
{
    /* bytes needed to hold fd n vs. bytes currently allocated */
    size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
    size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);

    /* never go below one plain fd_set */
    if (m < sizeof(fd_set)) m = sizeof(fd_set);
    if (o < sizeof(fd_set)) o = sizeof(fd_set);

    if (m > o) {
        /* grow and zero-fill only the newly added tail of the bitmap */
        fds->fdset = xrealloc(fds->fdset, m);
        memset((char *)fds->fdset + o, 0, m - o);
    }
    if (n >= fds->maxfd) fds->maxfd = n + 1;
}
3757 
/* Add fd n to the set, growing the bitmap first so descriptors
 * beyond FD_SETSIZE are accepted (see comment above rb_fd_init). */
void
rb_fd_set(int n, rb_fdset_t *fds)
{
    rb_fd_resize(n, fds);
    FD_SET(n, fds->fdset);
}
3764 
3765 void
3766 rb_fd_clr(int n, rb_fdset_t *fds)
3767 {
3768  if (n >= fds->maxfd) return;
3769  FD_CLR(n, fds->fdset);
3770 }
3771 
3772 int
3773 rb_fd_isset(int n, const rb_fdset_t *fds)
3774 {
3775  if (n >= fds->maxfd) return 0;
3776  return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
3777 }
3778 
/* Import a plain fd_set (holding descriptors < max) into dst. */
void
rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
{
    size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);

    /* copy at least one whole fd_set */
    if (size < sizeof(fd_set)) size = sizeof(fd_set);
    dst->maxfd = max;
    dst->fdset = xrealloc(dst->fdset, size);
    memcpy(dst->fdset, src, size);
}
3789 
/* Copy src's bitmap into dst, resizing dst's existing buffer as needed.
 * Unlike rb_fd_init_copy, dst must already be initialized. */
void
rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
{
    size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);

    if (size < sizeof(fd_set))
        size = sizeof(fd_set);
    dst->maxfd = src->maxfd;
    dst->fdset = xrealloc(dst->fdset, size);
    memcpy(dst->fdset, src->fdset, size);
}
3801 
3802 int
3803 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
3804 {
3805  fd_set *r = NULL, *w = NULL, *e = NULL;
3806  if (readfds) {
3807  rb_fd_resize(n - 1, readfds);
3808  r = rb_fd_ptr(readfds);
3809  }
3810  if (writefds) {
3811  rb_fd_resize(n - 1, writefds);
3812  w = rb_fd_ptr(writefds);
3813  }
3814  if (exceptfds) {
3815  rb_fd_resize(n - 1, exceptfds);
3816  e = rb_fd_ptr(exceptfds);
3817  }
3818  return select(n, r, w, e, timeout);
3819 }
3820 
3821 #define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
3822 
3823 #undef FD_ZERO
3824 #undef FD_SET
3825 #undef FD_CLR
3826 #undef FD_ISSET
3827 
3828 #define FD_ZERO(f) rb_fd_zero(f)
3829 #define FD_SET(i, f) rb_fd_set((i), (f))
3830 #define FD_CLR(i, f) rb_fd_clr((i), (f))
3831 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
3832 
3833 #elif defined(_WIN32)
3834 
3835 void
3836 rb_fd_init(rb_fdset_t *set)
3837 {
3838  set->capa = FD_SETSIZE;
3839  set->fdset = ALLOC(fd_set);
3840  FD_ZERO(set->fdset);
3841 }
3842 
3843 void
3845 {
3846  rb_fd_init(dst);
3847  rb_fd_dup(dst, src);
3848 }
3849 
3850 void
3851 rb_fd_term(rb_fdset_t *set)
3852 {
3853  xfree(set->fdset);
3854  set->fdset = NULL;
3855  set->capa = 0;
3856 }
3857 
3858 void
3859 rb_fd_set(int fd, rb_fdset_t *set)
3860 {
3861  unsigned int i;
3862  SOCKET s = rb_w32_get_osfhandle(fd);
3863 
3864  for (i = 0; i < set->fdset->fd_count; i++) {
3865  if (set->fdset->fd_array[i] == s) {
3866  return;
3867  }
3868  }
3869  if (set->fdset->fd_count >= (unsigned)set->capa) {
3870  set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
3871  set->fdset =
3873  set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
3874  }
3875  set->fdset->fd_array[set->fdset->fd_count++] = s;
3876 }
3877 
3878 #undef FD_ZERO
3879 #undef FD_SET
3880 #undef FD_CLR
3881 #undef FD_ISSET
3882 
3883 #define FD_ZERO(f) rb_fd_zero(f)
3884 #define FD_SET(i, f) rb_fd_set((i), (f))
3885 #define FD_CLR(i, f) rb_fd_clr((i), (f))
3886 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
3887 
3888 #define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
3889 
3890 #endif
3891 
3892 #ifndef rb_fd_no_init
3893 #define rb_fd_no_init(fds) (void)(fds)
3894 #endif
3895 
/*
 * Decide whether an interrupted or possibly-spurious wait should retry.
 * *result is the raw select/ppoll return value and is reset to 0 on the
 * interrupted-retry path; rel (when non-NULL) is the remaining relative
 * timeout, updated against the absolute deadline `end`.
 * Returns TRUE to retry the wait.
 */
static int
wait_retryable(int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
{
    if (*result < 0) {
        switch (errnum) {
          case EINTR:
#ifdef ERESTART
          case ERESTART:
#endif
            *result = 0;
            /* clamp the remaining timeout at zero once the deadline passed */
            if (rel && hrtime_update_expire(rel, end)) {
                *rel = 0;
            }
            return TRUE;
        }
        return FALSE;
    }
    else if (*result == 0) {
        /* check for spurious wakeup */
        if (rel) {
            return !hrtime_update_expire(rel, end);
        }
        return TRUE;
    }
    return FALSE;
}
3922 
3923 struct select_set {
3924  int max;
3933  struct timeval *timeout;
3934 };
3935 
3936 static VALUE
3937 select_set_free(VALUE p)
3938 {
3939  struct select_set *set = (struct select_set *)p;
3940 
3941  if (set->sigwait_fd >= 0) {
3942  rb_sigwait_fd_put(set->th, set->sigwait_fd);
3943  rb_sigwait_fd_migrate(set->th->vm);
3944  }
3945 
3946  rb_fd_term(&set->orig_rset);
3947  rb_fd_term(&set->orig_wset);
3948  rb_fd_term(&set->orig_eset);
3949 
3950  return Qfalse;
3951 }
3952 
/*
 * Possibly shorten a sleep timeout for the thread holding the sigwait fd.
 * If other threads may need waking (or signals must be busy-polled), cap
 * the sleep at one scheduler time quantum so pending work is noticed
 * promptly.  *drained_p reports whether the signal pipe was drained as a
 * side effect.  Returns the timeout to actually use.
 */
static const rb_hrtime_t *
sigwait_timeout(rb_thread_t *th, int sigwait_fd, const rb_hrtime_t *orig,
    int *drained_p)
{
    static const rb_hrtime_t quantum = TIME_QUANTUM_USEC * 1000;

    if (sigwait_fd >= 0 && (!ubf_threads_empty() || BUSY_WAIT_SIGNALS)) {
        *drained_p = check_signals_nogvl(th, sigwait_fd);
        /* never sleep longer than one quantum in this mode */
        if (!orig || *orig > quantum)
            return &quantum;
    }

    return orig;
}
3967 
/* Core select loop, run under rb_ensure with select_set_free as cleanup.
 * Retries on EINTR/spurious wakeups, restoring the caller's fd sets from
 * the saved originals before each retry.  Returns select(2)'s result. */
static VALUE
do_select(VALUE p)
{
    struct select_set *set = (struct select_set *)p;
    int result = 0;
    int lerrno;
    rb_hrtime_t *to, rel, end = 0;

    timeout_prepare(&to, &rel, &end, set->timeout);
    /* select(2) mutates its sets in place; re-seed them before a retry */
#define restore_fdset(dst, src) \
    ((dst) ? rb_fd_dup(dst, src) : (void)0)
#define do_select_update() \
    (restore_fdset(set->rset, &set->orig_rset), \
     restore_fdset(set->wset, &set->orig_wset), \
     restore_fdset(set->eset, &set->orig_eset), \
     TRUE)

    do {
        int drained;
        lerrno = 0;

        BLOCKING_REGION(set->th, {
            const rb_hrtime_t *sto;
            struct timeval tv;

            /* may shorten the sleep to one quantum; see sigwait_timeout */
            sto = sigwait_timeout(set->th, set->sigwait_fd, to, &drained);
            if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
                result = native_fd_select(set->max, set->rset, set->wset,
                                          set->eset,
                                          rb_hrtime2timeval(&tv, sto), set->th);
                /* errno may be clobbered before we report it; save now */
                if (result < 0) lerrno = errno;
            }
        }, set->sigwait_fd >= 0 ? ubf_sigwait : ubf_select, set->th, TRUE);

        if (set->sigwait_fd >= 0) {
            /* readiness of our internal signal pipe must not be reported
             * to the caller as fd readiness */
            if (result > 0 && rb_fd_isset(set->sigwait_fd, set->rset)) {
                result--;
                (void)check_signals_nogvl(set->th, set->sigwait_fd);
            } else {
                (void)check_signals_nogvl(set->th, -1);
            }
        }

        RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
    } while (wait_retryable(&result, lerrno, to, end) && do_select_update());

    if (result < 0) {
        errno = lerrno;
    }

    return (VALUE)result;
}
4020 
4021 static void
4022 rb_thread_wait_fd_rw(int fd, int read)
4023 {
4024  int result = 0;
4025  int events = read ? RB_WAITFD_IN : RB_WAITFD_OUT;
4026 
4027  thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
4028 
4029  if (fd < 0) {
4030  rb_raise(rb_eIOError, "closed stream");
4031  }
4032 
4033  result = rb_wait_for_single_fd(fd, events, NULL);
4034  if (result < 0) {
4035  rb_sys_fail(0);
4036  }
4037 
4038  thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
4039 }
4040 
void
rb_thread_wait_fd(int fd) /* signature restored; lost in extraction */
{
    /* Public API: block until fd is readable. */
    rb_thread_wait_fd_rw(fd, 1);
}
4046 
4047 int
4049 {
4050  rb_thread_wait_fd_rw(fd, 0);
4051  return TRUE;
4052 }
4053 
4054 static rb_fdset_t *
4055 init_set_fd(int fd, rb_fdset_t *fds)
4056 {
4057  if (fd < 0) {
4058  return 0;
4059  }
4060  rb_fd_init(fds);
4061  rb_fd_set(fd, fds);
4062 
4063  return fds;
4064 }
4065 
4066 int
4068  struct timeval *timeout)
4069 {
4070  struct select_set set;
4071 
4072  set.th = GET_THREAD();
4074  set.max = max;
4075  set.rset = read;
4076  set.wset = write;
4077  set.eset = except;
4078  set.timeout = timeout;
4079 
4080  if (!set.rset && !set.wset && !set.eset) {
4081  if (!timeout) {
4083  return 0;
4084  }
4086  return 0;
4087  }
4088 
4089  set.sigwait_fd = rb_sigwait_fd_get(set.th);
4090  if (set.sigwait_fd >= 0) {
4091  if (set.rset)
4092  rb_fd_set(set.sigwait_fd, set.rset);
4093  else
4094  set.rset = init_set_fd(set.sigwait_fd, &set.orig_rset);
4095  if (set.sigwait_fd >= set.max) {
4096  set.max = set.sigwait_fd + 1;
4097  }
4098  }
4099 #define fd_init_copy(f) do { \
4100  if (set.f) { \
4101  rb_fd_resize(set.max - 1, set.f); \
4102  if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4103  rb_fd_init_copy(&set.orig_##f, set.f); \
4104  } \
4105  } \
4106  else { \
4107  rb_fd_no_init(&set.orig_##f); \
4108  } \
4109  } while (0)
4110  fd_init_copy(rset);
4111  fd_init_copy(wset);
4112  fd_init_copy(eset);
4113 #undef fd_init_copy
4114 
4115  return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4116 }
4117 
4118 #ifdef USE_POLL
4119 
4120 /* The same with linux kernel. TODO: make platform independent definition. */
4121 #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4122 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4123 #define POLLEX_SET (POLLPRI)
4124 
4125 #ifndef POLLERR_SET /* defined for FreeBSD for now */
4126 # define POLLERR_SET (0)
4127 #endif
4128 
4129 /*
4130  * returns a mask of events
4131  */
int
rb_wait_for_single_fd(int fd, int events, struct timeval *timeout)
{
    struct pollfd fds[2];
    int result = 0, lerrno;
    rb_hrtime_t *to, rel, end = 0;
    int drained;
    nfds_t nfds;
    rb_unblock_function_t *ubf;
    struct waiting_fd wfd;
    int state;

    /* register fd so rb_notify_fd_close() can find us while we wait */
    wfd.th = GET_THREAD();
    wfd.fd = fd;
    list_add(&wfd.th->vm->waiting_fds, &wfd.wfd_node);
    EC_PUSH_TAG(wfd.th->ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
        timeout_prepare(&to, &rel, &end, timeout);
        fds[0].fd = fd;
        fds[0].events = (short)events;
        fds[0].revents = 0;
        do {
            /* fds[1] is the process signal pipe, when this thread owns it */
            fds[1].fd = rb_sigwait_fd_get(wfd.th);

            if (fds[1].fd >= 0) {
                fds[1].events = POLLIN;
                fds[1].revents = 0;
                nfds = 2;
                ubf = ubf_sigwait;
            }
            else {
                nfds = 1;
                ubf = ubf_select;
            }

            lerrno = 0;
            BLOCKING_REGION(wfd.th, {
                const rb_hrtime_t *sto;
                struct timespec ts;

                /* may shorten the sleep to one quantum */
                sto = sigwait_timeout(wfd.th, fds[1].fd, to, &drained);
                if (!RUBY_VM_INTERRUPTED(wfd.th->ec)) {
                    result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, sto), 0);
                    /* errno may be clobbered below; save it now */
                    if (result < 0) lerrno = errno;
                }
            }, ubf, wfd.th, TRUE);

            if (fds[1].fd >= 0) {
                /* hide the sigwait fd's readiness from the caller */
                if (result > 0 && fds[1].revents) {
                    result--;
                    (void)check_signals_nogvl(wfd.th, fds[1].fd);
                } else {
                    (void)check_signals_nogvl(wfd.th, -1);
                }
                rb_sigwait_fd_put(wfd.th, fds[1].fd);
                rb_sigwait_fd_migrate(wfd.th->vm);
            }
            RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
        } while (wait_retryable(&result, lerrno, to, end));
    }
    EC_POP_TAG();
    /* always unregister, even if an exception is propagating */
    list_del(&wfd.wfd_node);
    if (state) {
        EC_JUMP_TAG(wfd.th->ec, state);
    }

    if (result < 0) {
        errno = lerrno;
        return -1;
    }

    if (fds[0].revents & POLLNVAL) {
        errno = EBADF;
        return -1;
    }

    /*
     * POLLIN, POLLOUT have a different meanings from select(2)'s read/write bit.
     * Therefore we need to fix it up.
     */
    result = 0;
    if (fds[0].revents & POLLIN_SET)
        result |= RB_WAITFD_IN;
    if (fds[0].revents & POLLOUT_SET)
        result |= RB_WAITFD_OUT;
    if (fds[0].revents & POLLEX_SET)
        result |= RB_WAITFD_PRI;

    /* all requested events are ready if there is an error */
    if (fds[0].revents & POLLERR_SET)
        result |= events;

    return result;
}
4227 #else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
4228 struct select_args {
4229  union {
4230  int fd;
4231  int error;
4232  } as;
4236  struct waiting_fd wfd;
4237  struct timeval *tv;
4238 };
4239 
4240 static VALUE
4241 select_single(VALUE ptr)
4242 {
4243  struct select_args *args = (struct select_args *)ptr;
4244  int r;
4245 
4246  r = rb_thread_fd_select(args->as.fd + 1,
4247  args->read, args->write, args->except, args->tv);
4248  if (r == -1)
4249  args->as.error = errno;
4250  if (r > 0) {
4251  r = 0;
4252  if (args->read && rb_fd_isset(args->as.fd, args->read))
4253  r |= RB_WAITFD_IN;
4254  if (args->write && rb_fd_isset(args->as.fd, args->write))
4255  r |= RB_WAITFD_OUT;
4256  if (args->except && rb_fd_isset(args->as.fd, args->except))
4257  r |= RB_WAITFD_PRI;
4258  }
4259  return (VALUE)r;
4260 }
4261 
4262 static VALUE
4263 select_single_cleanup(VALUE ptr)
4264 {
4265  struct select_args *args = (struct select_args *)ptr;
4266 
4267  list_del(&args->wfd.wfd_node);
4268  if (args->read) rb_fd_term(args->read);
4269  if (args->write) rb_fd_term(args->write);
4270  if (args->except) rb_fd_term(args->except);
4271 
4272  return (VALUE)-1;
4273 }
4274 
int
rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
{
    rb_fdset_t rfds, wfds, efds;
    struct select_args args;
    int r;
    VALUE ptr = (VALUE)&args;

    /* build one single-fd set per requested event class */
    args.as.fd = fd;
    args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
    args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
    args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
    args.tv = tv;
    args.wfd.fd = fd;
    args.wfd.th = GET_THREAD();

    /* register fd for rb_notify_fd_close(); cleanup handler undoes it */
    list_add(&args.wfd.th->vm->waiting_fds, &args.wfd.wfd_node);
    r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
    if (r == -1)
        errno = args.as.error; /* saved by select_single */

    return r;
}
4298 #endif /* ! USE_POLL */
4299 
4300 /*
4301  * for GC
4302  */
4303 
4304 #ifdef USE_CONSERVATIVE_STACK_END
4305 void
4307 {
4308  VALUE stack_end;
4309  *stack_end_p = &stack_end;
4310 }
4311 #endif
4312 
4313 /*
4314  *
4315  */
4316 
4317 void
4319 {
4320  /* mth must be main_thread */
4321  if (rb_signal_buff_size() > 0) {
4322  /* wakeup main thread */
4323  threadptr_trap_interrupt(mth);
4324  }
4325 }
4326 
4327 static void
4328 timer_thread_function(void)
4329 {
4330  volatile rb_execution_context_t *ec;
4331 
4332  /* for time slice */
4335  if (ec) RUBY_VM_SET_TIMER_INTERRUPT(ec);
4336 }
4337 
/* Report a fatal fd-related error from async-signal context:
 * append "(fd)" to mesg when it fits, then abort via
 * rb_async_bug_errno (async-signal-safe). */
static void
async_bug_fd(const char *mesg, int errno_arg, int fd)
{
    char buff[64];
    size_t n = strlcpy(buff, mesg, sizeof(buff));
    if (n < sizeof(buff)-3) {
        ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
    }
    rb_async_bug_errno(buff, errno_arg);
}
4348 
4349 /* VM-dependent API is not available for this function */
/* Drain the signal communication pipe/eventfd.  Returns TRUE if at
 * least one wakeup byte was consumed (used by rb_sigwait_sleep). */
static int
consume_communication_pipe(int fd)
{
#if USE_EVENTFD
    uint64_t buff[1];
#else
    /* buffer can be shared because no one refers to them. */
    static char buff[1024];
#endif
    ssize_t result;
    int ret = FALSE; /* for rb_sigwait_sleep */

    /*
     * disarm UBF_TIMER before we read, because it can become
     * re-armed at any time via sighandler and the pipe will refill
     * We can disarm it because this thread is now processing signals
     * and we do not want unnecessary SIGVTALRM
     */
    ubf_timer_disarm();

    while (1) {
        result = read(fd, buff, sizeof(buff));
        if (result > 0) {
            ret = TRUE;
            /* eventfd reads exactly 8 bytes; a short pipe read means
             * the pipe is drained, so we can stop */
            if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
                return ret;
            }
        }
        else if (result == 0) {
            return ret;
        }
        else if (result < 0) {
            int e = errno;
            switch (e) {
              case EINTR:
                continue; /* retry */
              case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
              case EWOULDBLOCK:
#endif
                return ret;
              default:
                async_bug_fd("consume_communication_pipe: read", e, fd);
            }
        }
    }
}
4397 
4398 static int
4399 check_signals_nogvl(rb_thread_t *th, int sigwait_fd)
4400 {
4401  rb_vm_t *vm = GET_VM(); /* th may be 0 */
4402  int ret = sigwait_fd >= 0 ? consume_communication_pipe(sigwait_fd) : FALSE;
4403  ubf_wakeup_all_threads();
4405  if (rb_signal_buff_size()) {
4406  if (th == vm->main_thread)
4407  /* no need to lock + wakeup if already in main thread */
4409  else
4410  threadptr_trap_interrupt(vm->main_thread);
4411  ret = TRUE; /* for SIGCHLD_LOSSY && rb_sigwait_sleep */
4412  }
4413  return ret;
4414 }
4415 
void
rb_thread_stop_timer_thread(void) /* signature restored; lost in extraction */
{
    /* Stop the timer thread (if running) and release its resources. */
    if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
        native_reset_timer_thread();
    }
}
4423 
void
rb_thread_reset_timer_thread(void) /* signature restored; lost in extraction */
{
    native_reset_timer_thread();
}
4429 
4430 void
4432 {
4433  system_working = 1;
4434  rb_thread_create_timer_thread();
4435 }
4436 
4437 static int
4438 clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4439 {
4440  int i;
4441  VALUE coverage = (VALUE)val;
4442  VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4443  VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4444 
4445  if (lines) {
4446  if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4447  rb_ary_clear(lines);
4448  }
4449  else {
4450  int i;
4451  for (i = 0; i < RARRAY_LEN(lines); i++) {
4452  if (RARRAY_AREF(lines, i) != Qnil)
4453  RARRAY_ASET(lines, i, INT2FIX(0));
4454  }
4455  }
4456  }
4457  if (branches) {
4458  VALUE counters = RARRAY_AREF(branches, 1);
4459  for (i = 0; i < RARRAY_LEN(counters); i++) {
4460  RARRAY_ASET(counters, i, INT2FIX(0));
4461  }
4462  }
4463 
4464  return ST_CONTINUE;
4465 }
4466 
4467 void
4469 {
4470  VALUE coverages = rb_get_coverages();
4471  if (RTEST(coverages)) {
4472  rb_hash_foreach(coverages, clear_coverage_i, 0);
4473  }
4474 }
4475 
4476 #if defined(HAVE_WORKING_FORK)
4477 static void
4478 rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4479 {
4480  rb_thread_t *i = 0;
4481  rb_vm_t *vm = th->vm;
4482  vm->main_thread = th;
4483 
4484  gvl_atfork(th->vm);
4485  ubf_list_atfork();
4486 
4487  list_for_each(&vm->living_threads, i, vmlt_node) {
4488  atfork(i, th);
4489  }
4490  rb_vm_living_threads_init(vm);
4491  rb_vm_living_threads_insert(vm, th);
4492 
4493  /* may be held by MJIT threads in parent */
4496 
4497  /* may be held by any thread in parent */
4499 
4500  vm->fork_gen++;
4501 
4502  vm->sleeper = 0;
4504 }
4505 
4506 static void
4507 terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4508 {
4509  if (th != current_th) {
4510  rb_mutex_abandon_keeping_mutexes(th);
4511  rb_mutex_abandon_locking_mutex(th);
4512  thread_cleanup_func(th, TRUE);
4513  }
4514 }
4515 
4516 void rb_fiber_atfork(rb_thread_t *);
4517 void
4518 rb_thread_atfork(void)
4519 {
4520  rb_thread_t *th = GET_THREAD();
4521  rb_thread_atfork_internal(th, terminate_atfork_i);
4522  th->join_list = NULL;
4523  rb_fiber_atfork(th);
4524 
4525  /* We don't want reproduce CVE-2003-0900. */
4527 
4528  /* For child, starting MJIT worker thread in this place which is safer than immediately after `after_fork_ruby`. */
4530 }
4531 
4532 static void
4533 terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4534 {
4535  if (th != current_th) {
4536  thread_cleanup_func_before_exec(th);
4537  }
4538 }
4539 
4540 void
4542 {
4543  rb_thread_t *th = GET_THREAD();
4544  rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
4545 }
4546 #else
/* No-op stubs for platforms without a working fork(2).
 * Both signatures were lost in extraction and are restored. */
void
rb_thread_atfork(void)
{
}

void
rb_thread_atfork_before_exec(void)
{
}
4556 #endif
4557 
4558 struct thgroup {
4561 };
4562 
/* dsize callback for ObjectSpace: the struct is fixed-size, so the
 * pointer itself is not inspected. */
static size_t
thgroup_memsize(const void *ptr)
{
    return sizeof(struct thgroup);
}
4568 
4569 static const rb_data_type_t thgroup_data_type = {
4570  "thgroup",
4571  {NULL, RUBY_TYPED_DEFAULT_FREE, thgroup_memsize,},
4573 };
4574 
4575 /*
4576  * Document-class: ThreadGroup
4577  *
4578  * ThreadGroup provides a means of keeping track of a number of threads as a
4579  * group.
4580  *
4581  * A given Thread object can only belong to one ThreadGroup at a time; adding
4582  * a thread to a new group will remove it from any previous group.
4583  *
4584  * Newly created threads belong to the same group as the thread from which they
4585  * were created.
4586  */
4587 
4588 /*
4589  * Document-const: Default
4590  *
4591  * The default ThreadGroup created when Ruby starts; all Threads belong to it
4592  * by default.
4593  */
4594 static VALUE
4595 thgroup_s_alloc(VALUE klass)
4596 {
4597  VALUE group;
4598  struct thgroup *data;
4599 
4600  group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
4601  data->enclosed = 0;
4602  data->group = group;
4603 
4604  return group;
4605 }
4606 
4607 /*
4608  * call-seq:
4609  * thgrp.list -> array
4610  *
4611  * Returns an array of all existing Thread objects that belong to this group.
4612  *
4613  * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
4614  */
4615 
4616 static VALUE
4617 thgroup_list(VALUE group)
4618 {
4619  VALUE ary = rb_ary_new();
4620  rb_vm_t *vm = GET_THREAD()->vm;
4621  rb_thread_t *th = 0;
4622 
4623  list_for_each(&vm->living_threads, th, vmlt_node) {
4624  if (th->thgroup == group) {
4625  rb_ary_push(ary, th->self);
4626  }
4627  }
4628  return ary;
4629 }
4630 
4631 
4632 /*
4633  * call-seq:
4634  * thgrp.enclose -> thgrp
4635  *
4636  * Prevents threads from being added to or removed from the receiving
4637  * ThreadGroup.
4638  *
4639  * New threads can still be started in an enclosed ThreadGroup.
4640  *
4641  * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
4642  * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
4643  * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
4644  * tg.add thr
4645  * #=> ThreadError: can't move from the enclosed thread group
4646  */
4647 
4648 static VALUE
4649 thgroup_enclose(VALUE group)
4650 {
4651  struct thgroup *data;
4652 
4653  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4654  data->enclosed = 1;
4655 
4656  return group;
4657 }
4658 
4659 
4660 /*
4661  * call-seq:
4662  * thgrp.enclosed? -> true or false
4663  *
4664  * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
4665  */
4666 
4667 static VALUE
4668 thgroup_enclosed_p(VALUE group)
4669 {
4670  struct thgroup *data;
4671 
4672  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4673  if (data->enclosed)
4674  return Qtrue;
4675  return Qfalse;
4676 }
4677 
4678 
4679 /*
4680  * call-seq:
4681  * thgrp.add(thread) -> thgrp
4682  *
4683  * Adds the given +thread+ to this group, removing it from any other
4684  * group to which it may have previously been a member.
4685  *
4686  * puts "Initial group is #{ThreadGroup::Default.list}"
4687  * tg = ThreadGroup.new
4688  * t1 = Thread.new { sleep }
4689  * t2 = Thread.new { sleep }
4690  * puts "t1 is #{t1}"
4691  * puts "t2 is #{t2}"
4692  * tg.add(t1)
4693  * puts "Initial group now #{ThreadGroup::Default.list}"
4694  * puts "tg group now #{tg.list}"
4695  *
4696  * This will produce:
4697  *
4698  * Initial group is #<Thread:0x401bdf4c>
4699  * t1 is #<Thread:0x401b3c90>
4700  * t2 is #<Thread:0x401b3c18>
4701  * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4702  * tg group now #<Thread:0x401b3c90>
4703  */
4704 
4705 static VALUE
4706 thgroup_add(VALUE group, VALUE thread)
4707 {
4708  rb_thread_t *target_th = rb_thread_ptr(thread);
4709  struct thgroup *data;
4710 
4711  if (OBJ_FROZEN(group)) {
4712  rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4713  }
4714  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4715  if (data->enclosed) {
4716  rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4717  }
4718 
4719  if (!target_th->thgroup) {
4720  return Qnil;
4721  }
4722 
4723  if (OBJ_FROZEN(target_th->thgroup)) {
4724  rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4725  }
4726  TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
4727  if (data->enclosed) {
4729  "can't move from the enclosed thread group");
4730  }
4731 
4732  target_th->thgroup = group;
4733  return group;
4734 }
4735 
4736 /*
4737  * Document-class: ThreadShield
4738  */
/* GC mark callback: DATA_PTR holds the mutex VALUE directly, so mark it
 * to keep the underlying Mutex object alive. */
static void
thread_shield_mark(void *ptr)
{
    rb_gc_mark((VALUE)ptr);
}
4744 
4745 static const rb_data_type_t thread_shield_data_type = {
4746  "thread_shield",
4747  {thread_shield_mark, 0, 0,},
4749 };
4750 
/* Wrap a freshly allocated mutex as the shield's payload; the mutex
 * VALUE is stored directly in DATA_PTR. */
static VALUE
thread_shield_alloc(VALUE klass)
{
    return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
}
4756 
4757 #define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
4758 #define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
4759 #define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
4760 #define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
4762 static inline unsigned int
4763 rb_thread_shield_waiting(VALUE b)
4764 {
4766 }
4767 
/* Increment the flag-packed waiting count; raises on overflow. */
static inline void
rb_thread_shield_waiting_inc(VALUE b)
{
    unsigned int w = rb_thread_shield_waiting(b);
    w++;
    if (w > THREAD_SHIELD_WAITING_MAX)
        rb_raise(rb_eRuntimeError, "waiting count overflow");
    /* clear the old count bits, then store the new count */
    RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
    RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
}
4778 
/* Decrement the flag-packed waiting count; raises on underflow. */
static inline void
rb_thread_shield_waiting_dec(VALUE b)
{
    unsigned int w = rb_thread_shield_waiting(b);
    if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
    w--;
    /* clear the old count bits, then store the new count */
    RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
    RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
}
4788 
4789 VALUE
4791 {
4792  VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
4793  rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
4794  return thread_shield;
4795 }
4796 
4797 /*
4798  * Wait a thread shield.
4799  *
4800  * Returns
4801  * true: acquired the thread shield
4802  * false: the thread shield was destroyed and no other threads waiting
4803  * nil: the thread shield was destroyed but still in use
4804  */
4805 VALUE
4807 {
4808  VALUE mutex = GetThreadShieldPtr(self);
4809  rb_mutex_t *m;
4810 
4811  if (!mutex) return Qfalse;
4812  m = mutex_ptr(mutex);
4813  if (m->th == GET_THREAD()) return Qnil;
4814  rb_thread_shield_waiting_inc(self);
4815  rb_mutex_lock(mutex);
4816  rb_thread_shield_waiting_dec(self);
4817  if (DATA_PTR(self)) return Qtrue;
4818  rb_mutex_unlock(mutex);
4819  return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
4820 }
4821 
4822 static VALUE
4823 thread_shield_get_mutex(VALUE self)
4824 {
4825  VALUE mutex = GetThreadShieldPtr(self);
4826  if (!mutex)
4827  rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
4828  return mutex;
4829 }
4830 
4831 /*
4832  * Release a thread shield, and return true if it has waiting threads.
4833  */
4834 VALUE
4836 {
4837  VALUE mutex = thread_shield_get_mutex(self);
4838  rb_mutex_unlock(mutex);
4839  return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
4840 }
4841 
4842 /*
4843  * Release and destroy a thread shield, and return true if it has waiting threads.
4844  */
4845 VALUE
4847 {
4848  VALUE mutex = thread_shield_get_mutex(self);
4849  DATA_PTR(self) = 0;
4850  rb_mutex_unlock(mutex);
4851  return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
4852 }
4853 
/* Accessor for the per-execution-context recursion-detection hash. */
static VALUE
threadptr_recursive_hash(rb_thread_t *th)
{
    return th->ec->local_storage_recursive_hash;
}
4859 
/* Setter counterpart of threadptr_recursive_hash(). */
static void
threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
{
    th->ec->local_storage_recursive_hash = hash;
}
4865 
4866 ID rb_frame_last_func(void);
4867 
4868 /*
4869  * Returns the current "recursive list" used to detect recursion.
4870  * This list is a hash table, unique for the current thread and for
4871  * the current __callee__.
4872  */
4873 
4874 static VALUE
4875 recursive_list_access(VALUE sym)
4876 {
4877  rb_thread_t *th = GET_THREAD();
4878  VALUE hash = threadptr_recursive_hash(th);
4879  VALUE list;
4880  if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
4881  hash = rb_ident_hash_new();
4882  threadptr_recursive_hash_set(th, hash);
4883  list = Qnil;
4884  }
4885  else {
4886  list = rb_hash_aref(hash, sym);
4887  }
4888  if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
4889  list = rb_ident_hash_new();
4890  rb_hash_aset(hash, sym, list);
4891  }
4892  return list;
4893 }
4894 
4895 /*
4896  * Returns Qtrue iff obj (or the pair <obj, paired_obj>) is already
4897  * in the recursion list.
4898  * Assumes the recursion list is valid.
4899  */
4900 
4901 static VALUE
4902 recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
4903 {
4904 #if SIZEOF_LONG == SIZEOF_VOIDP
4905  #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
4906 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4907  #define OBJ_ID_EQL(obj_id, other) (RB_TYPE_P((obj_id), T_BIGNUM) ? \
4908  rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
4909 #endif
4910 
4911  VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
4912  if (pair_list == Qundef)
4913  return Qfalse;
4914  if (paired_obj_id) {
4915  if (!RB_TYPE_P(pair_list, T_HASH)) {
4916  if (!OBJ_ID_EQL(paired_obj_id, pair_list))
4917  return Qfalse;
4918  }
4919  else {
4920  if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
4921  return Qfalse;
4922  }
4923  }
4924  return Qtrue;
4925 }
4926 
4927 /*
4928  * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
4929  * For a single obj, it sets list[obj] to Qtrue.
4930  * For a pair, it sets list[obj] to paired_obj_id if possible,
4931  * otherwise list[obj] becomes a hash like:
4932  * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
4933  * Assumes the recursion list is valid.
4934  */
4935 
4936 static void
4937 recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
4938 {
4939  VALUE pair_list;
4940 
4941  if (!paired_obj) {
4943  }
4944  else if ((pair_list = rb_hash_lookup2(list, obj, Qundef)) == Qundef) {
4945  rb_hash_aset(list, obj, paired_obj);
4946  }
4947  else {
4948  if (!RB_TYPE_P(pair_list, T_HASH)){
4949  VALUE other_paired_obj = pair_list;
4950  pair_list = rb_hash_new();
4951  rb_hash_aset(pair_list, other_paired_obj, Qtrue);
4952  rb_hash_aset(list, obj, pair_list);
4953  }
4954  rb_hash_aset(pair_list, paired_obj, Qtrue);
4955  }
4956 }
4957 
4958 /*
4959  * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
4960  * For a pair, if list[obj] is a hash, then paired_obj_id is
4961  * removed from the hash and no attempt is made to simplify
4962  * list[obj] from {only_one_paired_id => true} to only_one_paired_id
4963  * Assumes the recursion list is valid.
4964  */
4965 
4966 static int
4967 recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
4968 {
4969  if (paired_obj) {
4970  VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
4971  if (pair_list == Qundef) {
4972  return 0;
4973  }
4974  if (RB_TYPE_P(pair_list, T_HASH)) {
4975  rb_hash_delete_entry(pair_list, paired_obj);
4976  if (!RHASH_EMPTY_P(pair_list)) {
4977  return 1; /* keep hash until is empty */
4978  }
4979  }
4980  }
4982  return 1;
4983 }
4984 
4991 };
4992 
4993 static VALUE
4994 exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
4995 {
4996  struct exec_recursive_params *p = (void *)data;
4997  return (*p->func)(p->obj, p->arg, FALSE);
4998 }
4999 
5000 /*
5001  * Calls func(obj, arg, recursive), where recursive is non-zero if the
5002  * current method is called recursively on obj, or on the pair <obj, pairid>
5003  * If outer is 0, then the innermost func will be called with recursive set
5004  * to Qtrue, otherwise the outermost func will be called. In the latter case,
5005  * all inner func are short-circuited by throw.
5006  * Implementation details: the value thrown is the recursive list which is
5007  * proper to the current method and unlikely to be caught anywhere else.
5008  * list[recursive_key] is used as a flag for the outermost call.
5009  */
5010 
/*
 * Core of rb_exec_recursive* (protocol documented in the comment above).
 * Left byte-for-byte intact: the cross-branch `goto invalid` and the
 * trailing volatile read of `p` make this sensitive to exact statement
 * order.
 */
static VALUE
exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
{
    VALUE result = Qundef;
    const ID mid = rb_frame_last_func();
    const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
    struct exec_recursive_params p;
    int outermost;
    p.list = recursive_list_access(sym);
    p.obj = obj;
    p.pairid = pairid;
    p.arg = arg;
    /* outermost iff the recursive_key marker is not yet on the list */
    outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);

    if (recursive_check(p.list, p.obj, pairid)) {
        if (outer && !outermost) {
            /* inner recursive call in outer mode: unwind to the outermost
             * frame (caught by rb_catch_protect below, tag == p.list) */
            rb_throw_obj(p.list, p.list);
        }
        return (*func)(obj, arg, TRUE);
    }
    else {
        enum ruby_tag_type state;

        p.func = func;

        if (outermost) {
            /* mark the outermost frame, then run under catch so inner
             * frames can throw back up to here */
            recursive_push(p.list, ID2SYM(recursive_key), 0);
            recursive_push(p.list, p.obj, p.pairid);
            result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
            if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
            if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
            if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
            if (result == p.list) {
                /* an inner frame threw: re-run func with recursive=TRUE */
                result = (*func)(obj, arg, TRUE);
            }
        }
        else {
            volatile VALUE ret = Qundef;
            recursive_push(p.list, p.obj, p.pairid);
            EC_PUSH_TAG(GET_EC());
            if ((state = EC_EXEC_TAG()) == TAG_NONE) {
                ret = (*func)(obj, arg, FALSE);
            }
            EC_POP_TAG();
            /* note: `invalid` is also the target of the gotos in the
             * outermost branch above */
            if (!recursive_pop(p.list, p.obj, p.pairid)) {
              invalid:
                rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
                         "for %+"PRIsVALUE" in %+"PRIsVALUE,
                         sym, rb_thread_current());
            }
            if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
            result = ret;
        }
    }
    /* NOTE(review): volatile read — presumably keeps `p` observable across
     * the non-local jumps above; confirm before removing */
    *(volatile struct exec_recursive_params *)&p;
    return result;
}
5068 
5069 /*
5070  * Calls func(obj, arg, recursive), where recursive is non-zero if the
5071  * current method is called recursively on obj
5072  */
5073 
5074 VALUE
5076 {
5077  return exec_recursive(func, obj, 0, arg, 0);
5078 }
5079 
5080 /*
5081  * Calls func(obj, arg, recursive), where recursive is non-zero if the
5082  * current method is called recursively on the ordered pair <obj, paired_obj>
5083  */
5084 
5085 VALUE
5087 {
5088  return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0);
5089 }
5090 
5091 /*
5092  * If recursion is detected on the current method and obj, the outermost
5093  * func will be called with (obj, arg, Qtrue). All inner func will be
5094  * short-circuited using throw.
5095  */
5096 
5097 VALUE
5099 {
5100  return exec_recursive(func, obj, 0, arg, 1);
5101 }
5102 
5103 /*
5104  * If recursion is detected on the current method, obj and paired_obj,
5105  * the outermost func will be called with (obj, arg, Qtrue). All inner
5106  * func will be short-circuited using throw.
5107  */
5108 
5109 VALUE
5111 {
5112  return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1);
5113 }
5114 
5115 /*
5116  * call-seq:
5117  * thread.backtrace -> array
5118  *
5119  * Returns the current backtrace of the target thread.
5120  *
5121  */
5122 
/* Thread#backtrace implementation: forwards argc/argv unchanged to the
 * VM backtrace service (vm_backtrace.c). */
static VALUE
rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
{
    return rb_vm_thread_backtrace(argc, argv, thval);
}
5128 
5129 /* call-seq:
5130  * thread.backtrace_locations(*args) -> array or nil
5131  *
5132  * Returns the execution stack for the target thread---an array containing
5133  * backtrace location objects.
5134  *
5135  * See Thread::Backtrace::Location for more information.
5136  *
5137  * This method behaves similarly to Kernel#caller_locations except it applies
5138  * to a specific thread.
5139  */
/* Thread#backtrace_locations implementation: forwards argc/argv
 * unchanged to the VM backtrace-locations service (vm_backtrace.c). */
static VALUE
rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
{
    return rb_vm_thread_backtrace_locations(argc, argv, thval);
}
5145 
5146 /*
5147  * Document-class: ThreadError
5148  *
5149  * Raised when an invalid operation is attempted on a thread.
5150  *
5151  * For example, when no other thread has been started:
5152  *
5153  * Thread.stop
5154  *
5155  * This will raises the following exception:
5156  *
5157  * ThreadError: stopping only thread
5158  * note: use sleep to stop forever
5159  */
5160 
/*
 * Registers the Thread and ThreadGroup Ruby-level APIs and boots the
 * thread core (GVL, main thread's default group, timer thread).
 *
 * NOTE(review): the function-name line (Init_Thread, per convention for
 * this init function) and several rb_define_* lines were elided from the
 * extracted text; elisions are annotated below.  Visible code is
 * unchanged.
 */
void
{
/* use the compile-time-constant intern within this function only */
#undef rb_intern
#define rb_intern(str) rb_intern_const(str)

    VALUE cThGroup;
    rb_thread_t *th = GET_THREAD();

    /* symbols used by Thread.handle_interrupt timing arguments */
    sym_never = ID2SYM(rb_intern("never"));
    sym_immediate = ID2SYM(rb_intern("immediate"));
    sym_on_blocking = ID2SYM(rb_intern("on_blocking"));

    /* Thread singleton methods */
    rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
    rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
    rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
    rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
    rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
    rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
    rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
    rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
    rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
    rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
    rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
    rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
    rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
    rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
#if THREAD_DEBUG < 0
    rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
    rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
#endif
    rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
    rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
    rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);

    /* Thread instance methods */
    rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
    rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
    rb_define_method(rb_cThread, "join", thread_join_m, -1);
    rb_define_method(rb_cThread, "value", thread_value, 0);
    /* NOTE(review): a registration line (5200, presumably "kill") was
       elided in this extract */
    rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
    /* NOTE(review): registration lines 5202-5204 were elided in this
       extract */
    rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
    rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
    rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
    rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
    rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
    rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
    rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
    /* NOTE(review): a registration line (5212) was elided in this extract */
    rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
    rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
    rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
    rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
    rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
    rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
    rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
    rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
    rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
    rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
    rb_define_method(rb_cThread, "safe_level", rb_thread_safe_level, 0);
    /* NOTE(review): a registration line (5224) was elided in this extract */
    rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
    rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);

    rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
    rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
    rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
    rb_define_alias(rb_cThread, "inspect", "to_s");

    /* NOTE(review): the opening of this call (line 5233 — presumably
       rb_vm_register_special_exception(ruby_error_stream_closed, ...,)
       was elided; only the trailing argument is visible */
    "stream closed in another thread");

    /* ThreadGroup class */
    cThGroup = rb_define_class("ThreadGroup", rb_cObject);
    rb_define_alloc_func(cThGroup, thgroup_s_alloc);
    rb_define_method(cThGroup, "list", thgroup_list, 0);
    rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
    rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
    rb_define_method(cThGroup, "add", thgroup_add, 1);

    {
        /* the booting thread becomes a member of the default group */
        th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
        rb_define_const(cThGroup, "Default", th->thgroup);
    }

    recursive_key = rb_intern("__recursive_key__");
    /* NOTE(review): line 5249 was elided in this extract */

    /* init thread core */
    {
        /* main thread setting */
        {
            /* acquire global vm lock */
            gvl_init(th->vm);
            gvl_acquire(th->vm, th);
            /* NOTE(review): lines 5258-5264 (remaining main-thread setup)
               were elided in this extract */
        }
    }

    rb_thread_create_timer_thread();

    /* suppress warnings on cygwin, mingw and mswin.*/
    (void)native_mutex_trylock;

    Init_thread_sync();
}
5275 
5276 int
5278 {
5279  rb_thread_t *th = ruby_thread_from_native();
5280 
5281  return th != 0;
5282 }
5283 
5284 static void
5285 debug_deadlock_check(rb_vm_t *vm, VALUE msg)
5286 {
5287  rb_thread_t *th = 0;
5288  VALUE sep = rb_str_new_cstr("\n ");
5289 
5290  rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5291  vm_living_thread_num(vm), vm->sleeper, (void *)GET_THREAD(), (void *)vm->main_thread);
5292  list_for_each(&vm->living_threads, th, vmlt_node) {
5293  rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5294  "native:%"PRI_THREAD_ID" int:%u",
5295  th->self, (void *)th, thread_id_str(th), th->ec->interrupt_flag);
5296  if (th->locking_mutex) {
5297  rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5298  rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5299  (void *)mutex->th, rb_mutex_num_waiting(mutex));
5300  }
5301  {
5302  rb_thread_list_t *list = th->join_list;
5303  while (list) {
5304  rb_str_catf(msg, "\n depended by: tb_thread_id:%p", (void *)list->th);
5305  list = list->next;
5306  }
5307  }
5308  rb_str_catf(msg, "\n ");
5309  rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, 0, 0), sep));
5310  rb_str_catf(msg, "\n");
5311  }
5312 }
5313 
5314 static void
5315 rb_check_deadlock(rb_vm_t *vm)
5316 {
5317  int found = 0;
5318  rb_thread_t *th = 0;
5319 
5320  if (vm_living_thread_num(vm) > vm->sleeper) return;
5321  if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5322  if (patrol_thread && patrol_thread != GET_THREAD()) return;
5323 
5324  list_for_each(&vm->living_threads, th, vmlt_node) {
5325  if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5326  found = 1;
5327  }
5328  else if (th->locking_mutex) {
5329  rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5330 
5331  if (mutex->th == th || (!mutex->th && !list_empty(&mutex->waitq))) {
5332  found = 1;
5333  }
5334  }
5335  if (found)
5336  break;
5337  }
5338 
5339  if (!found) {
5340  VALUE argv[2];
5341  argv[0] = rb_eFatal;
5342  argv[1] = rb_str_new2("No live threads left. Deadlock?");
5343  debug_deadlock_check(vm, argv[1]);
5344  vm->sleeper--;
5345  rb_threadptr_raise(vm->main_thread, 2, argv);
5346  }
5347 }
5348 
/* Event hook: bumps the per-line execution counter (or records the line
 * once in oneshot mode) for the currently executing iseq's coverage. */
static void
update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
{
    const rb_control_frame_t *cfp = GET_EC()->cfp;
    VALUE coverage = rb_iseq_coverage(cfp->iseq);
    /* coverage arrays are internal T_ARRAYs with a cleared klass */
    if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
        VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
        if (lines) {
            long line = rb_sourceline() - 1;  /* 0-based index into lines */
            long count;
            VALUE num;
            void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
            if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
                /* NOTE(review): a line (5362, presumably the
                   rb_iseq_clear_event_flags call disabling further events
                   for this position) was elided in this extract */
                rb_ary_push(lines, LONG2FIX(line + 1));
                return;
            }
            if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
                return;
            }
            num = RARRAY_AREF(lines, line);
            if (!FIXNUM_P(num)) return;
            count = FIX2LONG(num) + 1;
            if (POSFIXABLE(count)) {  /* saturate rather than overflow */
                RARRAY_ASET(lines, line, LONG2FIX(count));
            }
        }
    }
}
5378 
/* Event hook: bumps the branch-execution counter for the branch whose
 * event fired at the current pc. */
static void
update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
{
    const rb_control_frame_t *cfp = GET_EC()->cfp;
    VALUE coverage = rb_iseq_coverage(cfp->iseq);
    if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
        VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
        if (branches) {
            long pc = cfp->pc - cfp->iseq->body->iseq_encoded - 1;
            /* NOTE(review): a line (5388) deriving `idx` and declaring
               `count` from pc was elided in this extract; both are used
               below */
            VALUE counters = RARRAY_AREF(branches, 1);
            VALUE num = RARRAY_AREF(counters, idx);
            count = FIX2LONG(num) + 1;
            if (POSFIXABLE(count)) {  /* saturate rather than overflow */
                RARRAY_ASET(counters, idx, LONG2FIX(count));
            }
        }
    }
}
5398 
/*
 * Resolves a method entry to its source location, following alias and
 * refinement indirections.  On success fills resolved_location with
 * {path, beg_lineno, beg_column, end_lineno, end_column} and returns
 * the resolved method entry; returns NULL when no location exists.
 */
const rb_method_entry_t *
rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
{
    VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;

  retry:
    switch (me->def->type) {
      case VM_METHOD_TYPE_ISEQ: {
        const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
        /* NOTE(review): a line (5408, presumably the declaration of `loc`
           pointing at iseq->body->location) was elided in this extract */
        path = rb_iseq_path(iseq);
        beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
        beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
        end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
        end_pos_column = INT2FIX(loc->code_location.end_pos.column);
        break;
      }
      case VM_METHOD_TYPE_BMETHOD: {
        /* NOTE(review): a line (5417, presumably extracting `iseq` from
           the bmethod's proc) was elided in this extract */
        if (iseq) {
            rb_iseq_location_t *loc;
            rb_iseq_check(iseq);
            path = rb_iseq_path(iseq);
            loc = &iseq->body->location;
            beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
            beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
            end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
            end_pos_column = INT2FIX(loc->code_location.end_pos.column);
            break;
        }
        return NULL;
      }
      case VM_METHOD_TYPE_ALIAS:
        /* follow the alias chain to the original definition */
        me = me->def->body.alias.original_me;
        goto retry;
      /* NOTE(review): a line (5434, presumably
         `case VM_METHOD_TYPE_REFINED:`) was elided in this extract */
        me = me->def->body.refined.orig_me;
        if (!me) return NULL;
        goto retry;
      default:
        return NULL;
    }

    /* found */
    if (RB_TYPE_P(path, T_ARRAY)) {
        /* path may be [realpath, path]; take the second element */
        path = rb_ary_entry(path, 1);
        if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just for the case... */
    }
    if (resolved_location) {
        resolved_location[0] = path;
        resolved_location[1] = beg_pos_lineno;
        resolved_location[2] = beg_pos_column;
        resolved_location[3] = end_pos_lineno;
        resolved_location[4] = end_pos_column;
    }
    return me;
}
5456 
/* Event hook: increments the call counter for the current frame's
 * method entry in the me2counter hash (keyed by the entry pointer). */
static void
update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
{
    const rb_control_frame_t *cfp = GET_EC()->cfp;
    /* NOTE(review): a line (5461, presumably obtaining `cme` from the
       frame) was elided in this extract */
    const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
    VALUE rcount;
    long count;

    /* NOTE(review): a line (5466) was elided in this extract —
       presumably resolving `me` before the NULL check below */
    if (!me) return;

    rcount = rb_hash_aref(me2counter, (VALUE) me);
    count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
    if (POSFIXABLE(count)) {  /* saturate rather than overflow */
        rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
    }
}
5475 
/* Returns the VM-wide coverages table.
 * NOTE(review): the function-name line was elided in the extracted text —
 * presumably rb_get_coverages(void); confirm against upstream. */
VALUE
{
    return GET_VM()->coverages;
}
5481 
/* Returns the VM-wide coverage mode bitmask (COVERAGE_TARGET_* flags).
 * NOTE(review): the function-name line was elided in the extracted text —
 * presumably rb_get_coverage_mode(void); confirm against upstream. */
int
{
    return GET_VM()->coverage_mode;
}
5487 
/* Installs the coverages table and mode on the VM and registers the
 * measurement event hooks for the selected targets. */
void
rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
{
    GET_VM()->coverages = coverages;
    GET_VM()->coverage_mode = mode;
    /* NOTE(review): a line (5493, presumably registering the
       update_line_coverage hook) was elided in this extract */
    if (mode & COVERAGE_TARGET_BRANCHES) {
        /* NOTE(review): line 5495 (presumably registering the
           update_branch_coverage hook) was elided in this extract */
    }
    if (mode & COVERAGE_TARGET_METHODS) {
        /* NOTE(review): line 5498 (presumably registering the
           update_method_coverage hook with me2counter) was elided in
           this extract */
    }
}
5501 
5502 /* Make coverage arrays empty so old covered files are no longer tracked. */
/* Disables coverage measurement: clears the VM coverages table and
 * removes every hook registered by rb_set_coverages().
 * NOTE(review): the function-name line (rb_reset_coverages, per the
 * file's cross-reference entry thread.c:5504) and lines 5506-5507
 * (presumably clearing the per-file coverage arrays) were elided in
 * this extract. */
void
{
    GET_VM()->coverages = Qfalse;
    rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
    if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
        rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
    }
    if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
        rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
    }
}
5517 
5518 VALUE
5520 {
5521  VALUE coverage = rb_ary_tmp_new_fill(3);
5522  VALUE lines = Qfalse, branches = Qfalse;
5523  int mode = GET_VM()->coverage_mode;
5524 
5525  if (mode & COVERAGE_TARGET_LINES) {
5526  lines = n > 0 ? rb_ary_tmp_new_fill(n) : rb_ary_tmp_new(0);
5527  }
5528  RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
5529 
5530  if (mode & COVERAGE_TARGET_BRANCHES) {
5531  branches = rb_ary_tmp_new_fill(2);
5532  /* internal data structures for branch coverage:
5533  *
5534  * [[base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column,
5535  * target_type_1, target_first_lineno_1, target_first_column_1, target_last_lineno_1, target_last_column_1, target_counter_index_1,
5536  * target_type_2, target_first_lineno_2, target_first_column_2, target_last_lineno_2, target_last_column_2, target_counter_index_2, ...],
5537  * ...]
5538  *
5539  * Example: [[:case, 1, 0, 4, 3,
5540  * :when, 2, 8, 2, 9, 0,
5541  * :when, 3, 8, 3, 9, 1, ...],
5542  * ...]
5543  */
5544  RARRAY_ASET(branches, 0, rb_ary_tmp_new(0));
5545  /* branch execution counters */
5546  RARRAY_ASET(branches, 1, rb_ary_tmp_new(0));
5547  }
5548  RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
5549 
5550  return coverage;
5551 }
5552 
/* Runs b_proc(data) with all interrupts masked (Object => :never); the
 * mask is popped again via rb_ensure even if b_proc raises.
 * NOTE(review): the function-name line was elided in the extracted text —
 * presumably rb_uninterruptible(b_proc, data); confirm the exact
 * callback-pointer type against upstream. */
VALUE
{
    VALUE interrupt_mask = rb_ident_hash_new();
    rb_thread_t *cur_th = GET_THREAD();

    rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
    OBJ_FREEZE_RAW(interrupt_mask);
    rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);

    /* rb_ary_pop on the mask stack is the ensure handler */
    return rb_ensure(b_proc, data, rb_ary_pop, cur_th->pending_interrupt_mask_stack);
}
idNULL
@ idNULL
Definition: id.h:113
rb_execution_context_struct::raised_flag
uint8_t raised_flag
Definition: vm_core.h:878
RB_GC_SAVE_MACHINE_CONTEXT
#define RB_GC_SAVE_MACHINE_CONTEXT(th)
Definition: thread.c:163
rb_thread_reset_timer_thread
void rb_thread_reset_timer_thread(void)
Definition: thread.c:4425
rb_intern
#define rb_intern(str)
rb_reset_random_seed
void rb_reset_random_seed(void)
Definition: random.c:1498
rb_nativethread_lock_unlock
void rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
Definition: thread.c:451
ruby_stop
void ruby_stop(int ex)
Calls ruby_cleanup() and exits the process.
Definition: eval.c:287
ID
unsigned long ID
Definition: ruby.h:103
rb_threadptr_interrupt
void rb_threadptr_interrupt(rb_thread_t *th)
Definition: thread.c:510
rb_to_hash_type
VALUE rb_to_hash_type(VALUE hash)
Definition: hash.c:1818
rb_str_concat
VALUE rb_str_concat(VALUE, VALUE)
Definition: string.c:3065
rb_define_class
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition: class.c:649
sig
int sig
Definition: rb_mjit_min_header-2.7.0.h:10422
list_add
#define list_add(h, n)
Definition: rb_mjit_min_header-2.7.0.h:9004
fd_set
#define fd_set
Definition: rb_mjit_min_header-2.7.0.h:1271
void
void
Definition: rb_mjit_min_header-2.7.0.h:13273
rb_thread_shield_destroy
VALUE rb_thread_shield_destroy(VALUE self)
Definition: thread.c:4846
rb_method_bmethod_struct::proc
VALUE proc
Definition: method.h:152
rb_fd_select
int rb_fd_select(int, rb_fdset_t *, rb_fdset_t *, rb_fdset_t *, struct timeval *)
TypedData_Make_Struct
#define TypedData_Make_Struct(klass, type, data_type, sval)
Definition: ruby.h:1244
select_set
Definition: thread.c:3923
rb_fd_init_copy
#define rb_fd_init_copy(d, s)
Definition: intern.h:413
TRUE
#define TRUE
Definition: nkf.h:175
ruby_current_execution_context_ptr
rb_execution_context_t * ruby_current_execution_context_ptr
Definition: vm.c:373
rb_ec_error_print
void rb_ec_error_print(rb_execution_context_t *volatile ec, volatile VALUE errinfo)
Definition: eval_error.c:346
rb_memory_id
VALUE rb_memory_id(VALUE obj)
Definition: gc.c:3737
rb_ec_reset_raised
int rb_ec_reset_raised(rb_execution_context_t *ec)
Definition: thread.c:2354
rb_check_id
ID rb_check_id(volatile VALUE *)
Returns ID for the given name if it is interned already, or 0.
Definition: symbol.c:919
RB_HRTIME_PER_SEC
#define RB_HRTIME_PER_SEC
Definition: hrtime.h:37
rb_exc_new
VALUE rb_exc_new(VALUE etype, const char *ptr, long len)
Definition: error.c:959
exec_recursive_params::list
VALUE list
Definition: thread.c:4987
RUBY_VM_INTERRUPTED
#define RUBY_VM_INTERRUPTED(ec)
Definition: vm_core.h:1840
rb_str_new2
#define rb_str_new2
Definition: intern.h:903
ruby_snprintf
int ruby_snprintf(char *str, size_t n, char const *fmt,...)
Definition: sprintf.c:1031
handle_interrupt_timing
handle_interrupt_timing
Definition: thread.c:1771
rb_enc_name
#define rb_enc_name(enc)
Definition: encoding.h:177
rb_thread_alloc
VALUE rb_thread_alloc(VALUE klass)
Definition: vm.c:2756
THREAD_RUNNABLE
@ THREAD_RUNNABLE
Definition: vm_core.h:783
rb_thread_terminate_all
void rb_thread_terminate_all(void)
Definition: thread.c:559
select_args::write
VALUE write
Definition: io.c:9208
RUBY_VM_INTERRUPTED_ANY
#define RUBY_VM_INTERRUPTED_ANY(ec)
Definition: vm_core.h:1842
id
const int id
Definition: nkf.c:209
rb_thread_alone
int rb_thread_alone(void)
Definition: thread.c:3492
VM_METHOD_TYPE_REFINED
@ VM_METHOD_TYPE_REFINED
refinement
Definition: method.h:113
vsnprintf
int int vsnprintf(char *__restrict, size_t, const char *__restrict, __gnuc_va_list) __attribute__((__format__(__printf__
RB_PASS_CALLED_KEYWORDS
#define RB_PASS_CALLED_KEYWORDS
Definition: ruby.h:1980
FIX2INT
#define FIX2INT(x)
Definition: ruby.h:717
rb_thread_struct::invoke_type
enum rb_thread_struct::@190 invoke_type
RUBY_ASSERT_ALWAYS
#define RUBY_ASSERT_ALWAYS(expr)
Definition: assert.h:34
rb_iseq_struct
Definition: vm_core.h:456
join_arg
Definition: thread.c:972
rb_vm_invoke_proc
MJIT_FUNC_EXPORTED VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
Definition: vm.c:1249
rb_ident_hash_new
VALUE rb_ident_hash_new(void)
Definition: hash.c:4203
rb_hash_new
VALUE rb_hash_new(void)
Definition: hash.c:1501
rb_threadptr_unlock_all_locking_mutexes
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
Definition: thread.c:542
rb_timespec_now
void rb_timespec_now(struct timespec *)
Definition: time.c:1873
path
VALUE path
Definition: rb_mjit_min_header-2.7.0.h:7351
howmany
#define howmany(x, y)
Definition: rb_mjit_min_header-2.7.0.h:11087
rb_vm_thread_backtrace_locations
VALUE rb_vm_thread_backtrace_locations(int argc, const VALUE *argv, VALUE thval)
Definition: vm_backtrace.c:966
rb_ary_tmp_new_fill
VALUE rb_ary_tmp_new_fill(long capa)
Definition: array.c:776
rb_warn
void rb_warn(const char *fmt,...)
Definition: error.c:313
memset
void * memset(void *, int, size_t)
rb_block_given_p
int rb_block_given_p(void)
Determines if the current method is given a block.
Definition: eval.c:897
ruby_error_stream_closed
@ ruby_error_stream_closed
Definition: vm_core.h:511
rb_hrtime_now
rb_hrtime_t rb_hrtime_now(void)
Definition: thread.c:1229
gc.h
short
#define short
Definition: rb_mjit_min_header-2.7.0.h:2877
RBASIC_CLEAR_CLASS
#define RBASIC_CLEAR_CLASS(obj)
Definition: internal.h:1981
rb_execution_context_struct::local_storage
st_table * local_storage
Definition: vm_core.h:860
rb_thread_sleep_interruptible
void rb_thread_sleep_interruptible(void)
Definition: thread.c:1328
rb_clear_coverages
void rb_clear_coverages(void)
Definition: thread.c:4468
FD_SET
#define FD_SET(fd, set)
Definition: win32.h:614
EWOULDBLOCK
#define EWOULDBLOCK
Definition: rubysocket.h:134
rb_mutex_struct::th
rb_thread_t * th
Definition: thread_sync.c:45
rb_gc_force_recycle
void rb_gc_force_recycle(VALUE obj)
Definition: gc.c:7011
INT2FIX
#define INT2FIX(i)
Definition: ruby.h:263
rb_to_symbol
VALUE rb_to_symbol(VALUE name)
Definition: string.c:11156
st_is_member
#define st_is_member(table, key)
Definition: st.h:97
EINTR
#define EINTR
Definition: rb_mjit_min_header-2.7.0.h:10941
RUBY_EVENT_HOOK_FLAG_RAW_ARG
@ RUBY_EVENT_HOOK_FLAG_RAW_ARG
Definition: debug.h:100
PRIxVALUE
#define PRIxVALUE
Definition: ruby.h:164
select_args::except
VALUE except
Definition: io.c:9208
RB_FL_SET_RAW
#define RB_FL_SET_RAW(x, f)
Definition: ruby.h:1322
rb_thread_shield_new
VALUE rb_thread_shield_new(void)
Definition: thread.c:4790
time
time_t time(time_t *_timer)
rb_thread_kill
VALUE rb_thread_kill(VALUE thread)
Definition: thread.c:2446
rb_threadptr_signal_exit
void rb_threadptr_signal_exit(rb_thread_t *th)
Definition: thread.c:2334
rb_empty_keyword_given_p
int rb_empty_keyword_given_p(void)
Definition: eval.c:918
EXEC_EVENT_HOOK
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_)
Definition: vm_core.h:1935
FD_CLR
#define FD_CLR(f, s)
Definition: win32.h:632
rb_throw_obj
void rb_throw_obj(VALUE tag, VALUE value)
Definition: vm_eval.c:2193
RB_WAITFD_PRI
#define RB_WAITFD_PRI
Definition: io.h:52
rb_thread_sleep_deadly
void rb_thread_sleep_deadly(void)
Definition: thread.c:1321
rb_hash_aref
VALUE rb_hash_aref(VALUE hash, VALUE key)
Definition: hash.c:1964
st_init_numtable
st_table * st_init_numtable(void)
Definition: st.c:653
rb_exec_recursive_paired
VALUE rb_exec_recursive_paired(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
Definition: thread.c:5086
COVERAGE_INDEX_BRANCHES
#define COVERAGE_INDEX_BRANCHES
Definition: internal.h:2212
RB_WAITFD_IN
#define RB_WAITFD_IN
Definition: io.h:51
rb_thread_wakeup
VALUE rb_thread_wakeup(VALUE thread)
Definition: thread.c:2540
VALUE
unsigned long VALUE
Definition: ruby.h:102
select_set::max
int max
Definition: thread.c:3924
GET_VM
#define GET_VM()
Definition: vm_core.h:1764
rb_eArgError
VALUE rb_eArgError
Definition: error.c:923
rb_iseq_location_struct::code_location
rb_code_location_t code_location
Definition: vm_core.h:278
list_del
#define list_del(n)
Definition: rb_mjit_min_header-2.7.0.h:9041
select_set::orig_rset
rb_fdset_t orig_rset
Definition: thread.c:3930
st_delete
int st_delete(st_table *tab, st_data_t *key, st_data_t *value)
Definition: st.c:1418
rb_thread_struct::invoke_arg
union rb_thread_struct::@189 invoke_arg
rb_thread_local_aref
VALUE rb_thread_local_aref(VALUE thread, ID id)
Definition: thread.c:3216
RB_TYPE_P
#define RB_TYPE_P(obj, type)
Definition: ruby.h:560
POSTPONED_JOB_INTERRUPT_MASK
@ POSTPONED_JOB_INTERRUPT_MASK
Definition: vm_core.h:1832
rb_callable_method_entry_struct
Definition: method.h:59
TAG_FATAL
#define TAG_FATAL
Definition: vm_core.h:205
rb_cModule
RUBY_EXTERN VALUE rb_cModule
Definition: ruby.h:2034
rb_thread_struct::proc
VALUE proc
Definition: vm_core.h:965
rb_thread_execute_interrupts
void rb_thread_execute_interrupts(VALUE thval)
Definition: thread.c:2284
rb_reset_coverages
void rb_reset_coverages(void)
Definition: thread.c:5504
rb_enc_get
rb_encoding * rb_enc_get(VALUE obj)
Definition: encoding.c:872
rb_enc_asciicompat
#define rb_enc_asciicompat(enc)
Definition: encoding.h:245
rb_obj_call_init_kw
void rb_obj_call_init_kw(VALUE obj, int argc, const VALUE *argv, int kw_splat)
Definition: eval.c:1687
OBJ_ID_EQL
#define OBJ_ID_EQL(obj_id, other)
rb_gc_set_stack_end
void rb_gc_set_stack_end(VALUE **stack_end_p)
Definition: thread.c:4306
rb_hrtime_t
uint64_t rb_hrtime_t
Definition: hrtime.h:47
rb_method_definition_struct::type
rb_method_type_t type
Definition: rb_mjit_min_header-2.7.0.h:8877
rb_thread_struct::func
VALUE(* func)(void *)
Definition: vm_core.h:970
int
__inline__ int
Definition: rb_mjit_min_header-2.7.0.h:2839
PENDING_INTERRUPT_MASK
@ PENDING_INTERRUPT_MASK
Definition: vm_core.h:1831
old
VALUE ID VALUE old
Definition: rb_mjit_min_header-2.7.0.h:16133
rb_mutex_struct::next_mutex
struct rb_mutex_struct * next_mutex
Definition: thread_sync.c:46
join_arg::limit
rb_hrtime_t * limit
Definition: thread.c:974
rb_thread_struct::thread_id
rb_nativethread_id_t thread_id
Definition: vm_core.h:927
rb_sigwait_fd_migrate
void rb_sigwait_fd_migrate(rb_vm_t *)
Definition: process.c:998
rb_vm_struct::default_params
struct rb_vm_struct::@185 default_params
rb_iseq_path
VALUE rb_iseq_path(const rb_iseq_t *iseq)
Definition: iseq.c:1027
EC_JUMP_TAG
#define EC_JUMP_TAG(ec, st)
Definition: eval_intern.h:184
FD_ISSET
#define FD_ISSET(f, s)
Definition: win32.h:635
UINT2NUM
#define UINT2NUM(x)
Definition: ruby.h:1610
rb_thread_struct::to_kill
unsigned int to_kill
Definition: vm_core.h:933
uint64_t
unsigned long long uint64_t
Definition: sha2.h:102
rb_eSignal
VALUE rb_eSignal
Definition: error.c:917
rb_vm_struct::living_thread_num
int living_thread_num
Definition: vm_core.h:597
rb_frame_last_func
ID rb_frame_last_func(void)
Returns the ID of the last method in the call stack.
Definition: eval.c:1238
NOINLINE
NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start))
rb_iseq_constant_body::location
rb_iseq_location_t location
Definition: vm_core.h:399
waiting_fd::th
rb_thread_t * th
Definition: thread.c:138
RB_PASS_EMPTY_KEYWORDS
#define RB_PASS_EMPTY_KEYWORDS
Definition: ruby.h:1979
rb_code_position_struct::lineno
int lineno
Definition: node.h:131
exec_recursive_params::obj
VALUE obj
Definition: thread.c:4988
rb_proc_location
VALUE rb_proc_location(VALUE self)
Definition: proc.c:1256
rb_thread_stop
VALUE rb_thread_stop(void)
Definition: thread.c:2596
HAVE_VA_ARGS_MACRO
#define HAVE_VA_ARGS_MACRO
Definition: rb_mjit_min_header-2.7.0.h:156
SLEEP_SPURIOUS_CHECK
@ SLEEP_SPURIOUS_CHECK
Definition: thread.c:103
rb_method_definition_struct::refined
rb_method_refined_t refined
Definition: method.h:173
rb_hash_lookup2
VALUE rb_hash_lookup2(VALUE hash, VALUE key, VALUE def)
Definition: hash.c:1977
rb_thread_interrupted
int rb_thread_interrupted(VALUE thval)
Definition: thread.c:1379
rb_vm_ep_local_ep
const VALUE * rb_vm_ep_local_ep(const VALUE *ep)
Definition: vm.c:75
rb_thread_atfork_before_exec
void rb_thread_atfork_before_exec(void)
Definition: thread.c:4553
rb_method_refined_struct::orig_me
struct rb_method_entry_struct * orig_me
Definition: method.h:147
Qundef
#define Qundef
Definition: ruby.h:470
THROW_DATA_P
#define THROW_DATA_P(err)
Definition: internal.h:1201
thread_debug
#define thread_debug
Definition: thread.c:330
rb_define_singleton_method
void rb_define_singleton_method(VALUE obj, const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a singleton method for obj.
Definition: class.c:1755
rb_signal_buff_size
int rb_signal_buff_size(void)
Definition: signal.c:726
BUSY_WAIT_SIGNALS
#define BUSY_WAIT_SIGNALS
Definition: thread.c:379
rb_thread_call_without_gvl
void * rb_thread_call_without_gvl(void *(*func)(void *data), void *data1, rb_unblock_function_t *ubf, void *data2)
Definition: thread.c:1588
rb_make_exception
VALUE rb_make_exception(int argc, const VALUE *argv)
Make an Exception object from the list of arguments in a manner similar to Kernel#raise.
Definition: eval.c:850
EXIT_FAILURE
#define EXIT_FAILURE
Definition: eval_intern.h:32
rb_define_method
void rb_define_method(VALUE klass, const char *name, VALUE(*func)(ANYARGS), int argc)
Definition: class.c:1551
GET_EC
#define GET_EC()
Definition: vm_core.h:1766
RUBY_VM_CHECK_INTS
#define RUBY_VM_CHECK_INTS(ec)
Definition: vm_core.h:1862
INT2NUM
#define INT2NUM(x)
Definition: ruby.h:1609
select
int select(int __n, _types_fd_set *__readfds, _types_fd_set *__writefds, _types_fd_set *__exceptfds, struct timeval *__timeout)
rb_eIOError
RUBY_EXTERN VALUE rb_eIOError
Definition: ruby.h:2064
rb_ary_shift
VALUE rb_ary_shift(VALUE ary)
Definition: array.c:1294
ptr
struct RIMemo * ptr
Definition: debug.c:74
rb_method_iseq_struct::iseqptr
rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
Definition: method.h:127
Qfalse
#define Qfalse
Definition: ruby.h:467
THREAD_KILLED
@ THREAD_KILLED
Definition: vm_core.h:786
INTERRUPT_NEVER
@ INTERRUPT_NEVER
Definition: thread.c:1775
BUFSIZ
#define BUFSIZ
Definition: rb_mjit_min_header-2.7.0.h:1474
ssize_t
_ssize_t ssize_t
Definition: rb_mjit_min_header-2.7.0.h:1329
select_args::tv
struct timeval * tv
Definition: thread.c:4237
RARRAY_ASET
#define RARRAY_ASET(a, i, v)
Definition: ruby.h:1102
rb_nogvl
void * rb_nogvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2, int flags)
Definition: thread.c:1452
rb_vm_struct::waiting_fds
struct list_head waiting_fds
Definition: vm_core.h:594
rb_get_coverage_mode
int rb_get_coverage_mode(void)
Definition: thread.c:5483
THREAD_STOPPED
@ THREAD_STOPPED
Definition: vm_core.h:784
NULL
#define NULL
Definition: _sdbm.c:101
exit
void exit(int __status) __attribute__((__noreturn__))
ISEQ_PC2BRANCHINDEX
#define ISEQ_PC2BRANCHINDEX(iseq)
Definition: iseq.h:39
rb_thread_io_blocking_region
VALUE rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
Definition: thread.c:1595
uint32_t
unsigned int uint32_t
Definition: sha2.h:101
rb_fdset_t::fdset
_types_fd_set * fdset
Definition: rb_mjit_min_header-2.7.0.h:5707
select_args::fd
int fd
Definition: thread.c:4230
PRIsVALUE
#define PRIsVALUE
Definition: ruby.h:166
select_args::read
VALUE read
Definition: io.c:9208
RUBY_EVENT_CALL
#define RUBY_EVENT_CALL
Definition: ruby.h:2245
hrtime.h
st_insert
int st_insert(st_table *tab, st_data_t key, st_data_t value)
Definition: st.c:1171
FIX2LONG
#define FIX2LONG(x)
Definition: ruby.h:394
ID2SYM
#define ID2SYM(x)
Definition: ruby.h:414
rb_set_coverages
void rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
Definition: thread.c:5489
rb_method_definition_struct::bmethod
rb_method_bmethod_t bmethod
Definition: method.h:174
rb_thread_struct::ec
rb_execution_context_t * ec
Definition: vm_core.h:915
rb_blocking_function_t
VALUE rb_blocking_function_t(void *)
Definition: intern.h:941
T_OBJECT
#define T_OBJECT
Definition: ruby.h:523
Init_Thread
void Init_Thread(void)
Definition: thread.c:5162
rb_ary_pop
VALUE rb_ary_pop(VALUE ary)
Definition: array.c:1241
waiting_fd::wfd_node
struct list_node wfd_node
Definition: thread.c:137
VM_ASSERT
#define VM_ASSERT(expr)
Definition: vm_core.h:56
rb_thread_wait_for
void rb_thread_wait_for(struct timeval time)
Definition: thread.c:1347
rb_define_alias
void rb_define_alias(VALUE klass, const char *name1, const char *name2)
Defines an alias of a method.
Definition: class.c:1800
rb_thread_fd_close
void rb_thread_fd_close(int fd)
Definition: thread.c:2386
rb_thread_struct::abort_on_exception
unsigned int abort_on_exception
Definition: vm_core.h:934
RB_BLOCK_CALL_FUNC_ARGLIST
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
Definition: ruby.h:1964
rb_iseq_coverage
VALUE rb_iseq_coverage(const rb_iseq_t *iseq)
Definition: iseq.c:1086
rb_check_arity
#define rb_check_arity
Definition: intern.h:347
rb_thread_atfork
void rb_thread_atfork(void)
Definition: thread.c:4548
pc
rb_control_frame_t const VALUE * pc
Definition: rb_mjit_min_header-2.7.0.h:16923
VM_BLOCK_HANDLER_NONE
#define VM_BLOCK_HANDLER_NONE
Definition: vm_core.h:1291
RARRAY_LENINT
#define RARRAY_LENINT(ary)
Definition: ruby.h:1071
v
int VALUE v
Definition: rb_mjit_min_header-2.7.0.h:12332
rb_mutex_lock
VALUE rb_mutex_lock(VALUE mutex)
Definition: thread_sync.c:333
rb_thread_struct::priority
int8_t priority
Definition: vm_core.h:937
rb_vm_register_special_exception
#define rb_vm_register_special_exception(sp, e, m)
Definition: vm_core.h:1726
ruby_thread_init_stack
void ruby_thread_init_stack(rb_thread_t *th)
Definition: thread.c:647
rb_exit
void rb_exit(int status)
Definition: process.c:4190
rb_fdset_t
Definition: rb_mjit_min_header-2.7.0.h:5705
RUBY_TYPED_DEFAULT_FREE
#define RUBY_TYPED_DEFAULT_FREE
Definition: ruby.h:1203
rb_hash_delete_entry
VALUE rb_hash_delete_entry(VALUE hash, VALUE key)
Definition: hash.c:2253
rb_raise
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:2669
thread_win32.c
RCLASS_ORIGIN
#define RCLASS_ORIGIN(c)
Definition: internal.h:1075
rb_sourceline
int rb_sourceline(void)
Definition: vm.c:1346
rb_ary_entry
VALUE rb_ary_entry(VALUE ary, long offset)
Definition: array.c:1512
rb_exec_recursive
VALUE rb_exec_recursive(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE arg)
Definition: thread.c:5075
rb_ivar_get
VALUE rb_ivar_get(VALUE, ID)
Definition: variable.c:1070
rb_execution_context_struct::cfp
rb_control_frame_t * cfp
Definition: vm_core.h:847
select_set::wset
rb_fdset_t * wset
Definition: thread.c:3928
rb_get_coverages
VALUE rb_get_coverages(void)
Definition: thread.c:5477
rb_thread_start_timer_thread
void rb_thread_start_timer_thread(void)
Definition: thread.c:4431
if
if((ID)(DISPID) nameid !=nameid)
Definition: win32ole.c:357
timev.h
EAGAIN
#define EAGAIN
Definition: rb_mjit_min_header-2.7.0.h:10948
obj
const VALUE VALUE obj
Definition: rb_mjit_min_header-2.7.0.h:5742
rb_xrealloc_mul_add
void * rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z)
Definition: gc.c:10163
rb_obj_class
VALUE rb_obj_class(VALUE)
Equivalent to Object#class in Ruby.
Definition: object.c:217
read
_ssize_t read(int __fd, void *__buf, size_t __nbyte)
rb_method_definition_struct::body
union rb_method_definition_struct::@118 body
select_args::wfd
struct waiting_fd wfd
Definition: thread.c:4236
THREAD_STOPPED_FOREVER
@ THREAD_STOPPED_FOREVER
Definition: vm_core.h:785
PRIuSIZE
#define PRIuSIZE
Definition: ruby.h:208
PRI_THREAD_ID
#define PRI_THREAD_ID
Definition: thread.c:337
T_ICLASS
#define T_ICLASS
Definition: ruby.h:525
memcpy
void * memcpy(void *__restrict, const void *__restrict, size_t)
rb_threadptr_check_signal
void rb_threadptr_check_signal(rb_thread_t *mth)
Definition: thread.c:4318
rb_fd_term
void rb_fd_term(rb_fdset_t *)
rb_vm_proc_local_ep
const VALUE * rb_vm_proc_local_ep(VALUE proc)
Definition: thread.c:653
rb_thread_fd_writable
int rb_thread_fd_writable(int fd)
Definition: thread.c:4048
RUBY_INTERNAL_EVENT_SWITCH
#define RUBY_INTERNAL_EVENT_SWITCH
Definition: ruby.h:2265
strerror
RUBY_EXTERN char * strerror(int)
Definition: strerror.c:11
rb_resolve_me_location
const rb_method_entry_t * rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
Definition: thread.c:5400
rb_iseq_location_struct
Definition: vm_core.h:272
do_select_update
#define do_select_update()
RUBY_EVENT_COVERAGE_LINE
#define RUBY_EVENT_COVERAGE_LINE
Definition: vm_core.h:1956
DATA_PTR
#define DATA_PTR(dta)
Definition: ruby.h:1175
fprintf
int fprintf(FILE *__restrict, const char *__restrict,...) __attribute__((__format__(__printf__
select_args::as
union select_args::@160 as
FD_SETSIZE
#define FD_SETSIZE
Definition: rb_mjit_min_header-2.7.0.h:1264
LIKELY
#define LIKELY(x)
Definition: ffi_common.h:125
__pthread_mutex_t
Definition: rb_mjit_min_header-2.7.0.h:1346
EC_POP_TAG
#define EC_POP_TAG()
Definition: eval_intern.h:137
rb_vm_struct::sleeper
int sleeper
Definition: vm_core.h:607
rb_thread_call_without_gvl2
void * rb_thread_call_without_gvl2(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Definition: thread.c:1581
rb_encoding
const typedef OnigEncodingType rb_encoding
Definition: encoding.h:115
rb_fdset_t::maxfd
int maxfd
Definition: rb_mjit_min_header-2.7.0.h:5706
select_args
Definition: io.c:9207
POSFIXABLE
#define POSFIXABLE(f)
Definition: ruby.h:397
rb_method_definition_struct::alias
rb_method_alias_t alias
Definition: method.h:172
RUBY_THREAD_PRIORITY_MAX
#define RUBY_THREAD_PRIORITY_MAX
Definition: thread.c:87
PRIu64
#define PRIu64
Definition: thread.c:1272
rb_mutex_struct::waitq
struct list_head waitq
Definition: thread_sync.c:47
rb_vm_struct::main_thread
struct rb_thread_struct * main_thread
Definition: vm_core.h:581
COVERAGE_TARGET_BRANCHES
#define COVERAGE_TARGET_BRANCHES
Definition: internal.h:2214
rb_add_event_hook2
void rb_add_event_hook2(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flag)
Definition: vm_trace.c:170
rb_postponed_job_flush
void rb_postponed_job_flush(rb_vm_t *vm)
Definition: vm_trace.c:1656
GetThreadShieldPtr
#define GetThreadShieldPtr(obj)
Definition: thread.c:4757
iseq.h
list_head
Definition: rb_mjit_min_header-2.7.0.h:8975
rb_thread_struct::blocking_region_buffer
void * blocking_region_buffer
Definition: vm_core.h:941
thread_id_str
#define thread_id_str(th)
Definition: thread.c:336
rb_thread_struct::status
enum rb_thread_status status
Definition: rb_mjit_min_header-2.7.0.h:9892
rb_ary_tmp_new
VALUE rb_ary_tmp_new(long capa)
Definition: array.c:768
rb_nativethread_lock_initialize
void rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
Definition: thread.c:433
COVERAGE_TARGET_ONESHOT_LINES
#define COVERAGE_TARGET_ONESHOT_LINES
Definition: internal.h:2216
i
uint32_t i
Definition: rb_mjit_min_header-2.7.0.h:5464
list_for_each
#define list_for_each(h, i, member)
Definition: rb_mjit_min_header-2.7.0.h:9094
THREAD_LOCAL_STORAGE_INITIALISED
#define THREAD_LOCAL_STORAGE_INITIALISED
Definition: thread.c:106
rb_thread_struct::thgroup
VALUE thgroup
Definition: vm_core.h:943
rb_thread_create
VALUE rb_thread_create(VALUE(*fn)(void *), void *arg)
Definition: thread.c:966
RB_WAITFD_OUT
#define RB_WAITFD_OUT
Definition: io.h:53
idLocals
@ idLocals
Definition: rb_mjit_min_header-2.7.0.h:8730
RHASH_EMPTY_P
#define RHASH_EMPTY_P(h)
Definition: ruby.h:1131
rb_trace_arg_struct
Definition: vm_core.h:1874
select_set::orig_wset
rb_fdset_t orig_wset
Definition: thread.c:3931
waiting_fd
Definition: thread.c:136
sym
#define sym(x)
Definition: date_core.c:3716
mask
enum @11::@13::@14 mask
rb_fd_clr
void rb_fd_clr(int, rb_fdset_t *)
rb_to_id
ID rb_to_id(VALUE)
Definition: string.c:11146
rb_execution_context_struct::errinfo
VALUE errinfo
Definition: vm_core.h:875
rb_execution_context_struct::interrupt_flag
rb_atomic_t interrupt_flag
Definition: vm_core.h:853
rb_thread_schedule
void rb_thread_schedule(void)
Definition: thread.c:1408
rb_thread_group
VALUE rb_thread_group(VALUE thread)
Definition: thread.c:2966
rb_unblock_callback
Definition: vm_core.h:816
mjit.h
alloca
#define alloca(size)
Definition: rb_mjit_min_header-2.7.0.h:2487
rb_hash_lookup
VALUE rb_hash_lookup(VALUE hash, VALUE key)
Definition: hash.c:1990
rb_ary_push
VALUE rb_ary_push(VALUE ary, VALUE item)
Definition: array.c:1195
rb_code_position_struct::column
int column
Definition: node.h:132
COMPILER_WARNING_POP
#define COMPILER_WARNING_POP
Definition: internal.h:2665
VM_METHOD_TYPE_BMETHOD
@ VM_METHOD_TYPE_BMETHOD
Definition: method.h:106
EC_EXEC_TAG
#define EC_EXEC_TAG()
Definition: eval_intern.h:181
rb_vm_struct::special_exceptions
const VALUE special_exceptions[ruby_special_error_count]
Definition: vm_core.h:611
va_end
#define va_end(v)
Definition: rb_mjit_min_header-2.7.0.h:3979
RUBY_VM_SET_TIMER_INTERRUPT
#define RUBY_VM_SET_TIMER_INTERRUPT(ec)
Definition: vm_core.h:1836
rb_method_definition_struct::iseq
rb_method_iseq_t iseq
Definition: method.h:169
list_empty
#define list_empty(h)
Definition: rb_mjit_min_header-2.7.0.h:9030
rb_mutex_unlock
VALUE rb_mutex_unlock(VALUE mutex)
Definition: thread_sync.c:403
TypedData_Wrap_Struct
#define TypedData_Wrap_Struct(klass, data_type, sval)
Definition: ruby.h:1231
vm_core.h
stderr
#define stderr
Definition: rb_mjit_min_header-2.7.0.h:1485
RUBY_UBF_IO
#define RUBY_UBF_IO
Definition: intern.h:945
rb_sys_fail
void rb_sys_fail(const char *mesg)
Definition: error.c:2793
BLOCKING_REGION
#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted)
Definition: thread.c:188
rb_eTypeError
VALUE rb_eTypeError
Definition: error.c:922
rb_thread_shield_release
VALUE rb_thread_shield_release(VALUE self)
Definition: thread.c:4835
rb_iseq_remove_coverage_all
void rb_iseq_remove_coverage_all(void)
Definition: iseq.c:1110
RBASIC_CLASS
#define RBASIC_CLASS(obj)
Definition: ruby.h:906
COVERAGE_TARGET_METHODS
#define COVERAGE_TARGET_METHODS
Definition: internal.h:2215
rb_blocking_region_buffer::prev_status
enum rb_thread_status prev_status
Definition: thread.c:153
rb_execution_context_struct::vm_stack
VALUE * vm_stack
Definition: vm_core.h:845
ALLOC
#define ALLOC(type)
Definition: ruby.h:1664
rb_vm_struct::thread_vm_stack_size
size_t thread_vm_stack_size
Definition: vm_core.h:663
rb_method_alias_struct::original_me
struct rb_method_entry_struct * original_me
Definition: method.h:143
EXIT_SUCCESS
#define EXIT_SUCCESS
Definition: error.c:39
rb_thread_main
VALUE rb_thread_main(void)
Definition: thread.c:2697
THREAD_SHIELD_WAITING_MASK
#define THREAD_SHIELD_WAITING_MASK
Definition: thread.c:4758
rb_eRuntimeError
VALUE rb_eRuntimeError
Definition: error.c:920
exec_recursive_params
Definition: thread.c:4985
rb_execution_context_struct::stack_start
VALUE * stack_start
Definition: vm_core.h:887
ALLOCA_N
#define ALLOCA_N(type, n)
Definition: ruby.h:1684
mod
#define mod(x, y)
Definition: date_strftime.c:28
RARRAY_AREF
#define RARRAY_AREF(a, i)
Definition: ruby.h:1101
rb_control_frame_struct
Definition: vm_core.h:760
rb_async_bug_errno
void rb_async_bug_errno(const char *mesg, int errno_arg)
Definition: error.c:688
rb_unblock_function_t
void rb_unblock_function_t(void *)
Definition: intern.h:940
rb_eThreadError
VALUE rb_eThreadError
Definition: eval.c:923
size
int size
Definition: encoding.c:58
strlcpy
RUBY_EXTERN size_t strlcpy(char *, const char *, size_t)
Definition: strlcpy.c:29
rb_mutex_struct
Definition: thread_sync.c:44
EBADF
#define EBADF
Definition: rb_mjit_min_header-2.7.0.h:10946
rb_native_mutex_unlock
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
FALSE
#define FALSE
Definition: nkf.h:174
FIXNUM_P
#define FIXNUM_P(f)
Definition: ruby.h:396
RCLASS_SUPER
#define RCLASS_SUPER(c)
Definition: classext.h:16
rb_thread_sleep_forever
void rb_thread_sleep_forever(void)
Definition: thread.c:1314
rb_fd_resize
#define rb_fd_resize(n, f)
Definition: intern.h:410
COVERAGE_INDEX_LINES
#define COVERAGE_INDEX_LINES
Definition: internal.h:2211
rb_frozen_error_raise
void rb_frozen_error_raise(VALUE frozen_obj, const char *fmt,...)
Definition: error.c:2980
rb_hash_foreach
void rb_hash_foreach(VALUE hash, rb_foreach_func *func, VALUE farg)
Definition: hash.c:1461
list
struct rb_encoding_entry * list
Definition: encoding.c:56
MEMZERO
#define MEMZERO(p, type, n)
Definition: ruby.h:1752
ruby_sigchld_handler
void ruby_sigchld_handler(rb_vm_t *)
Definition: signal.c:1073
STATIC_ASSERT
STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX<=UINT_MAX)
rb_str_new_frozen
VALUE rb_str_new_frozen(VALUE)
Definition: string.c:1203
thgroup
Definition: thread.c:4558
rb_str_new_cstr
#define rb_str_new_cstr(str)
Definition: rb_mjit_min_header-2.7.0.h:6117
rb_vm_struct::thread_abort_on_exception
unsigned int thread_abort_on_exception
Definition: vm_core.h:603
rb_time_timeval
struct timeval rb_time_timeval(VALUE time)
Definition: time.c:2675
thgroup::group
VALUE group
Definition: thread.c:4560
rb_event_hook_func_t
void(* rb_event_hook_func_t)(rb_event_flag_t evflag, VALUE data, VALUE self, ID mid, VALUE klass)
Definition: ruby.h:2279
rb_default_coverage
VALUE rb_default_coverage(int n)
Definition: thread.c:5519
rb_thread_wakeup_timer_thread
void rb_thread_wakeup_timer_thread(int)
rb_fd_copy
void rb_fd_copy(rb_fdset_t *, const _types_fd_set *, int)
me
const rb_callable_method_entry_t * me
Definition: rb_mjit_min_header-2.7.0.h:13226
rb_execution_context_struct::stack_maxsize
size_t stack_maxsize
Definition: vm_core.h:889
StringValueCStr
#define StringValueCStr(v)
Definition: ruby.h:604
USE_EVENTFD
#define USE_EVENTFD
Definition: thread.c:383
GetProcPtr
#define GetProcPtr(obj, ptr)
Definition: vm_core.h:1046
COMPILER_WARNING_PUSH
#define COMPILER_WARNING_PUSH
Definition: internal.h:2664
rb_thread_struct::locking_mutex
VALUE locking_mutex
Definition: vm_core.h:958
key
key
Definition: openssl_missing.h:181
T_HASH
#define T_HASH
Definition: ruby.h:531
rb_fd_dup
void rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
thread_sync.c
rb_cThread
RUBY_EXTERN VALUE rb_cThread
Definition: ruby.h:2047
rb_native_mutex_lock
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
RARRAY_CONST_PTR
#define RARRAY_CONST_PTR(a)
Definition: ruby.h:1072
rb_thread_fd_select
int rb_thread_fd_select(int max, rb_fdset_t *read, rb_fdset_t *write, rb_fdset_t *except, struct timeval *timeout)
Definition: thread.c:4067
rb_ary_clear
VALUE rb_ary_clear(VALUE ary)
Definition: array.c:3862
exec_recursive_params::arg
VALUE arg
Definition: thread.c:4990
CLASS_OF
#define CLASS_OF(v)
Definition: ruby.h:484
ACCESS_ONCE
#define ACCESS_ONCE(type, x)
Definition: internal.h:223
src
__inline__ const void *__restrict src
Definition: rb_mjit_min_header-2.7.0.h:2836
rb_ec_initialize_vm_stack
void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
Definition: vm.c:2683
fmt
const VALUE int int int int int int VALUE char * fmt
Definition: rb_mjit_min_header-2.7.0.h:6462
rb_w32_get_osfhandle
SOCKET rb_w32_get_osfhandle(int)
Definition: win32.c:1078
rb_threadptr_execute_interrupts
MJIT_FUNC_EXPORTED int rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
Definition: thread.c:2194
RUBY_UBF_PROCESS
#define RUBY_UBF_PROCESS
Definition: intern.h:946
waiting_fd::fd
int fd
Definition: thread.c:139
rb_thread_struct::name
VALUE name
Definition: vm_core.h:989
rb_vm_frame_method_entry
const MJIT_STATIC rb_callable_method_entry_t * rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
Definition: rb_mjit_min_header-2.7.0.h:12521
RARRAY_LEN
#define RARRAY_LEN(a)
Definition: ruby.h:1070
rb_thread_struct::vm
rb_vm_t * vm
Definition: vm_core.h:913
st_foreach
int st_foreach(st_table *tab, st_foreach_callback_func *func, st_data_t arg)
Definition: st.c:1718
CLOCK_MONOTONIC
#define CLOCK_MONOTONIC
Definition: win32.h:134
rb_thread_struct::join_list
rb_thread_list_t * join_list
Definition: vm_core.h:961
char
#define char
Definition: rb_mjit_min_header-2.7.0.h:2876
rb_execution_context_struct::machine
struct rb_execution_context_struct::@188 machine
rb_cObject
RUBY_EXTERN VALUE rb_cObject
Definition: ruby.h:2010
rb_event_flag_t
uint32_t rb_event_flag_t
Definition: ruby.h:2278
rb_fd_zero
void rb_fd_zero(rb_fdset_t *)
TIMER_INTERRUPT_MASK
@ TIMER_INTERRUPT_MASK
Definition: vm_core.h:1830
THREAD_SHIELD_WAITING_SHIFT
#define THREAD_SHIELD_WAITING_SHIFT
Definition: thread.c:4759
n
const char size_t n
Definition: rb_mjit_min_header-2.7.0.h:5456
buf
unsigned char buf[MIME_BUF_SIZE]
Definition: nkf.c:4322
rb_str_cat_cstr
#define rb_str_cat_cstr(str, ptr)
Definition: rb_mjit_min_header-2.7.0.h:6126
rb_threadptr_pending_interrupt_clear
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
Definition: thread.c:1751
mjit_child_after_fork
void mjit_child_after_fork(void)
rb_exc_raise
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
Definition: eval.c:667
thread_pthread.c
TypedData_Get_Struct
#define TypedData_Get_Struct(obj, type, data_type, sval)
Definition: ruby.h:1252
join_arg::waiting
rb_thread_t * waiting
Definition: thread.c:973
eKillSignal
#define eKillSignal
Definition: thread.c:132
select_set::rset
rb_fdset_t * rset
Definition: thread.c:3927
rb_bug
void rb_bug(const char *fmt,...)
Definition: error.c:634
rb_control_frame_struct::self
VALUE self
Definition: vm_core.h:764
RUBY_EVENT_THREAD_BEGIN
#define RUBY_EVENT_THREAD_BEGIN
Definition: ruby.h:2255
select_args::error
int error
Definition: thread.c:4231
internal.h
select_args::read
rb_fdset_t * read
Definition: thread.c:4233
T_ARRAY
#define T_ARRAY
Definition: ruby.h:530
arg
VALUE arg
Definition: rb_mjit_min_header-2.7.0.h:5601
THREAD_LOCAL_STORAGE_INITIALISED_P
#define THREAD_LOCAL_STORAGE_INITIALISED_P(th)
Definition: thread.c:107
rb_native_mutex_destroy
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
argv
char ** argv
Definition: ruby.c:223
rb_execution_context_struct::fiber_ptr
rb_fiber_t * fiber_ptr
Definition: vm_core.h:856
rb_write_error_str
RUBY_EXTERN void rb_write_error_str(VALUE mesg)
Definition: io.c:7936
fd_mask
unsigned long fd_mask
Definition: rb_mjit_min_header-2.7.0.h:1265
ST_CONTINUE
@ ST_CONTINUE
Definition: st.h:99
rb_iseq_constant_body::iseq_encoded
VALUE * iseq_encoded
Definition: vm_core.h:325
NUM2TIMET
#define NUM2TIMET(v)
Definition: rb_mjit_min_header-2.7.0.h:120
select_set::orig_eset
rb_fdset_t orig_eset
Definition: thread.c:3932
select_args::except
rb_fdset_t * except
Definition: thread.c:4235
xmalloc
#define xmalloc
Definition: defines.h:211
xrealloc
#define xrealloc
Definition: defines.h:214
thread_native.h
select_set::timeout
struct timeval * timeout
Definition: thread.c:3933
rb_remove_event_hook
int rb_remove_event_hook(rb_event_hook_func_t func)
Definition: vm_trace.c:262
rb_sprintf
VALUE rb_sprintf(const char *format,...)
Definition: sprintf.c:1197
klass
VALUE klass
Definition: rb_mjit_min_header-2.7.0.h:13254
rb_thread_local_aset
VALUE rb_thread_local_aset(VALUE thread, ID id, VALUE val)
Definition: thread.c:3364
THREAD_SHIELD_WAITING_MAX
#define THREAD_SHIELD_WAITING_MAX
Definition: thread.c:4760
st_data_t
unsigned long st_data_t
Definition: rb_mjit_min_header-2.7.0.h:5363
rb_ary_join
VALUE rb_ary_join(VALUE ary, VALUE sep)
Definition: array.c:2347
rb_fiber_close
void rb_fiber_close(rb_fiber_t *fiber)
Definition: cont.c:2057
threadptr_initialized
#define threadptr_initialized(th)
Definition: thread.c:873
timeval
Definition: missing.h:53
RUBY_EVENT_HOOK_FLAG_SAFE
@ RUBY_EVENT_HOOK_FLAG_SAFE
Definition: debug.h:98
rb_thread_sleep
void rb_thread_sleep(int sec)
Definition: thread.c:1385
rb_thread_struct::unblock
struct rb_unblock_callback unblock
Definition: vm_core.h:957
rb_obj_alloc
VALUE rb_obj_alloc(VALUE)
Allocates an instance of klass.
Definition: object.c:1895
str
char str[HTML_ESCAPE_MAX_LEN+1]
Definition: escape.c:18
GET_THREAD
#define GET_THREAD()
Definition: vm_core.h:1765
rb_ary_delete_at
VALUE rb_ary_delete_at(VALUE ary, long pos)
Definition: array.c:3419
rb_thread_struct::pending_interrupt_mask_stack
VALUE pending_interrupt_mask_stack
Definition: vm_core.h:953
rb_thread_struct::value
VALUE value
Definition: vm_core.h:944
rb_fd_init
void rb_fd_init(rb_fdset_t *)
rb_thread_stop_timer_thread
void rb_thread_stop_timer_thread(void)
Definition: thread.c:4417
rb_control_frame_struct::iseq
const rb_iseq_t * iseq
Definition: vm_core.h:763
RUBY_TYPED_FREE_IMMEDIATELY
#define RUBY_TYPED_FREE_IMMEDIATELY
Definition: ruby.h:1207
ruby_native_thread_p
int ruby_native_thread_p(void)
Definition: thread.c:5277
rb_thread_call_with_gvl
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
Definition: thread.c:1662
MEMCPY
#define MEMCPY(p1, p2, type, n)
Definition: ruby.h:1753
FD_ZERO
#define FD_ZERO(p)
Definition: rb_mjit_min_header-2.7.0.h:1275
INTERRUPT_IMMEDIATE
@ INTERRUPT_IMMEDIATE
Definition: thread.c:1773
rb_hash_aset
VALUE rb_hash_aset(VALUE hash, VALUE key, VALUE val)
Definition: hash.c:2779
rb_vm_struct::living_threads
struct list_head living_threads
Definition: vm_core.h:595
rb_iseq_clear_event_flags
void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset)
Definition: iseq.c:1786
select_set::th
rb_thread_t * th
Definition: thread.c:3926
RUBY_EVENT_THREAD_END
#define RUBY_EVENT_THREAD_END
Definition: ruby.h:2256
clock_gettime
int clock_gettime(clockid_t, struct timespec *)
Definition: win32.c:4612
rb_ec_backtrace_str_ary
VALUE rb_ec_backtrace_str_ary(const rb_execution_context_t *ec, long lev, long n)
Definition: vm_backtrace.c:714
NIL_P
#define NIL_P(v)
Definition: ruby.h:482
OBJ_FREEZE_RAW
#define OBJ_FREEZE_RAW(x)
Definition: ruby.h:1376
TAG_NONE
#define TAG_NONE
Definition: vm_core.h:197
rb_sigwait_fd_get
int rb_sigwait_fd_get(const rb_thread_t *)
STACK_DIR_UPPER
#define STACK_DIR_UPPER(a, b)
Definition: gc.h:99
io.h
rb_execution_context_struct::interrupt_mask
rb_atomic_t interrupt_mask
Definition: vm_core.h:854
argc
int argc
Definition: ruby.c:222
VM_METHOD_TYPE_ISEQ
@ VM_METHOD_TYPE_ISEQ
Ruby method.
Definition: method.h:102
select_set::eset
rb_fdset_t * eset
Definition: thread.c:3929
rb_num2dbl
double rb_num2dbl(VALUE)
Converts a Numeric object to double.
Definition: object.c:3616
RB_NOGVL_UBF_ASYNC_SAFE
#define RB_NOGVL_UBF_ASYNC_SAFE
Definition: thread.h:26
rb_code_location_struct::beg_pos
rb_code_position_t beg_pos
Definition: node.h:136
rb_fd_isset
int rb_fd_isset(int, const rb_fdset_t *)
list_node
Definition: rb_mjit_min_header-2.7.0.h:8971
rb_define_const
void rb_define_const(VALUE, const char *, VALUE)
Definition: variable.c:2880
ruby_digitmap
const char ruby_digitmap[]
Definition: bignum.c:38
ruby_debug
#define ruby_debug
Definition: ruby.h:1926
err
int err
Definition: win32.c:135
rb_thread_list_struct::th
struct rb_thread_struct * th
Definition: vm_core.h:825
rb_adjust_argv_kw_splat
VALUE rb_adjust_argv_kw_splat(int *, const VALUE **, int *)
Definition: vm_eval.c:237
rb_get_next_signal
int rb_get_next_signal(void)
Definition: signal.c:756
rb_signal_exec
int rb_signal_exec(rb_thread_t *th, int sig)
Definition: signal.c:1082
rb_data_type_struct
Definition: ruby.h:1148
BUILTIN_TYPE
#define BUILTIN_TYPE(x)
Definition: ruby.h:551
rb_thread_struct::report_on_exception
unsigned int report_on_exception
Definition: vm_core.h:935
rb_vm_struct
Definition: vm_core.h:576
xfree
#define xfree
Definition: defines.h:216
rb_threadptr_signal_raise
void rb_threadptr_signal_raise(rb_thread_t *th, int sig)
Definition: thread.c:2324
RBASIC
#define RBASIC(obj)
Definition: ruby.h:1267
rb_exec_recursive_outer
VALUE rb_exec_recursive_outer(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE arg)
Definition: thread.c:5098
rb_thread_struct::running_time_us
uint32_t running_time_us
Definition: vm_core.h:938
COMPILER_WARNING_IGNORED
#define COMPILER_WARNING_IGNORED(flag)
Definition: internal.h:2667
rb_proc_get_iseq
const rb_iseq_t * rb_proc_get_iseq(VALUE proc, int *is_proc)
Definition: proc.c:1194
int8_t
__int8_t int8_t
Definition: rb_mjit_min_header-2.7.0.h:1165
rb_gc_mark
void rb_gc_mark(VALUE ptr)
Definition: gc.c:5212
thgroup::enclosed
int enclosed
Definition: thread.c:4559
rb_eFatal
VALUE rb_eFatal
Definition: error.c:918
MJIT_FUNC_EXPORTED
#define MJIT_FUNC_EXPORTED
Definition: defines.h:396
rb_thread_to_be_killed
int rb_thread_to_be_killed(VALUE thread)
Definition: thread.c:2472
rb_vm_thread_backtrace
VALUE rb_vm_thread_backtrace(int argc, const VALUE *argv, VALUE thval)
Definition: vm_backtrace.c:960
_
#define _(args)
Definition: dln.h:28
__pthread_t
Definition: rb_mjit_min_header-2.7.0.h:1345
EC_PUSH_TAG
#define EC_PUSH_TAG(ec)
Definition: eval_intern.h:130
rb_notify_fd_close
int rb_notify_fd_close(int fd, struct list_head *busy)
Definition: thread.c:2364
count
int count
Definition: encoding.c:57
rb_vm_struct::thgroup_default
VALUE thgroup_default
Definition: vm_core.h:596
join_arg::target
rb_thread_t * target
Definition: thread.c:973
rb_callable_method_entry_struct::def
struct rb_method_definition_struct *const def
Definition: method.h:62
rb_ec_clear_current_thread_trace_func
void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec)
Definition: vm_trace.c:274
Qtrue
#define Qtrue
Definition: ruby.h:468
rb_str_catf
VALUE rb_str_catf(VALUE str, const char *format,...)
Definition: sprintf.c:1237
errno
int errno
rb_thread_wait_fd
void rb_thread_wait_fd(int fd)
Definition: thread.c:4042
rb_thread_shield_wait
VALUE rb_thread_shield_wait(VALUE self)
Definition: thread.c:4806
OBJ_FROZEN
#define OBJ_FROZEN(x)
Definition: ruby.h:1375
exec_recursive_params::pairid
VALUE pairid
Definition: thread.c:4989
STACK_GROW_DIR_DETECTION
#define STACK_GROW_DIR_DETECTION
Definition: gc.h:98
rb_thread_list
VALUE rb_thread_list(void)
Definition: thread.c:2630
rb_class_path
VALUE rb_class_path(VALUE)
Definition: variable.c:153
rb_vm_struct::ubf_async_safe
volatile int ubf_async_safe
Definition: vm_core.h:600
rb_keyword_given_p
int rb_keyword_given_p(void)
Definition: eval.c:910
rb_wait_for_single_fd
int rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
Definition: thread.c:4276
PRIdVALUE
#define PRIdVALUE
Definition: ruby.h:161
rb_method_entry_struct
Definition: method.h:51
UINT_MAX
#define UINT_MAX
Definition: rb_mjit_min_header-2.7.0.h:4054
rb_iseq_struct::body
struct rb_iseq_constant_body * body
Definition: vm_core.h:460
rb_control_frame_struct::pc
const VALUE * pc
Definition: vm_core.h:761
rb_ary_dup
VALUE rb_ary_dup(VALUE ary)
Definition: array.c:2238
NFDBITS
#define NFDBITS
Definition: rb_mjit_min_header-2.7.0.h:1266
timespec
Definition: missing.h:60
select_args::write
rb_fdset_t * write
Definition: thread.c:4234
LONG2FIX
#define LONG2FIX(i)
Definition: ruby.h:265
va_start
#define va_start(v, l)
Definition: rb_mjit_min_header-2.7.0.h:3978
rb_ivar_set
VALUE rb_ivar_set(VALUE, ID, VALUE)
Definition: variable.c:1300
rb_thread_wakeup_alive
VALUE rb_thread_wakeup_alive(VALUE thread)
Definition: thread.c:2549
T_STRING
#define T_STRING
Definition: ruby.h:528
rb_atomic_t
int rb_atomic_t
Definition: ruby_atomic.h:124
rb_sigwait_fd_put
void rb_sigwait_fd_put(const rb_thread_t *, int fd)
TIMET_MAX_PLUS_ONE
#define TIMET_MAX_PLUS_ONE
Definition: internal.h:248
rb_thread_check_trap_pending
int rb_thread_check_trap_pending(void)
Definition: thread.c:1372
rb_unblock_callback::arg
void * arg
Definition: vm_core.h:818
ATOMIC_CAS
#define ATOMIC_CAS(var, oldval, newval)
Definition: ruby_atomic.h:136
rb_exec_recursive_paired_outer
VALUE rb_exec_recursive_paired_outer(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
Definition: thread.c:5110
rb_thread_struct::interrupt_lock
rb_nativethread_lock_t interrupt_lock
Definition: vm_core.h:956
va_list
__gnuc_va_list va_list
Definition: rb_mjit_min_header-2.7.0.h:836
rb_uninterruptible
VALUE rb_uninterruptible(VALUE(*b_proc)(VALUE), VALUE data)
Definition: thread.c:5554
fd_init_copy
#define fd_init_copy(f)
TRAP_INTERRUPT_MASK
@ TRAP_INTERRUPT_MASK
Definition: vm_core.h:1833
iseq
const rb_iseq_t * iseq
Definition: rb_mjit_min_header-2.7.0.h:13504
rb_thread_struct::pending_interrupt_queue
VALUE pending_interrupt_queue
Definition: vm_core.h:952
rb_thread_struct::self
VALUE self
Definition: vm_core.h:912
SAVE_ROOT_JMPBUF
#define SAVE_ROOT_JMPBUF(th, stmt)
Definition: eval_intern.h:120
rb_catch_protect
VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr)
Definition: vm_eval.c:2324
rb_yield
VALUE rb_yield(VALUE)
Definition: vm_eval.c:1237
RUBY_VM_SET_INTERRUPT
#define RUBY_VM_SET_INTERRUPT(ec)
Definition: vm_core.h:1837
eval_intern.h
COVERAGE_TARGET_LINES
#define COVERAGE_TARGET_LINES
Definition: internal.h:2213
rb_ensure
VALUE rb_ensure(VALUE(*b_proc)(VALUE), VALUE data1, VALUE(*e_proc)(VALUE), VALUE data2)
An equivalent to ensure clause.
Definition: eval.c:1114
rb_vm_struct::fork_gen
rb_serial_t fork_gen
Definition: vm_core.h:590
RARRAY_CONST_PTR_TRANSIENT
#define RARRAY_CONST_PTR_TRANSIENT(a)
Definition: ruby.h:1073
SLEEP_DEADLOCKABLE
@ SLEEP_DEADLOCKABLE
Definition: thread.c:102
ruby_vm_special_exception_copy
MJIT_STATIC VALUE ruby_vm_special_exception_copy(VALUE)
Definition: rb_mjit_min_header-2.7.0.h:12219
rb_ary_new
VALUE rb_ary_new(void)
Definition: array.c:723
rb_code_location_struct::end_pos
rb_code_position_t end_pos
Definition: node.h:137
rb_execution_context_struct::root_svar
VALUE root_svar
Definition: vm_core.h:866
NUM2INT
#define NUM2INT(x)
Definition: ruby.h:715
Qnil
#define Qnil
Definition: ruby.h:469
rb_thread_struct::keeping_mutexes
struct rb_mutex_struct * keeping_mutexes
Definition: vm_core.h:959
RUBY_VM_CHECK_INTS_BLOCKING
#define RUBY_VM_CHECK_INTS_BLOCKING(ec)
Definition: thread.c:202
RUBY_EVENT_COVERAGE_BRANCH
#define RUBY_EVENT_COVERAGE_BRANCH
Definition: vm_core.h:1957
exc
const rb_iseq_t const VALUE exc
Definition: rb_mjit_min_header-2.7.0.h:13504
select_set::sigwait_fd
int sigwait_fd
Definition: thread.c:3925
rb_key_err_raise
#define rb_key_err_raise(mesg, recv, name)
Definition: internal.h:1578
rb_vm_struct::workqueue_lock
rb_nativethread_lock_t workqueue_lock
Definition: vm_core.h:643
thread.h
st_lookup
int st_lookup(st_table *tab, st_data_t key, st_data_t *value)
Definition: st.c:1101
SLEEP_FLAGS
SLEEP_FLAGS
Definition: thread.c:101
rb_eStandardError
VALUE rb_eStandardError
Definition: error.c:919
RUBY_VM_SET_TRAP_INTERRUPT
#define RUBY_VM_SET_TRAP_INTERRUPT(ec)
Definition: vm_core.h:1839
RB_NOGVL_INTR_FAIL
#define RB_NOGVL_INTR_FAIL
Definition: thread.h:25
write
_ssize_t write(int __fd, const void *__buf, size_t __nbyte)
rb_fd_set
void rb_fd_set(int, rb_fdset_t *)
rb_nativethread_lock_destroy
void rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
Definition: thread.c:439
rb_thread_struct
Definition: vm_core.h:910
rb_execution_context_struct::local_storage_recursive_hash
VALUE local_storage_recursive_hash
Definition: vm_core.h:861
rb_thread_list_struct::next
struct rb_thread_list_struct * next
Definition: vm_core.h:824
rb_thread_run
VALUE rb_thread_run(VALUE thread)
Definition: thread.c:2587
NORETURN
NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd))
rb_proc_t
Definition: vm_core.h:1049
rb_vm_gvl_destroy
void rb_vm_gvl_destroy(rb_vm_t *vm)
Definition: thread.c:421
list_for_each_safe
#define list_for_each_safe(h, i, nxt, member)
Definition: rb_mjit_min_header-2.7.0.h:9097
st_table
Definition: st.h:79
rb_thread_list_struct
Definition: vm_core.h:823
rb_thread_check_ints
void rb_thread_check_ints(void)
Definition: thread.c:1362
rb_native_mutex_initialize
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
rb_nativethread_lock_lock
void rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
Definition: thread.c:445
rb_thread_status
rb_thread_status
Definition: vm_core.h:782
RUBY_THREAD_PRIORITY_MIN
#define RUBY_THREAD_PRIORITY_MIN
Definition: thread.c:88
rb_obj_is_kind_of
VALUE rb_obj_is_kind_of(VALUE, VALUE)
Determines if obj is a kind of c.
Definition: object.c:692
ruby_tag_type
ruby_tag_type
Definition: vm_core.h:184
rb_define_alloc_func
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
INTERRUPT_ON_BLOCKING
@ INTERRUPT_ON_BLOCKING
Definition: thread.c:1774
RTEST
#define RTEST(v)
Definition: ruby.h:481
debug.h
VM_METHOD_TYPE_ALIAS
@ VM_METHOD_TYPE_ALIAS
Definition: method.h:108
INTERRUPT_NONE
@ INTERRUPT_NONE
Definition: thread.c:1772
rb_fd_max
#define rb_fd_max(f)
Definition: intern.h:415
rb_unblock_callback::func
rb_unblock_function_t * func
Definition: vm_core.h:817
rb_thread_struct::pending_interrupt_queue_checked
unsigned int pending_interrupt_queue_checked
Definition: vm_core.h:936
ruby_thread_has_gvl_p
int ruby_thread_has_gvl_p(void)
Definition: thread.c:1705
RAISED_EXCEPTION
@ RAISED_EXCEPTION
Definition: eval_intern.h:254
rb_ec_set_raised
int rb_ec_set_raised(rb_execution_context_t *ec)
Definition: thread.c:2344
rb_execution_context_struct::stack_end
VALUE * stack_end
Definition: vm_core.h:888
rb_eSystemExit
VALUE rb_eSystemExit
Definition: error.c:915
rb_fd_ptr
#define rb_fd_ptr(f)
Definition: intern.h:411
rb_thread_current
VALUE rb_thread_current(void)
Definition: thread.c:2676
rb_thread_id_string_t
char rb_thread_id_string_t[sizeof(rb_nativethread_id_t) *2+3]
Definition: vm_core.h:839
rb_threadptr_pending_interrupt_enque
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
Definition: thread.c:1757
rb_vm_struct::waitpid_lock
rb_nativethread_lock_t waitpid_lock
Definition: vm_core.h:591
cfp
rb_control_frame_t * cfp
Definition: rb_mjit_min_header-2.7.0.h:14544
rb_class_inherited_p
VALUE rb_class_inherited_p(VALUE mod, VALUE arg)
Determines if mod inherits arg.
Definition: object.c:1574
rb_threadptr_root_fiber_terminate
void rb_threadptr_root_fiber_terminate(rb_thread_t *th)
Definition: cont.c:1898
rb_blocking_region_buffer
Definition: thread.c:152
exec_recursive_params::func
VALUE(* func)(VALUE, VALUE, int)
Definition: thread.c:4986
name
const char * name
Definition: nkf.c:208
rb_execution_context_struct::root_lep
const VALUE * root_lep
Definition: vm_core.h:865
rb_execution_context_struct
Definition: vm_core.h:843
eTerminateSignal
#define eTerminateSignal
Definition: thread.c:133
rb_block_proc
VALUE rb_block_proc(void)
Definition: proc.c:837