/*
 * Ruby 2.7.1p83 (2020-03-31 revision a0c7c23c9cec0d0ffcba012279cd652d28ad5bf3)
 * thread_sync.c — Mutex, Queue, SizedQueue and ConditionVariable primitives.
 */
1 /* included by thread.c */
2 #include "ccan/list/list.h"
3 
4 static VALUE rb_cMutex, rb_cQueue, rb_cSizedQueue, rb_cConditionVariable;
5 static VALUE rb_eClosedQueueError;
6 
7 /* sync_waiter is always on-stack */
8 struct sync_waiter {
10  struct list_node node;
11 };
12 
13 #define MUTEX_ALLOW_TRAP FL_USER1
14 
15 static void
16 sync_wakeup(struct list_head *head, long max)
17 {
18  struct sync_waiter *cur = 0, *next;
19 
20  list_for_each_safe(head, cur, next, node) {
21  list_del_init(&cur->node);
22  if (cur->th->status != THREAD_KILLED) {
24  cur->th->status = THREAD_RUNNABLE;
25  if (--max == 0) return;
26  }
27  }
28 }
29 
30 static void
31 wakeup_one(struct list_head *head)
32 {
33  sync_wakeup(head, 1);
34 }
35 
36 static void
37 wakeup_all(struct list_head *head)
38 {
39  sync_wakeup(head, LONG_MAX);
40 }
41 
42 /* Mutex */
43 
44 typedef struct rb_mutex_struct {
47  struct list_head waitq; /* protected by GVL */
48 } rb_mutex_t;
49 
50 #if defined(HAVE_WORKING_FORK)
51 static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
52 static void rb_mutex_abandon_keeping_mutexes(rb_thread_t *th);
53 static void rb_mutex_abandon_locking_mutex(rb_thread_t *th);
54 #endif
55 static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th);
56 
57 /*
58  * Document-class: Mutex
59  *
60  * Mutex implements a simple semaphore that can be used to coordinate access to
61  * shared data from multiple concurrent threads.
62  *
63  * Example:
64  *
65  * semaphore = Mutex.new
66  *
67  * a = Thread.new {
68  * semaphore.synchronize {
69  * # access shared resource
70  * }
71  * }
72  *
73  * b = Thread.new {
74  * semaphore.synchronize {
75  * # access shared resource
76  * }
77  * }
78  *
79  */
80 
81 #define mutex_mark NULL
82 
83 static size_t
84 rb_mutex_num_waiting(rb_mutex_t *mutex)
85 {
86  struct sync_waiter *w = 0;
87  size_t n = 0;
88 
89  list_for_each(&mutex->waitq, w, node) {
90  n++;
91  }
92 
93  return n;
94 }
95 
96 static void
97 mutex_free(void *ptr)
98 {
99  rb_mutex_t *mutex = ptr;
100  if (mutex->th) {
101  /* rb_warn("free locked mutex"); */
102  const char *err = rb_mutex_unlock_th(mutex, mutex->th);
103  if (err) rb_bug("%s", err);
104  }
105  ruby_xfree(ptr);
106 }
107 
108 static size_t
109 mutex_memsize(const void *ptr)
110 {
111  return sizeof(rb_mutex_t);
112 }
113 
114 static const rb_data_type_t mutex_data_type = {
115  "mutex",
116  {mutex_mark, mutex_free, mutex_memsize,},
118 };
119 
120 static rb_mutex_t *
121 mutex_ptr(VALUE obj)
122 {
123  rb_mutex_t *mutex;
124 
125  TypedData_Get_Struct(obj, rb_mutex_t, &mutex_data_type, mutex);
126 
127  return mutex;
128 }
129 
130 VALUE
132 {
133  if (rb_typeddata_is_kind_of(obj, &mutex_data_type)) {
134  return Qtrue;
135  }
136  else {
137  return Qfalse;
138  }
139 }
140 
141 static VALUE
142 mutex_alloc(VALUE klass)
143 {
144  VALUE obj;
145  rb_mutex_t *mutex;
146 
147  obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
148  list_head_init(&mutex->waitq);
149  return obj;
150 }
151 
152 /*
153  * call-seq:
154  * Mutex.new -> mutex
155  *
156  * Creates a new Mutex
157  */
158 static VALUE
159 mutex_initialize(VALUE self)
160 {
161  return self;
162 }
163 
164 VALUE
166 {
167  return mutex_alloc(rb_cMutex);
168 }
169 
170 /*
171  * call-seq:
172  * mutex.locked? -> true or false
173  *
174  * Returns +true+ if this lock is currently held by some thread.
175  */
176 VALUE
178 {
179  rb_mutex_t *mutex = mutex_ptr(self);
180 
181  return mutex->th ? Qtrue : Qfalse;
182 }
183 
184 static void
185 mutex_locked(rb_thread_t *th, VALUE self)
186 {
187  rb_mutex_t *mutex = mutex_ptr(self);
188 
189  if (th->keeping_mutexes) {
190  mutex->next_mutex = th->keeping_mutexes;
191  }
192  th->keeping_mutexes = mutex;
193 }
194 
195 /*
196  * call-seq:
197  * mutex.try_lock -> true or false
198  *
199  * Attempts to obtain the lock and returns immediately. Returns +true+ if the
200  * lock was granted.
201  */
202 VALUE
204 {
205  rb_mutex_t *mutex = mutex_ptr(self);
206  VALUE locked = Qfalse;
207 
208  if (mutex->th == 0) {
210  mutex->th = th;
211  locked = Qtrue;
212 
213  mutex_locked(th, self);
214  }
215 
216  return locked;
217 }
218 
219 /*
220  * At maximum, only one thread can use cond_timedwait and watch deadlock
221  * periodically. Multiple polling thread (i.e. concurrent deadlock check)
222  * introduces new race conditions. [Bug #6278] [ruby-core:44275]
223  */
224 static const rb_thread_t *patrol_thread = NULL;
225 
226 static VALUE
227 mutex_owned_p(rb_thread_t *th, rb_mutex_t *mutex)
228 {
229  if (mutex->th == th) {
230  return Qtrue;
231  }
232  else {
233  return Qfalse;
234  }
235 }
236 
237 static VALUE
238 do_mutex_lock(VALUE self, int interruptible_p)
239 {
241  rb_mutex_t *mutex = mutex_ptr(self);
242 
243  /* When running trap handler */
244  if (!FL_TEST_RAW(self, MUTEX_ALLOW_TRAP) &&
246  rb_raise(rb_eThreadError, "can't be called from trap context");
247  }
248 
249  if (rb_mutex_trylock(self) == Qfalse) {
250  struct sync_waiter w;
251 
252  if (mutex->th == th) {
253  rb_raise(rb_eThreadError, "deadlock; recursive locking");
254  }
255 
256  w.th = th;
257 
258  while (mutex->th != th) {
259  enum rb_thread_status prev_status = th->status;
260  rb_hrtime_t *timeout = 0;
261  rb_hrtime_t rel = rb_msec2hrtime(100);
262 
264  th->locking_mutex = self;
265  th->vm->sleeper++;
266  /*
267  * Carefully! while some contended threads are in native_sleep(),
268  * vm->sleeper is unstable value. we have to avoid both deadlock
269  * and busy loop.
270  */
271  if ((vm_living_thread_num(th->vm) == th->vm->sleeper) &&
272  !patrol_thread) {
273  timeout = &rel;
274  patrol_thread = th;
275  }
276 
277  list_add_tail(&mutex->waitq, &w.node);
278  native_sleep(th, timeout); /* release GVL */
279  list_del(&w.node);
280 
281  if (!mutex->th) {
282  mutex->th = th;
283  }
284 
285  if (patrol_thread == th)
286  patrol_thread = NULL;
287 
289  if (mutex->th && timeout && !RUBY_VM_INTERRUPTED(th->ec)) {
290  rb_check_deadlock(th->vm);
291  }
292  if (th->status == THREAD_STOPPED_FOREVER) {
293  th->status = prev_status;
294  }
295  th->vm->sleeper--;
296 
297  if (interruptible_p) {
298  /* release mutex before checking for interrupts...as interrupt checking
299  * code might call rb_raise() */
300  if (mutex->th == th) mutex->th = 0;
301  RUBY_VM_CHECK_INTS_BLOCKING(th->ec); /* may release mutex */
302  if (!mutex->th) {
303  mutex->th = th;
304  mutex_locked(th, self);
305  }
306  }
307  else {
308  if (mutex->th == th) mutex_locked(th, self);
309  }
310  }
311  }
312 
313  // assertion
314  if (mutex_owned_p(th, mutex) == Qfalse) rb_bug("do_mutex_lock: mutex is not owned.");
315 
316  return self;
317 }
318 
319 static VALUE
320 mutex_lock_uninterruptible(VALUE self)
321 {
322  return do_mutex_lock(self, 0);
323 }
324 
325 /*
326  * call-seq:
327  * mutex.lock -> self
328  *
329  * Attempts to grab the lock and waits if it isn't available.
330  * Raises +ThreadError+ if +mutex+ was locked by the current thread.
331  */
332 VALUE
334 {
335  return do_mutex_lock(self, 1);
336 }
337 
338 /*
339  * call-seq:
340  * mutex.owned? -> true or false
341  *
342  * Returns +true+ if this lock is currently held by current thread.
343  */
344 VALUE
346 {
348  rb_mutex_t *mutex = mutex_ptr(self);
349 
350  return mutex_owned_p(th, mutex);
351 }
352 
353 static const char *
354 rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th)
355 {
356  const char *err = NULL;
357 
358  if (mutex->th == 0) {
359  err = "Attempt to unlock a mutex which is not locked";
360  }
361  else if (mutex->th != th) {
362  err = "Attempt to unlock a mutex which is locked by another thread";
363  }
364  else {
365  struct sync_waiter *cur = 0, *next;
366  rb_mutex_t **th_mutex = &th->keeping_mutexes;
367 
368  mutex->th = 0;
369  list_for_each_safe(&mutex->waitq, cur, next, node) {
370  list_del_init(&cur->node);
371  switch (cur->th->status) {
372  case THREAD_RUNNABLE: /* from someone else calling Thread#run */
373  case THREAD_STOPPED_FOREVER: /* likely (rb_mutex_lock) */
375  goto found;
376  case THREAD_STOPPED: /* probably impossible */
377  rb_bug("unexpected THREAD_STOPPED");
378  case THREAD_KILLED:
379  /* not sure about this, possible in exit GC? */
380  rb_bug("unexpected THREAD_KILLED");
381  continue;
382  }
383  }
384  found:
385  while (*th_mutex != mutex) {
386  th_mutex = &(*th_mutex)->next_mutex;
387  }
388  *th_mutex = mutex->next_mutex;
389  mutex->next_mutex = NULL;
390  }
391 
392  return err;
393 }
394 
395 /*
396  * call-seq:
397  * mutex.unlock -> self
398  *
399  * Releases the lock.
400  * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
401  */
402 VALUE
404 {
405  const char *err;
406  rb_mutex_t *mutex = mutex_ptr(self);
407 
408  err = rb_mutex_unlock_th(mutex, GET_THREAD());
409  if (err) rb_raise(rb_eThreadError, "%s", err);
410 
411  return self;
412 }
413 
#if defined(HAVE_WORKING_FORK)
/* After fork: drop every mutex the forking thread was holding. */
static void
rb_mutex_abandon_keeping_mutexes(rb_thread_t *th)
{
    rb_mutex_abandon_all(th->keeping_mutexes);
    th->keeping_mutexes = NULL;
}

/* After fork: forget the mutex +th+ was blocked on and its (stale) waiters. */
static void
rb_mutex_abandon_locking_mutex(rb_thread_t *th)
{
    if (th->locking_mutex) {
        rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);

        list_head_init(&mutex->waitq);
        th->locking_mutex = Qfalse;
    }
}

/* Reset every mutex in the keeping_mutexes chain to the unlocked state. */
static void
rb_mutex_abandon_all(rb_mutex_t *mutexes)
{
    rb_mutex_t *mutex;

    while (mutexes) {
        mutex = mutexes;
        mutexes = mutex->next_mutex;
        mutex->th = 0;
        mutex->next_mutex = 0;
        list_head_init(&mutex->waitq);
    }
}
#endif
447 
448 static VALUE
449 rb_mutex_sleep_forever(VALUE time)
450 {
451  rb_thread_sleep_deadly_allow_spurious_wakeup();
452  return Qnil;
453 }
454 
455 static VALUE
456 rb_mutex_wait_for(VALUE time)
457 {
458  rb_hrtime_t *rel = (rb_hrtime_t *)time;
459  /* permit spurious check */
460  sleep_hrtime(GET_THREAD(), *rel, 0);
461  return Qnil;
462 }
463 
464 VALUE
465 rb_mutex_sleep(VALUE self, VALUE timeout)
466 {
467  time_t beg, end;
468  struct timeval t;
469 
470  if (!NIL_P(timeout)) {
471  t = rb_time_interval(timeout);
472  }
473 
474  rb_mutex_unlock(self);
475  beg = time(0);
476  if (NIL_P(timeout)) {
477  rb_ensure(rb_mutex_sleep_forever, Qnil, mutex_lock_uninterruptible, self);
478  }
479  else {
480  rb_hrtime_t rel = rb_timeval2hrtime(&t);
481 
482  rb_ensure(rb_mutex_wait_for, (VALUE)&rel,
483  mutex_lock_uninterruptible, self);
484  }
486  end = time(0) - beg;
487  return INT2FIX(end);
488 }
489 
490 /*
491  * call-seq:
492  * mutex.sleep(timeout = nil) -> number
493  *
494  * Releases the lock and sleeps +timeout+ seconds if it is given and
495  * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
496  * the current thread.
497  *
498  * When the thread is next woken up, it will attempt to reacquire
499  * the lock.
500  *
501  * Note that this method can wakeup without explicit Thread#wakeup call.
502  * For example, receiving signal and so on.
503  */
504 static VALUE
505 mutex_sleep(int argc, VALUE *argv, VALUE self)
506 {
507  VALUE timeout;
508 
509  timeout = rb_check_arity(argc, 0, 1) ? argv[0] : Qnil;
510  return rb_mutex_sleep(self, timeout);
511 }
512 
513 /*
514  * call-seq:
515  * mutex.synchronize { ... } -> result of the block
516  *
517  * Obtains a lock, runs the block, and releases the lock when the block
518  * completes. See the example under +Mutex+.
519  */
520 
521 VALUE
523 {
524  rb_mutex_lock(mutex);
525  return rb_ensure(func, arg, rb_mutex_unlock, mutex);
526 }
527 
528 /*
529  * call-seq:
530  * mutex.synchronize { ... } -> result of the block
531  *
532  * Obtains a lock, runs the block, and releases the lock when the block
533  * completes. See the example under +Mutex+.
534  */
535 static VALUE
536 rb_mutex_synchronize_m(VALUE self)
537 {
538  if (!rb_block_given_p()) {
539  rb_raise(rb_eThreadError, "must be called with a block");
540  }
541 
542  return rb_mutex_synchronize(self, rb_yield, Qundef);
543 }
544 
545 void rb_mutex_allow_trap(VALUE self, int val)
546 {
547  Check_TypedStruct(self, &mutex_data_type);
548 
549  if (val)
551  else
553 }
554 
555 /* Queue */
556 
557 #define queue_waitq(q) UNALIGNED_MEMBER_PTR(q, waitq)
558 PACKED_STRUCT_UNALIGNED(struct rb_queue {
559  struct list_head waitq;
560  rb_serial_t fork_gen;
561  const VALUE que;
562  int num_waiting;
563 });
564 
565 #define szqueue_waitq(sq) UNALIGNED_MEMBER_PTR(sq, q.waitq)
566 #define szqueue_pushq(sq) UNALIGNED_MEMBER_PTR(sq, pushq)
567 PACKED_STRUCT_UNALIGNED(struct rb_szqueue {
568  struct rb_queue q;
569  int num_waiting_push;
570  struct list_head pushq;
571  long max;
572 });
573 
574 static void
575 queue_mark(void *ptr)
576 {
577  struct rb_queue *q = ptr;
578 
579  /* no need to mark threads in waitq, they are on stack */
580  rb_gc_mark(q->que);
581 }
582 
583 static size_t
584 queue_memsize(const void *ptr)
585 {
586  return sizeof(struct rb_queue);
587 }
588 
589 static const rb_data_type_t queue_data_type = {
590  "queue",
591  {queue_mark, RUBY_TYPED_DEFAULT_FREE, queue_memsize,},
593 };
594 
595 static VALUE
596 queue_alloc(VALUE klass)
597 {
598  VALUE obj;
599  struct rb_queue *q;
600 
601  obj = TypedData_Make_Struct(klass, struct rb_queue, &queue_data_type, q);
602  list_head_init(queue_waitq(q));
603  return obj;
604 }
605 
606 static int
607 queue_fork_check(struct rb_queue *q)
608 {
609  rb_serial_t fork_gen = GET_VM()->fork_gen;
610 
611  if (q->fork_gen == fork_gen) {
612  return 0;
613  }
614  /* forked children can't reach into parent thread stacks */
615  q->fork_gen = fork_gen;
616  list_head_init(queue_waitq(q));
617  q->num_waiting = 0;
618  return 1;
619 }
620 
621 static struct rb_queue *
622 queue_ptr(VALUE obj)
623 {
624  struct rb_queue *q;
625 
626  TypedData_Get_Struct(obj, struct rb_queue, &queue_data_type, q);
627  queue_fork_check(q);
628 
629  return q;
630 }
631 
632 #define QUEUE_CLOSED FL_USER5
633 
634 static void
635 szqueue_mark(void *ptr)
636 {
637  struct rb_szqueue *sq = ptr;
638 
639  queue_mark(&sq->q);
640 }
641 
642 static size_t
643 szqueue_memsize(const void *ptr)
644 {
645  return sizeof(struct rb_szqueue);
646 }
647 
648 static const rb_data_type_t szqueue_data_type = {
649  "sized_queue",
650  {szqueue_mark, RUBY_TYPED_DEFAULT_FREE, szqueue_memsize,},
652 };
653 
654 static VALUE
655 szqueue_alloc(VALUE klass)
656 {
657  struct rb_szqueue *sq;
658  VALUE obj = TypedData_Make_Struct(klass, struct rb_szqueue,
659  &szqueue_data_type, sq);
660  list_head_init(szqueue_waitq(sq));
661  list_head_init(szqueue_pushq(sq));
662  return obj;
663 }
664 
665 static struct rb_szqueue *
666 szqueue_ptr(VALUE obj)
667 {
668  struct rb_szqueue *sq;
669 
670  TypedData_Get_Struct(obj, struct rb_szqueue, &szqueue_data_type, sq);
671  if (queue_fork_check(&sq->q)) {
672  list_head_init(szqueue_pushq(sq));
673  sq->num_waiting_push = 0;
674  }
675 
676  return sq;
677 }
678 
679 static VALUE
680 ary_buf_new(void)
681 {
682  return rb_ary_tmp_new(1);
683 }
684 
685 static VALUE
686 check_array(VALUE obj, VALUE ary)
687 {
688  if (!RB_TYPE_P(ary, T_ARRAY)) {
689  rb_raise(rb_eTypeError, "%+"PRIsVALUE" not initialized", obj);
690  }
691  return ary;
692 }
693 
694 static long
695 queue_length(VALUE self, struct rb_queue *q)
696 {
697  return RARRAY_LEN(check_array(self, q->que));
698 }
699 
700 static int
701 queue_closed_p(VALUE self)
702 {
703  return FL_TEST_RAW(self, QUEUE_CLOSED) != 0;
704 }
705 
706 /*
707  * Document-class: ClosedQueueError
708  *
709  * The exception class which will be raised when pushing into a closed
710  * Queue. See Queue#close and SizedQueue#close.
711  */
712 
713 NORETURN(static void raise_closed_queue_error(VALUE self));
714 
715 static void
716 raise_closed_queue_error(VALUE self)
717 {
718  rb_raise(rb_eClosedQueueError, "queue closed");
719 }
720 
721 static VALUE
722 queue_closed_result(VALUE self, struct rb_queue *q)
723 {
724  assert(queue_length(self, q) == 0);
725  return Qnil;
726 }
727 
728 /*
729  * Document-class: Queue
730  *
731  * The Queue class implements multi-producer, multi-consumer queues.
732  * It is especially useful in threaded programming when information
733  * must be exchanged safely between multiple threads. The Queue class
734  * implements all the required locking semantics.
735  *
736  * The class implements FIFO type of queue. In a FIFO queue, the first
737  * tasks added are the first retrieved.
738  *
739  * Example:
740  *
741  * queue = Queue.new
742  *
743  * producer = Thread.new do
744  * 5.times do |i|
745  * sleep rand(i) # simulate expense
746  * queue << i
747  * puts "#{i} produced"
748  * end
749  * end
750  *
751  * consumer = Thread.new do
752  * 5.times do |i|
753  * value = queue.pop
754  * sleep rand(i/2) # simulate expense
755  * puts "consumed #{value}"
756  * end
757  * end
758  *
759  * consumer.join
760  *
761  */
762 
763 /*
764  * Document-method: Queue::new
765  *
766  * Creates a new queue instance.
767  */
768 
769 static VALUE
770 rb_queue_initialize(VALUE self)
771 {
772  struct rb_queue *q = queue_ptr(self);
773  RB_OBJ_WRITE(self, &q->que, ary_buf_new());
774  list_head_init(queue_waitq(q));
775  return self;
776 }
777 
778 static VALUE
779 queue_do_push(VALUE self, struct rb_queue *q, VALUE obj)
780 {
781  if (queue_closed_p(self)) {
782  raise_closed_queue_error(self);
783  }
784  rb_ary_push(check_array(self, q->que), obj);
785  wakeup_one(queue_waitq(q));
786  return self;
787 }
788 
789 /*
790  * Document-method: Queue#close
791  * call-seq:
792  * close
793  *
794  * Closes the queue. A closed queue cannot be re-opened.
795  *
796  * After the call to close completes, the following are true:
797  *
798  * - +closed?+ will return true
799  *
800  * - +close+ will be ignored.
801  *
802  * - calling enq/push/<< will raise a +ClosedQueueError+.
803  *
804  * - when +empty?+ is false, calling deq/pop/shift will return an object
805  * from the queue as usual.
806  * - when +empty?+ is true, deq(false) will not suspend the thread and will return nil.
807  * deq(true) will raise a +ThreadError+.
808  *
809  * ClosedQueueError is inherited from StopIteration, so that you can break loop block.
810  *
811  * Example:
812  *
813  * q = Queue.new
814  * Thread.new{
815  * while e = q.deq # wait for nil to break loop
816  * # ...
817  * end
818  * }
819  * q.close
820  */
821 
822 static VALUE
823 rb_queue_close(VALUE self)
824 {
825  struct rb_queue *q = queue_ptr(self);
826 
827  if (!queue_closed_p(self)) {
828  FL_SET(self, QUEUE_CLOSED);
829 
830  wakeup_all(queue_waitq(q));
831  }
832 
833  return self;
834 }
835 
836 /*
837  * Document-method: Queue#closed?
838  * call-seq: closed?
839  *
840  * Returns +true+ if the queue is closed.
841  */
842 
843 static VALUE
844 rb_queue_closed_p(VALUE self)
845 {
846  return queue_closed_p(self) ? Qtrue : Qfalse;
847 }
848 
849 /*
850  * Document-method: Queue#push
851  * call-seq:
852  * push(object)
853  * enq(object)
854  * <<(object)
855  *
856  * Pushes the given +object+ to the queue.
857  */
858 
859 static VALUE
860 rb_queue_push(VALUE self, VALUE obj)
861 {
862  return queue_do_push(self, queue_ptr(self), obj);
863 }
864 
865 static VALUE
866 queue_sleep(VALUE arg)
867 {
868  rb_thread_sleep_deadly_allow_spurious_wakeup();
869  return Qnil;
870 }
871 
872 struct queue_waiter {
873  struct sync_waiter w;
874  union {
875  struct rb_queue *q;
876  struct rb_szqueue *sq;
877  } as;
878 };
879 
880 static VALUE
881 queue_sleep_done(VALUE p)
882 {
883  struct queue_waiter *qw = (struct queue_waiter *)p;
884 
885  list_del(&qw->w.node);
886  qw->as.q->num_waiting--;
887 
888  return Qfalse;
889 }
890 
891 static VALUE
892 szqueue_sleep_done(VALUE p)
893 {
894  struct queue_waiter *qw = (struct queue_waiter *)p;
895 
896  list_del(&qw->w.node);
897  qw->as.sq->num_waiting_push--;
898 
899  return Qfalse;
900 }
901 
902 static VALUE
903 queue_do_pop(VALUE self, struct rb_queue *q, int should_block)
904 {
905  check_array(self, q->que);
906 
907  while (RARRAY_LEN(q->que) == 0) {
908  if (!should_block) {
909  rb_raise(rb_eThreadError, "queue empty");
910  }
911  else if (queue_closed_p(self)) {
912  return queue_closed_result(self, q);
913  }
914  else {
915  struct queue_waiter qw;
916 
917  assert(RARRAY_LEN(q->que) == 0);
918  assert(queue_closed_p(self) == 0);
919 
920  qw.w.th = GET_THREAD();
921  qw.as.q = q;
922  list_add_tail(queue_waitq(qw.as.q), &qw.w.node);
923  qw.as.q->num_waiting++;
924 
925  rb_ensure(queue_sleep, self, queue_sleep_done, (VALUE)&qw);
926  }
927  }
928 
929  return rb_ary_shift(q->que);
930 }
931 
932 static int
933 queue_pop_should_block(int argc, const VALUE *argv)
934 {
935  int should_block = 1;
936  rb_check_arity(argc, 0, 1);
937  if (argc > 0) {
938  should_block = !RTEST(argv[0]);
939  }
940  return should_block;
941 }
942 
943 /*
944  * Document-method: Queue#pop
945  * call-seq:
946  * pop(non_block=false)
947  * deq(non_block=false)
948  * shift(non_block=false)
949  *
950  * Retrieves data from the queue.
951  *
952  * If the queue is empty, the calling thread is suspended until data is pushed
953  * onto the queue. If +non_block+ is true, the thread isn't suspended, and
954  * +ThreadError+ is raised.
955  */
956 
957 static VALUE
958 rb_queue_pop(int argc, VALUE *argv, VALUE self)
959 {
960  int should_block = queue_pop_should_block(argc, argv);
961  return queue_do_pop(self, queue_ptr(self), should_block);
962 }
963 
964 /*
965  * Document-method: Queue#empty?
966  * call-seq: empty?
967  *
968  * Returns +true+ if the queue is empty.
969  */
970 
971 static VALUE
972 rb_queue_empty_p(VALUE self)
973 {
974  return queue_length(self, queue_ptr(self)) == 0 ? Qtrue : Qfalse;
975 }
976 
977 /*
978  * Document-method: Queue#clear
979  *
980  * Removes all objects from the queue.
981  */
982 
983 static VALUE
984 rb_queue_clear(VALUE self)
985 {
986  struct rb_queue *q = queue_ptr(self);
987 
988  rb_ary_clear(check_array(self, q->que));
989  return self;
990 }
991 
992 /*
993  * Document-method: Queue#length
994  * call-seq:
995  * length
996  * size
997  *
998  * Returns the length of the queue.
999  */
1000 
1001 static VALUE
1002 rb_queue_length(VALUE self)
1003 {
1004  return LONG2NUM(queue_length(self, queue_ptr(self)));
1005 }
1006 
1007 /*
1008  * Document-method: Queue#num_waiting
1009  *
1010  * Returns the number of threads waiting on the queue.
1011  */
1012 
1013 static VALUE
1014 rb_queue_num_waiting(VALUE self)
1015 {
1016  struct rb_queue *q = queue_ptr(self);
1017 
1018  return INT2NUM(q->num_waiting);
1019 }
1020 
1021 /*
1022  * Document-class: SizedQueue
1023  *
1024  * This class represents queues of specified size capacity. The push operation
1025  * may be blocked if the capacity is full.
1026  *
1027  * See Queue for an example of how a SizedQueue works.
1028  */
1029 
1030 /*
1031  * Document-method: SizedQueue::new
1032  * call-seq: new(max)
1033  *
1034  * Creates a fixed-length queue with a maximum size of +max+.
1035  */
1036 
1037 static VALUE
1038 rb_szqueue_initialize(VALUE self, VALUE vmax)
1039 {
1040  long max;
1041  struct rb_szqueue *sq = szqueue_ptr(self);
1042 
1043  max = NUM2LONG(vmax);
1044  if (max <= 0) {
1045  rb_raise(rb_eArgError, "queue size must be positive");
1046  }
1047 
1048  RB_OBJ_WRITE(self, &sq->q.que, ary_buf_new());
1049  list_head_init(szqueue_waitq(sq));
1050  list_head_init(szqueue_pushq(sq));
1051  sq->max = max;
1052 
1053  return self;
1054 }
1055 
1056 /*
1057  * Document-method: SizedQueue#close
1058  * call-seq:
1059  * close
1060  *
1061  * Similar to Queue#close.
1062  *
1063  * The difference is behavior with waiting enqueuing threads.
1064  *
1065  * If there are waiting enqueuing threads, they are interrupted by
1066  * raising ClosedQueueError('queue closed').
1067  */
1068 static VALUE
1069 rb_szqueue_close(VALUE self)
1070 {
1071  if (!queue_closed_p(self)) {
1072  struct rb_szqueue *sq = szqueue_ptr(self);
1073 
1074  FL_SET(self, QUEUE_CLOSED);
1075  wakeup_all(szqueue_waitq(sq));
1076  wakeup_all(szqueue_pushq(sq));
1077  }
1078  return self;
1079 }
1080 
1081 /*
1082  * Document-method: SizedQueue#max
1083  *
1084  * Returns the maximum size of the queue.
1085  */
1086 
1087 static VALUE
1088 rb_szqueue_max_get(VALUE self)
1089 {
1090  return LONG2NUM(szqueue_ptr(self)->max);
1091 }
1092 
1093 /*
1094  * Document-method: SizedQueue#max=
1095  * call-seq: max=(number)
1096  *
1097  * Sets the maximum size of the queue to the given +number+.
1098  */
1099 
1100 static VALUE
1101 rb_szqueue_max_set(VALUE self, VALUE vmax)
1102 {
1103  long max = NUM2LONG(vmax);
1104  long diff = 0;
1105  struct rb_szqueue *sq = szqueue_ptr(self);
1106 
1107  if (max <= 0) {
1108  rb_raise(rb_eArgError, "queue size must be positive");
1109  }
1110  if (max > sq->max) {
1111  diff = max - sq->max;
1112  }
1113  sq->max = max;
1114  sync_wakeup(szqueue_pushq(sq), diff);
1115  return vmax;
1116 }
1117 
1118 static int
1119 szqueue_push_should_block(int argc, const VALUE *argv)
1120 {
1121  int should_block = 1;
1122  rb_check_arity(argc, 1, 2);
1123  if (argc > 1) {
1124  should_block = !RTEST(argv[1]);
1125  }
1126  return should_block;
1127 }
1128 
1129 /*
1130  * Document-method: SizedQueue#push
1131  * call-seq:
1132  * push(object, non_block=false)
1133  * enq(object, non_block=false)
1134  * <<(object)
1135  *
1136  * Pushes +object+ to the queue.
1137  *
1138  * If there is no space left in the queue, waits until space becomes
1139  * available, unless +non_block+ is true. If +non_block+ is true, the
1140  * thread isn't suspended, and +ThreadError+ is raised.
1141  */
1142 
1143 static VALUE
1144 rb_szqueue_push(int argc, VALUE *argv, VALUE self)
1145 {
1146  struct rb_szqueue *sq = szqueue_ptr(self);
1147  int should_block = szqueue_push_should_block(argc, argv);
1148 
1149  while (queue_length(self, &sq->q) >= sq->max) {
1150  if (!should_block) {
1151  rb_raise(rb_eThreadError, "queue full");
1152  }
1153  else if (queue_closed_p(self)) {
1154  goto closed;
1155  }
1156  else {
1157  struct queue_waiter qw;
1158  struct list_head *pushq = szqueue_pushq(sq);
1159 
1160  qw.w.th = GET_THREAD();
1161  qw.as.sq = sq;
1162  list_add_tail(pushq, &qw.w.node);
1163  sq->num_waiting_push++;
1164 
1165  rb_ensure(queue_sleep, self, szqueue_sleep_done, (VALUE)&qw);
1166  }
1167  }
1168 
1169  if (queue_closed_p(self)) {
1170  closed:
1171  raise_closed_queue_error(self);
1172  }
1173 
1174  return queue_do_push(self, &sq->q, argv[0]);
1175 }
1176 
1177 static VALUE
1178 szqueue_do_pop(VALUE self, int should_block)
1179 {
1180  struct rb_szqueue *sq = szqueue_ptr(self);
1181  VALUE retval = queue_do_pop(self, &sq->q, should_block);
1182 
1183  if (queue_length(self, &sq->q) < sq->max) {
1184  wakeup_one(szqueue_pushq(sq));
1185  }
1186 
1187  return retval;
1188 }
1189 
1190 /*
1191  * Document-method: SizedQueue#pop
1192  * call-seq:
1193  * pop(non_block=false)
1194  * deq(non_block=false)
1195  * shift(non_block=false)
1196  *
1197  * Retrieves data from the queue.
1198  *
1199  * If the queue is empty, the calling thread is suspended until data is pushed
1200  * onto the queue. If +non_block+ is true, the thread isn't suspended, and
1201  * +ThreadError+ is raised.
1202  */
1203 
1204 static VALUE
1205 rb_szqueue_pop(int argc, VALUE *argv, VALUE self)
1206 {
1207  int should_block = queue_pop_should_block(argc, argv);
1208  return szqueue_do_pop(self, should_block);
1209 }
1210 
1211 /*
1212  * Document-method: SizedQueue#clear
1213  *
1214  * Removes all objects from the queue.
1215  */
1216 
1217 static VALUE
1218 rb_szqueue_clear(VALUE self)
1219 {
1220  struct rb_szqueue *sq = szqueue_ptr(self);
1221 
1222  rb_ary_clear(check_array(self, sq->q.que));
1223  wakeup_all(szqueue_pushq(sq));
1224  return self;
1225 }
1226 
1227 /*
1228  * Document-method: SizedQueue#length
1229  * call-seq:
1230  * length
1231  * size
1232  *
1233  * Returns the length of the queue.
1234  */
1235 
1236 static VALUE
1237 rb_szqueue_length(VALUE self)
1238 {
1239  struct rb_szqueue *sq = szqueue_ptr(self);
1240 
1241  return LONG2NUM(queue_length(self, &sq->q));
1242 }
1243 
1244 /*
1245  * Document-method: SizedQueue#num_waiting
1246  *
1247  * Returns the number of threads waiting on the queue.
1248  */
1249 
1250 static VALUE
1251 rb_szqueue_num_waiting(VALUE self)
1252 {
1253  struct rb_szqueue *sq = szqueue_ptr(self);
1254 
1255  return INT2NUM(sq->q.num_waiting + sq->num_waiting_push);
1256 }
1257 
1258 /*
1259  * Document-method: SizedQueue#empty?
1260  * call-seq: empty?
1261  *
1262  * Returns +true+ if the queue is empty.
1263  */
1264 
1265 static VALUE
1266 rb_szqueue_empty_p(VALUE self)
1267 {
1268  struct rb_szqueue *sq = szqueue_ptr(self);
1269 
1270  return queue_length(self, &sq->q) == 0 ? Qtrue : Qfalse;
1271 }
1272 
1273 
1274 /* ConditionalVariable */
1275 struct rb_condvar {
1278 };
1279 
1280 /*
1281  * Document-class: ConditionVariable
1282  *
1283  * ConditionVariable objects augment class Mutex. Using condition variables,
1284  * it is possible to suspend while in the middle of a critical section until a
1285  * resource becomes available.
1286  *
1287  * Example:
1288  *
1289  * mutex = Mutex.new
1290  * resource = ConditionVariable.new
1291  *
1292  * a = Thread.new {
1293  * mutex.synchronize {
1294  * # Thread 'a' now needs the resource
1295  * resource.wait(mutex)
1296  * # 'a' can now have the resource
1297  * }
1298  * }
1299  *
1300  * b = Thread.new {
1301  * mutex.synchronize {
1302  * # Thread 'b' has finished using the resource
1303  * resource.signal
1304  * }
1305  * }
1306  */
1307 
1308 static size_t
1309 condvar_memsize(const void *ptr)
1310 {
1311  return sizeof(struct rb_condvar);
1312 }
1313 
1314 static const rb_data_type_t cv_data_type = {
1315  "condvar",
1316  {0, RUBY_TYPED_DEFAULT_FREE, condvar_memsize,},
1318 };
1319 
1320 static struct rb_condvar *
1321 condvar_ptr(VALUE self)
1322 {
1323  struct rb_condvar *cv;
1324  rb_serial_t fork_gen = GET_VM()->fork_gen;
1325 
1326  TypedData_Get_Struct(self, struct rb_condvar, &cv_data_type, cv);
1327 
1328  /* forked children can't reach into parent thread stacks */
1329  if (cv->fork_gen != fork_gen) {
1330  cv->fork_gen = fork_gen;
1331  list_head_init(&cv->waitq);
1332  }
1333 
1334  return cv;
1335 }
1336 
1337 static VALUE
1338 condvar_alloc(VALUE klass)
1339 {
1340  struct rb_condvar *cv;
1341  VALUE obj;
1342 
1343  obj = TypedData_Make_Struct(klass, struct rb_condvar, &cv_data_type, cv);
1344  list_head_init(&cv->waitq);
1345 
1346  return obj;
1347 }
1348 
1349 /*
1350  * Document-method: ConditionVariable::new
1351  *
1352  * Creates a new condition variable instance.
1353  */
1354 
1355 static VALUE
1356 rb_condvar_initialize(VALUE self)
1357 {
1358  struct rb_condvar *cv = condvar_ptr(self);
1359  list_head_init(&cv->waitq);
1360  return self;
1361 }
1362 
1363 struct sleep_call {
1366 };
1367 
1368 static ID id_sleep;
1369 
1370 static VALUE
1371 do_sleep(VALUE args)
1372 {
1373  struct sleep_call *p = (struct sleep_call *)args;
1374  return rb_funcallv(p->mutex, id_sleep, 1, &p->timeout);
1375 }
1376 
1377 static VALUE
1378 delete_from_waitq(VALUE v)
1379 {
1380  struct sync_waiter *w = (void *)v;
1381  list_del(&w->node);
1382 
1383  return Qnil;
1384 }
1385 
1386 /*
1387  * Document-method: ConditionVariable#wait
1388  * call-seq: wait(mutex, timeout=nil)
1389  *
1390  * Releases the lock held in +mutex+ and waits; reacquires the lock on wakeup.
1391  *
1392  * If +timeout+ is given, this method returns after +timeout+ seconds passed,
1393  * even if no other thread doesn't signal.
1394  */
1395 
1396 static VALUE
1397 rb_condvar_wait(int argc, VALUE *argv, VALUE self)
1398 {
1399  struct rb_condvar *cv = condvar_ptr(self);
1400  struct sleep_call args;
1401  struct sync_waiter w;
1402 
1403  rb_scan_args(argc, argv, "11", &args.mutex, &args.timeout);
1404 
1405  w.th = GET_THREAD();
1406  list_add_tail(&cv->waitq, &w.node);
1407  rb_ensure(do_sleep, (VALUE)&args, delete_from_waitq, (VALUE)&w);
1408 
1409  return self;
1410 }
1411 
1412 /*
1413  * Document-method: ConditionVariable#signal
1414  *
1415  * Wakes up the first thread in line waiting for this lock.
1416  */
1417 
1418 static VALUE
1419 rb_condvar_signal(VALUE self)
1420 {
1421  struct rb_condvar *cv = condvar_ptr(self);
1422  wakeup_one(&cv->waitq);
1423  return self;
1424 }
1425 
1426 /*
1427  * Document-method: ConditionVariable#broadcast
1428  *
1429  * Wakes up all threads waiting for this lock.
1430  */
1431 
1432 static VALUE
1433 rb_condvar_broadcast(VALUE self)
1434 {
1435  struct rb_condvar *cv = condvar_ptr(self);
1436  wakeup_all(&cv->waitq);
1437  return self;
1438 }
1439 
1440 /* :nodoc: */
1441 static VALUE
1442 undumpable(VALUE obj)
1443 {
1444  rb_raise(rb_eTypeError, "can't dump %"PRIsVALUE, rb_obj_class(obj));
1446 }
1447 
1448 static VALUE
1449 define_thread_class(VALUE outer, const char *name, VALUE super)
1450 {
1451  VALUE klass = rb_define_class_under(outer, name, super);
1453  return klass;
1454 }
1455 
/*
 * Registers Mutex, Queue, SizedQueue, and ConditionVariable (under
 * Thread, with top-level aliases via define_thread_class) and wires up
 * all of their methods.  Run once at VM boot.
 */
static void
Init_thread_sync(void)
{
#undef rb_intern
#if 0
    /* Dead code kept so rdoc sees these as top-level classes. */
    rb_cMutex = rb_define_class("Mutex", rb_cObject); /* teach rdoc Mutex */
    rb_cConditionVariable = rb_define_class("ConditionVariable", rb_cObject); /* teach rdoc ConditionVariable */
    rb_cQueue = rb_define_class("Queue", rb_cObject); /* teach rdoc Queue */
    rb_cSizedQueue = rb_define_class("SizedQueue", rb_cObject); /* teach rdoc SizedQueue */
#endif

#define DEFINE_CLASS(name, super) \
    rb_c##name = define_thread_class(rb_cThread, #name, rb_c##super)

    /* Mutex */
    DEFINE_CLASS(Mutex, Object);
    rb_define_alloc_func(rb_cMutex, mutex_alloc);
    rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
    rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
    rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
    rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
    rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
    rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
    rb_define_method(rb_cMutex, "synchronize", rb_mutex_synchronize_m, 0);
    rb_define_method(rb_cMutex, "owned?", rb_mutex_owned_p, 0);

    /* Queue */
    DEFINE_CLASS(Queue, Object);
    rb_define_alloc_func(rb_cQueue, queue_alloc);

    /* Raised by push/pop on a closed queue; subclasses StopIteration so
     * bare `loop { q.pop }` terminates cleanly on close. */
    rb_eClosedQueueError = rb_define_class("ClosedQueueError", rb_eStopIteration);

    rb_define_method(rb_cQueue, "initialize", rb_queue_initialize, 0);
    rb_undef_method(rb_cQueue, "initialize_copy");
    /* marshal_dump is mapped to undumpable: queues reference live threads. */
    rb_define_method(rb_cQueue, "marshal_dump", undumpable, 0);
    rb_define_method(rb_cQueue, "close", rb_queue_close, 0);
    rb_define_method(rb_cQueue, "closed?", rb_queue_closed_p, 0);
    rb_define_method(rb_cQueue, "push", rb_queue_push, 1);
    rb_define_method(rb_cQueue, "pop", rb_queue_pop, -1);
    rb_define_method(rb_cQueue, "empty?", rb_queue_empty_p, 0);
    rb_define_method(rb_cQueue, "clear", rb_queue_clear, 0);
    rb_define_method(rb_cQueue, "length", rb_queue_length, 0);
    rb_define_method(rb_cQueue, "num_waiting", rb_queue_num_waiting, 0);

    rb_define_alias(rb_cQueue, "enq", "push");
    rb_define_alias(rb_cQueue, "<<", "push");
    rb_define_alias(rb_cQueue, "deq", "pop");
    rb_define_alias(rb_cQueue, "shift", "pop");
    rb_define_alias(rb_cQueue, "size", "length");

    DEFINE_CLASS(SizedQueue, Queue);
    rb_define_alloc_func(rb_cSizedQueue, szqueue_alloc);

    rb_define_method(rb_cSizedQueue, "initialize", rb_szqueue_initialize, 1);
    rb_define_method(rb_cSizedQueue, "close", rb_szqueue_close, 0);
    rb_define_method(rb_cSizedQueue, "max", rb_szqueue_max_get, 0);
    rb_define_method(rb_cSizedQueue, "max=", rb_szqueue_max_set, 1);
    rb_define_method(rb_cSizedQueue, "push", rb_szqueue_push, -1);
    rb_define_method(rb_cSizedQueue, "pop", rb_szqueue_pop, -1);
    rb_define_method(rb_cSizedQueue, "empty?", rb_szqueue_empty_p, 0);
    rb_define_method(rb_cSizedQueue, "clear", rb_szqueue_clear, 0);
    rb_define_method(rb_cSizedQueue, "length", rb_szqueue_length, 0);
    rb_define_method(rb_cSizedQueue, "num_waiting", rb_szqueue_num_waiting, 0);

    rb_define_alias(rb_cSizedQueue, "enq", "push");
    rb_define_alias(rb_cSizedQueue, "<<", "push");
    rb_define_alias(rb_cSizedQueue, "deq", "pop");
    rb_define_alias(rb_cSizedQueue, "shift", "pop");
    rb_define_alias(rb_cSizedQueue, "size", "length");

    /* CVar */
    DEFINE_CLASS(ConditionVariable, Object);
    rb_define_alloc_func(rb_cConditionVariable, condvar_alloc);

    /* Interned once; used by do_sleep to dispatch Mutex#sleep. */
    id_sleep = rb_intern("sleep");

    rb_define_method(rb_cConditionVariable, "initialize", rb_condvar_initialize, 0);
    rb_undef_method(rb_cConditionVariable, "initialize_copy");
    rb_define_method(rb_cConditionVariable, "marshal_dump", undumpable, 0);
    rb_define_method(rb_cConditionVariable, "wait", rb_condvar_wait, -1);
    rb_define_method(rb_cConditionVariable, "signal", rb_condvar_signal, 0);
    rb_define_method(rb_cConditionVariable, "broadcast", rb_condvar_broadcast, 0);

    /* These classes are now built in; make `require "thread"` a no-op. */
    rb_provide("thread.rb");
}
ID
unsigned long ID
Definition: ruby.h:103
rb_threadptr_interrupt
void rb_threadptr_interrupt(rb_thread_t *th)
Definition: thread.c:510
ruby_xfree
void ruby_xfree(void *x)
Definition: gc.c:10169
list_for_each
#define list_for_each(h, i, member)
Definition: rb_mjit_min_header-2.7.1.h:9099
rb_define_class
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition: class.c:649
obj
const VALUE VALUE obj
Definition: rb_mjit_min_header-2.7.1.h:5742
TypedData_Make_Struct
#define TypedData_Make_Struct(klass, type, data_type, sval)
Definition: ruby.h:1244
rb_obj_is_mutex
VALUE rb_obj_is_mutex(VALUE obj)
Definition: thread_sync.c:131
RUBY_VM_INTERRUPTED
#define RUBY_VM_INTERRUPTED(ec)
Definition: vm_core.h:1840
assert
#define assert(x)
Definition: dlmalloc.c:1176
THREAD_RUNNABLE
@ THREAD_RUNNABLE
Definition: vm_core.h:783
klass
VALUE klass
Definition: rb_mjit_min_header-2.7.1.h:13259
LONG_MAX
#define LONG_MAX
Definition: ruby.h:220
rb_mutex_new
VALUE rb_mutex_new(void)
Definition: thread_sync.c:165
list_del_init
#define list_del_init(n)
Definition: rb_mjit_min_header-2.7.1.h:9053
rb_mutex_owned_p
VALUE rb_mutex_owned_p(VALUE self)
Definition: thread_sync.c:345
rb_block_given_p
int rb_block_given_p(void)
Determines if the current method is given a block.
Definition: eval.c:898
queue_waiter::q
struct rb_queue * q
Definition: thread_sync.c:875
rb_mutex_struct::th
rb_thread_t * th
Definition: thread_sync.c:45
INT2FIX
#define INT2FIX(i)
Definition: ruby.h:263
n
const char size_t n
Definition: rb_mjit_min_header-2.7.1.h:5456
NUM2LONG
#define NUM2LONG(x)
Definition: ruby.h:679
VALUE
unsigned long VALUE
Definition: ruby.h:102
GET_VM
#define GET_VM()
Definition: vm_core.h:1764
rb_eArgError
VALUE rb_eArgError
Definition: error.c:923
rb_intern
#define rb_intern(str)
RB_TYPE_P
#define RB_TYPE_P(obj, type)
Definition: ruby.h:560
rb_hrtime_t
uint64_t rb_hrtime_t
Definition: hrtime.h:47
rb_mutex_struct::next_mutex
struct rb_mutex_struct * next_mutex
Definition: thread_sync.c:46
sleep_call::timeout
VALUE timeout
Definition: thread_sync.c:1365
arg
VALUE arg
Definition: rb_mjit_min_header-2.7.1.h:5601
Qundef
#define Qundef
Definition: ruby.h:470
Check_TypedStruct
#define Check_TypedStruct(v, t)
Definition: ruby.h:1200
rb_define_method
void rb_define_method(VALUE klass, const char *name, VALUE(*func)(ANYARGS), int argc)
Definition: class.c:1551
GET_EC
#define GET_EC()
Definition: vm_core.h:1766
INT2NUM
#define INT2NUM(x)
Definition: ruby.h:1609
rb_ary_shift
VALUE rb_ary_shift(VALUE ary)
Definition: array.c:1294
ptr
struct RIMemo * ptr
Definition: debug.c:74
Qfalse
#define Qfalse
Definition: ruby.h:467
THREAD_KILLED
@ THREAD_KILLED
Definition: vm_core.h:786
THREAD_STOPPED
@ THREAD_STOPPED
Definition: vm_core.h:784
NULL
#define NULL
Definition: _sdbm.c:101
PRIsVALUE
#define PRIsVALUE
Definition: ruby.h:166
rb_mutex_locked_p
VALUE rb_mutex_locked_p(VALUE self)
Definition: thread_sync.c:177
FL_SET
#define FL_SET(x, f)
Definition: ruby.h:1359
rb_thread_struct::ec
rb_execution_context_t * ec
Definition: vm_core.h:915
rb_define_alias
void rb_define_alias(VALUE klass, const char *name1, const char *name2)
Defines an alias of a method.
Definition: class.c:1800
rb_undef_method
void rb_undef_method(VALUE klass, const char *name)
Definition: class.c:1575
rb_check_arity
#define rb_check_arity
Definition: intern.h:347
RUBY_TYPED_DEFAULT_FREE
#define RUBY_TYPED_DEFAULT_FREE
Definition: ruby.h:1203
rb_raise
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:2669
mutex_mark
#define mutex_mark
Definition: thread_sync.c:81
LONG2NUM
#define LONG2NUM(x)
Definition: ruby.h:1644
NORETURN
NORETURN(static void raise_closed_queue_error(VALUE self))
rb_obj_class
VALUE rb_obj_class(VALUE)
Equivalent to Object#class in Ruby.
Definition: object.c:217
list_for_each_safe
#define list_for_each_safe(h, i, nxt, member)
Definition: rb_mjit_min_header-2.7.1.h:9102
THREAD_STOPPED_FOREVER
@ THREAD_STOPPED_FOREVER
Definition: vm_core.h:785
rb_vm_struct::sleeper
int sleeper
Definition: vm_core.h:607
rb_condvar::fork_gen
rb_serial_t fork_gen
Definition: thread_sync.c:1277
rb_mutex_struct::waitq
struct list_head waitq
Definition: thread_sync.c:47
rb_mutex_lock
VALUE rb_mutex_lock(VALUE self)
Definition: thread_sync.c:333
list_head
Definition: rb_mjit_min_header-2.7.1.h:8980
rb_mutex_sleep
VALUE rb_mutex_sleep(VALUE self, VALUE timeout)
Definition: thread_sync.c:465
queue_waiter::sq
struct rb_szqueue * sq
Definition: thread_sync.c:876
sync_waiter::th
rb_thread_t * th
Definition: thread_sync.c:9
rb_thread_struct::status
enum rb_thread_status status
Definition: rb_mjit_min_header-2.7.1.h:9897
rb_ary_tmp_new
VALUE rb_ary_tmp_new(long capa)
Definition: array.c:768
rb_serial_t
unsigned long rb_serial_t
Definition: internal.h:1014
MUTEX_ALLOW_TRAP
#define MUTEX_ALLOW_TRAP
Definition: thread_sync.c:13
szqueue_pushq
#define szqueue_pushq(sq)
Definition: thread_sync.c:566
rb_ary_push
VALUE rb_ary_push(VALUE ary, VALUE item)
Definition: array.c:1195
rb_eTypeError
VALUE rb_eTypeError
Definition: error.c:922
rb_condvar::waitq
struct list_head waitq
Definition: thread_sync.c:1276
rb_time_interval
struct timeval rb_time_interval(VALUE num)
Definition: time.c:2669
queue_waiter::w
struct sync_waiter w
Definition: thread_sync.c:873
rb_mutex_trylock
VALUE rb_mutex_trylock(VALUE self)
Definition: thread_sync.c:203
rb_eThreadError
VALUE rb_eThreadError
Definition: eval.c:924
rb_mutex_struct
Definition: thread_sync.c:44
time_t
long time_t
Definition: rb_mjit_min_header-2.7.1.h:1236
FL_SET_RAW
#define FL_SET_RAW(x, f)
Definition: ruby.h:1358
rb_mutex_synchronize
VALUE rb_mutex_synchronize(VALUE mutex, VALUE(*func)(VALUE arg), VALUE arg)
Definition: thread_sync.c:522
RB_OBJ_WRITE
#define RB_OBJ_WRITE(a, slot, b)
Definition: ruby.h:1508
list_del
#define list_del(n)
Definition: rb_mjit_min_header-2.7.1.h:9046
rb_thread_struct::locking_mutex
VALUE locking_mutex
Definition: vm_core.h:958
sync_waiter
Definition: thread_sync.c:8
PACKED_STRUCT_UNALIGNED
PACKED_STRUCT_UNALIGNED(struct rb_queue { struct list_head waitq;rb_serial_t fork_gen;const VALUE que;int num_waiting;})
rb_typeddata_is_kind_of
int rb_typeddata_is_kind_of(VALUE obj, const rb_data_type_t *data_type)
Definition: error.c:872
rb_ary_clear
VALUE rb_ary_clear(VALUE ary)
Definition: array.c:3862
RARRAY_LEN
#define RARRAY_LEN(a)
Definition: ruby.h:1070
rb_thread_struct::vm
rb_vm_t * vm
Definition: vm_core.h:913
rb_scan_args
#define rb_scan_args(argc, argvp, fmt,...)
Definition: rb_mjit_min_header-2.7.1.h:6372
FL_TEST_RAW
#define FL_TEST_RAW(x, f)
Definition: ruby.h:1352
rb_cObject
RUBY_EXTERN VALUE rb_cObject
Definition: ruby.h:2010
FL_UNSET_RAW
#define FL_UNSET_RAW(x, f)
Definition: ruby.h:1360
TypedData_Get_Struct
#define TypedData_Get_Struct(obj, type, data_type, sval)
Definition: ruby.h:1252
sleep_call::mutex
VALUE mutex
Definition: thread_sync.c:1364
rb_bug
void rb_bug(const char *fmt,...)
Definition: error.c:634
queue_waitq
#define queue_waitq(q)
Definition: thread_sync.c:557
rb_provide
void rb_provide(const char *)
Definition: load.c:563
T_ARRAY
#define T_ARRAY
Definition: ruby.h:530
argv
char ** argv
Definition: ruby.c:223
time
time_t time(time_t *_timer)
rb_mutex_unlock
VALUE rb_mutex_unlock(VALUE self)
Definition: thread_sync.c:403
rb_mutex_allow_trap
void rb_mutex_allow_trap(VALUE self, int val)
Definition: thread_sync.c:545
timeval
Definition: missing.h:53
GET_THREAD
#define GET_THREAD()
Definition: vm_core.h:1765
RUBY_TYPED_FREE_IMMEDIATELY
#define RUBY_TYPED_FREE_IMMEDIATELY
Definition: ruby.h:1207
QUEUE_CLOSED
#define QUEUE_CLOSED
Definition: thread_sync.c:632
NIL_P
#define NIL_P(v)
Definition: ruby.h:482
sync_waiter::node
struct list_node node
Definition: thread_sync.c:10
rb_execution_context_struct::interrupt_mask
rb_atomic_t interrupt_mask
Definition: vm_core.h:854
argc
int argc
Definition: ruby.c:222
list_node
Definition: rb_mjit_min_header-2.7.1.h:8976
rb_define_const
void rb_define_const(VALUE, const char *, VALUE)
Definition: variable.c:2880
err
int err
Definition: win32.c:135
rb_data_type_struct
Definition: ruby.h:1148
v
int VALUE v
Definition: rb_mjit_min_header-2.7.1.h:12337
rb_gc_mark
void rb_gc_mark(VALUE ptr)
Definition: gc.c:5214
Qtrue
#define Qtrue
Definition: ruby.h:468
rb_eStopIteration
VALUE rb_eStopIteration
Definition: enumerator.c:124
DEFINE_CLASS
#define DEFINE_CLASS(name, super)
szqueue_waitq
#define szqueue_waitq(sq)
Definition: thread_sync.c:565
queue_waiter::as
union queue_waiter::@163 as
sleep_call
Definition: thread_sync.c:1363
rb_define_class_under
VALUE rb_define_class_under(VALUE outer, const char *name, VALUE super)
Defines a class under the namespace of outer.
Definition: class.c:698
TRAP_INTERRUPT_MASK
@ TRAP_INTERRUPT_MASK
Definition: vm_core.h:1833
rb_yield
VALUE rb_yield(VALUE)
Definition: vm_eval.c:1237
rb_ensure
VALUE rb_ensure(VALUE(*b_proc)(VALUE), VALUE data1, VALUE(*e_proc)(VALUE), VALUE data2)
An equivalent to ensure clause.
Definition: eval.c:1115
Qnil
#define Qnil
Definition: ruby.h:469
rb_thread_struct::keeping_mutexes
struct rb_mutex_struct * keeping_mutexes
Definition: vm_core.h:959
RUBY_VM_CHECK_INTS_BLOCKING
#define RUBY_VM_CHECK_INTS_BLOCKING(ec)
Definition: thread.c:202
rb_mutex_t
struct rb_mutex_struct rb_mutex_t
rb_thread_struct
Definition: vm_core.h:910
queue_waiter
Definition: thread_sync.c:872
UNREACHABLE_RETURN
#define UNREACHABLE_RETURN(val)
Definition: ruby.h:59
rb_thread_status
rb_thread_status
Definition: vm_core.h:782
rb_define_alloc_func
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
RTEST
#define RTEST(v)
Definition: ruby.h:481
list_add_tail
#define list_add_tail(h, n)
Definition: rb_mjit_min_header-2.7.1.h:9028
rb_condvar
Definition: thread_sync.c:1275
RUBY_TYPED_WB_PROTECTED
#define RUBY_TYPED_WB_PROTECTED
Definition: ruby.h:1208
name
const char * name
Definition: nkf.c:208
rb_funcallv
#define rb_funcallv(recv, mid, argc, argv)
Definition: rb_mjit_min_header-2.7.1.h:7904