2 #include "ccan/list/list.h"
4 static VALUE rb_cMutex, rb_cQueue, rb_cSizedQueue, rb_cConditionVariable;
5 static VALUE rb_eClosedQueueError;
13 #define MUTEX_ALLOW_TRAP FL_USER1
static void
sync_wakeup(struct list_head *head, long max)
{
    /* ... walk the waitq, waking blocked threads ... */
        if (--max == 0)
            return;
    /* ... */
}

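/*
 * A minimal sketch of the wake-up loop elided above, assuming the
 * sync_waiter nodes are linked into the waitq with ccan list nodes, as
 * elsewhere in this file; an illustration, not the verbatim body.
 */
#if 0
static void
sync_wakeup_sketch(struct list_head *head, long max)
{
    struct sync_waiter *cur = 0, *next;

    list_for_each_safe(head, cur, next, node) {
        list_del_init(&cur->node);       /* detach before waking */
        rb_threadptr_interrupt(cur->th); /* kick the sleeping thread */
        if (--max == 0)
            return;                      /* woke as many as requested */
    }
}
#endif
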
#if defined(HAVE_WORKING_FORK)
static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
static void rb_mutex_abandon_keeping_mutexes(rb_thread_t *th);
static void rb_mutex_abandon_locking_mutex(rb_thread_t *th);
#endif

#define mutex_mark NULL
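/*
 * Why NULL: a mutex holds no VALUEs of its own; the owner is a raw
 * rb_thread_t pointer and the waitq entries live on the C stacks of
 * blocked threads, which the GC scans anyway, so there is nothing to mark.
 */
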
/* in mutex_free(): a still-locked mutex must be unlocked before freeing */
const char *err = rb_mutex_unlock_th(mutex, mutex->th);

static size_t
mutex_memsize(const void *ptr)
{
    return sizeof(rb_mutex_t);
}

/* in mutex_alloc(): start with an empty waiter list */
list_head_init(&mutex->waitq);

static VALUE
mutex_initialize(VALUE self)
{
    return self;
}

/* in rb_mutex_new() */
return mutex_alloc(rb_cMutex);

/* in rb_mutex_trylock(): take the lock only if it is unowned */
if (mutex->th == 0) {
    /* ... */
    mutex_locked(th, self);
}

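/*
 * Sketch of the whole trylock fast path under the assumption visible
 * above: the GVL serializes access, so taking ownership is a plain
 * store of the current thread pointer. Illustration only.
 */
#if 0
static VALUE
rb_mutex_trylock_sketch(VALUE self)
{
    rb_mutex_t *mutex = mutex_ptr(self);

    if (mutex->th == 0) {           /* unowned: claim it */
        rb_thread_t *th = GET_THREAD();
        mutex->th = th;
        mutex_locked(th, self);     /* track in the owner's mutex list */
        return Qtrue;
    }
    return Qfalse;                  /* already held (possibly by us) */
}
#endif
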
/* in mutex_owned_p(): ownership is pointer equality with the thread */
if (mutex->th == th) {
    /* ... */
}

static VALUE
do_mutex_lock(VALUE self, int interruptible_p)
{
    rb_thread_t *th = GET_THREAD();
    rb_mutex_t *mutex = mutex_ptr(self);

    /* ... fast path via rb_mutex_trylock(self) ... */

    if (mutex->th == th) {
        rb_raise(rb_eThreadError, "deadlock; recursive locking");
    }

    while (mutex->th != th) {
        /* ... enqueue on mutex->waitq; one sleeper may become the patrol
           thread and sleep with a timeout so deadlock can be detected ... */
        native_sleep(th, timeout); /* release GVL */

        if (patrol_thread == th)
            patrol_thread = NULL;

        /* ... */
        rb_check_deadlock(th->vm);
        /* ... */

        if (interruptible_p) {
            /* drop the lock before checking interrupts: the check may raise */
            if (mutex->th == th) mutex->th = 0;
            /* ... RUBY_VM_CHECK_INTS_BLOCKING(), then retake the lock ... */
            mutex_locked(th, self);
        }
        else {
            if (mutex->th == th) mutex_locked(th, self);
        }
    }

    if (mutex_owned_p(th, mutex) == Qfalse)
        rb_bug("do_mutex_lock: mutex is not owned.");

    return self;
}

static VALUE
mutex_lock_uninterruptible(VALUE self)
{
    return do_mutex_lock(self, 0);
}

/* in rb_mutex_lock() */
return do_mutex_lock(self, 1);

/* in rb_mutex_owned_p() */
return mutex_owned_p(th, mutex);

/* in rb_mutex_unlock_th() */
if (mutex->th == 0) {
    err = "Attempt to unlock a mutex which is not locked";
}
else if (mutex->th != th) {
    err = "Attempt to unlock a mutex which is locked by another thread";
}

/* still in rb_mutex_unlock_th(): a waiter in an impossible state is a bug */
case THREAD_STOPPED: /* probably impossible */
    rb_bug("unexpected THREAD_STOPPED");
case THREAD_KILLED:
    rb_bug("unexpected THREAD_KILLED");

/* unlink the mutex from the owner's keeping_mutexes chain */
while (*th_mutex != mutex) {
    th_mutex = &(*th_mutex)->next_mutex;
}

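/*
 * Unlock hand-off, in outline: the owner clears mutex->th, walks the
 * waitq, and interrupts the first waiter that is still runnable or
 * stopped-forever; the rb_bug() calls above cover states a mutex
 * waiter can never legitimately be in.
 */
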
#if defined(HAVE_WORKING_FORK)
/* in rb_mutex_abandon_locking_mutex(): the parent's waiters are gone */
list_head_init(&mutex->waitq);

/* in rb_mutex_abandon_all(): clear ownership and waiters of each mutex */
list_head_init(&mutex->waitq);
#endif

/* in rb_mutex_sleep_forever() */
rb_thread_sleep_deadly_allow_spurious_wakeup();

/* in rb_mutex_sleep() */
if (!NIL_P(timeout)) {
    t = rb_time_interval(timeout);
}
/* ... unlock, then sleep ... */
if (NIL_P(timeout)) {
    rb_ensure(rb_mutex_sleep_forever, Qnil, mutex_lock_uninterruptible, self);
}
else {
    rb_ensure(rb_mutex_wait_for, (VALUE)&rel,
              mutex_lock_uninterruptible, self);
}

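/*
 * Both branches re-acquire the lock via rb_ensure() with
 * mutex_lock_uninterruptible as the ensure handler, so Mutex#sleep
 * always returns holding the mutex, even when the sleep is cut short
 * by an interrupt.
 */
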
static VALUE
rb_mutex_synchronize_m(VALUE self)

#define queue_waitq(q)    UNALIGNED_MEMBER_PTR(q, waitq)
#define szqueue_waitq(sq) UNALIGNED_MEMBER_PTR(sq, q.waitq)
#define szqueue_pushq(sq) UNALIGNED_MEMBER_PTR(sq, pushq)

/* in struct rb_szqueue: producers currently blocked in push */
int num_waiting_push;

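/*
 * Sketch of the container layout these accessors imply (field names per
 * the macros above; the real definitions are wrapped in
 * PACKED_STRUCT_UNALIGNED, which is why member addresses go through
 * UNALIGNED_MEMBER_PTR). Illustration only.
 */
#if 0
struct rb_queue {
    struct list_head waitq;  /* blocked poppers */
    rb_serial_t fork_gen;    /* see queue_fork_check() below */
    VALUE que;               /* backing Array */
    int num_waiting;         /* poppers currently asleep */
};

struct rb_szqueue {
    struct rb_queue q;
    int num_waiting_push;    /* pushers currently asleep */
    struct list_head pushq;  /* blocked pushers */
    long max;                /* capacity */
};
#endif
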
static void
queue_mark(void *ptr)
{
    struct rb_queue *q = ptr;

    /* no need to mark the waitq: sleeping threads are reachable elsewhere */
    rb_gc_mark(q->que);
}

static size_t
queue_memsize(const void *ptr)
{
    return sizeof(struct rb_queue);
}

static int
queue_fork_check(struct rb_queue *q)
{
    rb_serial_t fork_gen = GET_VM()->fork_gen;

    if (q->fork_gen == fork_gen) {
        return 0;            /* same process: nothing to reset */
    }
    q->fork_gen = fork_gen;  /* forked child: reset waitq, num_waiting */
    return 1;
}

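/*
 * After fork(2) only the calling thread survives in the child, so any
 * waiters recorded by the parent point at threads that no longer exist.
 * Comparing fork_gen against the VM's current generation lets each
 * queue lazily discard that stale state the first time the child
 * touches it.
 */
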
static struct rb_queue *
queue_ptr(VALUE obj)
{
    /* ... */
}

#define QUEUE_CLOSED FL_USER5

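/*
 * Closed-ness is an FL_USER flag on the queue object itself rather than
 * a struct field, so queue_closed_p() needs only the VALUE and works
 * for Queue and SizedQueue alike.
 */
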
static void
szqueue_mark(void *ptr)
{
    struct rb_szqueue *sq = ptr;

    queue_mark(&sq->q);
}

static size_t
szqueue_memsize(const void *ptr)
{
    return sizeof(struct rb_szqueue);
}

/* in szqueue_alloc() */
struct rb_szqueue *sq;
VALUE obj = TypedData_Make_Struct(klass, struct rb_szqueue,
                                  &szqueue_data_type, sq);

static struct rb_szqueue *
szqueue_ptr(VALUE obj)
{
    struct rb_szqueue *sq;
    /* ... TypedData_Get_Struct ... */
    if (queue_fork_check(&sq->q)) {
        sq->num_waiting_push = 0; /* child: push-side waiters are gone too */
    }
    return sq;
}

static long
queue_length(VALUE self, struct rb_queue *q)
{
    return RARRAY_LEN(check_array(self, q->que));
}

static int
queue_closed_p(VALUE self)
{
    return FL_TEST_RAW(self, QUEUE_CLOSED) != 0;
}

static void
raise_closed_queue_error(VALUE self)
{
    rb_raise(rb_eClosedQueueError, "queue closed");
}

static VALUE
queue_closed_result(VALUE self, struct rb_queue *q)
{
    assert(queue_length(self, q) == 0);
    return Qnil;
}

static VALUE
rb_queue_initialize(VALUE self)
{
    struct rb_queue *q = queue_ptr(self);
    /* ... set up the backing array, init the waitq ... */
}

/* in queue_do_push() */
if (queue_closed_p(self)) {
    raise_closed_queue_error(self);
}

static VALUE
rb_queue_close(VALUE self)
{
    struct rb_queue *q = queue_ptr(self);

    if (!queue_closed_p(self)) {
        FL_SET(self, QUEUE_CLOSED);
        wakeup_all(queue_waitq(q));
    }
    return self;
}

static VALUE
rb_queue_closed_p(VALUE self)

/* in rb_queue_push() */
return queue_do_push(self, queue_ptr(self), obj);

/* in queue_sleep(): block until signaled; spurious wakeups are re-checked */
rb_thread_sleep_deadly_allow_spurious_wakeup();

/* in struct queue_waiter: the union's sized-queue arm */
struct rb_szqueue *sq;

static VALUE
queue_sleep_done(VALUE p)
{
    struct queue_waiter *qw = (struct queue_waiter *)p;

    list_del(&qw->w.node);
    qw->as.q->num_waiting--;

    return Qfalse;
}

static VALUE
szqueue_sleep_done(VALUE p)
{
    struct queue_waiter *qw = (struct queue_waiter *)p;

    list_del(&qw->w.node);
    qw->as.sq->num_waiting_push--;

    return Qfalse;
}

static VALUE
queue_do_pop(VALUE self, struct rb_queue *q, int should_block)
{
    check_array(self, q->que);

    while (RARRAY_LEN(q->que) == 0) {
        if (!should_block) {
            rb_raise(rb_eThreadError, "queue empty");
        }
        else if (queue_closed_p(self)) {
            return queue_closed_result(self, q);
        }
        else {
            struct queue_waiter qw;

            assert(queue_closed_p(self) == 0);

            qw.w.th = GET_THREAD();
            qw.as.q = q;
            list_add_tail(queue_waitq(qw.as.q), &qw.w.node);
            qw.as.q->num_waiting++;

            rb_ensure(queue_sleep, self, queue_sleep_done, (VALUE)&qw);
        }
    }

    return rb_ary_shift(q->que);
}

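/*
 * Design note: the waiter is linked in before sleeping, and rb_ensure()
 * guarantees queue_sleep_done() unlinks it and fixes num_waiting even
 * if the sleep is interrupted by Thread#raise or a signal; the while
 * loop re-tests emptiness, so spurious wakeups are harmless.
 */
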
/* in queue_pop_should_block(): blocking is the default */
int should_block = 1;

/* in rb_queue_pop() */
int should_block = queue_pop_should_block(argc, argv);
return queue_do_pop(self, queue_ptr(self), should_block);

static VALUE
rb_queue_empty_p(VALUE self)
{
    return queue_length(self, queue_ptr(self)) == 0 ? Qtrue : Qfalse;
}

static VALUE
rb_queue_clear(VALUE self)
{
    struct rb_queue *q = queue_ptr(self);

    rb_ary_clear(check_array(self, q->que));
    return self;
}

static VALUE
rb_queue_length(VALUE self)
{
    return LONG2NUM(queue_length(self, queue_ptr(self)));
}

static VALUE
rb_queue_num_waiting(VALUE self)
{
    struct rb_queue *q = queue_ptr(self);

    return INT2NUM(q->num_waiting);
}

static VALUE
rb_szqueue_initialize(VALUE self, VALUE vmax)
{
    struct rb_szqueue *sq = szqueue_ptr(self);
    /* ... validate vmax > 0, set up the queue and both wait lists ... */
}

static VALUE
rb_szqueue_close(VALUE self)
{
    if (!queue_closed_p(self)) {
        struct rb_szqueue *sq = szqueue_ptr(self);

        FL_SET(self, QUEUE_CLOSED);
        wakeup_all(szqueue_waitq(sq));
        wakeup_all(szqueue_pushq(sq));
    }
    return self;
}

static VALUE
rb_szqueue_max_get(VALUE self)
{
    return LONG2NUM(szqueue_ptr(self)->max);
}

/* in rb_szqueue_max_set(): growing the queue frees up `diff` slots */
struct rb_szqueue *sq = szqueue_ptr(self);

if (max > sq->max) {
    diff = max - sq->max;
}
sq->max = max;
sync_wakeup(szqueue_pushq(sq), diff); /* wake at most `diff` pushers */

/* in szqueue_push_should_block() */
int should_block = 1;
/* ... */
return should_block;

static VALUE
rb_szqueue_push(int argc, VALUE *argv, VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);
    int should_block = szqueue_push_should_block(argc, argv);

    while (queue_length(self, &sq->q) >= sq->max) {
        if (!should_block) {
            rb_raise(rb_eThreadError, "queue full");
        }
        else if (queue_closed_p(self)) {
            break;
        }
        else {
            struct queue_waiter qw;

            qw.w.th = GET_THREAD();
            qw.as.sq = sq;
            list_add_tail(szqueue_pushq(sq), &qw.w.node);
            sq->num_waiting_push++;

            rb_ensure(queue_sleep, self, szqueue_sleep_done, (VALUE)&qw);
        }
    }

    if (queue_closed_p(self)) {
        raise_closed_queue_error(self);
    }

    return queue_do_push(self, &sq->q, argv[0]);
}

static VALUE
szqueue_do_pop(VALUE self, int should_block)
{
    struct rb_szqueue *sq = szqueue_ptr(self);
    VALUE retval = queue_do_pop(self, &sq->q, should_block);

    if (queue_length(self, &sq->q) < sq->max) {
        wakeup_one(szqueue_pushq(sq)); /* a slot opened: wake one pusher */
    }

    return retval;
}

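/*
 * Wake-up pairing: each pop that drops the queue below capacity wakes
 * exactly one blocked pusher, while close(), clear(), and max= use
 * wakeup_all()/sync_wakeup() with a count; this avoids stampeding every
 * producer on every pop.
 */
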
/* in rb_szqueue_pop() */
int should_block = queue_pop_should_block(argc, argv);
return szqueue_do_pop(self, should_block);

static VALUE
rb_szqueue_clear(VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    rb_ary_clear(check_array(self, sq->q.que));
    wakeup_all(szqueue_pushq(sq)); /* clearing makes room for all pushers */
    return self;
}

static VALUE
rb_szqueue_length(VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    return LONG2NUM(queue_length(self, &sq->q));
}

static VALUE
rb_szqueue_num_waiting(VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    return INT2NUM(sq->q.num_waiting + sq->num_waiting_push);
}

static VALUE
rb_szqueue_empty_p(VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    return queue_length(self, &sq->q) == 0 ? Qtrue : Qfalse;
}

static size_t
condvar_memsize(const void *ptr)
{
    return sizeof(struct rb_condvar);
}

static struct rb_condvar *
condvar_ptr(VALUE self)
{
    /* ... TypedData_Get_Struct; on a fork-generation change the child
       resets its waiter list, just as the queues do ... */
    list_head_init(&cv->waitq);
    /* ... */
}

/* in condvar_alloc() */
list_head_init(&cv->waitq);

static VALUE
rb_condvar_initialize(VALUE self)
{
    struct rb_condvar *cv = condvar_ptr(self);
    list_head_init(&cv->waitq);
    return self;
}

/* do_sleep(): calls mutex.sleep(timeout) on behalf of ConditionVariable#wait */
static VALUE
do_sleep(VALUE args)

/* delete_from_waitq(): ensure-handler that unlinks an interrupted waiter */
static VALUE
delete_from_waitq(VALUE v)

static VALUE
rb_condvar_signal(VALUE self)
{
    struct rb_condvar *cv = condvar_ptr(self);
    wakeup_one(&cv->waitq);
    return self;
}

static VALUE
rb_condvar_broadcast(VALUE self)
{
    struct rb_condvar *cv = condvar_ptr(self);
    wakeup_all(&cv->waitq);
    return self;
}

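/*
 * Sketch of ConditionVariable#wait built from do_sleep() and
 * delete_from_waitq() above, assuming a sleep_call struct bundling the
 * mutex and timeout; illustration only. The waiter enqueues itself,
 * mutex.sleep releases the lock, and rb_ensure() unlinks the waiter
 * even if the sleep is interrupted.
 */
#if 0
static VALUE
rb_condvar_wait_sketch(int argc, VALUE *argv, VALUE self)
{
    struct rb_condvar *cv = condvar_ptr(self);
    struct sleep_call args;
    struct sync_waiter w;

    rb_scan_args(argc, argv, "11", &args.mutex, &args.timeout);

    w.th = GET_THREAD();
    list_add_tail(&cv->waitq, &w.node);
    rb_ensure(do_sleep, (VALUE)&args, delete_from_waitq, (VALUE)&w);

    return self;
}
#endif
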
void
Init_thread_sync(void)
{
#define DEFINE_CLASS(name, super) \
    rb_c##name = define_thread_class(rb_cThread, #name, rb_c##super)
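/*
 * For example, DEFINE_CLASS(Mutex, Object) expands to
 *
 *     rb_cMutex = define_thread_class(rb_cThread, "Mutex", rb_cObject);
 *
 * so each class is defined under Thread (e.g. Thread::Mutex) with the
 * named superclass.
 */
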
    rb_define_method(rb_cSizedQueue, "num_waiting", rb_szqueue_num_waiting, 0);
    /* ... */
    rb_define_method(rb_cConditionVariable, "initialize", rb_condvar_initialize, 0);
    /* ... */
    rb_define_method(rb_cConditionVariable, "broadcast", rb_condvar_broadcast, 0);