Ruby 3.3.5p100 (2024-09-03 revision ef084cc8f4958c1b6e4ead99136631bef6d8ddba)
thread_pthread.c
1/* -*-c-*- */
2/**********************************************************************
3
4 thread_pthread.c -
5
6 $Author$
7
8 Copyright (C) 2004-2007 Koichi Sasada
9
10**********************************************************************/
11
12#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
13
14#include "internal/gc.h"
15#include "rjit.h"
16
17#ifdef HAVE_SYS_RESOURCE_H
18#include <sys/resource.h>
19#endif
20#ifdef HAVE_THR_STKSEGMENT
21#include <thread.h>
22#endif
23#if defined(HAVE_FCNTL_H)
24#include <fcntl.h>
25#elif defined(HAVE_SYS_FCNTL_H)
26#include <sys/fcntl.h>
27#endif
28#ifdef HAVE_SYS_PRCTL_H
29#include <sys/prctl.h>
30#endif
31#if defined(HAVE_SYS_TIME_H)
32#include <sys/time.h>
33#endif
34#if defined(__HAIKU__)
35#include <kernel/OS.h>
36#endif
37#ifdef __linux__
38#include <sys/syscall.h> /* for SYS_gettid */
39#endif
40#include <time.h>
41#include <signal.h>
42
43#if defined __APPLE__
44# include <AvailabilityMacros.h>
45#endif
46
47#if defined(HAVE_SYS_EVENTFD_H) && defined(HAVE_EVENTFD)
48# define USE_EVENTFD (1)
49# include <sys/eventfd.h>
50#else
51# define USE_EVENTFD (0)
52#endif
53
54#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && \
55 defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
56 defined(HAVE_CLOCK_GETTIME)
57static pthread_condattr_t condattr_mono;
58static pthread_condattr_t *condattr_monotonic = &condattr_mono;
59#else
60static const void *const condattr_monotonic = NULL;
61#endif
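/*
 * Background note: when pthread_condattr_setclock() and CLOCK_MONOTONIC are
 * available, every condvar created by rb_native_cond_initialize() below is
 * bound to the monotonic clock (the attribute is initialized in
 * Init_native_thread()), so timed waits are not disturbed by wall-clock
 * adjustments.  Otherwise condattr_monotonic stays NULL and deadlines fall
 * back to CLOCK_REALTIME (see native_cond_timeout()).  Illustrative fragment,
 * not code from this file:
 *
 *   struct timespec deadline;
 *   clock_gettime(CLOCK_MONOTONIC, &deadline);   // same clock as the cond
 *   deadline.tv_sec += 1;                        // relative 1s -> absolute
 *   pthread_cond_timedwait(&cond, &mutex, &deadline);
 */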
62
63#include COROUTINE_H
64
65#ifndef HAVE_SYS_EVENT_H
66#define HAVE_SYS_EVENT_H 0
67#endif
68
69#ifndef HAVE_SYS_EPOLL_H
70#define HAVE_SYS_EPOLL_H 0
71#else
72// force setting for debug
73// #undef HAVE_SYS_EPOLL_H
74// #define HAVE_SYS_EPOLL_H 0
75#endif
76
77#ifndef USE_MN_THREADS
78 #if defined(__EMSCRIPTEN__) || defined(COROUTINE_PTHREAD_CONTEXT)
 79 // __EMSCRIPTEN__ provides epoll* declarations, but no implementations.
 80 // with COROUTINE_PTHREAD_CONTEXT, it is not worth using.
81 #define USE_MN_THREADS 0
82 #elif HAVE_SYS_EPOLL_H
83 #include <sys/epoll.h>
84 #define USE_MN_THREADS 1
85 #elif HAVE_SYS_EVENT_H
86 #include <sys/event.h>
87 #define USE_MN_THREADS 1
88 #else
89 #define USE_MN_THREADS 0
90 #endif
91#endif
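/*
 * USE_MN_THREADS selects the backend for M:N thread scheduling: epoll on
 * platforms with <sys/epoll.h> (Linux), kqueue on platforms with
 * <sys/event.h> (*BSD/macOS), and disabled where neither helps
 * (Emscripten declares epoll but does not implement it; the pthread-based
 * coroutine backend gains nothing from M:N scheduling).
 */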
92
93// native thread wrappers
94
95#define NATIVE_MUTEX_LOCK_DEBUG 0
96
97static void
98mutex_debug(const char *msg, void *lock)
99{
100 if (NATIVE_MUTEX_LOCK_DEBUG) {
101 int r;
102 static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;
103
104 if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
105 fprintf(stdout, "%s: %p\n", msg, lock);
106 if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
107 }
108}
109
110void
111rb_native_mutex_lock(pthread_mutex_t *lock)
112{
113 int r;
114 mutex_debug("lock", lock);
115 if ((r = pthread_mutex_lock(lock)) != 0) {
116 rb_bug_errno("pthread_mutex_lock", r);
117 }
118}
119
120void
121rb_native_mutex_unlock(pthread_mutex_t *lock)
122{
123 int r;
124 mutex_debug("unlock", lock);
125 if ((r = pthread_mutex_unlock(lock)) != 0) {
126 rb_bug_errno("pthread_mutex_unlock", r);
127 }
128}
129
130int
131rb_native_mutex_trylock(pthread_mutex_t *lock)
132{
133 int r;
134 mutex_debug("trylock", lock);
135 if ((r = pthread_mutex_trylock(lock)) != 0) {
136 if (r == EBUSY) {
137 return EBUSY;
138 }
139 else {
140 rb_bug_errno("pthread_mutex_trylock", r);
141 }
142 }
143 return 0;
144}
145
146void
147rb_native_mutex_initialize(pthread_mutex_t *lock)
148{
149 int r = pthread_mutex_init(lock, 0);
150 mutex_debug("init", lock);
151 if (r != 0) {
152 rb_bug_errno("pthread_mutex_init", r);
153 }
154}
155
156void
157rb_native_mutex_destroy(pthread_mutex_t *lock)
158{
159 int r = pthread_mutex_destroy(lock);
160 mutex_debug("destroy", lock);
161 if (r != 0) {
162 rb_bug_errno("pthread_mutex_destroy", r);
163 }
164}
165
166void
167rb_native_cond_initialize(rb_nativethread_cond_t *cond)
168{
169 int r = pthread_cond_init(cond, condattr_monotonic);
170 if (r != 0) {
171 rb_bug_errno("pthread_cond_init", r);
172 }
173}
174
175void
176rb_native_cond_destroy(rb_nativethread_cond_t *cond)
177{
178 int r = pthread_cond_destroy(cond);
179 if (r != 0) {
180 rb_bug_errno("pthread_cond_destroy", r);
181 }
182}
183
184/*
185 * In OS X 10.7 (Lion), pthread_cond_signal and pthread_cond_broadcast return
 186 * EAGAIN after retrying 8192 times. You can see this in the following source:
187 *
188 * http://www.opensource.apple.com/source/Libc/Libc-763.11/pthreads/pthread_cond.c
189 *
 190 * The following rb_native_cond_signal and rb_native_cond_broadcast functions
 191 * therefore retry until the pthread functions stop returning EAGAIN.
192 */
193
194void
195rb_native_cond_signal(rb_nativethread_cond_t *cond)
196{
197 int r;
198 do {
199 r = pthread_cond_signal(cond);
200 } while (r == EAGAIN);
201 if (r != 0) {
202 rb_bug_errno("pthread_cond_signal", r);
203 }
204}
205
206void
207rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
208{
209 int r;
210 do {
211 r = pthread_cond_broadcast(cond);
212 } while (r == EAGAIN);
213 if (r != 0) {
214 rb_bug_errno("pthread_cond_broadcast", r);
215 }
216}
217
218void
219rb_native_cond_wait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex)
220{
221 int r = pthread_cond_wait(cond, mutex);
222 if (r != 0) {
223 rb_bug_errno("pthread_cond_wait", r);
224 }
225}
226
227static int
228native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, const rb_hrtime_t *abs)
229{
230 int r;
231 struct timespec ts;
232
233 /*
 234 * An old Linux kernel may return EINTR, even though POSIX says
 235 * "These functions shall not return an error code of [EINTR]".
236 * http://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_cond_timedwait.html
237 * Let's hide it from arch generic code.
238 */
239 do {
240 rb_hrtime2timespec(&ts, abs);
241 r = pthread_cond_timedwait(cond, mutex, &ts);
242 } while (r == EINTR);
243
244 if (r != 0 && r != ETIMEDOUT) {
245 rb_bug_errno("pthread_cond_timedwait", r);
246 }
247
248 return r;
249}
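/*
 * Note: `abs` is an absolute deadline (rb_hrtime_t, nanoseconds) on the clock
 * chosen for the condvar, so the conversion to struct timespec is redone on
 * every EINTR retry and spurious wakeups do not extend the total wait time.
 */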
250
251static rb_hrtime_t
252native_cond_timeout(rb_nativethread_cond_t *cond, const rb_hrtime_t rel)
253{
254 if (condattr_monotonic) {
255 return rb_hrtime_add(rb_hrtime_now(), rel);
256 }
257 else {
258 struct timespec ts;
259
260 rb_timespec_now(&ts);
261 return rb_hrtime_add(rb_timespec2hrtime(&ts), rel);
262 }
263}
264
265void
266rb_native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, unsigned long msec)
267{
268 rb_hrtime_t hrmsec = native_cond_timeout(cond, RB_HRTIME_PER_MSEC * msec);
269 native_cond_timedwait(cond, mutex, &hrmsec);
270}
271
272// thread scheduling
273
274static rb_internal_thread_event_hook_t *rb_internal_thread_event_hooks = NULL;
275static void rb_thread_execute_hooks(rb_event_flag_t event, rb_thread_t *th);
276
277#if 0
278static const char *
279event_name(rb_event_flag_t event)
280{
 281 switch (event) {
 282 case RUBY_INTERNAL_THREAD_EVENT_STARTED:
 283 return "STARTED";
 284 case RUBY_INTERNAL_THREAD_EVENT_READY:
 285 return "READY";
 286 case RUBY_INTERNAL_THREAD_EVENT_RESUMED:
 287 return "RESUMED";
 288 case RUBY_INTERNAL_THREAD_EVENT_SUSPENDED:
 289 return "SUSPENDED";
 290 case RUBY_INTERNAL_THREAD_EVENT_EXITED:
 291 return "EXITED";
 292 }
293 return "no-event";
294}
295
296#define RB_INTERNAL_THREAD_HOOK(event, th) \
297 if (UNLIKELY(rb_internal_thread_event_hooks)) { \
298 fprintf(stderr, "[thread=%"PRIxVALUE"] %s in %s (%s:%d)\n", th->self, event_name(event), __func__, __FILE__, __LINE__); \
299 rb_thread_execute_hooks(event, th); \
300 }
301#else
302#define RB_INTERNAL_THREAD_HOOK(event, th) if (UNLIKELY(rb_internal_thread_event_hooks)) { rb_thread_execute_hooks(event, th); }
303#endif
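/*
 * These hooks back the public instrumentation C-API declared in ruby/thread.h
 * (rb_internal_thread_add_event_hook() and friends), used by profilers to
 * observe scheduler transitions.  Rough consumer sketch (illustrative only;
 * check ruby/thread.h for the exact callback type in this release):
 *
 *   static void
 *   on_thread_event(rb_event_flag_t event,
 *                   const rb_internal_thread_event_data_t *data, void *user)
 *   {
 *       // e.g. timestamp READY -> RESUMED latencies per thread
 *   }
 *   // rb_internal_thread_add_event_hook(on_thread_event,
 *   //     RUBY_INTERNAL_THREAD_EVENT_READY | RUBY_INTERNAL_THREAD_EVENT_RESUMED,
 *   //     NULL);
 */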
304
305static rb_serial_t current_fork_gen = 1; /* We can't use GET_VM()->fork_gen */
306
307#if defined(SIGVTALRM) && !defined(__CYGWIN__) && !defined(__EMSCRIPTEN__)
308# define USE_UBF_LIST 1
309#endif
310
311static void threadptr_trap_interrupt(rb_thread_t *);
312
313#ifdef HAVE_SCHED_YIELD
314#define native_thread_yield() (void)sched_yield()
315#else
316#define native_thread_yield() ((void)0)
317#endif
318
319/* 100ms. 10ms is too small for user level thread scheduling
320 * on recent Linux (tested on 2.6.35)
321 */
322#define TIME_QUANTUM_MSEC (100)
323#define TIME_QUANTUM_USEC (TIME_QUANTUM_MSEC * 1000)
324#define TIME_QUANTUM_NSEC (TIME_QUANTUM_USEC * 1000)
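/*
 * The timer thread uses this quantum for preemption: a thread that keeps
 * running for roughly TIME_QUANTUM_MSEC while others are ready is interrupted
 * so the scheduler can switch (100ms = 100,000us = 100,000,000ns above).
 */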
325
326static void native_thread_dedicated_inc(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt);
327static void native_thread_dedicated_dec(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt);
328static void native_thread_assign(struct rb_native_thread *nt, rb_thread_t *th);
329
330static void ractor_sched_enq(rb_vm_t *vm, rb_ractor_t *r);
331static void timer_thread_wakeup(void);
332static void timer_thread_wakeup_locked(rb_vm_t *vm);
333static void timer_thread_wakeup_force(void);
334static void thread_sched_switch(rb_thread_t *cth, rb_thread_t *next_th);
335
336#define thread_sched_dump(s) thread_sched_dump_(__FILE__, __LINE__, s)
337
338static bool
339th_has_dedicated_nt(const rb_thread_t *th)
340{
341 // TODO: th->has_dedicated_nt
342 return th->nt->dedicated > 0;
343}
344
346static void
347thread_sched_dump_(const char *file, int line, struct rb_thread_sched *sched)
348{
349 fprintf(stderr, "@%s:%d running:%d\n", file, line, sched->running ? (int)sched->running->serial : -1);
350 rb_thread_t *th;
351 int i = 0;
352 ccan_list_for_each(&sched->readyq, th, sched.node.readyq) {
353 i++; if (i>10) rb_bug("too many");
354 fprintf(stderr, " ready:%d (%sNT:%d)\n", th->serial,
355 th->nt ? (th->nt->dedicated ? "D" : "S") : "x",
356 th->nt ? (int)th->nt->serial : -1);
357 }
358}
359
360#define ractor_sched_dump(s) ractor_sched_dump_(__FILE__, __LINE__, s)
361
363static void
364ractor_sched_dump_(const char *file, int line, rb_vm_t *vm)
365{
366 rb_ractor_t *r;
367
368 fprintf(stderr, "ractor_sched_dump %s:%d\n", file, line);
369
370 int i = 0;
371 ccan_list_for_each(&vm->ractor.sched.grq, r, threads.sched.grq_node) {
372 i++;
373 if (i>10) rb_bug("!!");
374 fprintf(stderr, " %d ready:%d\n", i, rb_ractor_id(r));
375 }
376}
377
378#define thread_sched_lock(a, b) thread_sched_lock_(a, b, __FILE__, __LINE__)
379#define thread_sched_unlock(a, b) thread_sched_unlock_(a, b, __FILE__, __LINE__)
380
381static void
382thread_sched_lock_(struct rb_thread_sched *sched, rb_thread_t *th, const char *file, int line)
383{
384 rb_native_mutex_lock(&sched->lock_);
385
386#if VM_CHECK_MODE
387 RUBY_DEBUG_LOG2(file, line, "th:%u prev_owner:%u", rb_th_serial(th), rb_th_serial(sched->lock_owner));
388 VM_ASSERT(sched->lock_owner == NULL);
389 sched->lock_owner = th;
390#else
391 RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));
392#endif
393}
394
395static void
396thread_sched_unlock_(struct rb_thread_sched *sched, rb_thread_t *th, const char *file, int line)
397{
398 RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));
399
400#if VM_CHECK_MODE
401 VM_ASSERT(sched->lock_owner == th);
402 sched->lock_owner = NULL;
403#endif
404
405 rb_native_mutex_unlock(&sched->lock_);
406}
407
408static void
409thread_sched_set_lock_owner(struct rb_thread_sched *sched, rb_thread_t *th)
410{
411 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
412
413#if VM_CHECK_MODE > 0
414 sched->lock_owner = th;
415#endif
416}
417
418static void
419ASSERT_thread_sched_locked(struct rb_thread_sched *sched, rb_thread_t *th)
420{
421 VM_ASSERT(rb_native_mutex_trylock(&sched->lock_) == EBUSY);
422
423#if VM_CHECK_MODE
424 if (th) {
425 VM_ASSERT(sched->lock_owner == th);
426 }
427 else {
428 VM_ASSERT(sched->lock_owner != NULL);
429 }
430#endif
431}
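/*
 * The EBUSY check above is a portable "this mutex is currently held"
 * assertion: pthread_mutex_trylock() on a default (non-recursive) mutex that
 * is already locked fails with EBUSY without blocking.  It only proves the
 * lock is held by somebody; the lock_owner field narrows that to the expected
 * thread when VM_CHECK_MODE is enabled.
 */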
432
433#define ractor_sched_lock(a, b) ractor_sched_lock_(a, b, __FILE__, __LINE__)
434#define ractor_sched_unlock(a, b) ractor_sched_unlock_(a, b, __FILE__, __LINE__)
435
437static unsigned int
438rb_ractor_serial(const rb_ractor_t *r) {
439 if (r) {
440 return rb_ractor_id(r);
441 }
442 else {
443 return 0;
444 }
445}
446
447static void
448ractor_sched_set_locked(rb_vm_t *vm, rb_ractor_t *cr)
449{
450#if VM_CHECK_MODE > 0
451 VM_ASSERT(vm->ractor.sched.lock_owner == NULL);
452 VM_ASSERT(vm->ractor.sched.locked == false);
453
454 vm->ractor.sched.lock_owner = cr;
455 vm->ractor.sched.locked = true;
456#endif
457}
458
459static void
460ractor_sched_set_unlocked(rb_vm_t *vm, rb_ractor_t *cr)
461{
462#if VM_CHECK_MODE > 0
463 VM_ASSERT(vm->ractor.sched.locked);
464 VM_ASSERT(vm->ractor.sched.lock_owner == cr);
465
466 vm->ractor.sched.locked = false;
467 vm->ractor.sched.lock_owner = NULL;
468#endif
469}
470
471static void
472ractor_sched_lock_(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line)
473{
474 rb_native_mutex_lock(&vm->ractor.sched.lock);
475
476#if VM_CHECK_MODE
477 RUBY_DEBUG_LOG2(file, line, "cr:%u prev_owner:%u", rb_ractor_serial(cr), rb_ractor_serial(vm->ractor.sched.lock_owner));
478#else
479 RUBY_DEBUG_LOG2(file, line, "cr:%u", rb_ractor_serial(cr));
480#endif
481
482 ractor_sched_set_locked(vm, cr);
483}
484
485static void
486ractor_sched_unlock_(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line)
487{
488 RUBY_DEBUG_LOG2(file, line, "cr:%u", rb_ractor_serial(cr));
489
490 ractor_sched_set_unlocked(vm, cr);
491 rb_native_mutex_unlock(&vm->ractor.sched.lock);
492}
493
494static void
495ASSERT_ractor_sched_locked(rb_vm_t *vm, rb_ractor_t *cr)
496{
497 VM_ASSERT(rb_native_mutex_trylock(&vm->ractor.sched.lock) == EBUSY);
498 VM_ASSERT(vm->ractor.sched.locked);
499 VM_ASSERT(cr == NULL || vm->ractor.sched.lock_owner == cr);
500}
501
503static bool
504ractor_sched_running_threads_contain_p(rb_vm_t *vm, rb_thread_t *th)
505{
506 rb_thread_t *rth;
507 ccan_list_for_each(&vm->ractor.sched.running_threads, rth, sched.node.running_threads) {
508 if (rth == th) return true;
509 }
510 return false;
511}
512
514static unsigned int
515ractor_sched_running_threads_size(rb_vm_t *vm)
516{
517 rb_thread_t *th;
518 unsigned int i = 0;
519 ccan_list_for_each(&vm->ractor.sched.running_threads, th, sched.node.running_threads) {
520 i++;
521 }
522 return i;
523}
524
526static unsigned int
527ractor_sched_timeslice_threads_size(rb_vm_t *vm)
528{
529 rb_thread_t *th;
530 unsigned int i = 0;
531 ccan_list_for_each(&vm->ractor.sched.timeslice_threads, th, sched.node.timeslice_threads) {
532 i++;
533 }
534 return i;
535}
536
538static bool
539ractor_sched_timeslice_threads_contain_p(rb_vm_t *vm, rb_thread_t *th)
540{
541 rb_thread_t *rth;
542 ccan_list_for_each(&vm->ractor.sched.timeslice_threads, rth, sched.node.timeslice_threads) {
543 if (rth == th) return true;
544 }
545 return false;
546}
547
548static void ractor_sched_barrier_join_signal_locked(rb_vm_t *vm);
549static void ractor_sched_barrier_join_wait_locked(rb_vm_t *vm, rb_thread_t *th);
550
551// setup timeslice signals by the timer thread.
552static void
553thread_sched_setup_running_threads(struct rb_thread_sched *sched, rb_ractor_t *cr, rb_vm_t *vm,
554 rb_thread_t *add_th, rb_thread_t *del_th, rb_thread_t *add_timeslice_th)
555{
556#if USE_RUBY_DEBUG_LOG
557 unsigned int prev_running_cnt = vm->ractor.sched.running_cnt;
558#endif
559
560 rb_thread_t *del_timeslice_th;
561
562 if (del_th && sched->is_running_timeslice) {
563 del_timeslice_th = del_th;
564 sched->is_running_timeslice = false;
565 }
566 else {
567 del_timeslice_th = NULL;
568 }
569
570 RUBY_DEBUG_LOG("+:%u -:%u +ts:%u -ts:%u",
571 rb_th_serial(add_th), rb_th_serial(del_th),
572 rb_th_serial(add_timeslice_th), rb_th_serial(del_timeslice_th));
573
574 ractor_sched_lock(vm, cr);
575 {
576 // update running_threads
577 if (del_th) {
578 VM_ASSERT(ractor_sched_running_threads_contain_p(vm, del_th));
579 VM_ASSERT(del_timeslice_th != NULL ||
580 !ractor_sched_timeslice_threads_contain_p(vm, del_th));
581
582 ccan_list_del_init(&del_th->sched.node.running_threads);
583 vm->ractor.sched.running_cnt--;
584
585 if (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
586 ractor_sched_barrier_join_signal_locked(vm);
587 }
588 sched->is_running = false;
589 }
590
591 if (add_th) {
592 if (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
593 RUBY_DEBUG_LOG("barrier-wait");
594
595 ractor_sched_barrier_join_signal_locked(vm);
596 ractor_sched_barrier_join_wait_locked(vm, add_th);
597 }
598
599 VM_ASSERT(!ractor_sched_running_threads_contain_p(vm, add_th));
600 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm, add_th));
601
602 ccan_list_add(&vm->ractor.sched.running_threads, &add_th->sched.node.running_threads);
603 vm->ractor.sched.running_cnt++;
604 sched->is_running = true;
605 }
606
607 if (add_timeslice_th) {
608 // update timeslice threads
609 int was_empty = ccan_list_empty(&vm->ractor.sched.timeslice_threads);
610 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm, add_timeslice_th));
611 ccan_list_add(&vm->ractor.sched.timeslice_threads, &add_timeslice_th->sched.node.timeslice_threads);
612 sched->is_running_timeslice = true;
613 if (was_empty) {
614 timer_thread_wakeup_locked(vm);
615 }
616 }
617
618 if (del_timeslice_th) {
619 VM_ASSERT(ractor_sched_timeslice_threads_contain_p(vm, del_timeslice_th));
620 ccan_list_del_init(&del_timeslice_th->sched.node.timeslice_threads);
621 }
622
623 VM_ASSERT(ractor_sched_running_threads_size(vm) == vm->ractor.sched.running_cnt);
624 VM_ASSERT(ractor_sched_timeslice_threads_size(vm) <= vm->ractor.sched.running_cnt);
625 }
626 ractor_sched_unlock(vm, cr);
627
628 if (add_th && !del_th && UNLIKELY(vm->ractor.sync.lock_owner != NULL)) {
629 // it can be after barrier synchronization by another ractor
630 rb_thread_t *lock_owner = NULL;
631#if VM_CHECK_MODE
632 lock_owner = sched->lock_owner;
633#endif
634 thread_sched_unlock(sched, lock_owner);
635 {
636 RB_VM_LOCK_ENTER();
637 RB_VM_LOCK_LEAVE();
638 }
639 thread_sched_lock(sched, lock_owner);
640 }
641
642 //RUBY_DEBUG_LOG("+:%u -:%u +ts:%u -ts:%u run:%u->%u",
643 // rb_th_serial(add_th), rb_th_serial(del_th),
644 // rb_th_serial(add_timeslice_th), rb_th_serial(del_timeslice_th),
645 RUBY_DEBUG_LOG("run:%u->%u", prev_running_cnt, vm->ractor.sched.running_cnt);
646}
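/*
 * thread_sched_setup_running_threads() is the single place that mutates the
 * global vm->ractor.sched.running_threads / timeslice_threads lists and
 * running_cnt, always under the ractor scheduler lock.  Waking the timer
 * thread when the timeslice list becomes non-empty, and the barrier
 * join/signal calls, piggyback on the same critical section so the counters
 * stay consistent with the lists.
 */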
647
648static void
649thread_sched_add_running_thread(struct rb_thread_sched *sched, rb_thread_t *th)
650{
651 ASSERT_thread_sched_locked(sched, th);
652 VM_ASSERT(sched->running == th);
653
654 rb_vm_t *vm = th->vm;
655 thread_sched_setup_running_threads(sched, th->ractor, vm, th, NULL, ccan_list_empty(&sched->readyq) ? NULL : th);
656}
657
658static void
659thread_sched_del_running_thread(struct rb_thread_sched *sched, rb_thread_t *th)
660{
661 ASSERT_thread_sched_locked(sched, th);
662
663 rb_vm_t *vm = th->vm;
664 thread_sched_setup_running_threads(sched, th->ractor, vm, NULL, th, NULL);
665}
666
667void
668rb_add_running_thread(rb_thread_t *th)
669{
670 struct rb_thread_sched *sched = TH_SCHED(th);
671
672 thread_sched_lock(sched, th);
673 {
674 thread_sched_add_running_thread(sched, th);
675 }
676 thread_sched_unlock(sched, th);
677}
678
679void
680rb_del_running_thread(rb_thread_t *th)
681{
682 struct rb_thread_sched *sched = TH_SCHED(th);
683
684 thread_sched_lock(sched, th);
685 {
686 thread_sched_del_running_thread(sched, th);
687 }
688 thread_sched_unlock(sched, th);
689}
690
691// setup current or next running thread
692// sched->running should be set only by this function.
693//
694// if th is NULL, there are no running threads.
695static void
696thread_sched_set_running(struct rb_thread_sched *sched, rb_thread_t *th)
697{
698 RUBY_DEBUG_LOG("th:%u->th:%u", rb_th_serial(sched->running), rb_th_serial(th));
699 VM_ASSERT(sched->running != th);
700
701 sched->running = th;
702}
703
705static bool
706thread_sched_readyq_contain_p(struct rb_thread_sched *sched, rb_thread_t *th)
707{
708 rb_thread_t *rth;
709 ccan_list_for_each(&sched->readyq, rth, sched.node.readyq) {
710 if (rth == th) return true;
711 }
712 return false;
713}
714
715// dequeue a thread from the ready queue.
716// if the ready queue is empty, return NULL.
717//
718// return the dequeued thread (or NULL).
719static rb_thread_t *
720thread_sched_deq(struct rb_thread_sched *sched)
721{
722 ASSERT_thread_sched_locked(sched, NULL);
723 rb_thread_t *next_th;
724
725 VM_ASSERT(sched->running != NULL);
726
727 if (ccan_list_empty(&sched->readyq)) {
728 next_th = NULL;
729 }
730 else {
731 next_th = ccan_list_pop(&sched->readyq, rb_thread_t, sched.node.readyq);
732
733 VM_ASSERT(sched->readyq_cnt > 0);
734 sched->readyq_cnt--;
735 ccan_list_node_init(&next_th->sched.node.readyq);
736 }
737
738 RUBY_DEBUG_LOG("next_th:%u readyq_cnt:%d", rb_th_serial(next_th), sched->readyq_cnt);
739
740 return next_th;
741}
742
743// enqueue ready thread to the ready queue.
744static void
745thread_sched_enq(struct rb_thread_sched *sched, rb_thread_t *ready_th)
746{
747 ASSERT_thread_sched_locked(sched, NULL);
748 RUBY_DEBUG_LOG("ready_th:%u readyq_cnt:%d", rb_th_serial(ready_th), sched->readyq_cnt);
749
750 VM_ASSERT(sched->running != NULL);
751 VM_ASSERT(!thread_sched_readyq_contain_p(sched, ready_th));
752
753 if (sched->is_running) {
754 if (ccan_list_empty(&sched->readyq)) {
755 // add sched->running to timeslice
756 thread_sched_setup_running_threads(sched, ready_th->ractor, ready_th->vm, NULL, NULL, sched->running);
757 }
758 }
759 else {
760 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(ready_th->vm, sched->running));
761 }
762
763 ccan_list_add_tail(&sched->readyq, &ready_th->sched.node.readyq);
764 sched->readyq_cnt++;
765}
766
767// DNT: kick condvar
768// SNT: TODO
769static void
770thread_sched_wakeup_running_thread(struct rb_thread_sched *sched, rb_thread_t *next_th, bool will_switch)
771{
772 ASSERT_thread_sched_locked(sched, NULL);
773 VM_ASSERT(sched->running == next_th);
774
775 if (next_th) {
776 if (next_th->nt) {
777 if (th_has_dedicated_nt(next_th)) {
778 RUBY_DEBUG_LOG("pinning th:%u", next_th->serial);
779 rb_native_cond_signal(&next_th->nt->cond.readyq);
780 }
781 else {
782 // TODO
783 RUBY_DEBUG_LOG("th:%u is already running.", next_th->serial);
784 }
785 }
786 else {
787 if (will_switch) {
788 RUBY_DEBUG_LOG("th:%u (do nothing)", rb_th_serial(next_th));
789 }
790 else {
791 RUBY_DEBUG_LOG("th:%u (enq)", rb_th_serial(next_th));
792 ractor_sched_enq(next_th->vm, next_th->ractor);
793 }
794 }
795 }
796 else {
797 RUBY_DEBUG_LOG("no waiting threads%s", "");
798 }
799}
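/*
 * DNT/SNT glossary: a "dedicated native thread" (DNT) is an OS thread bound
 * to exactly one Ruby thread (nt->dedicated > 0) and is woken via its own
 * cond.readyq.  A "shared native thread" (SNT) serves many Ruby threads under
 * M:N scheduling; to run such a thread its ractor is pushed onto the global
 * run queue (ractor_sched_enq) and picked up by an idle SNT (see
 * ractor_sched_deq below).
 */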
800
801// waiting -> ready (locked)
802static void
803thread_sched_to_ready_common(struct rb_thread_sched *sched, rb_thread_t *th, bool wakeup, bool will_switch)
804{
 805 RUBY_DEBUG_LOG("th:%u running:%u readyq_cnt:%d", rb_th_serial(th), rb_th_serial(sched->running), sched->readyq_cnt);
806
807 VM_ASSERT(sched->running != th);
808 VM_ASSERT(!thread_sched_readyq_contain_p(sched, th));
809 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_READY, th);
810
811 if (sched->running == NULL) {
812 thread_sched_set_running(sched, th);
813 if (wakeup) thread_sched_wakeup_running_thread(sched, th, will_switch);
814 }
815 else {
816 thread_sched_enq(sched, th);
817 }
818}
819
820// waiting -> ready
821//
822// `th` was put into the "waiting" state by `thread_sched_to_waiting`,
823// and `thread_sched_to_ready` enqueues `th` onto the thread ready queue.
825static void
826thread_sched_to_ready(struct rb_thread_sched *sched, rb_thread_t *th)
827{
828 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
829
830 thread_sched_lock(sched, th);
831 {
832 thread_sched_to_ready_common(sched, th, true, false);
833 }
834 thread_sched_unlock(sched, th);
835}
836
837// wait until sched->running is `th`.
838static void
839thread_sched_wait_running_turn(struct rb_thread_sched *sched, rb_thread_t *th, bool can_direct_transfer)
840{
841 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
842
843 ASSERT_thread_sched_locked(sched, th);
844 VM_ASSERT(th == GET_THREAD());
845
846 if (th != sched->running) {
847 // already deleted from running threads
848 // VM_ASSERT(!ractor_sched_running_threads_contain_p(th->vm, th)); // need locking
849
850 // wait for execution right
851 rb_thread_t *next_th;
852 while((next_th = sched->running) != th) {
853 if (th_has_dedicated_nt(th)) {
854 RUBY_DEBUG_LOG("(nt) sleep th:%u running:%u", rb_th_serial(th), rb_th_serial(sched->running));
855
856 thread_sched_set_lock_owner(sched, NULL);
857 {
858 RUBY_DEBUG_LOG("nt:%d cond:%p", th->nt->serial, &th->nt->cond.readyq);
859 rb_native_cond_wait(&th->nt->cond.readyq, &sched->lock_);
860 }
861 thread_sched_set_lock_owner(sched, th);
862
863 RUBY_DEBUG_LOG("(nt) wakeup %s", sched->running == th ? "success" : "failed");
864 if (th == sched->running) {
865 rb_ractor_thread_switch(th->ractor, th);
866 }
867 }
868 else {
869 // search another ready thread
870 if (can_direct_transfer &&
871 (next_th = sched->running) != NULL &&
872 !next_th->nt // next_th is running or has dedicated nt
873 ) {
874
875 RUBY_DEBUG_LOG("th:%u->%u (direct)", rb_th_serial(th), rb_th_serial(next_th));
876
877 thread_sched_set_lock_owner(sched, NULL);
878 {
879 rb_ractor_set_current_ec(th->ractor, NULL);
880 thread_sched_switch(th, next_th);
881 }
882 thread_sched_set_lock_owner(sched, th);
883 }
884 else {
885 // search another ready ractor
886 struct rb_native_thread *nt = th->nt;
887 native_thread_assign(NULL, th);
888
889 RUBY_DEBUG_LOG("th:%u->%u (ractor scheduling)", rb_th_serial(th), rb_th_serial(next_th));
890
891 thread_sched_set_lock_owner(sched, NULL);
892 {
893 rb_ractor_set_current_ec(th->ractor, NULL);
894 coroutine_transfer(th->sched.context, nt->nt_context);
895 }
896 thread_sched_set_lock_owner(sched, th);
897 }
898
899 VM_ASSERT(GET_EC() == th->ec);
900 }
901 }
902
903 VM_ASSERT(th->nt != NULL);
904 VM_ASSERT(GET_EC() == th->ec);
905 VM_ASSERT(th->sched.waiting_reason.flags == thread_sched_waiting_none);
906
907 // add th to running threads
908 thread_sched_add_running_thread(sched, th);
909 }
910
911 // VM_ASSERT(ractor_sched_running_threads_contain_p(th->vm, th)); need locking
912 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_RESUMED, th);
913}
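/*
 * thread_sched_wait_running_turn() parks a thread that does not (yet) own the
 * running slot in one of three ways:
 *   - dedicated NT: sleep on its own nt->cond.readyq until sched->running
 *     becomes this thread;
 *   - M:N with a ready successor: coroutine_transfer() directly into the next
 *     thread, handing over the current native thread;
 *   - M:N without a successor: give the native thread back and switch to the
 *     NT's own scheduler context (nt->nt_context).
 * Around each blocking/switching point the debug lock_owner is cleared and
 * restored, and the thread re-registers itself as running before returning.
 */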
914
915// waiting -> ready -> running (locked)
916static void
917thread_sched_to_running_common(struct rb_thread_sched *sched, rb_thread_t *th)
918{
919 RUBY_DEBUG_LOG("th:%u dedicated:%d", rb_th_serial(th), th_has_dedicated_nt(th));
920
921 VM_ASSERT(sched->running != th);
922 VM_ASSERT(th_has_dedicated_nt(th));
923 VM_ASSERT(GET_THREAD() == th);
924
925 native_thread_dedicated_dec(th->vm, th->ractor, th->nt);
926
927 // waiting -> ready
928 thread_sched_to_ready_common(sched, th, false, false);
929
930 if (sched->running == th) {
931 thread_sched_add_running_thread(sched, th);
932 }
933
934 // TODO: check SNT number
935 thread_sched_wait_running_turn(sched, th, false);
936}
937
938// waiting -> ready -> running
939//
940// `th` had been waiting via `thread_sched_to_waiting()`
941// and ran a dedicated task (like waitpid and so on).
942// After the dedicated task, this function is called
943// to rejoin normal thread scheduling.
944static void
945thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
946{
947 thread_sched_lock(sched, th);
948 {
949 thread_sched_to_running_common(sched, th);
950 }
951 thread_sched_unlock(sched, th);
952}
953
954// resume the next thread in the ready queue.
955//
956// dequeue the next running thread from the ready queue and
957// resume it if available.
958//
959// If the next thread has a dedicated native thread, simply signal it to resume.
960// Otherwise, make the ractor ready so that another nt will run the ractor and the thread.
961static void
962thread_sched_wakeup_next_thread(struct rb_thread_sched *sched, rb_thread_t *th, bool will_switch)
963{
964 ASSERT_thread_sched_locked(sched, th);
965
966 VM_ASSERT(sched->running == th);
967 VM_ASSERT(sched->running->nt != NULL);
968
969 rb_thread_t *next_th = thread_sched_deq(sched);
970
971 RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th));
972 VM_ASSERT(th != next_th);
973
974 thread_sched_set_running(sched, next_th);
975 VM_ASSERT(next_th == sched->running);
976 thread_sched_wakeup_running_thread(sched, next_th, will_switch);
977
978 if (th != next_th) {
979 thread_sched_del_running_thread(sched, th);
980 }
981}
982
983// running -> waiting
984//
985// to_dead: false
986// th will run dedicated task.
987// run another ready thread.
988// to_dead: true
989// th will be dead.
990// run another ready thread.
991static void
992thread_sched_to_waiting_common0(struct rb_thread_sched *sched, rb_thread_t *th, bool to_dead)
993{
994 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
995
996 if (!to_dead) native_thread_dedicated_inc(th->vm, th->ractor, th->nt);
997
998 RUBY_DEBUG_LOG("%sth:%u", to_dead ? "to_dead " : "", rb_th_serial(th));
999
1000 bool can_switch = to_dead ? !th_has_dedicated_nt(th) : false;
1001 thread_sched_wakeup_next_thread(sched, th, can_switch);
1002}
1003
1004// running -> dead (locked)
1005static void
1006thread_sched_to_dead_common(struct rb_thread_sched *sched, rb_thread_t *th)
1007{
1008 RUBY_DEBUG_LOG("dedicated:%d", th->nt->dedicated);
1009 thread_sched_to_waiting_common0(sched, th, true);
1010 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_EXITED, th);
1011}
1012
1013// running -> dead
1014static void
1015thread_sched_to_dead(struct rb_thread_sched *sched, rb_thread_t *th)
1016{
1017 thread_sched_lock(sched, th);
1018 {
1019 thread_sched_to_dead_common(sched, th);
1020 }
1021 thread_sched_unlock(sched, th);
1022}
1023
1024// running -> waiting (locked)
1025//
1026// This thread will run a dedicated task (th->nt->dedicated++).
1027static void
1028thread_sched_to_waiting_common(struct rb_thread_sched *sched, rb_thread_t *th)
1029{
1030 RUBY_DEBUG_LOG("dedicated:%d", th->nt->dedicated);
1031 thread_sched_to_waiting_common0(sched, th, false);
1032}
1033
1034// running -> waiting
1035//
1036// This thread will run a dedicated task.
1037static void
1038thread_sched_to_waiting(struct rb_thread_sched *sched, rb_thread_t *th)
1039{
1040 thread_sched_lock(sched, th);
1041 {
1042 thread_sched_to_waiting_common(sched, th);
1043 }
1044 thread_sched_unlock(sched, th);
1045}
1046
1047// mini utility func
1048static void
1049setup_ubf(rb_thread_t *th, rb_unblock_function_t *func, void *arg)
1050{
1051 rb_native_mutex_lock(&th->interrupt_lock);
1052 {
1053 th->unblock.func = func;
1054 th->unblock.arg = arg;
1055 }
1056 rb_native_mutex_unlock(&th->interrupt_lock);
1057}
1058
1059static void
1060ubf_waiting(void *ptr)
1061{
1062 rb_thread_t *th = (rb_thread_t *)ptr;
1063 struct rb_thread_sched *sched = TH_SCHED(th);
1064
1065 // only once. it is safe because th->interrupt_lock is already acquired.
1066 th->unblock.func = NULL;
1067 th->unblock.arg = NULL;
1068
1069 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
1070
1071 thread_sched_lock(sched, th);
1072 {
1073 if (sched->running == th) {
1074 // not sleeping yet.
1075 }
1076 else {
1077 thread_sched_to_ready_common(sched, th, true, false);
1078 }
1079 }
1080 thread_sched_unlock(sched, th);
1081}
1082
1083// running -> waiting
1084//
1085// This thread will sleep until another thread wakes it up.
1086static void
1087thread_sched_to_waiting_until_wakeup(struct rb_thread_sched *sched, rb_thread_t *th)
1088{
1089 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
1090
1091 RB_VM_SAVE_MACHINE_CONTEXT(th);
1092 setup_ubf(th, ubf_waiting, (void *)th);
1093
1094 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
1095
1096 thread_sched_lock(sched, th);
1097 {
1098 if (!RUBY_VM_INTERRUPTED(th->ec)) {
1099 bool can_direct_transfer = !th_has_dedicated_nt(th);
1100 thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
1101 thread_sched_wait_running_turn(sched, th, can_direct_transfer);
1102 }
1103 else {
1104 RUBY_DEBUG_LOG("th:%u interrupted", rb_th_serial(th));
1105 }
1106 }
1107 thread_sched_unlock(sched, th);
1108
1109 setup_ubf(th, NULL, NULL);
1110}
1111
1112// run another thread in the ready queue.
1113// continue to run if there are no ready threads.
1114static void
1115thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
1116{
1117 RUBY_DEBUG_LOG("th:%d sched->readyq_cnt:%d", (int)th->serial, sched->readyq_cnt);
1118
1119 thread_sched_lock(sched, th);
1120 {
1121 if (!ccan_list_empty(&sched->readyq)) {
1122 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
1123 thread_sched_wakeup_next_thread(sched, th, !th_has_dedicated_nt(th));
1124 bool can_direct_transfer = !th_has_dedicated_nt(th);
1125 thread_sched_to_ready_common(sched, th, false, can_direct_transfer);
1126 thread_sched_wait_running_turn(sched, th, can_direct_transfer);
1127 }
1128 else {
1129 VM_ASSERT(sched->readyq_cnt == 0);
1130 }
1131 }
1132 thread_sched_unlock(sched, th);
1133}
1134
1135void
1136rb_thread_sched_init(struct rb_thread_sched *sched, bool atfork)
1137{
1138 rb_native_mutex_initialize(&sched->lock_);
1139
1140#if VM_CHECK_MODE
1141 sched->lock_owner = NULL;
1142#endif
1143
1144 ccan_list_head_init(&sched->readyq);
1145 sched->readyq_cnt = 0;
1146
1147#if USE_MN_THREADS
1148 if (!atfork) sched->enable_mn_threads = true; // MN is enabled on Ractors
1149#endif
1150}
1151
1152static void
1153thread_sched_switch0(struct coroutine_context *current_cont, rb_thread_t *next_th, struct rb_native_thread *nt)
1154{
1155 VM_ASSERT(!nt->dedicated);
1156 VM_ASSERT(next_th->nt == NULL);
1157
1158 RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th));
1159
1160 ruby_thread_set_native(next_th);
1161 native_thread_assign(nt, next_th);
1162 coroutine_transfer(current_cont, next_th->sched.context);
1163}
1164
1165static void
1166thread_sched_switch(rb_thread_t *cth, rb_thread_t *next_th)
1167{
1168 struct rb_native_thread *nt = cth->nt;
1169 native_thread_assign(NULL, cth);
1170 RUBY_DEBUG_LOG("th:%u->%u on nt:%d", rb_th_serial(cth), rb_th_serial(next_th), nt->serial);
1171 thread_sched_switch0(cth->sched.context, next_th, nt);
1172}
1173
1174#if VM_CHECK_MODE > 0
1176static unsigned int
1177grq_size(rb_vm_t *vm, rb_ractor_t *cr)
1178{
1179 ASSERT_ractor_sched_locked(vm, cr);
1180
1181 rb_ractor_t *r, *prev_r = NULL;
1182 unsigned int i = 0;
1183
1184 ccan_list_for_each(&vm->ractor.sched.grq, r, threads.sched.grq_node) {
1185 i++;
1186
1187 VM_ASSERT(r != prev_r);
1188 prev_r = r;
1189 }
1190 return i;
1191}
1192#endif
1193
1194static void
1195ractor_sched_enq(rb_vm_t *vm, rb_ractor_t *r)
1196{
1197 struct rb_thread_sched *sched = &r->threads.sched;
1198 rb_ractor_t *cr = NULL; // timer thread can call this function
1199
1200 VM_ASSERT(sched->running != NULL);
1201 VM_ASSERT(sched->running->nt == NULL);
1202
1203 ractor_sched_lock(vm, cr);
1204 {
1205#if VM_CHECK_MODE > 0
1206 // check if grq contains r
1207 rb_ractor_t *tr;
1208 ccan_list_for_each(&vm->ractor.sched.grq, tr, threads.sched.grq_node) {
1209 VM_ASSERT(r != tr);
1210 }
1211#endif
1212
1213 ccan_list_add_tail(&vm->ractor.sched.grq, &sched->grq_node);
1214 vm->ractor.sched.grq_cnt++;
1215 VM_ASSERT(grq_size(vm, cr) == vm->ractor.sched.grq_cnt);
1216
1217 RUBY_DEBUG_LOG("r:%u th:%u grq_cnt:%u", rb_ractor_id(r), rb_th_serial(sched->running), vm->ractor.sched.grq_cnt);
1218
1219 rb_native_cond_signal(&vm->ractor.sched.cond);
1220
1221 // ractor_sched_dump(vm);
1222 }
1223 ractor_sched_unlock(vm, cr);
1224}
1225
1226
1227#ifndef SNT_KEEP_SECONDS
1228#define SNT_KEEP_SECONDS 0
1229#endif
1230
1231#ifndef MINIMUM_SNT
1232// make at least MINIMUM_SNT snts for debug.
1233#define MINIMUM_SNT 0
1234#endif
1235
1236static rb_ractor_t *
1237ractor_sched_deq(rb_vm_t *vm, rb_ractor_t *cr)
1238{
1239 rb_ractor_t *r;
1240
1241 ractor_sched_lock(vm, cr);
1242 {
1243 RUBY_DEBUG_LOG("empty? %d", ccan_list_empty(&vm->ractor.sched.grq));
1244 // ractor_sched_dump(vm);
1245
1246 VM_ASSERT(rb_current_execution_context(false) == NULL);
1247 VM_ASSERT(grq_size(vm, cr) == vm->ractor.sched.grq_cnt);
1248
1249 while ((r = ccan_list_pop(&vm->ractor.sched.grq, rb_ractor_t, threads.sched.grq_node)) == NULL) {
1250 RUBY_DEBUG_LOG("wait grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1251
1252#if SNT_KEEP_SECONDS > 0
1253 rb_hrtime_t abs = rb_hrtime_add(rb_hrtime_now(), RB_HRTIME_PER_SEC * SNT_KEEP_SECONDS);
1254 if (native_cond_timedwait(&vm->ractor.sched.cond, &vm->ractor.sched.lock, &abs) == ETIMEDOUT) {
1255 RUBY_DEBUG_LOG("timeout, grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1256 VM_ASSERT(r == NULL);
1257 vm->ractor.sched.snt_cnt--;
1258 vm->ractor.sched.running_cnt--;
1259 break;
1260 }
1261 else {
1262 RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1263 }
1264#else
1265 ractor_sched_set_unlocked(vm, cr);
1266 rb_native_cond_wait(&vm->ractor.sched.cond, &vm->ractor.sched.lock);
1267 ractor_sched_set_locked(vm, cr);
1268
1269 RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1270#endif
1271 }
1272
1273 VM_ASSERT(rb_current_execution_context(false) == NULL);
1274
1275 if (r) {
1276 VM_ASSERT(vm->ractor.sched.grq_cnt > 0);
1277 vm->ractor.sched.grq_cnt--;
1278 RUBY_DEBUG_LOG("r:%d grq_cnt:%u", (int)rb_ractor_id(r), vm->ractor.sched.grq_cnt);
1279 }
1280 else {
1281 VM_ASSERT(SNT_KEEP_SECONDS > 0);
1282 // timeout
1283 }
1284 }
1285 ractor_sched_unlock(vm, cr);
1286
1287 return r;
1288}
1289
1290void rb_ractor_lock_self(rb_ractor_t *r);
1291void rb_ractor_unlock_self(rb_ractor_t *r);
1292
1293void
1294rb_ractor_sched_sleep(rb_execution_context_t *ec, rb_ractor_t *cr, rb_unblock_function_t *ubf)
1295{
1296 // ractor lock of cr is acquired
 1297 // cr is in the sleeping status
1298 rb_thread_t *th = rb_ec_thread_ptr(ec);
1299 struct rb_thread_sched *sched = TH_SCHED(th);
1300 cr->sync.wait.waiting_thread = th; // TODO: multi-thread
1301
1302 setup_ubf(th, ubf, (void *)cr);
1303
1304 thread_sched_lock(sched, th);
1305 {
1306 rb_ractor_unlock_self(cr);
1307 {
1308 if (RUBY_VM_INTERRUPTED(th->ec)) {
1309 RUBY_DEBUG_LOG("interrupted");
1310 }
1311 else if (cr->sync.wait.wakeup_status != wakeup_none) {
1312 RUBY_DEBUG_LOG("awaken:%d", (int)cr->sync.wait.wakeup_status);
1313 }
1314 else {
1315 // sleep
1316 RB_VM_SAVE_MACHINE_CONTEXT(th);
1317 th->status = THREAD_STOPPED_FOREVER;
1318
1319 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
1320
1321 bool can_direct_transfer = !th_has_dedicated_nt(th);
1322 thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
1323 thread_sched_wait_running_turn(sched, th, can_direct_transfer);
1324 th->status = THREAD_RUNNABLE;
1325 // wakeup
1326 }
1327 }
1328 }
1329 thread_sched_unlock(sched, th);
1330
1331 setup_ubf(th, NULL, NULL);
1332
1333 rb_ractor_lock_self(cr);
1334 cr->sync.wait.waiting_thread = NULL;
1335}
1336
1337void
1338rb_ractor_sched_wakeup(rb_ractor_t *r)
1339{
1340 rb_thread_t *r_th = r->sync.wait.waiting_thread;
1341 // ractor lock of r is acquired
1342 struct rb_thread_sched *sched = TH_SCHED(r_th);
1343
1344 VM_ASSERT(r->sync.wait.wakeup_status != 0);
1345
1346 thread_sched_lock(sched, r_th);
1347 {
1348 if (r_th->status == THREAD_STOPPED_FOREVER) {
1349 thread_sched_to_ready_common(sched, r_th, true, false);
1350 }
1351 }
1352 thread_sched_unlock(sched, r_th);
1353}
1354
1355static bool
1356ractor_sched_barrier_completed_p(rb_vm_t *vm)
1357{
1358 RUBY_DEBUG_LOG("run:%u wait:%u", vm->ractor.sched.running_cnt, vm->ractor.sched.barrier_waiting_cnt);
1359 VM_ASSERT(vm->ractor.sched.running_cnt - 1 >= vm->ractor.sched.barrier_waiting_cnt);
1360 return (vm->ractor.sched.running_cnt - vm->ractor.sched.barrier_waiting_cnt) == 1;
1361}
1362
1363void
1364rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr)
1365{
1366 VM_ASSERT(cr == GET_RACTOR());
1367 VM_ASSERT(vm->ractor.sync.lock_owner == cr); // VM is locked
1368 VM_ASSERT(!vm->ractor.sched.barrier_waiting);
1369 VM_ASSERT(vm->ractor.sched.barrier_waiting_cnt == 0);
1370
1371 RUBY_DEBUG_LOG("start serial:%u", vm->ractor.sched.barrier_serial);
1372
1373 unsigned int lock_rec;
1374
1375 ractor_sched_lock(vm, cr);
1376 {
1377 vm->ractor.sched.barrier_waiting = true;
1378
1379 // release VM lock
1380 lock_rec = vm->ractor.sync.lock_rec;
1381 vm->ractor.sync.lock_rec = 0;
1382 vm->ractor.sync.lock_owner = NULL;
1383 rb_native_mutex_unlock(&vm->ractor.sync.lock);
1384 {
1385 // interrupts all running threads
1386 rb_thread_t *ith;
1387 ccan_list_for_each(&vm->ractor.sched.running_threads, ith, sched.node.running_threads) {
1388 if (ith->ractor != cr) {
1389 RUBY_DEBUG_LOG("barrier int:%u", rb_th_serial(ith));
1390 RUBY_VM_SET_VM_BARRIER_INTERRUPT(ith->ec);
1391 }
1392 }
1393
1394 // wait for other ractors
1395 while (!ractor_sched_barrier_completed_p(vm)) {
1396 ractor_sched_set_unlocked(vm, cr);
1397 rb_native_cond_wait(&vm->ractor.sched.barrier_complete_cond, &vm->ractor.sched.lock);
1398 ractor_sched_set_locked(vm, cr);
1399 }
1400 }
1401 }
1402 ractor_sched_unlock(vm, cr);
1403
1404 // acquire VM lock
1405 rb_native_mutex_lock(&vm->ractor.sync.lock);
1406 vm->ractor.sync.lock_rec = lock_rec;
1407 vm->ractor.sync.lock_owner = cr;
1408
 1409 RUBY_DEBUG_LOG("completed serial:%u", vm->ractor.sched.barrier_serial);
1410
1411 ractor_sched_lock(vm, cr);
1412 {
1413 vm->ractor.sched.barrier_waiting = false;
1414 vm->ractor.sched.barrier_serial++;
1415 vm->ractor.sched.barrier_waiting_cnt = 0;
1416 rb_native_cond_broadcast(&vm->ractor.sched.barrier_release_cond);
1417 }
1418 ractor_sched_unlock(vm, cr);
1419}
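/*
 * Barrier protocol in short: the initiating ractor sets barrier_waiting,
 * interrupts every other running thread, and sleeps on barrier_complete_cond
 * until running_cnt - barrier_waiting_cnt == 1 (only itself left running).
 * Joining threads increment barrier_waiting_cnt and sleep on
 * barrier_release_cond until barrier_serial changes, which happens when the
 * initiator finishes, bumps the serial, and broadcasts the release.
 */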
1420
1421static void
1422ractor_sched_barrier_join_signal_locked(rb_vm_t *vm)
1423{
1424 if (ractor_sched_barrier_completed_p(vm)) {
1425 rb_native_cond_signal(&vm->ractor.sched.barrier_complete_cond);
1426 }
1427}
1428
1429static void
1430ractor_sched_barrier_join_wait_locked(rb_vm_t *vm, rb_thread_t *th)
1431{
1432 VM_ASSERT(vm->ractor.sched.barrier_waiting);
1433
1434 unsigned int barrier_serial = vm->ractor.sched.barrier_serial;
1435
1436 while (vm->ractor.sched.barrier_serial == barrier_serial) {
1437 RUBY_DEBUG_LOG("sleep serial:%u", barrier_serial);
1438 RB_VM_SAVE_MACHINE_CONTEXT(th);
1439
1440 rb_ractor_t *cr = th->ractor;
1441 ractor_sched_set_unlocked(vm, cr);
1442 rb_native_cond_wait(&vm->ractor.sched.barrier_release_cond, &vm->ractor.sched.lock);
1443 ractor_sched_set_locked(vm, cr);
1444
1445 RUBY_DEBUG_LOG("wakeup serial:%u", barrier_serial);
1446 }
1447}
1448
1449void
1450rb_ractor_sched_barrier_join(rb_vm_t *vm, rb_ractor_t *cr)
1451{
1452 VM_ASSERT(cr->threads.sched.running != NULL); // running ractor
1453 VM_ASSERT(cr == GET_RACTOR());
1454 VM_ASSERT(vm->ractor.sync.lock_owner == NULL); // VM is locked, but owner == NULL
1455 VM_ASSERT(vm->ractor.sched.barrier_waiting); // VM needs barrier sync
1456
1457#if USE_RUBY_DEBUG_LOG || VM_CHECK_MODE > 0
1458 unsigned int barrier_serial = vm->ractor.sched.barrier_serial;
1459#endif
1460
1461 RUBY_DEBUG_LOG("join");
1462
1463 rb_native_mutex_unlock(&vm->ractor.sync.lock);
1464 {
1465 VM_ASSERT(vm->ractor.sched.barrier_waiting); // VM needs barrier sync
1466 VM_ASSERT(vm->ractor.sched.barrier_serial == barrier_serial);
1467
1468 ractor_sched_lock(vm, cr);
1469 {
1470 // running_cnt
1471 vm->ractor.sched.barrier_waiting_cnt++;
1472 RUBY_DEBUG_LOG("waiting_cnt:%u serial:%u", vm->ractor.sched.barrier_waiting_cnt, barrier_serial);
1473
1474 ractor_sched_barrier_join_signal_locked(vm);
1475 ractor_sched_barrier_join_wait_locked(vm, cr->threads.sched.running);
1476 }
1477 ractor_sched_unlock(vm, cr);
1478 }
1479
1480 rb_native_mutex_lock(&vm->ractor.sync.lock);
1481 // VM locked here
1482}
1483
1484#if 0
1485// TODO
1486
1487static void clear_thread_cache_altstack(void);
1488
1489static void
1490rb_thread_sched_destroy(struct rb_thread_sched *sched)
1491{
1492 /*
1493 * only called once at VM shutdown (not atfork), another thread
1494 * may still grab vm->gvl.lock when calling gvl_release at
1495 * the end of thread_start_func_2
1496 */
1497 if (0) {
1498 rb_native_mutex_destroy(&sched->lock);
1499 }
1500 clear_thread_cache_altstack();
1501}
1502#endif
1503
1504#ifdef RB_THREAD_T_HAS_NATIVE_ID
1505static int
1506get_native_thread_id(void)
1507{
1508#ifdef __linux__
1509 return (int)syscall(SYS_gettid);
1510#elif defined(__FreeBSD__)
1511 return pthread_getthreadid_np();
1512#endif
1513}
1514#endif
1515
1516#if defined(HAVE_WORKING_FORK)
1517static void
1518thread_sched_atfork(struct rb_thread_sched *sched)
1519{
1520 current_fork_gen++;
1521 rb_thread_sched_init(sched, true);
1522 rb_thread_t *th = GET_THREAD();
1523 rb_vm_t *vm = GET_VM();
1524
1525 if (th_has_dedicated_nt(th)) {
1526 vm->ractor.sched.snt_cnt = 0;
1527 }
1528 else {
1529 vm->ractor.sched.snt_cnt = 1;
1530 }
1531 vm->ractor.sched.running_cnt = 0;
1532
1533 rb_native_mutex_initialize(&vm->ractor.sched.lock);
1534 // rb_native_cond_destroy(&vm->ractor.sched.cond);
1535 rb_native_cond_initialize(&vm->ractor.sched.cond);
1536 rb_native_cond_initialize(&vm->ractor.sched.barrier_complete_cond);
1537 rb_native_cond_initialize(&vm->ractor.sched.barrier_release_cond);
1538
1539 ccan_list_head_init(&vm->ractor.sched.grq);
1540 ccan_list_head_init(&vm->ractor.sched.timeslice_threads);
1541 ccan_list_head_init(&vm->ractor.sched.running_threads);
1542
1543 VM_ASSERT(sched->is_running);
1544 sched->is_running_timeslice = false;
1545
1546 if (sched->running != th) {
1547 thread_sched_to_running(sched, th);
1548 }
1549 else {
1550 thread_sched_setup_running_threads(sched, th->ractor, vm, th, NULL, NULL);
1551 }
1552
1553#ifdef RB_THREAD_T_HAS_NATIVE_ID
1554 if (th->nt) {
1555 th->nt->tid = get_native_thread_id();
1556 }
1557#endif
1558}
1559
1560#endif
1561
1562#ifdef RB_THREAD_LOCAL_SPECIFIER
1563static RB_THREAD_LOCAL_SPECIFIER rb_thread_t *ruby_native_thread;
1564#else
1565static pthread_key_t ruby_native_thread_key;
1566#endif
1567
1568static void
1569null_func(int i)
1570{
1571 /* null */
1572 // This function can be called from signal handler
1573 // RUBY_DEBUG_LOG("i:%d", i);
1574}
1575
1576rb_thread_t *
1577ruby_thread_from_native(void)
1578{
1579#ifdef RB_THREAD_LOCAL_SPECIFIER
1580 return ruby_native_thread;
1581#else
1582 return pthread_getspecific(ruby_native_thread_key);
1583#endif
1584}
1585
1586int
1587ruby_thread_set_native(rb_thread_t *th)
1588{
1589 if (th) {
1590#ifdef USE_UBF_LIST
1591 ccan_list_node_init(&th->sched.node.ubf);
1592#endif
1593 }
1594
1595 // setup TLS
1596
1597 if (th && th->ec) {
1598 rb_ractor_set_current_ec(th->ractor, th->ec);
1599 }
1600#ifdef RB_THREAD_LOCAL_SPECIFIER
1601 ruby_native_thread = th;
1602 return 1;
1603#else
1604 return pthread_setspecific(ruby_native_thread_key, th) == 0;
1605#endif
1606}
1607
1608static void native_thread_setup(struct rb_native_thread *nt);
1609static void native_thread_setup_on_thread(struct rb_native_thread *nt);
1610
1611void
1612Init_native_thread(rb_thread_t *main_th)
1613{
1614#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK)
1615 if (condattr_monotonic) {
1616 int r = pthread_condattr_init(condattr_monotonic);
1617 if (r == 0) {
1618 r = pthread_condattr_setclock(condattr_monotonic, CLOCK_MONOTONIC);
1619 }
1620 if (r) condattr_monotonic = NULL;
1621 }
1622#endif
1623
1624#ifndef RB_THREAD_LOCAL_SPECIFIER
1625 if (pthread_key_create(&ruby_native_thread_key, 0) == EAGAIN) {
1626 rb_bug("pthread_key_create failed (ruby_native_thread_key)");
1627 }
1628 if (pthread_key_create(&ruby_current_ec_key, 0) == EAGAIN) {
1629 rb_bug("pthread_key_create failed (ruby_current_ec_key)");
1630 }
1631#endif
1632 ruby_posix_signal(SIGVTALRM, null_func);
1633
1634 // setup vm
1635 rb_vm_t *vm = main_th->vm;
1636 rb_native_mutex_initialize(&vm->ractor.sched.lock);
1637 rb_native_cond_initialize(&vm->ractor.sched.cond);
1638 rb_native_cond_initialize(&vm->ractor.sched.barrier_complete_cond);
1639 rb_native_cond_initialize(&vm->ractor.sched.barrier_release_cond);
1640
1641 ccan_list_head_init(&vm->ractor.sched.grq);
1642 ccan_list_head_init(&vm->ractor.sched.timeslice_threads);
1643 ccan_list_head_init(&vm->ractor.sched.running_threads);
1644
1645 // setup main thread
1646 main_th->nt->thread_id = pthread_self();
1647 main_th->nt->serial = 1;
1648#ifdef RUBY_NT_SERIAL
1649 ruby_nt_serial = 1;
1650#endif
1651 ruby_thread_set_native(main_th);
1652 native_thread_setup(main_th->nt);
1653 native_thread_setup_on_thread(main_th->nt);
1654
1655 TH_SCHED(main_th)->running = main_th;
1656 main_th->has_dedicated_nt = 1;
1657
1658 thread_sched_setup_running_threads(TH_SCHED(main_th), main_th->ractor, vm, main_th, NULL, NULL);
1659
1660 // setup main NT
1661 main_th->nt->dedicated = 1;
1662 main_th->nt->vm = vm;
1663
1664 // setup mn
1665 vm->ractor.sched.dnt_cnt = 1;
1666}
1667
1668extern int ruby_mn_threads_enabled;
1669
1670void
1671ruby_mn_threads_params(void)
1672{
1673 rb_vm_t *vm = GET_VM();
1674 rb_ractor_t *main_ractor = GET_RACTOR();
1675
1676 const char *mn_threads_cstr = getenv("RUBY_MN_THREADS");
1677 bool enable_mn_threads = false;
1678
1679 if (USE_MN_THREADS && mn_threads_cstr && (enable_mn_threads = atoi(mn_threads_cstr) > 0)) {
1680 // enabled
1681 ruby_mn_threads_enabled = 1;
1682 }
1683 main_ractor->threads.sched.enable_mn_threads = enable_mn_threads;
1684
1685 const char *max_cpu_cstr = getenv("RUBY_MAX_CPU");
1686 const int default_max_cpu = 8; // TODO: CPU num?
1687 int max_cpu = default_max_cpu;
1688
 1689 if (USE_MN_THREADS && max_cpu_cstr && atoi(max_cpu_cstr) > 0) {
 1690 max_cpu = atoi(max_cpu_cstr);
 1691 }
1692
1693 vm->ractor.sched.max_cpu = max_cpu;
1694}
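/*
 * Environment knobs read here (effectively no-ops unless the build enables
 * USE_MN_THREADS):
 *   RUBY_MN_THREADS=1  -- enable M:N scheduling on the main Ractor
 *                         (non-main Ractors use it regardless, see
 *                         rb_thread_sched_init);
 *   RUBY_MAX_CPU=N     -- cap on native threads used for M:N scheduling
 *                         (default 8).
 * e.g.  RUBY_MN_THREADS=1 RUBY_MAX_CPU=4 ruby app.rb
 */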
1695
1696static void
1697native_thread_dedicated_inc(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt)
1698{
1699 RUBY_DEBUG_LOG("nt:%d %d->%d", nt->serial, nt->dedicated, nt->dedicated + 1);
1700
1701 if (nt->dedicated == 0) {
1702 ractor_sched_lock(vm, cr);
1703 {
1704 vm->ractor.sched.snt_cnt--;
1705 vm->ractor.sched.dnt_cnt++;
1706 }
1707 ractor_sched_unlock(vm, cr);
1708 }
1709
1710 nt->dedicated++;
1711}
1712
1713static void
1714native_thread_dedicated_dec(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt)
1715{
1716 RUBY_DEBUG_LOG("nt:%d %d->%d", nt->serial, nt->dedicated, nt->dedicated - 1);
1717 VM_ASSERT(nt->dedicated > 0);
1718 nt->dedicated--;
1719
1720 if (nt->dedicated == 0) {
1721 ractor_sched_lock(vm, cr);
1722 {
1723 nt->vm->ractor.sched.snt_cnt++;
1724 nt->vm->ractor.sched.dnt_cnt--;
1725 }
1726 ractor_sched_unlock(vm, cr);
1727 }
1728}
1729
1730static void
1731native_thread_assign(struct rb_native_thread *nt, rb_thread_t *th)
1732{
1733#if USE_RUBY_DEBUG_LOG
1734 if (nt) {
1735 if (th->nt) {
1736 RUBY_DEBUG_LOG("th:%d nt:%d->%d", (int)th->serial, (int)th->nt->serial, (int)nt->serial);
1737 }
1738 else {
1739 RUBY_DEBUG_LOG("th:%d nt:NULL->%d", (int)th->serial, (int)nt->serial);
1740 }
1741 }
1742 else {
1743 if (th->nt) {
1744 RUBY_DEBUG_LOG("th:%d nt:%d->NULL", (int)th->serial, (int)th->nt->serial);
1745 }
1746 else {
1747 RUBY_DEBUG_LOG("th:%d nt:NULL->NULL", (int)th->serial);
1748 }
1749 }
1750#endif
1751
1752 th->nt = nt;
1753}
1754
1755static void
1756native_thread_destroy(struct rb_native_thread *nt)
1757{
1758 if (nt) {
1759 rb_native_cond_destroy(&nt->cond.readyq);
1760
1761 if (&nt->cond.readyq != &nt->cond.intr) {
1762 rb_native_cond_destroy(&nt->cond.intr);
1763 }
1764
1765 RB_ALTSTACK_FREE(nt->altstack);
1766 ruby_xfree(nt->nt_context);
1767 ruby_xfree(nt);
1768 }
1769}
1770
1771#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
1772#define STACKADDR_AVAILABLE 1
1773#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
1774#define STACKADDR_AVAILABLE 1
1775#undef MAINSTACKADDR_AVAILABLE
1776#define MAINSTACKADDR_AVAILABLE 1
1777void *pthread_get_stackaddr_np(pthread_t);
1778size_t pthread_get_stacksize_np(pthread_t);
1779#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
1780#define STACKADDR_AVAILABLE 1
1781#elif defined HAVE_PTHREAD_GETTHRDS_NP
1782#define STACKADDR_AVAILABLE 1
1783#elif defined __HAIKU__
1784#define STACKADDR_AVAILABLE 1
1785#endif
1786
1787#ifndef MAINSTACKADDR_AVAILABLE
1788# ifdef STACKADDR_AVAILABLE
1789# define MAINSTACKADDR_AVAILABLE 1
1790# else
1791# define MAINSTACKADDR_AVAILABLE 0
1792# endif
1793#endif
1794#if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack)
1795# define get_main_stack(addr, size) get_stack(addr, size)
1796#endif
1797
1798#ifdef STACKADDR_AVAILABLE
1799/*
1800 * Get the initial address and size of current thread's stack
1801 */
1802static int
1803get_stack(void **addr, size_t *size)
1804{
1805#define CHECK_ERR(expr) \
1806 {int err = (expr); if (err) return err;}
1807#ifdef HAVE_PTHREAD_GETATTR_NP /* Linux */
1808 pthread_attr_t attr;
1809 size_t guard = 0;
1810 STACK_GROW_DIR_DETECTION;
1811 CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
1812# ifdef HAVE_PTHREAD_ATTR_GETSTACK
1813 CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
1814 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1815# else
1816 CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
1817 CHECK_ERR(pthread_attr_getstacksize(&attr, size));
1818# endif
1819# ifdef HAVE_PTHREAD_ATTR_GETGUARDSIZE
1820 CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
1821# else
1822 guard = getpagesize();
1823# endif
1824 *size -= guard;
1825 pthread_attr_destroy(&attr);
1826#elif defined HAVE_PTHREAD_ATTR_GET_NP /* FreeBSD, DragonFly BSD, NetBSD */
1827 pthread_attr_t attr;
1828 CHECK_ERR(pthread_attr_init(&attr));
1829 CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
1830# ifdef HAVE_PTHREAD_ATTR_GETSTACK
1831 CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
1832# else
1833 CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
1834 CHECK_ERR(pthread_attr_getstacksize(&attr, size));
1835# endif
1836 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1837 pthread_attr_destroy(&attr);
1838#elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP) /* MacOS X */
1839 pthread_t th = pthread_self();
1840 *addr = pthread_get_stackaddr_np(th);
1841 *size = pthread_get_stacksize_np(th);
1842#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
1843 stack_t stk;
1844# if defined HAVE_THR_STKSEGMENT /* Solaris */
1845 CHECK_ERR(thr_stksegment(&stk));
1846# else /* OpenBSD */
1847 CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
1848# endif
1849 *addr = stk.ss_sp;
1850 *size = stk.ss_size;
1851#elif defined HAVE_PTHREAD_GETTHRDS_NP /* AIX */
1852 pthread_t th = pthread_self();
1853 struct __pthrdsinfo thinfo;
1854 char reg[256];
1855 int regsiz=sizeof(reg);
1856 CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
1857 &thinfo, sizeof(thinfo),
1858 &reg, &regsiz));
1859 *addr = thinfo.__pi_stackaddr;
1860 /* Must not use thinfo.__pi_stacksize for size.
1861 It is around 3KB smaller than the correct size
1862 calculated by thinfo.__pi_stackend - thinfo.__pi_stackaddr. */
1863 *size = thinfo.__pi_stackend - thinfo.__pi_stackaddr;
1864 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1865#elif defined __HAIKU__
1866 thread_info info;
1867 STACK_GROW_DIR_DETECTION;
1868 CHECK_ERR(get_thread_info(find_thread(NULL), &info));
1869 *addr = info.stack_base;
1870 *size = (uintptr_t)info.stack_end - (uintptr_t)info.stack_base;
1871 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1872#else
1873#error STACKADDR_AVAILABLE is defined but not implemented.
1874#endif
1875 return 0;
1876#undef CHECK_ERR
1877}
1878#endif
1879
1880static struct {
1881 rb_nativethread_id_t id;
1882 size_t stack_maxsize;
1883 VALUE *stack_start;
1884} native_main_thread;
1885
1886#ifdef STACK_END_ADDRESS
1887extern void *STACK_END_ADDRESS;
1888#endif
1889
1890enum {
1891 RUBY_STACK_SPACE_LIMIT = 1024 * 1024, /* 1024KB */
1892 RUBY_STACK_SPACE_RATIO = 5
1893};
1894
1895static size_t
1896space_size(size_t stack_size)
1897{
1898 size_t space_size = stack_size / RUBY_STACK_SPACE_RATIO;
1899 if (space_size > RUBY_STACK_SPACE_LIMIT) {
1900 return RUBY_STACK_SPACE_LIMIT;
1901 }
1902 else {
1903 return space_size;
1904 }
1905}
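/*
 * space_size() computes a safety margin kept free at the end of the machine
 * stack for overflow detection: 1/RUBY_STACK_SPACE_RATIO (20%) of the stack,
 * capped at 1MB.  E.g. an 8MB default pthread stack gets the 1MB cap, while a
 * 1MB stack gets a 200KB margin.
 */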
1906
1907#ifdef __linux__
1908static __attribute__((noinline)) void
1909reserve_stack(volatile char *limit, size_t size)
1910{
1911# ifdef C_ALLOCA
1912# error needs alloca()
1913# endif
1914 struct rlimit rl;
1915 volatile char buf[0x100];
1916 enum {stack_check_margin = 0x1000}; /* for -fstack-check */
1917
1918 STACK_GROW_DIR_DETECTION;
1919
1920 if (!getrlimit(RLIMIT_STACK, &rl) && rl.rlim_cur == RLIM_INFINITY)
1921 return;
1922
1923 if (size < stack_check_margin) return;
1924 size -= stack_check_margin;
1925
1926 size -= sizeof(buf); /* margin */
1927 if (IS_STACK_DIR_UPPER()) {
1928 const volatile char *end = buf + sizeof(buf);
1929 limit += size;
1930 if (limit > end) {
1931 /* |<-bottom (=limit(a)) top->|
1932 * | .. |<-buf 256B |<-end | stack check |
1933 * | 256B | =size= | margin (4KB)|
1934 * | =size= limit(b)->| 256B | |
1935 * | | alloca(sz) | | |
1936 * | .. |<-buf |<-limit(c) [sz-1]->0> | |
1937 */
1938 size_t sz = limit - end;
1939 limit = alloca(sz);
1940 limit[sz-1] = 0;
1941 }
1942 }
1943 else {
1944 limit -= size;
1945 if (buf > limit) {
1946 /* |<-top (=limit(a)) bottom->|
1947 * | .. | 256B buf->| | stack check |
1948 * | 256B | =size= | margin (4KB)|
1949 * | =size= limit(b)->| 256B | |
1950 * | | alloca(sz) | | |
1951 * | .. | buf->| limit(c)-><0> | |
1952 */
1953 size_t sz = buf - limit;
1954 limit = alloca(sz);
1955 limit[0] = 0;
1956 }
1957 }
1958}
1959#else
1960# define reserve_stack(limit, size) ((void)(limit), (void)(size))
1961#endif
1962
1963#undef ruby_init_stack
1964void
1965ruby_init_stack(volatile VALUE *addr)
1966{
1967 native_main_thread.id = pthread_self();
1968
1969#if MAINSTACKADDR_AVAILABLE
1970 if (native_main_thread.stack_maxsize) return;
1971 {
1972 void* stackaddr;
1973 size_t size;
1974 if (get_main_stack(&stackaddr, &size) == 0) {
1975 native_main_thread.stack_maxsize = size;
1976 native_main_thread.stack_start = stackaddr;
1977 reserve_stack(stackaddr, size);
1978 goto bound_check;
1979 }
1980 }
1981#endif
1982#ifdef STACK_END_ADDRESS
1983 native_main_thread.stack_start = STACK_END_ADDRESS;
1984#else
1985 if (!native_main_thread.stack_start ||
1986 STACK_UPPER((VALUE *)(void *)&addr,
1987 native_main_thread.stack_start > addr,
1988 native_main_thread.stack_start < addr)) {
1989 native_main_thread.stack_start = (VALUE *)addr;
1990 }
1991#endif
1992 {
1993#if defined(HAVE_GETRLIMIT)
1994#if defined(PTHREAD_STACK_DEFAULT)
1995# if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
1996# error "PTHREAD_STACK_DEFAULT is too small"
1997# endif
1998 size_t size = PTHREAD_STACK_DEFAULT;
1999#else
2000 size_t size = RUBY_VM_THREAD_VM_STACK_SIZE;
2001#endif
2002 size_t space;
2003 int pagesize = getpagesize();
2004 struct rlimit rlim;
2005 STACK_GROW_DIR_DETECTION;
2006 if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
2007 size = (size_t)rlim.rlim_cur;
2008 }
2009 addr = native_main_thread.stack_start;
2010 if (IS_STACK_DIR_UPPER()) {
2011 space = ((size_t)((char *)addr + size) / pagesize) * pagesize - (size_t)addr;
2012 }
2013 else {
2014 space = (size_t)addr - ((size_t)((char *)addr - size) / pagesize + 1) * pagesize;
2015 }
2016 native_main_thread.stack_maxsize = space;
2017#endif
2018 }
2019
2020#if MAINSTACKADDR_AVAILABLE
2021 bound_check:
2022#endif
2023    /* If addr is outside the estimated main-thread stack range,        */
2024    /* it should be on a co-routine (alternative) stack. [Feature #2294] */
2025 {
2026 void *start, *end;
2027 STACK_GROW_DIR_DETECTION;
2028
2029 if (IS_STACK_DIR_UPPER()) {
2030 start = native_main_thread.stack_start;
2031 end = (char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
2032 }
2033 else {
2034 start = (char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
2035 end = native_main_thread.stack_start;
2036 }
2037
2038 if ((void *)addr < start || (void *)addr > end) {
2039 /* out of range */
2040 native_main_thread.stack_start = (VALUE *)addr;
2041 native_main_thread.stack_maxsize = 0; /* unknown */
2042 }
2043 }
2044}
2045
2046#define CHECK_ERR(expr) \
2047 {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}
2048
2049static int
2050native_thread_init_stack(rb_thread_t *th)
2051{
2052 rb_nativethread_id_t curr = pthread_self();
2053
2054 if (pthread_equal(curr, native_main_thread.id)) {
2055 th->ec->machine.stack_start = native_main_thread.stack_start;
2056 th->ec->machine.stack_maxsize = native_main_thread.stack_maxsize;
2057 }
2058 else {
2059#ifdef STACKADDR_AVAILABLE
2060 if (th_has_dedicated_nt(th)) {
2061 void *start;
2062 size_t size;
2063
2064 if (get_stack(&start, &size) == 0) {
2065 uintptr_t diff = (uintptr_t)start - (uintptr_t)&curr;
2066 th->ec->machine.stack_start = (VALUE *)&curr;
2067 th->ec->machine.stack_maxsize = size - diff;
2068 }
2069 }
2070#else
2071 rb_raise(rb_eNotImpError, "ruby engine can initialize only in the main thread");
2072#endif
2073 }
2074
2075 return 0;
2076}
2077
2078struct nt_param {
2079 rb_vm_t *vm;
2080 struct rb_native_thread *nt;
2081};
2082
2083static void *
2084nt_start(void *ptr);
2085
2086static int
2087native_thread_create0(struct rb_native_thread *nt)
2088{
2089 int err = 0;
2090 pthread_attr_t attr;
2091
2092 const size_t stack_size = nt->vm->default_params.thread_machine_stack_size;
2093 const size_t space = space_size(stack_size);
2094
2095 nt->machine_stack_maxsize = stack_size - space;
2096
2097#ifdef USE_SIGALTSTACK
2098 nt->altstack = rb_allocate_sigaltstack();
2099#endif
2100
2101 CHECK_ERR(pthread_attr_init(&attr));
2102
2103# ifdef PTHREAD_STACK_MIN
2104 RUBY_DEBUG_LOG("stack size: %lu", (unsigned long)stack_size);
2105 CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
2106# endif
2107
2108# ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
2109 CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
2110# endif
2111 CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
2112
2113 err = pthread_create(&nt->thread_id, &attr, nt_start, nt);
2114
2115 RUBY_DEBUG_LOG("nt:%d err:%d", (int)nt->serial, err);
2116
2117 CHECK_ERR(pthread_attr_destroy(&attr));
2118
2119 return err;
2120}
2121
2122static void
2123native_thread_setup(struct rb_native_thread *nt)
2124{
2125 // init cond
2126 rb_native_cond_initialize(&nt->cond.readyq);
2127
2128 if (&nt->cond.readyq != &nt->cond.intr) {
2129 rb_native_cond_initialize(&nt->cond.intr);
2130 }
2131}
2132
2133static void
2134native_thread_setup_on_thread(struct rb_native_thread *nt)
2135{
2136 // init tid
2137#ifdef RB_THREAD_T_HAS_NATIVE_ID
2138 nt->tid = get_native_thread_id();
2139#endif
2140
2141 // init signal handler
2142 RB_ALTSTACK_INIT(nt->altstack, nt->altstack);
2143}
2144
2145static struct rb_native_thread *
2146native_thread_alloc(void)
2147{
2148 struct rb_native_thread *nt = ZALLOC(struct rb_native_thread);
2149 native_thread_setup(nt);
2150
2151#if USE_MN_THREADS
2152 nt->nt_context = ruby_xmalloc(sizeof(struct coroutine_context));
2153#endif
2154
2155#if USE_RUBY_DEBUG_LOG
2156 static rb_atomic_t nt_serial = 2;
2157 nt->serial = RUBY_ATOMIC_FETCH_ADD(nt_serial, 1);
2158#endif
2159 return nt;
2160}
2161
2162static int
2163native_thread_create_dedicated(rb_thread_t *th)
2164{
2165 th->nt = native_thread_alloc();
2166 th->nt->vm = th->vm;
2167 th->nt->running_thread = th;
2168 th->nt->dedicated = 1;
2169
2170 // vm stack
2171 size_t vm_stack_word_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
2172 void *vm_stack = ruby_xmalloc(vm_stack_word_size * sizeof(VALUE));
2173 th->sched.malloc_stack = true;
2174 rb_ec_initialize_vm_stack(th->ec, vm_stack, vm_stack_word_size);
2175 th->sched.context_stack = vm_stack;
2176
2177 // setup
2178 thread_sched_to_ready(TH_SCHED(th), th);
2179
2180 return native_thread_create0(th->nt);
2181}
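
/* Sizing sketch for the dedicated case above (illustrative arithmetic, not
 * part of the build): the VM stack is measured in VALUE slots, so a 1 MiB
 * thread_vm_stack_size yields 131072 slots on an LP64 platform, while the
 * machine stack size is handed to pthread_attr_setstacksize() separately in
 * native_thread_create0(). */
#if 0
    size_t vm_stack_bytes = 1024 * 1024;                     /* example size */
    size_t vm_stack_words = vm_stack_bytes / sizeof(VALUE);  /* 131072 if VALUE is 8 bytes */
#endif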
2182
2183static void
2184call_thread_start_func_2(rb_thread_t *th)
2185{
2186 native_thread_init_stack(th);
2187 thread_start_func_2(th, th->ec->machine.stack_start);
2188}
2189
2190static void *
2191nt_start(void *ptr)
2192{
2193 struct rb_native_thread *nt = (struct rb_native_thread *)ptr;
2194 rb_vm_t *vm = nt->vm;
2195
2196 native_thread_setup_on_thread(nt);
2197
2198 // init tid
2199#ifdef RB_THREAD_T_HAS_NATIVE_ID
2200 nt->tid = get_native_thread_id();
2201#endif
2202
2203#if USE_RUBY_DEBUG_LOG && defined(RUBY_NT_SERIAL)
2204 ruby_nt_serial = nt->serial;
2205#endif
2206
2207 RUBY_DEBUG_LOG("nt:%u", nt->serial);
2208
2209 if (!nt->dedicated) {
2210 coroutine_initialize_main(nt->nt_context);
2211 }
2212
2213 while (1) {
2214 if (nt->dedicated) {
2215            // wait for the running turn
2216 rb_thread_t *th = nt->running_thread;
2217 struct rb_thread_sched *sched = TH_SCHED(th);
2218
2219 RUBY_DEBUG_LOG("on dedicated th:%u", rb_th_serial(th));
2220 ruby_thread_set_native(th);
2221
2222 thread_sched_lock(sched, th);
2223 {
2224 if (sched->running == th) {
2225 thread_sched_add_running_thread(sched, th);
2226 }
2227 thread_sched_wait_running_turn(sched, th, false);
2228 }
2229 thread_sched_unlock(sched, th);
2230
2231 // start threads
2232 call_thread_start_func_2(th);
2233            break; // TODO: allow switching to the SNT
2234 }
2235 else {
2236 RUBY_DEBUG_LOG("check next");
2237 rb_ractor_t *r = ractor_sched_deq(vm, NULL);
2238
2239 if (r) {
2240 struct rb_thread_sched *sched = &r->threads.sched;
2241
2242 thread_sched_lock(sched, NULL);
2243 {
2244 rb_thread_t *next_th = sched->running;
2245
2246 if (next_th && next_th->nt == NULL) {
2247 RUBY_DEBUG_LOG("nt:%d next_th:%d", (int)nt->serial, (int)next_th->serial);
2248 thread_sched_switch0(nt->nt_context, next_th, nt);
2249 }
2250 else {
2251 RUBY_DEBUG_LOG("no schedulable threads -- next_th:%p", next_th);
2252 }
2253 }
2254 thread_sched_unlock(sched, NULL);
2255 }
2256 else {
2257 // timeout -> deleted.
2258 break;
2259 }
2260 }
2261 }
2262
2263 return NULL;
2264}
2265
2266static int native_thread_create_shared(rb_thread_t *th);
2267
2268#if USE_MN_THREADS
2269static void nt_free_stack(void *mstack);
2270#endif
2271
2272void
2273rb_threadptr_remove(rb_thread_t *th)
2274{
2275#if USE_MN_THREADS
2276 if (th->sched.malloc_stack) {
2277 // dedicated
2278 return;
2279 }
2280 else {
2281 rb_vm_t *vm = th->vm;
2282 th->sched.finished = false;
2283
2284 RB_VM_LOCK_ENTER();
2285 {
2286 ccan_list_add(&vm->ractor.sched.zombie_threads, &th->sched.node.zombie_threads);
2287 }
2288 RB_VM_LOCK_LEAVE();
2289 }
2290#endif
2291}
2292
2293void
2294rb_threadptr_sched_free(rb_thread_t *th)
2295{
2296#if USE_MN_THREADS
2297 if (th->sched.malloc_stack) {
2298 // has dedicated
2299 ruby_xfree(th->sched.context_stack);
2300 native_thread_destroy(th->nt);
2301 }
2302 else {
2303 nt_free_stack(th->sched.context_stack);
2304 // TODO: how to free nt and nt->altstack?
2305 }
2306
2307 if (th->sched.context) {
2308 ruby_xfree(th->sched.context);
2309 VM_ASSERT((th->sched.context = NULL) == NULL);
2310 }
2311#else
2312 ruby_xfree(th->sched.context_stack);
2313 native_thread_destroy(th->nt);
2314#endif
2315
2316 th->nt = NULL;
2317}
2318
2319void
2320rb_thread_sched_mark_zombies(rb_vm_t *vm)
2321{
2322 if (!ccan_list_empty(&vm->ractor.sched.zombie_threads)) {
2323 rb_thread_t *zombie_th, *next_zombie_th;
2324 ccan_list_for_each_safe(&vm->ractor.sched.zombie_threads, zombie_th, next_zombie_th, sched.node.zombie_threads) {
2325 if (zombie_th->sched.finished) {
2326 ccan_list_del_init(&zombie_th->sched.node.zombie_threads);
2327 }
2328 else {
2329 rb_gc_mark(zombie_th->self);
2330 }
2331 }
2332 }
2333}
2334
2335static int
2336native_thread_create(rb_thread_t *th)
2337{
2338 VM_ASSERT(th->nt == 0);
2339 RUBY_DEBUG_LOG("th:%d has_dnt:%d", th->serial, th->has_dedicated_nt);
2340 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_STARTED, th);
2341
2342 if (!th->ractor->threads.sched.enable_mn_threads) {
2343 th->has_dedicated_nt = 1;
2344 }
2345
2346 if (th->has_dedicated_nt) {
2347 return native_thread_create_dedicated(th);
2348 }
2349 else {
2350 return native_thread_create_shared(th);
2351 }
2352}
2353
2354#if USE_NATIVE_THREAD_PRIORITY
2355
2356static void
2357native_thread_apply_priority(rb_thread_t *th)
2358{
2359#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
2360 struct sched_param sp;
2361 int policy;
2362 int priority = 0 - th->priority;
2363 int max, min;
2364 pthread_getschedparam(th->nt->thread_id, &policy, &sp);
2365 max = sched_get_priority_max(policy);
2366 min = sched_get_priority_min(policy);
2367
2368 if (min > priority) {
2369 priority = min;
2370 }
2371 else if (max < priority) {
2372 priority = max;
2373 }
2374
2375 sp.sched_priority = priority;
2376 pthread_setschedparam(th->nt->thread_id, policy, &sp);
2377#else
2378 /* not touched */
2379#endif
2380}
2381
2382#endif /* USE_NATIVE_THREAD_PRIORITY */
2383
2384static int
2385native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
2386{
2387 return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
2388}
2389
2390static void
2391ubf_pthread_cond_signal(void *ptr)
2392{
2393 rb_thread_t *th = (rb_thread_t *)ptr;
2394 RUBY_DEBUG_LOG("th:%u on nt:%d", rb_th_serial(th), (int)th->nt->serial);
2395 rb_native_cond_signal(&th->nt->cond.intr);
2396}
2397
2398static void
2399native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
2400{
2401 rb_nativethread_lock_t *lock = &th->interrupt_lock;
2402 rb_nativethread_cond_t *cond = &th->nt->cond.intr;
2403
2404    /* Solaris cond_timedwait() returns EINVAL if the timeout argument is more
2405     * than current_time + 100,000,000 seconds, so clamp it to 100,000,000.
2406     * The resulting early return is treated as a kind of spurious wakeup;
2407     * callers of native_sleep must handle spurious wakeups.
2408 *
2409 * See also [Bug #1341] [ruby-core:29702]
2410 * http://download.oracle.com/docs/cd/E19683-01/816-0216/6m6ngupgv/index.html
2411 */
2412 const rb_hrtime_t max = (rb_hrtime_t)100000000 * RB_HRTIME_PER_SEC;
2413
2414 THREAD_BLOCKING_BEGIN(th);
2415    {
2416        rb_native_mutex_lock(lock);
2417        th->unblock.func = ubf_pthread_cond_signal;
2418 th->unblock.arg = th;
2419
2420 if (RUBY_VM_INTERRUPTED(th->ec)) {
2421            /* interrupted. return immediately */
2422 RUBY_DEBUG_LOG("interrupted before sleep th:%u", rb_th_serial(th));
2423 }
2424 else {
2425 if (!rel) {
2426 rb_native_cond_wait(cond, lock);
2427 }
2428 else {
2429 rb_hrtime_t end;
2430
2431 if (*rel > max) {
2432 *rel = max;
2433 }
2434
2435 end = native_cond_timeout(cond, *rel);
2436 native_cond_timedwait(cond, lock, &end);
2437 }
2438 }
2439 th->unblock.func = 0;
2440
2441        rb_native_mutex_unlock(lock);
2442    }
2443 THREAD_BLOCKING_END(th);
2444
2445 RUBY_DEBUG_LOG("done th:%u", rb_th_serial(th));
2446}
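
/* The clamp above in action (illustrative): an absurdly long relative sleep
 * is cut to 100,000,000 seconds (~3.17 years); the thread then wakes "early",
 * which the caller of native_sleep treats like any other spurious wakeup and
 * simply sleeps again for the remaining time. */
#if 0
    rb_hrtime_t rel = (rb_hrtime_t)200000000 * RB_HRTIME_PER_SEC;
    const rb_hrtime_t max = (rb_hrtime_t)100000000 * RB_HRTIME_PER_SEC;
    if (rel > max) rel = max;   /* the sleep is shortened, not failed */
#endif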
2447
2448#ifdef USE_UBF_LIST
2449static CCAN_LIST_HEAD(ubf_list_head);
2450static rb_nativethread_lock_t ubf_list_lock = RB_NATIVETHREAD_LOCK_INIT;
2451
2452static void
2453ubf_list_atfork(void)
2454{
2455 ccan_list_head_init(&ubf_list_head);
2456 rb_native_mutex_initialize(&ubf_list_lock);
2457}
2458
2459RBIMPL_ATTR_MAYBE_UNUSED()
2460static bool
2461ubf_list_contain_p(rb_thread_t *th)
2462{
2463 rb_thread_t *list_th;
2464 ccan_list_for_each(&ubf_list_head, list_th, sched.node.ubf) {
2465 if (list_th == th) return true;
2466 }
2467 return false;
2468}
2469
2470/* The thread 'th' is registered as one we are trying to unblock. */
2471static void
2472register_ubf_list(rb_thread_t *th)
2473{
2474 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
2475 struct ccan_list_node *node = &th->sched.node.ubf;
2476
2477 VM_ASSERT(th->unblock.func != NULL);
2478
2479 rb_native_mutex_lock(&ubf_list_lock);
2480 {
2481 // check not connected yet
2482 if (ccan_list_empty((struct ccan_list_head*)node)) {
2483 VM_ASSERT(!ubf_list_contain_p(th));
2484 ccan_list_add(&ubf_list_head, node);
2485 }
2486 }
2487 rb_native_mutex_unlock(&ubf_list_lock);
2488
2489 timer_thread_wakeup();
2490}
2491
2492/* The thread 'th' is unblocked. It no longer needs to be registered. */
2493static void
2494unregister_ubf_list(rb_thread_t *th)
2495{
2496 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
2497 struct ccan_list_node *node = &th->sched.node.ubf;
2498
2499 /* we can't allow re-entry into ubf_list_head */
2500 VM_ASSERT(th->unblock.func == NULL);
2501
2502 if (!ccan_list_empty((struct ccan_list_head*)node)) {
2503 rb_native_mutex_lock(&ubf_list_lock);
2504 {
2505 VM_ASSERT(ubf_list_contain_p(th));
2506 ccan_list_del_init(node);
2507 }
2508 rb_native_mutex_unlock(&ubf_list_lock);
2509 }
2510}
2511
2512/*
2513 * Send a signal so that the target thread returns from its blocking syscall.
2514 * Almost any signal would do, but we use SIGVTALRM.
2515 */
2516static void
2517ubf_wakeup_thread(rb_thread_t *th)
2518{
2519 RUBY_DEBUG_LOG("th:%u thread_id:%p", rb_th_serial(th), (void *)th->nt->thread_id);
2520
2521 pthread_kill(th->nt->thread_id, SIGVTALRM);
2522}
2523
2524static void
2525ubf_select(void *ptr)
2526{
2527 rb_thread_t *th = (rb_thread_t *)ptr;
2528 RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(th));
2529 ubf_wakeup_thread(th);
2530 register_ubf_list(th);
2531}
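
/* The same unblocking-function protocol is exposed to C extensions through
 * rb_thread_call_without_gvl() (declared in ruby/thread.h); a rough sketch,
 * where blocking_read, cancel_read and do_blocking_call are hypothetical names: */
#if 0
static void *blocking_read(void *arg) { return NULL; /* e.g. a read(2) made without the GVL */ }
static void  cancel_read(void *arg)   { /* the UBF: make blocking_read return, e.g. close the fd */ }

static VALUE
do_blocking_call(VALUE self)
{
    int fd = -1; /* hypothetical descriptor */
    rb_thread_call_without_gvl(blocking_read, &fd, cancel_read, &fd);
    return Qnil;
}
#endif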
2532
2533static bool
2534ubf_threads_empty(void)
2535{
2536 return ccan_list_empty(&ubf_list_head) != 0;
2537}
2538
2539static void
2540ubf_wakeup_all_threads(void)
2541{
2542 if (!ubf_threads_empty()) {
2543 rb_thread_t *th;
2544 rb_native_mutex_lock(&ubf_list_lock);
2545 {
2546 ccan_list_for_each(&ubf_list_head, th, sched.node.ubf) {
2547 ubf_wakeup_thread(th);
2548 }
2549 }
2550 rb_native_mutex_unlock(&ubf_list_lock);
2551 }
2552}
2553
2554#else /* USE_UBF_LIST */
2555#define register_ubf_list(th) (void)(th)
2556#define unregister_ubf_list(th) (void)(th)
2557#define ubf_select 0
2558static void ubf_wakeup_all_threads(void) { return; }
2559static bool ubf_threads_empty(void) { return true; }
2560#define ubf_list_atfork() do {} while (0)
2561#endif /* USE_UBF_LIST */
2562
2563#define TT_DEBUG 0
2564#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)
2565
2566void
2567rb_thread_wakeup_timer_thread(int sig)
2568{
2569    // This function can be called from signal handlers, so
2570    // pthread_mutex_lock() must not be used.
2571
2572 // wakeup timer thread
2573 timer_thread_wakeup_force();
2574
2575 // interrupt main thread if main thread is available
2576 if (system_working) {
2577 rb_vm_t *vm = GET_VM();
2578 rb_thread_t *main_th = vm->ractor.main_thread;
2579
2580 if (main_th) {
2581 volatile rb_execution_context_t *main_th_ec = ACCESS_ONCE(rb_execution_context_t *, main_th->ec);
2582
2583 if (main_th_ec) {
2584 RUBY_VM_SET_TRAP_INTERRUPT(main_th_ec);
2585
2586 if (vm->ubf_async_safe && main_th->unblock.func) {
2587 (main_th->unblock.func)(main_th->unblock.arg);
2588 }
2589 }
2590 }
2591 }
2592}
2593
2594#define CLOSE_INVALIDATE_PAIR(expr) \
2595 close_invalidate_pair(expr,"close_invalidate: "#expr)
2596static void
2597close_invalidate(int *fdp, const char *msg)
2598{
2599 int fd = *fdp;
2600
2601 *fdp = -1;
2602 if (close(fd) < 0) {
2603 async_bug_fd(msg, errno, fd);
2604 }
2605}
2606
2607static void
2608close_invalidate_pair(int fds[2], const char *msg)
2609{
2610 if (USE_EVENTFD && fds[0] == fds[1]) {
2611 fds[1] = -1; // disable write port first
2612 close_invalidate(&fds[0], msg);
2613 }
2614 else {
2615 close_invalidate(&fds[1], msg);
2616 close_invalidate(&fds[0], msg);
2617 }
2618}
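
/* Why the pair is special-cased above (sketch): with USE_EVENTFD both slots
 * hold the same descriptor, so only one close(2) may happen, and the write
 * slot is invalidated first so a concurrent wakeup attempt sees -1 rather
 * than a recycled fd. */
#if 0
    int fds[2];
    fds[0] = fds[1] = eventfd(0, EFD_NONBLOCK|EFD_CLOEXEC); /* one fd, two roles */
    CLOSE_INVALIDATE_PAIR(fds);                             /* closes it exactly once */
#endif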
2619
2620static void
2621set_nonblock(int fd)
2622{
2623 int oflags;
2624 int err;
2625
2626 oflags = fcntl(fd, F_GETFL);
2627 if (oflags == -1)
2628 rb_sys_fail(0);
2629 oflags |= O_NONBLOCK;
2630 err = fcntl(fd, F_SETFL, oflags);
2631 if (err == -1)
2632 rb_sys_fail(0);
2633}
2634
2635/* communication pipe with timer thread and signal handler */
2636static void
2637setup_communication_pipe_internal(int pipes[2])
2638{
2639 int err;
2640
2641 if (pipes[0] > 0 || pipes[1] > 0) {
2642 VM_ASSERT(pipes[0] > 0);
2643 VM_ASSERT(pipes[1] > 0);
2644 return;
2645 }
2646
2647    /*
2648     * Don't bother with eventfd on ancient Linux 2.6.22..2.6.26, which lacked
2649     * the EFD_* flags; such kernels fall back to a pipe instead.
2650     */
2651#if USE_EVENTFD && defined(EFD_NONBLOCK) && defined(EFD_CLOEXEC)
2652 pipes[0] = pipes[1] = eventfd(0, EFD_NONBLOCK|EFD_CLOEXEC);
2653
2654 if (pipes[0] >= 0) {
2655 rb_update_max_fd(pipes[0]);
2656 return;
2657 }
2658#endif
2659
2660 err = rb_cloexec_pipe(pipes);
2661 if (err != 0) {
2662 rb_bug("can not create communication pipe");
2663 }
2664 rb_update_max_fd(pipes[0]);
2665 rb_update_max_fd(pipes[1]);
2666 set_nonblock(pipes[0]);
2667 set_nonblock(pipes[1]);
2668}
2669
2670#if !defined(SET_CURRENT_THREAD_NAME) && defined(__linux__) && defined(PR_SET_NAME)
2671# define SET_CURRENT_THREAD_NAME(name) prctl(PR_SET_NAME, name)
2672#endif
2673
2674enum {
2675 THREAD_NAME_MAX =
2676#if defined(__linux__)
2677 16
2678#elif defined(__APPLE__)
2679/* Undocumented, and main thread seems unlimited */
2680 64
2681#else
2682 16
2683#endif
2684};
2685
2686static VALUE threadptr_invoke_proc_location(rb_thread_t *th);
2687
2688static void
2689native_set_thread_name(rb_thread_t *th)
2690{
2691#ifdef SET_CURRENT_THREAD_NAME
2692 VALUE loc;
2693 if (!NIL_P(loc = th->name)) {
2694 SET_CURRENT_THREAD_NAME(RSTRING_PTR(loc));
2695 }
2696 else if ((loc = threadptr_invoke_proc_location(th)) != Qnil) {
2697 char *name, *p;
2698 char buf[THREAD_NAME_MAX];
2699 size_t len;
2700 int n;
2701
2702 name = RSTRING_PTR(RARRAY_AREF(loc, 0));
2703 p = strrchr(name, '/'); /* show only the basename of the path. */
2704 if (p && p[1])
2705 name = p + 1;
2706
2707 n = snprintf(buf, sizeof(buf), "%s:%d", name, NUM2INT(RARRAY_AREF(loc, 1)));
2708 RB_GC_GUARD(loc);
2709
2710 len = (size_t)n;
2711 if (len >= sizeof(buf)) {
2712 buf[sizeof(buf)-2] = '*';
2713 buf[sizeof(buf)-1] = '\0';
2714 }
2715 SET_CURRENT_THREAD_NAME(buf);
2716 }
2717#endif
2718}
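
/* Naming scheme above, illustrated with a hypothetical proc location
 * "/app/lib/worker.rb:42": only the basename is kept, giving "worker.rb:42",
 * and anything that does not fit into THREAD_NAME_MAX-1 bytes is marked by
 * replacing the last visible byte with '*'. */
#if 0
    char buf[THREAD_NAME_MAX];
    int n = snprintf(buf, sizeof(buf), "%s:%d", "worker.rb", 42);
    if ((size_t)n >= sizeof(buf)) {  /* snprintf reports the untruncated length */
        buf[sizeof(buf)-2] = '*';
        buf[sizeof(buf)-1] = '\0';
    }
#endif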
2719
2720static void
2721native_set_another_thread_name(rb_nativethread_id_t thread_id, VALUE name)
2722{
2723#if defined SET_ANOTHER_THREAD_NAME || defined SET_CURRENT_THREAD_NAME
2724 char buf[THREAD_NAME_MAX];
2725 const char *s = "";
2726# if !defined SET_ANOTHER_THREAD_NAME
2727 if (!pthread_equal(pthread_self(), thread_id)) return;
2728# endif
2729 if (!NIL_P(name)) {
2730 long n;
2731 RSTRING_GETMEM(name, s, n);
2732 if (n >= (int)sizeof(buf)) {
2733 memcpy(buf, s, sizeof(buf)-1);
2734 buf[sizeof(buf)-1] = '\0';
2735 s = buf;
2736 }
2737 }
2738# if defined SET_ANOTHER_THREAD_NAME
2739 SET_ANOTHER_THREAD_NAME(thread_id, s);
2740# elif defined SET_CURRENT_THREAD_NAME
2741 SET_CURRENT_THREAD_NAME(s);
2742# endif
2743#endif
2744}
2745
2746#if defined(RB_THREAD_T_HAS_NATIVE_ID) || defined(__APPLE__)
2747static VALUE
2748native_thread_native_thread_id(rb_thread_t *target_th)
2749{
2750 if (!target_th->nt) return Qnil;
2751
2752#ifdef RB_THREAD_T_HAS_NATIVE_ID
2753 int tid = target_th->nt->tid;
2754 if (tid == 0) return Qnil;
2755 return INT2FIX(tid);
2756#elif defined(__APPLE__)
2757 uint64_t tid;
2758/* The first condition is needed because MAC_OS_X_VERSION_10_6
2759 is not defined on 10.5, and while __POWERPC__ takes care of ppc/ppc64,
2760 i386 will be broken without this. Note, 10.5 is supported with GCC upstream,
2761 so it has C++17 and everything needed to build modern Ruby. */
2762# if (!defined(MAC_OS_X_VERSION_10_6) || \
2763 (MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_6) || \
2764 defined(__POWERPC__) /* never defined for PowerPC platforms */)
2765 const bool no_pthread_threadid_np = true;
2766# define NO_PTHREAD_MACH_THREAD_NP 1
2767# elif MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_6
2768 const bool no_pthread_threadid_np = false;
2769# else
2770# if !(defined(__has_attribute) && __has_attribute(availability))
2771 /* __API_AVAILABLE macro does nothing on gcc */
2772 __attribute__((weak)) int pthread_threadid_np(pthread_t, uint64_t*);
2773# endif
2774 /* Check weakly linked symbol */
2775 const bool no_pthread_threadid_np = !&pthread_threadid_np;
2776# endif
2777 if (no_pthread_threadid_np) {
2778 return ULL2NUM(pthread_mach_thread_np(pthread_self()));
2779 }
2780# ifndef NO_PTHREAD_MACH_THREAD_NP
2781 int e = pthread_threadid_np(target_th->nt->thread_id, &tid);
2782 if (e != 0) rb_syserr_fail(e, "pthread_threadid_np");
2783 return ULL2NUM((unsigned long long)tid);
2784# endif
2785#endif
2786}
2787# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 1
2788#else
2789# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 0
2790#endif
2791
2792static struct {
2793 rb_serial_t created_fork_gen;
2794 pthread_t pthread_id;
2795
2796 int comm_fds[2]; // r, w
2797
2798#if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
2799 int event_fd; // kernel event queue fd (epoll/kqueue)
2800#endif
2801#if HAVE_SYS_EPOLL_H && USE_MN_THREADS
2802#define EPOLL_EVENTS_MAX 0x10
2803 struct epoll_event finished_events[EPOLL_EVENTS_MAX];
2804#elif HAVE_SYS_EVENT_H && USE_MN_THREADS
2805#define KQUEUE_EVENTS_MAX 0x10
2806 struct kevent finished_events[KQUEUE_EVENTS_MAX];
2807#endif
2808
2809 // waiting threads list
2810 struct ccan_list_head waiting; // waiting threads in ractors
2811 pthread_mutex_t waiting_lock;
2812} timer_th = {
2813 .created_fork_gen = 0,
2814};
2815
2816#define TIMER_THREAD_CREATED_P() (timer_th.created_fork_gen == current_fork_gen)
2817
2818static void timer_thread_check_timeslice(rb_vm_t *vm);
2819static int timer_thread_set_timeout(rb_vm_t *vm);
2820static void timer_thread_wakeup_thread(rb_thread_t *th);
2821
2822#include "thread_pthread_mn.c"
2823
2824static int
2825timer_thread_set_timeout(rb_vm_t *vm)
2826{
2827#if 0
2828 return 10; // ms
2829#else
2830 int timeout = -1;
2831
2832 ractor_sched_lock(vm, NULL);
2833 {
2834 if ( !ccan_list_empty(&vm->ractor.sched.timeslice_threads) // (1-1) Provide time slice for active NTs
2835 || !ubf_threads_empty() // (1-3) Periodic UBF
2836 || vm->ractor.sched.grq_cnt > 0 // (1-4) Lazy GRQ deq start
2837 ) {
2838
2839 RUBY_DEBUG_LOG("timeslice:%d ubf:%d grq:%d",
2840 !ccan_list_empty(&vm->ractor.sched.timeslice_threads),
2841 !ubf_threads_empty(),
2842 (vm->ractor.sched.grq_cnt > 0));
2843
2844 timeout = 10; // ms
2845 vm->ractor.sched.timeslice_wait_inf = false;
2846 }
2847 else {
2848 vm->ractor.sched.timeslice_wait_inf = true;
2849 }
2850 }
2851 ractor_sched_unlock(vm, NULL);
2852
2853 if (vm->ractor.sched.timeslice_wait_inf) {
2854 rb_native_mutex_lock(&timer_th.waiting_lock);
2855 {
2856 rb_thread_t *th = ccan_list_top(&timer_th.waiting, rb_thread_t, sched.waiting_reason.node);
2857 if (th && (th->sched.waiting_reason.flags & thread_sched_waiting_timeout)) {
2858 rb_hrtime_t now = rb_hrtime_now();
2859 rb_hrtime_t hrrel = rb_hrtime_sub(th->sched.waiting_reason.data.timeout, now);
2860
2861 RUBY_DEBUG_LOG("th:%u now:%lu rel:%lu", rb_th_serial(th), (unsigned long)now, (unsigned long)hrrel);
2862
2863 // TODO: overflow?
2864 timeout = (int)((hrrel + RB_HRTIME_PER_MSEC - 1) / RB_HRTIME_PER_MSEC); // ms
2865 }
2866 }
2867 rb_native_mutex_unlock(&timer_th.waiting_lock);
2868 }
2869
2870 RUBY_DEBUG_LOG("timeout:%d inf:%d", timeout, (int)vm->ractor.sched.timeslice_wait_inf);
2871
2872 // fprintf(stderr, "timeout:%d\n", timeout);
2873 return timeout;
2874#endif
2875}
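
/* Worked example of the ceiling division above (illustrative): a waiter whose
 * deadline is 10.5 ms away produces an 11 ms poll timeout, and any positive
 * remainder below 1 ms still yields 1 ms rather than a busy 0 ms poll. */
#if 0
    rb_hrtime_t hrrel = 10 * RB_HRTIME_PER_MSEC + RB_HRTIME_PER_MSEC / 2;
    int ms = (int)((hrrel + RB_HRTIME_PER_MSEC - 1) / RB_HRTIME_PER_MSEC); /* == 11 */
#endif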
2876
2877static void
2878timer_thread_check_signal(rb_vm_t *vm)
2879{
2880 // ruby_sigchld_handler(vm); TODO
2881
2882 int signum = rb_signal_buff_size();
2883 if (UNLIKELY(signum > 0) && vm->ractor.main_thread) {
2884 RUBY_DEBUG_LOG("signum:%d", signum);
2885 threadptr_trap_interrupt(vm->ractor.main_thread);
2886 }
2887}
2888
2889static bool
2890timer_thread_check_exceed(rb_hrtime_t abs, rb_hrtime_t now)
2891{
2892 if (abs < now) {
2893 return true;
2894 }
2895 else if (abs - now < RB_HRTIME_PER_MSEC) {
2896 return true; // too short time
2897 }
2898 else {
2899 return false;
2900 }
2901}
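
/* Example of the 1 ms tolerance above: a deadline 0.5 ms in the future is
 * already reported as exceeded, trading a marginally early wakeup for not
 * scheduling another sub-millisecond poll. */
#if 0
    rb_hrtime_t now = rb_hrtime_now();
    bool hit = timer_thread_check_exceed(now + RB_HRTIME_PER_MSEC / 2, now); /* true */
#endif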
2902
2903static rb_thread_t *
2904timer_thread_deq_wakeup(rb_vm_t *vm, rb_hrtime_t now)
2905{
2906 rb_thread_t *th = ccan_list_top(&timer_th.waiting, rb_thread_t, sched.waiting_reason.node);
2907
2908 if (th != NULL &&
2909 (th->sched.waiting_reason.flags & thread_sched_waiting_timeout) &&
2910 timer_thread_check_exceed(th->sched.waiting_reason.data.timeout, now)) {
2911
2912 RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(th));
2913
2914 // delete from waiting list
2915 ccan_list_del_init(&th->sched.waiting_reason.node);
2916
2917 // setup result
2918 th->sched.waiting_reason.flags = thread_sched_waiting_none;
2919 th->sched.waiting_reason.data.result = 0;
2920
2921 return th;
2922 }
2923
2924 return NULL;
2925}
2926
2927static void
2928timer_thread_wakeup_thread(rb_thread_t *th)
2929{
2930 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
2931 struct rb_thread_sched *sched = TH_SCHED(th);
2932
2933 thread_sched_lock(sched, th);
2934 {
2935 if (sched->running != th) {
2936 thread_sched_to_ready_common(sched, th, true, false);
2937 }
2938 else {
2939            // the thread itself will release the execution right
2940 }
2941 }
2942 thread_sched_unlock(sched, th);
2943}
2944
2945static void
2946timer_thread_check_timeout(rb_vm_t *vm)
2947{
2948 rb_hrtime_t now = rb_hrtime_now();
2949 rb_thread_t *th;
2950
2951 rb_native_mutex_lock(&timer_th.waiting_lock);
2952 {
2953 while ((th = timer_thread_deq_wakeup(vm, now)) != NULL) {
2954 timer_thread_wakeup_thread(th);
2955 }
2956 }
2957 rb_native_mutex_unlock(&timer_th.waiting_lock);
2958}
2959
2960static void
2961timer_thread_check_timeslice(rb_vm_t *vm)
2962{
2963 // TODO: check time
2964 rb_thread_t *th;
2965 ccan_list_for_each(&vm->ractor.sched.timeslice_threads, th, sched.node.timeslice_threads) {
2966 RUBY_DEBUG_LOG("timeslice th:%u", rb_th_serial(th));
2967 RUBY_VM_SET_TIMER_INTERRUPT(th->ec);
2968 }
2969}
2970
2971void
2972rb_assert_sig(void)
2973{
2974 sigset_t oldmask;
2975 pthread_sigmask(0, NULL, &oldmask);
2976 if (sigismember(&oldmask, SIGVTALRM)) {
2977 rb_bug("!!!");
2978 }
2979 else {
2980 RUBY_DEBUG_LOG("ok");
2981 }
2982}
2983
2984static void *
2985timer_thread_func(void *ptr)
2986{
2987 rb_vm_t *vm = (rb_vm_t *)ptr;
2988#if defined(RUBY_NT_SERIAL)
2989 ruby_nt_serial = (rb_atomic_t)-1;
2990#endif
2991
2992 RUBY_DEBUG_LOG("started%s", "");
2993
2994 while (system_working) {
2995 timer_thread_check_signal(vm);
2996 timer_thread_check_timeout(vm);
2997 ubf_wakeup_all_threads();
2998
2999 RUBY_DEBUG_LOG("system_working:%d", system_working);
3000 timer_thread_polling(vm);
3001 }
3002
3003 RUBY_DEBUG_LOG("terminated");
3004 return NULL;
3005}
3006
3007/* only use signal-safe system calls here */
3008static void
3009signal_communication_pipe(int fd)
3010{
3011#if USE_EVENTFD
3012 const uint64_t buff = 1;
3013#else
3014 const char buff = '!';
3015#endif
3016 ssize_t result;
3017
3018 /* already opened */
3019 if (fd >= 0) {
3020 retry:
3021 if ((result = write(fd, &buff, sizeof(buff))) <= 0) {
3022 int e = errno;
3023 switch (e) {
3024 case EINTR: goto retry;
3025 case EAGAIN:
3026#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
3027 case EWOULDBLOCK:
3028#endif
3029 break;
3030 default:
3031 async_bug_fd("rb_thread_wakeup_timer_thread: write", e, fd);
3032 }
3033 }
3034 if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
3035 }
3036 else {
3037 // ignore wakeup
3038 }
3039}
3040
3041static void
3042timer_thread_wakeup_force(void)
3043{
3044 // should not use RUBY_DEBUG_LOG() because it can be called within signal handlers.
3045 signal_communication_pipe(timer_th.comm_fds[1]);
3046}
3047
3048static void
3049timer_thread_wakeup_locked(rb_vm_t *vm)
3050{
3051 // should be locked before.
3052 ASSERT_ractor_sched_locked(vm, NULL);
3053
3054 if (timer_th.created_fork_gen == current_fork_gen) {
3055 if (vm->ractor.sched.timeslice_wait_inf) {
3056 RUBY_DEBUG_LOG("wakeup with fd:%d", timer_th.comm_fds[1]);
3057 timer_thread_wakeup_force();
3058 }
3059 else {
3060 RUBY_DEBUG_LOG("will be wakeup...");
3061 }
3062 }
3063}
3064
3065static void
3066timer_thread_wakeup(void)
3067{
3068 rb_vm_t *vm = GET_VM();
3069
3070 ractor_sched_lock(vm, NULL);
3071 {
3072 timer_thread_wakeup_locked(vm);
3073 }
3074 ractor_sched_unlock(vm, NULL);
3075}
3076
3077static void
3078rb_thread_create_timer_thread(void)
3079{
3080 rb_serial_t created_fork_gen = timer_th.created_fork_gen;
3081
3082 RUBY_DEBUG_LOG("fork_gen create:%d current:%d", (int)created_fork_gen, (int)current_fork_gen);
3083
3084 timer_th.created_fork_gen = current_fork_gen;
3085
3086 if (created_fork_gen != current_fork_gen) {
3087 if (created_fork_gen != 0) {
3088 RUBY_DEBUG_LOG("forked child process");
3089
3090 CLOSE_INVALIDATE_PAIR(timer_th.comm_fds);
3091#if HAVE_SYS_EPOLL_H && USE_MN_THREADS
3092 close_invalidate(&timer_th.event_fd, "close event_fd");
3093#endif
3094 rb_native_mutex_destroy(&timer_th.waiting_lock);
3095 }
3096
3097 ccan_list_head_init(&timer_th.waiting);
3098 rb_native_mutex_initialize(&timer_th.waiting_lock);
3099
3100 // open communication channel
3101 setup_communication_pipe_internal(timer_th.comm_fds);
3102
3103 // open event fd
3104 timer_thread_setup_mn();
3105 }
3106
3107 pthread_create(&timer_th.pthread_id, NULL, timer_thread_func, GET_VM());
3108}
3109
3110static int
3111native_stop_timer_thread(void)
3112{
3113 int stopped;
3114 stopped = --system_working <= 0;
3115
3116 if (stopped) {
3117 RUBY_DEBUG_LOG("wakeup send %d", timer_th.comm_fds[1]);
3118 timer_thread_wakeup_force();
3119 RUBY_DEBUG_LOG("wakeup sent");
3120 pthread_join(timer_th.pthread_id, NULL);
3121 }
3122
3123 if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
3124 return stopped;
3125}
3126
3127static void
3128native_reset_timer_thread(void)
3129{
3130 //
3131}
3132
3133#ifdef HAVE_SIGALTSTACK
3134int
3135ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
3136{
3137 void *base;
3138 size_t size;
3139 const size_t water_mark = 1024 * 1024;
3140 STACK_GROW_DIR_DETECTION;
3141
3142#ifdef STACKADDR_AVAILABLE
3143 if (get_stack(&base, &size) == 0) {
3144# ifdef __APPLE__
3145 if (pthread_equal(th->nt->thread_id, native_main_thread.id)) {
3146 struct rlimit rlim;
3147 if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
3148 size = (size_t)rlim.rlim_cur;
3149 }
3150 }
3151# endif
3152 base = (char *)base + STACK_DIR_UPPER(+size, -size);
3153 }
3154 else
3155#endif
3156 if (th) {
3157 size = th->ec->machine.stack_maxsize;
3158 base = (char *)th->ec->machine.stack_start - STACK_DIR_UPPER(0, size);
3159 }
3160 else {
3161 return 0;
3162 }
3163 size /= RUBY_STACK_SPACE_RATIO;
3164 if (size > water_mark) size = water_mark;
3165 if (IS_STACK_DIR_UPPER()) {
3166 if (size > ~(size_t)base+1) size = ~(size_t)base+1;
3167 if (addr > base && addr <= (void *)((char *)base + size)) return 1;
3168 }
3169 else {
3170 if (size > (size_t)base) size = (size_t)base;
3171 if (addr > (void *)((char *)base - size) && addr <= base) return 1;
3172 }
3173 return 0;
3174}
3175#endif
3176
3177int
3178rb_reserved_fd_p(int fd)
3179{
3180 /* no false-positive if out-of-FD at startup */
3181 if (fd < 0) return 0;
3182
3183 if (fd == timer_th.comm_fds[0] ||
3184 fd == timer_th.comm_fds[1]
3185#if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
3186 || fd == timer_th.event_fd
3187#endif
3188 ) {
3189 goto check_fork_gen;
3190 }
3191 return 0;
3192
3193 check_fork_gen:
3194 if (timer_th.created_fork_gen == current_fork_gen) {
3195 /* async-signal-safe */
3196 return 1;
3197 }
3198 else {
3199 return 0;
3200 }
3201}
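
/* Typical use from an extension that walks descriptors (sketch; maxfd is a
 * hypothetical upper bound): skip anything the VM reserved for its timer
 * thread before closing it. */
#if 0
    for (int fd = 3; fd < maxfd; fd++) {
        if (rb_reserved_fd_p(fd)) continue; /* timer-thread pipe/eventfd/epoll fd */
        close(fd);
    }
#endif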
3202
3203rb_nativethread_id_t
3204rb_nativethread_self(void)
3205{
3206 return pthread_self();
3207}
3208
3209#if defined(USE_POLL) && !defined(HAVE_PPOLL)
3210/* TODO: don't ignore sigmask */
3211static int
3212ruby_ppoll(struct pollfd *fds, nfds_t nfds,
3213 const struct timespec *ts, const sigset_t *sigmask)
3214{
3215 int timeout_ms;
3216
3217 if (ts) {
3218 int tmp, tmp2;
3219
3220 if (ts->tv_sec > INT_MAX/1000)
3221 timeout_ms = INT_MAX;
3222 else {
3223 tmp = (int)(ts->tv_sec * 1000);
3224 /* round up 1ns to 1ms to avoid excessive wakeups for <1ms sleep */
3225 tmp2 = (int)((ts->tv_nsec + 999999L) / (1000L * 1000L));
3226 if (INT_MAX - tmp < tmp2)
3227 timeout_ms = INT_MAX;
3228 else
3229 timeout_ms = (int)(tmp + tmp2);
3230 }
3231 }
3232 else
3233 timeout_ms = -1;
3234
3235 return poll(fds, nfds, timeout_ms);
3236}
3237# define ppoll(fds,nfds,ts,sigmask) ruby_ppoll((fds),(nfds),(ts),(sigmask))
3238#endif
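
/* Rounding examples for the fallback above (illustrative): a 1 ns request
 * becomes a 1 ms poll timeout (rounded up so poll never returns early), a
 * {2 s, 500 ms} timespec becomes 2500 ms, and a NULL timespec maps to -1,
 * i.e. block indefinitely. */
#if 0
    struct timespec a = { 0, 1 };          /* -> timeout_ms = 1    */
    struct timespec b = { 2, 500000000L }; /* -> timeout_ms = 2500 */
#endif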
3239
3240/*
3241 * Single CPU setups benefit from explicit sched_yield() before ppoll(),
3242 * since threads may be too starved to enter the GVL waitqueue for
3243 * us to detect contention. Instead, we want to kick other threads
3244 * so they can run and possibly prevent us from entering slow paths
3245 * in ppoll() or similar syscalls.
3246 *
3247 * Confirmed on FreeBSD 11.2 and Linux 4.19.
3248 * [ruby-core:90417] [Bug #15398]
3249 */
3250#define THREAD_BLOCKING_YIELD(th) do { \
3251 const rb_thread_t *next_th; \
3252 struct rb_thread_sched *sched = TH_SCHED(th); \
3253 RB_VM_SAVE_MACHINE_CONTEXT(th); \
3254 thread_sched_to_waiting(sched, (th)); \
3255 next_th = sched->running; \
3256 rb_native_mutex_unlock(&sched->lock_); \
3257 native_thread_yield(); /* TODO: needed? */ \
3258 if (!next_th && rb_ractor_living_thread_num(th->ractor) > 1) { \
3259 native_thread_yield(); \
3260 }
3261
3262static void
3263native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
3264{
3265 struct rb_thread_sched *sched = TH_SCHED(th);
3266
3267 RUBY_DEBUG_LOG("rel:%d", rel ? (int)*rel : 0);
3268 if (rel) {
3269 if (th_has_dedicated_nt(th)) {
3270 native_cond_sleep(th, rel);
3271 }
3272 else {
3273 thread_sched_wait_events(sched, th, -1, thread_sched_waiting_timeout, rel);
3274 }
3275 }
3276 else {
3277 thread_sched_to_waiting_until_wakeup(sched, th);
3278 }
3279
3280 RUBY_DEBUG_LOG("wakeup");
3281}
3282
3283// thread internal event hooks (only for pthread)
3284
3285struct rb_internal_thread_event_hook {
3286 rb_internal_thread_event_callback callback;
3287 rb_event_flag_t event;
3288 void *user_data;
3289
3290 struct rb_internal_thread_event_hook *next;
3291};
3292
3293static pthread_rwlock_t rb_internal_thread_event_hooks_rw_lock = PTHREAD_RWLOCK_INITIALIZER;
3294
3295rb_internal_thread_event_hook_t *
3296rb_internal_thread_add_event_hook(rb_internal_thread_event_callback callback, rb_event_flag_t internal_event, void *user_data)
3297{
3298 rb_internal_thread_event_hook_t *hook = ALLOC_N(rb_internal_thread_event_hook_t, 1);
3299 hook->callback = callback;
3300 hook->user_data = user_data;
3301 hook->event = internal_event;
3302
3303 int r;
3304 if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {
3305 rb_bug_errno("pthread_rwlock_wrlock", r);
3306 }
3307
3308 hook->next = rb_internal_thread_event_hooks;
3309 ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook);
3310
3311 if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
3312 rb_bug_errno("pthread_rwlock_unlock", r);
3313 }
3314 return hook;
3315}
3316
3317bool
3318rb_internal_thread_remove_event_hook(rb_internal_thread_event_hook_t * hook)
3319{
3320 int r;
3321 if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {
3322 rb_bug_errno("pthread_rwlock_wrlock", r);
3323 }
3324
3325 bool success = FALSE;
3326
3327 if (rb_internal_thread_event_hooks == hook) {
3328 ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook->next);
3329 success = TRUE;
3330 }
3331 else {
3332 rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;
3333
3334 do {
3335 if (h->next == hook) {
3336 h->next = hook->next;
3337 success = TRUE;
3338 break;
3339 }
3340 } while ((h = h->next));
3341 }
3342
3343 if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
3344 rb_bug_errno("pthread_rwlock_unlock", r);
3345 }
3346
3347 if (success) {
3348 ruby_xfree(hook);
3349 }
3350 return success;
3351}
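
/* Sketch of how the hook API above is consumed (public C API declared in
 * ruby/thread.h; the callback and counter names are hypothetical): */
#if 0
static void
count_gvl_switches(rb_event_flag_t event, const rb_internal_thread_event_data_t *event_data, void *user_data)
{
    RUBY_ATOMIC_FETCH_ADD(*(rb_atomic_t *)user_data, 1);
}

static rb_atomic_t switch_count;
static rb_internal_thread_event_hook_t *switch_hook;

static void
install_switch_hook(void)
{
    switch_hook = rb_internal_thread_add_event_hook(
        count_gvl_switches,
        RUBY_INTERNAL_THREAD_EVENT_RESUMED | RUBY_INTERNAL_THREAD_EVENT_SUSPENDED,
        &switch_count);
}

static void
remove_switch_hook(void)
{
    rb_internal_thread_remove_event_hook(switch_hook);
}
#endif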
3352
3353static void
3354rb_thread_execute_hooks(rb_event_flag_t event, rb_thread_t *th)
3355{
3356 int r;
3357 if ((r = pthread_rwlock_rdlock(&rb_internal_thread_event_hooks_rw_lock))) {
3358 rb_bug_errno("pthread_rwlock_rdlock", r);
3359 }
3360
3361 if (rb_internal_thread_event_hooks) {
3362 rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;
3363 do {
3364 if (h->event & event) {
3365 rb_internal_thread_event_data_t event_data = {
3366 .thread = th->self,
3367 };
3368 (*h->callback)(event, &event_data, h->user_data);
3369 }
3370 } while((h = h->next));
3371 }
3372 if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
3373 rb_bug_errno("pthread_rwlock_unlock", r);
3374 }
3375}
3376
3377#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */