12 #ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
16 #ifdef HAVE_SYS_RESOURCE_H
17 #include <sys/resource.h>
19 #ifdef HAVE_THR_STKSEGMENT
24 #elif HAVE_SYS_FCNTL_H
25 #include <sys/fcntl.h>
27 #ifdef HAVE_SYS_PRCTL_H
28 #include <sys/prctl.h>
30 #if defined(__native_client__) && defined(NACL_NEWLIB)
36 #if defined(HAVE_SYS_TIME_H)
/* Forward declarations: thin error-checked wrappers around the
 * pthread mutex API, defined later in this file. */
40 static void native_mutex_lock(pthread_mutex_t *lock);
41 static void native_mutex_unlock(pthread_mutex_t *lock);
42 static int native_mutex_trylock(pthread_mutex_t *lock);
43 static void native_mutex_initialize(pthread_mutex_t *lock);
44 static void native_mutex_destroy(pthread_mutex_t *lock);
/* Low-priority wakeup of the timer thread (no-op in builds without a
 * sleepy timer thread). */
50 static void rb_thread_wakeup_timer_thread_low(
void);
/* Identity of the timer thread; 0 while no timer thread is running. */
51 static pthread_t timer_thread_id;
53 #define RB_CONDATTR_CLOCK_MONOTONIC 1
55 #if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined(HAVE_CLOCKID_T) && \
56 defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
57 defined(HAVE_CLOCK_GETTIME) && defined(HAVE_PTHREAD_CONDATTR_INIT)
58 #define USE_MONOTONIC_COND 1
60 #define USE_MONOTONIC_COND 0
63 #if defined(HAVE_POLL) && defined(HAVE_FCNTL) && defined(F_GETFL) && defined(F_SETFL) && defined(O_NONBLOCK) && !defined(__native_client__)
65 # define USE_SLEEPY_TIMER_THREAD 1
67 # define USE_SLEEPY_TIMER_THREAD 0
/* Element count of a fixed-size array.  Valid only on true arrays,
 * never on pointers or decayed array parameters. */
71 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
86 rb_thread_wakeup_timer_thread_low();
107 native_mutex_lock(&vm->
gvl.
lock);
108 gvl_acquire_common(vm);
109 native_mutex_unlock(&vm->
gvl.
lock);
113 gvl_release_common(
rb_vm_t *vm)
117 native_cond_signal(&vm->
gvl.
cond);
123 native_mutex_lock(&vm->
gvl.
lock);
124 gvl_release_common(vm);
125 native_mutex_unlock(&vm->
gvl.
lock);
131 native_mutex_lock(&vm->
gvl.
lock);
133 gvl_release_common(vm);
151 native_mutex_unlock(&vm->
gvl.
lock);
153 native_mutex_lock(&vm->
gvl.
lock);
158 gvl_acquire_common(vm);
159 native_mutex_unlock(&vm->
gvl.
lock);
165 native_mutex_initialize(&vm->
gvl.
lock);
166 native_cond_initialize(&vm->
gvl.
cond, RB_CONDATTR_CLOCK_MONOTONIC);
167 native_cond_initialize(&vm->
gvl.
switch_cond, RB_CONDATTR_CLOCK_MONOTONIC);
180 native_cond_destroy(&vm->
gvl.
cond);
181 native_mutex_destroy(&vm->
gvl.
lock);
191 #define NATIVE_MUTEX_LOCK_DEBUG 0
194 mutex_debug(
const char *
msg, pthread_mutex_t *lock)
196 if (NATIVE_MUTEX_LOCK_DEBUG) {
198 static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;
200 if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(
EXIT_FAILURE);}
201 fprintf(stdout,
"%s: %p\n", msg, (
void *)lock);
202 if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(
EXIT_FAILURE);}
207 native_mutex_lock(pthread_mutex_t *lock)
210 mutex_debug(
"lock", lock);
211 if ((r = pthread_mutex_lock(lock)) != 0) {
217 native_mutex_unlock(pthread_mutex_t *lock)
220 mutex_debug(
"unlock", lock);
221 if ((r = pthread_mutex_unlock(lock)) != 0) {
227 native_mutex_trylock(pthread_mutex_t *lock)
230 mutex_debug(
"trylock", lock);
231 if ((r = pthread_mutex_trylock(lock)) != 0) {
243 native_mutex_initialize(pthread_mutex_t *lock)
245 int r = pthread_mutex_init(lock, 0);
246 mutex_debug(
"init", lock);
253 native_mutex_destroy(pthread_mutex_t *lock)
255 int r = pthread_mutex_destroy(lock);
256 mutex_debug(
"destroy", lock);
265 #ifdef HAVE_PTHREAD_COND_INIT
267 # if USE_MONOTONIC_COND
268 pthread_condattr_t attr;
270 pthread_condattr_init(&attr);
272 cond->clockid = CLOCK_REALTIME;
273 if (flags & RB_CONDATTR_CLOCK_MONOTONIC) {
274 r = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
276 cond->clockid = CLOCK_MONOTONIC;
280 r = pthread_cond_init(&cond->
cond, &attr);
281 pthread_condattr_destroy(&attr);
283 r = pthread_cond_init(&cond->
cond,
NULL);
296 #ifdef HAVE_PTHREAD_COND_INIT
297 int r = pthread_cond_destroy(&cond->
cond);
319 r = pthread_cond_signal(&cond->
cond);
320 }
while (r == EAGAIN);
331 r = pthread_cond_broadcast(&cond->
cond);
332 }
while (r == EAGAIN);
341 int r = pthread_cond_wait(&cond->
cond, mutex);
359 r = pthread_cond_timedwait(&cond->
cond, mutex, ts);
360 }
while (r == EINTR);
369 #if SIZEOF_TIME_T == SIZEOF_LONG
371 #elif SIZEOF_TIME_T == SIZEOF_INT
373 #elif SIZEOF_TIME_T == SIZEOF_LONG_LONG
376 # error cannot find integer type which size is same as time_t.
/* Largest value representable in time_t: if time_t is signed, shift
 * the all-ones unsigned pattern right by one; if unsigned, use the
 * all-ones pattern directly.  unsigned_time_t is a typedef chosen
 * above to match sizeof(time_t). */
379 #define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))
389 #if USE_MONOTONIC_COND
390 if (cond->clockid == CLOCK_MONOTONIC) {
391 ret = clock_gettime(cond->clockid, &now);
397 if (cond->clockid != CLOCK_REALTIME)
404 now.tv_sec = tv.tv_sec;
405 now.tv_nsec = tv.tv_usec * 1000;
407 #if USE_MONOTONIC_COND
410 timeout.tv_sec = now.tv_sec;
411 timeout.tv_nsec = now.tv_nsec;
412 timeout.tv_sec += timeout_rel.tv_sec;
413 timeout.tv_nsec += timeout_rel.tv_nsec;
415 if (timeout.tv_nsec >= 1000*1000*1000) {
417 timeout.tv_nsec -= 1000*1000*1000;
420 if (timeout.tv_sec < now.tv_sec)
/* Map cancellation-cleanup helpers directly onto pthreads. */
426 #define native_cleanup_push pthread_cleanup_push
427 #define native_cleanup_pop pthread_cleanup_pop
428 #ifdef HAVE_SCHED_YIELD
/* Yield the CPU to another runnable thread; a no-op where
 * sched_yield is unavailable.  NOTE(review): the #else separating
 * these two definitions (original line 430) is missing from this
 * extract. */
429 #define native_thread_yield() (void)sched_yield()
431 #define native_thread_yield() ((void)0)
434 #if defined(SIGVTALRM) && !defined(__CYGWIN__) && !defined(__SYMBIAN32__)
435 #define USE_SIGNAL_THREAD_LIST 1
437 #ifdef USE_SIGNAL_THREAD_LIST
438 static void add_signal_thread_list(
rb_thread_t *th);
439 static void remove_signal_thread_list(
rb_thread_t *th);
443 static pthread_key_t ruby_native_thread_key;
452 ruby_thread_from_native(
void)
454 return pthread_getspecific(ruby_native_thread_key);
460 return pthread_setspecific(ruby_native_thread_key, th) == 0;
470 pthread_key_create(&ruby_native_thread_key,
NULL);
472 native_thread_init(th);
473 #ifdef USE_SIGNAL_THREAD_LIST
474 native_mutex_initialize(&signal_thread_list_lock);
476 #ifndef __native_client__
485 ruby_thread_set_native(th);
494 #ifndef USE_THREAD_CACHE
495 #define USE_THREAD_CACHE 0
499 static rb_thread_t *register_cached_thread_and_wait(
void);
502 #if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
503 #define STACKADDR_AVAILABLE 1
504 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
505 #define STACKADDR_AVAILABLE 1
506 #undef MAINSTACKADDR_AVAILABLE
507 #define MAINSTACKADDR_AVAILABLE 0
508 void *pthread_get_stackaddr_np(pthread_t);
509 size_t pthread_get_stacksize_np(pthread_t);
510 #elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
511 #define STACKADDR_AVAILABLE 1
512 #elif defined HAVE_PTHREAD_GETTHRDS_NP
513 #define STACKADDR_AVAILABLE 1
516 #ifndef MAINSTACKADDR_AVAILABLE
517 # ifdef STACKADDR_AVAILABLE
518 # define MAINSTACKADDR_AVAILABLE 1
520 # define MAINSTACKADDR_AVAILABLE 0
524 #ifdef STACKADDR_AVAILABLE
529 get_stack(
void **addr,
size_t *
size)
/* Propagate the first failing pthread_*/attr call out of get_stack()
 * by returning its error code to the caller. */
531 #define CHECK_ERR(expr) \
532 {int err = (expr); if (err) return err;}
533 #ifdef HAVE_PTHREAD_GETATTR_NP
537 CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
538 # ifdef HAVE_PTHREAD_ATTR_GETSTACK
539 CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
542 CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
543 CHECK_ERR(pthread_attr_getstacksize(&attr, size));
545 CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
547 pthread_attr_destroy(&attr);
548 #elif defined HAVE_PTHREAD_ATTR_GET_NP
550 CHECK_ERR(pthread_attr_init(&attr));
551 CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
552 # ifdef HAVE_PTHREAD_ATTR_GETSTACK
553 CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
556 CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
557 CHECK_ERR(pthread_attr_getstacksize(&attr, size));
560 pthread_attr_destroy(&attr);
561 #elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP)
562 pthread_t th = pthread_self();
563 *addr = pthread_get_stackaddr_np(th);
564 *size = pthread_get_stacksize_np(th);
565 #elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
567 # if defined HAVE_THR_STKSEGMENT
568 CHECK_ERR(thr_stksegment(&stk));
570 CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
574 #elif defined HAVE_PTHREAD_GETTHRDS_NP
575 pthread_t th = pthread_self();
576 struct __pthrdsinfo thinfo;
578 int regsiz=
sizeof(reg);
579 CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
580 &thinfo,
sizeof(thinfo),
582 *addr = thinfo.__pi_stackaddr;
583 *size = thinfo.__pi_stacksize;
586 #error STACKADDR_AVAILABLE is defined but not implemented.
595 size_t stack_maxsize;
598 VALUE *register_stack_start;
600 } native_main_thread;
602 #ifdef STACK_END_ADDRESS
603 extern void *STACK_END_ADDRESS;
607 RUBY_STACK_SPACE_LIMIT = 1024 * 1024,
608 RUBY_STACK_SPACE_RATIO = 5
612 space_size(
size_t stack_size)
614 size_t space_size = stack_size / RUBY_STACK_SPACE_RATIO;
615 if (space_size > RUBY_STACK_SPACE_LIMIT) {
616 return RUBY_STACK_SPACE_LIMIT;
623 #undef ruby_init_stack
635 native_main_thread.id = pthread_self();
636 #ifdef STACK_END_ADDRESS
637 native_main_thread.stack_start = STACK_END_ADDRESS;
639 if (!native_main_thread.stack_start ||
641 native_main_thread.stack_start > addr,
642 native_main_thread.stack_start < addr)) {
643 native_main_thread.stack_start = (
VALUE *)addr;
647 if (!native_main_thread.register_stack_start ||
648 (
VALUE*)bsp < native_main_thread.register_stack_start) {
649 native_main_thread.register_stack_start = (
VALUE*)bsp;
653 #if defined(PTHREAD_STACK_DEFAULT)
654 # if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
655 # error "PTHREAD_STACK_DEFAULT is too small"
657 size_t size = PTHREAD_STACK_DEFAULT;
661 size_t space = space_size(size);
662 #if MAINSTACKADDR_AVAILABLE
665 if (get_stack(&stackaddr, &size) == 0) {
666 space =
STACK_DIR_UPPER((
char *)addr - (
char *)stackaddr, (
char *)stackaddr - (
char *)addr);
668 native_main_thread.stack_maxsize = size - space;
669 #elif defined(HAVE_GETRLIMIT)
670 int pagesize = getpagesize();
673 if (
getrlimit(RLIMIT_STACK, &rlim) == 0) {
674 size = (size_t)rlim.rlim_cur;
676 addr = native_main_thread.stack_start;
678 space = ((size_t)((
char *)addr +
size) / pagesize) * pagesize - (size_t)addr;
681 space = (size_t)addr - ((
size_t)((
char *)addr - size) / pagesize + 1) * pagesize;
683 native_main_thread.stack_maxsize = space;
694 start = native_main_thread.stack_start;
695 end = (
char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
698 start = (
char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
699 end = native_main_thread.stack_start;
702 if ((
void *)addr < start || (void *)addr > end) {
704 native_main_thread.stack_start = (
VALUE *)addr;
705 native_main_thread.stack_maxsize = 0;
/* Fatal variant of CHECK_ERR used below: any pthread_* failure aborts
 * the process via rb_bug_errno instead of returning an error code. */
710 #define CHECK_ERR(expr) \
711 {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}
718 if (pthread_equal(curr, native_main_thread.id)) {
723 #ifdef STACKADDR_AVAILABLE
727 if (get_stack(&start, &size) == 0) {
736 th->machine_register_stack_start = native_main_thread.register_stack_start;
744 #define USE_NATIVE_THREAD_INIT 1
748 thread_start_func_1(
void *th_ptr)
755 #if !defined USE_NATIVE_THREAD_INIT
759 #if defined USE_NATIVE_THREAD_INIT
760 native_thread_init_stack(th);
762 native_thread_init(th);
764 #if defined USE_NATIVE_THREAD_INIT
774 if ((th = register_cached_thread_and_wait()) != 0) {
784 struct cached_thread_entry {
787 struct cached_thread_entry *next;
792 static pthread_mutex_t thread_cache_lock = PTHREAD_MUTEX_INITIALIZER;
793 struct cached_thread_entry *cached_thread_root;
796 register_cached_thread_and_wait(
void)
802 struct cached_thread_entry *entry =
803 (
struct cached_thread_entry *)
malloc(
sizeof(
struct cached_thread_entry));
810 ts.
tv_sec = tv.tv_sec + 60;
811 ts.
tv_nsec = tv.tv_usec * 1000;
813 pthread_mutex_lock(&thread_cache_lock);
815 entry->th_area = &th_area;
817 entry->next = cached_thread_root;
818 cached_thread_root = entry;
820 native_cond_timedwait(&cond, &thread_cache_lock, &ts);
823 struct cached_thread_entry *e = cached_thread_root;
824 struct cached_thread_entry *prev = cached_thread_root;
828 if (prev == cached_thread_root) {
829 cached_thread_root = e->next;
832 prev->next = e->next;
842 native_cond_destroy(&cond);
844 pthread_mutex_unlock(&thread_cache_lock);
855 struct cached_thread_entry *entry;
857 if (cached_thread_root) {
858 pthread_mutex_lock(&thread_cache_lock);
859 entry = cached_thread_root;
861 if (cached_thread_root) {
862 cached_thread_root = entry->next;
863 *entry->th_area = th;
868 native_cond_signal(entry->cond);
870 pthread_mutex_unlock(&thread_cache_lock);
881 if (use_cached_thread(th)) {
882 thread_debug(
"create (use cached thread): %p\n", (
void *)th);
887 const size_t space = space_size(stack_size);
895 #ifdef HAVE_PTHREAD_ATTR_INIT
896 CHECK_ERR(pthread_attr_init(&attr));
898 # ifdef PTHREAD_STACK_MIN
899 thread_debug(
"create - stack size: %lu\n", (
unsigned long)stack_size);
900 CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
903 # ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
904 CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
906 CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
908 err = pthread_create(&th->
thread_id, &attr, thread_start_func_1, th);
910 err = pthread_create(&th->
thread_id,
NULL, thread_start_func_1, th);
913 #ifdef HAVE_PTHREAD_ATTR_INIT
914 CHECK_ERR(pthread_attr_destroy(&attr));
921 native_thread_join(pthread_t th)
923 int err = pthread_join(th, 0);
930 #if USE_NATIVE_THREAD_PRIORITY
935 #if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
936 struct sched_param sp;
940 pthread_getschedparam(th->
thread_id, &policy, &sp);
941 max = sched_get_priority_max(policy);
942 min = sched_get_priority_min(policy);
944 if (min > priority) {
947 else if (max < priority) {
951 sp.sched_priority = priority;
952 pthread_setschedparam(th->
thread_id, policy, &sp);
963 return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
967 ubf_pthread_cond_signal(
void *ptr)
970 thread_debug(
"ubf_pthread_cond_signal (%p)\n", (
void *)th);
985 timeout_rel.tv_nsec = timeout_tv->
tv_usec * 1000;
995 if (timeout_rel.tv_sec > 100000000) {
996 timeout_rel.tv_sec = 100000000;
997 timeout_rel.tv_nsec = 0;
1000 timeout = native_cond_timeout(cond, timeout_rel);
1005 pthread_mutex_lock(lock);
1011 thread_debug(
"native_sleep: interrupted before sleep\n");
1015 native_cond_wait(cond, lock);
1017 native_cond_timedwait(cond, lock, &timeout);
1022 pthread_mutex_unlock(lock);
1029 #ifdef USE_SIGNAL_THREAD_LIST
1030 struct signal_thread_list {
1032 struct signal_thread_list *prev;
1033 struct signal_thread_list *next;
1036 static struct signal_thread_list signal_thread_list_anchor = {
1040 #define FGLOCK(lock, body) do { \
1041 native_mutex_lock(lock); \
1045 native_mutex_unlock(lock); \
1050 print_signal_list(
char *str)
1052 struct signal_thread_list *
list =
1053 signal_thread_list_anchor.next;
1056 thread_debug(
"%p (%p), ", list->th, list->th->thread_id);
1067 FGLOCK(&signal_thread_list_lock, {
1068 struct signal_thread_list *list =
1069 malloc(
sizeof(
struct signal_thread_list));
1072 fprintf(stderr,
"[FATAL] failed to allocate memory\n");
1078 list->prev = &signal_thread_list_anchor;
1079 list->next = signal_thread_list_anchor.next;
1081 list->next->prev =
list;
1083 signal_thread_list_anchor.next =
list;
1093 FGLOCK(&signal_thread_list_lock, {
1094 struct signal_thread_list *list =
1095 (
struct signal_thread_list *)
1098 list->prev->next = list->next;
1100 list->next->prev = list->prev;
1119 ubf_select(
void *ptr)
1122 add_signal_thread_list(th);
1123 if (pthread_self() != timer_thread_id)
1125 ubf_select_each(th);
1129 ping_signal_thread_list(
void)
1131 if (signal_thread_list_anchor.next) {
1132 FGLOCK(&signal_thread_list_lock, {
1133 struct signal_thread_list *
list;
1135 list = signal_thread_list_anchor.next;
1137 ubf_select_each(list->th);
1145 check_signal_thread_list(
void)
1147 if (signal_thread_list_anchor.next)
1153 #define add_signal_thread_list(th) (void)(th)
1154 #define remove_signal_thread_list(th) (void)(th)
1155 #define ubf_select 0
/* Stub used when USE_SIGNAL_THREAD_LIST is not defined: there is no
 * signal thread list to ping, so the timer thread's periodic call
 * does nothing. */
static void
ping_signal_thread_list(void)
{
    /* intentionally empty */
}
/* Stub used when USE_SIGNAL_THREAD_LIST is not defined: report that
 * no threads are currently registered for signal delivery. */
static int
check_signal_thread_list(void)
{
    return 0;
}
/* write(2) a string literal; the length is computed at compile time
 * (sizeof minus the NUL).  The (void)(... < 0) wrapper discards the
 * result while silencing unused-result warnings, since this is used
 * for best-effort debug output. */
1161 #define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)
1166 #define TIME_QUANTUM_USEC (100 * 1000)
1168 #if USE_SLEEPY_TIMER_THREAD
1169 static int timer_thread_pipe[2] = {-1, -1};
1170 static int timer_thread_pipe_low[2] = {-1, -1};
1171 static int timer_thread_pipe_owner_process;
1175 rb_thread_wakeup_timer_thread_fd(
int fd)
1180 if (timer_thread_pipe_owner_process == getpid()) {
1181 const char *buff =
"!";
1183 if ((result = write(fd, buff, 1)) <= 0) {
1185 case EINTR:
goto retry;
1187 #if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
1195 if (TT_DEBUG)
WRITE_CONST(2,
"rb_thread_wakeup_timer_thread: write\n");
1205 rb_thread_wakeup_timer_thread_fd(timer_thread_pipe[1]);
1209 rb_thread_wakeup_timer_thread_low(
void)
1211 rb_thread_wakeup_timer_thread_fd(timer_thread_pipe_low[1]);
1216 consume_communication_pipe(
int fd)
1218 #define CCP_READ_BUFF_SIZE 1024
1220 static char buff[CCP_READ_BUFF_SIZE];
1224 result = read(fd, buff,
sizeof(buff));
1228 else if (result < 0) {
1242 close_communication_pipe(
int pipes[2])
1244 if (close(pipes[0]) < 0) {
1247 if (close(pipes[1]) < 0) {
1250 pipes[0] = pipes[1] = -1;
1254 set_nonblock(
int fd)
1259 oflags =
fcntl(fd, F_GETFL);
1269 setup_communication_pipe_internal(
int pipes[2])
1273 if (pipes[0] != -1) {
1275 close_communication_pipe(pipes);
1280 rb_bug_errno(
"setup_communication_pipe: Failed to create communication pipe for timer thread",
errno);
1284 set_nonblock(pipes[0]);
1285 set_nonblock(pipes[1]);
1290 setup_communication_pipe(
void)
1292 if (timer_thread_pipe_owner_process == getpid()) {
1296 setup_communication_pipe_internal(timer_thread_pipe);
1297 setup_communication_pipe_internal(timer_thread_pipe_low);
1300 timer_thread_pipe_owner_process = getpid();
1314 struct pollfd pollfds[2];
1316 pollfds[0].fd = timer_thread_pipe[0];
1317 pollfds[0].events = POLLIN;
1318 pollfds[1].fd = timer_thread_pipe_low[0];
1319 pollfds[1].events = POLLIN;
1321 need_polling = check_signal_thread_list();
1323 if (gvl->
waiting > 0 || need_polling) {
1325 result = poll(pollfds, 1, TIME_QUANTUM_USEC/1000);
1329 result = poll(pollfds, ARRAY_SIZE(pollfds), -1);
1335 else if (result > 0) {
1336 consume_communication_pipe(timer_thread_pipe[0]);
1337 consume_communication_pipe(timer_thread_pipe_low[0]);
1353 # define PER_NANO 1000000000
/* Fallback for builds where USE_SLEEPY_TIMER_THREAD is 0: there is no
 * low-priority wakeup pipe, so waking the timer thread is a no-op. */
static void
rb_thread_wakeup_timer_thread_low(void)
{
    /* intentionally empty */
}
1357 static pthread_mutex_t timer_thread_lock;
1365 ts.
tv_nsec = TIME_QUANTUM_USEC * 1000;
1366 ts = native_cond_timeout(&timer_thread_cond, ts);
1368 native_cond_timedwait(&timer_thread_cond, &timer_thread_lock, &ts);
1373 thread_timer(
void *
p)
1377 if (TT_DEBUG)
WRITE_CONST(2,
"start timer thread\n");
1379 #if defined(__linux__) && defined(PR_SET_NAME)
1380 prctl(PR_SET_NAME,
"ruby-timer-thr");
1383 #if !USE_SLEEPY_TIMER_THREAD
1384 native_mutex_initialize(&timer_thread_lock);
1385 native_cond_initialize(&timer_thread_cond, RB_CONDATTR_CLOCK_MONOTONIC);
1386 native_mutex_lock(&timer_thread_lock);
1391 ping_signal_thread_list();
1397 timer_thread_sleep(gvl);
1399 #if !USE_SLEEPY_TIMER_THREAD
1400 native_mutex_unlock(&timer_thread_lock);
1401 native_cond_destroy(&timer_thread_cond);
1402 native_mutex_destroy(&timer_thread_lock);
1405 if (TT_DEBUG)
WRITE_CONST(2,
"finish timer thread\n");
1410 rb_thread_create_timer_thread(
void)
1412 if (!timer_thread_id) {
1414 #ifdef HAVE_PTHREAD_ATTR_INIT
1415 pthread_attr_t attr;
1417 err = pthread_attr_init(&attr);
1419 fprintf(stderr,
"[FATAL] Failed to initialize pthread attr(errno: %d)\n", err);
1422 # ifdef PTHREAD_STACK_MIN
1424 const size_t min_size = (4096 * 4);
1429 size_t stack_size = PTHREAD_STACK_MIN;
1430 if (stack_size < min_size) stack_size = min_size;
1432 pthread_attr_setstacksize(&attr, stack_size);
1437 #if USE_SLEEPY_TIMER_THREAD
1438 setup_communication_pipe();
1442 if (timer_thread_id) {
1443 rb_bug(
"rb_thread_create_timer_thread: Timer thread was already created\n");
1445 #ifdef HAVE_PTHREAD_ATTR_INIT
1446 err = pthread_create(&timer_thread_id, &attr, thread_timer, &
GET_VM()->gvl);
1448 err = pthread_create(&timer_thread_id,
NULL, thread_timer, &
GET_VM()->gvl);
1451 fprintf(stderr,
"[FATAL] Failed to create timer thread (errno: %d)\n", err);
1454 #ifdef HAVE_PTHREAD_ATTR_INIT
1455 pthread_attr_destroy(&attr);
1461 native_stop_timer_thread(
int close_anyway)
1466 if (TT_DEBUG) fprintf(stderr,
"stop timer thread\n");
1470 native_thread_join(timer_thread_id);
1471 if (TT_DEBUG) fprintf(stderr,
"joined timer thread\n");
1472 timer_thread_id = 0;
1490 native_reset_timer_thread(
void)
1492 if (TT_DEBUG) fprintf(stderr,
"reset timer thread\n");
1495 #ifdef HAVE_SIGALTSTACK
1497 ruby_stack_overflowed_p(
const rb_thread_t *th,
const void *addr)
1501 const size_t water_mark = 1024 * 1024;
1508 #ifdef STACKADDR_AVAILABLE
1509 else if (get_stack(&base, &size) == 0) {
1516 size /= RUBY_STACK_SPACE_RATIO;
1517 if (size > water_mark) size = water_mark;
1519 if (size > ~(
size_t)base+1) size = ~(size_t)base+1;
1520 if (addr > base && addr <= (
void *)((
char *)base + size))
return 1;
1523 if (size > (
size_t)base) size = (
size_t)base;
1524 if (addr > (
void *)((
char *)base - size) && addr <= base)
return 1;
1533 #if USE_SLEEPY_TIMER_THREAD
1534 if (fd == timer_thread_pipe[0] ||
1535 fd == timer_thread_pipe[1] ||
1536 fd == timer_thread_pipe_low[0] ||
1537 fd == timer_thread_pipe_low[1]) {
void rb_bug(const char *fmt,...)
int gettimeofday(struct timeval *, struct timezone *)
volatile unsigned long waiting
static int max(int a, int b)
void * signal_thread_list
rb_thread_lock_t interrupt_lock
pthread_mutex_t rb_thread_lock_t
rb_unblock_function_t * func
rb_thread_cond_t switch_cond
void rb_update_max_fd(int fd)
void rb_async_bug_errno(const char *mesg, int errno_arg)
#define STACK_UPPER(x, a, b)
void rb_raise(VALUE exc, const char *fmt,...)
static volatile int system_working
unsigned long unsigned_time_t
sighandler_t posix_signal(int signum, sighandler_t handler)
#define rb_fd_select(n, rfds, wfds, efds, timeout)
#define RUBY_VM_THREAD_VM_STACK_SIZE
void rb_thread_wakeup_timer_thread(void)
int getrlimit(int resource, struct rlimit *rlp)
int rb_cloexec_pipe(int fildes[2])
VALUE * machine_stack_start
#define GVL_UNLOCK_BEGIN()
struct rb_vm_struct::@153 default_params
#define STACK_DIR_UPPER(a, b)
rb_thread_cond_t switch_wait_cond
int pthread_kill(pthread_t thread, int sig)
#define STACK_GROW_DIR_DETECTION
void rb_bug_errno(const char *mesg, int errno_arg)
void ruby_init_stack(volatile VALUE *)
static void timer_thread_function(void *)
int rb_reserved_fd_p(int fd)
void rb_sys_fail(const char *mesg)
#define WRITE_CONST(fd, str)
#define thread_start_func_2(th, st, rst)
struct rb_unblock_callback unblock
rb_thread_cond_t sleep_cond
struct rb_encoding_entry * list
native_thread_data_t native_thread_data
size_t thread_machine_stack_size
static VALUE thread_start(VALUE klass, VALUE args)
#define RUBY_VM_INTERRUPTED(th)
void Init_native_thread(void)
size_t machine_stack_maxsize
static rb_thread_t * GET_THREAD(void)
#define IS_STACK_DIR_UPPER()