Mirror of https://github.com/github/ruby.git
thread: reduce GET_THREAD calls

This allows native_sleep to use less stack (80 -> 64 bytes on x86-64) for
GVL_UNLOCK_BEGIN/END. Future APIs will pass `ec` or `th` around anyway, so
the BLOCKING_REGION change should be beneficial there as well.

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@63448 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
Parent: a3e73d13c1
Commit: 0f0311df0a
 thread.c         | 30 ++++++++++++++----------------
 thread_pthread.c |  4 ++--
 thread_win32.c   | 10 ++++++----
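
The diff below removes the implicit GET_THREAD() lookup from the GVL_UNLOCK_BEGIN/END and BLOCKING_REGION macros and makes callers pass the thread in. As a rough illustration of why that trims native_sleep's stack use, here is a minimal, self-contained C sketch; the toy_* names are stand-ins for this note only, not Ruby's real types or macros. The old-style macro pair performs its own lookup and keeps the result in a hidden local that must stay live until the matching END, while the new-style pair simply reuses a pointer the caller already has.

#include <stdio.h>

/* Stand-ins for the real VM types and functions; illustrative only. */
struct toy_thread { int id; };
static struct toy_thread main_thread = { 1 };

static struct toy_thread *toy_get_thread(void) { return &main_thread; }
static void toy_gvl_release(struct toy_thread *th) { printf("GVL released by thread %d\n", th->id); }
static void toy_gvl_acquire(struct toy_thread *th) { printf("GVL re-acquired by thread %d\n", th->id); }

/* Old shape: BEGIN does its own lookup and keeps the result in a hidden
 * local (_th_stored) that must stay live until the matching END. */
#define TOY_UNLOCK_BEGIN() do { \
    struct toy_thread *_th_stored = toy_get_thread(); \
    toy_gvl_release(_th_stored);

#define TOY_UNLOCK_END() \
    toy_gvl_acquire(_th_stored); \
} while (0)

/* New shape: the caller, which typically already holds `th`, passes it in;
 * no extra lookup and no extra local in the expansion. */
#define TOY_UNLOCK_BEGIN_TH(th) do { \
    toy_gvl_release(th);

#define TOY_UNLOCK_END_TH(th) \
    toy_gvl_acquire(th); \
} while (0)

static void sleep_old_style(void)
{
    TOY_UNLOCK_BEGIN();
    puts("  ...blocking sleep would happen here (old style)...");
    TOY_UNLOCK_END();
}

static void sleep_new_style(struct toy_thread *th)  /* th supplied by the caller */
{
    TOY_UNLOCK_BEGIN_TH(th);
    puts("  ...blocking sleep would happen here (new style)...");
    TOY_UNLOCK_END_TH(th);
}

int main(void)
{
    sleep_old_style();
    sleep_new_style(toy_get_thread());  /* thread looked up once, reused throughout */
    return 0;
}

With the old pair, every expansion carries its own stored thread pointer plus a lookup call; with the new pair the caller's existing `th` is reused, which is where the 80 -> 64 byte saving in native_sleep comes from.
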
--- a/thread.c
+++ b/thread.c
@@ -149,14 +149,13 @@ static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_regio
     SET_MACHINE_STACK_END(&(th)->ec->machine.stack_end); \
   } while (0)
 
-#define GVL_UNLOCK_BEGIN() do { \
-    rb_thread_t *_th_stored = GET_THREAD(); \
-    RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
-    gvl_release(_th_stored->vm);
+#define GVL_UNLOCK_BEGIN(th) do { \
+    RB_GC_SAVE_MACHINE_CONTEXT(th); \
+    gvl_release(th->vm);
 
-#define GVL_UNLOCK_END() \
-    gvl_acquire(_th_stored->vm, _th_stored); \
-    rb_thread_set_current(_th_stored); \
+#define GVL_UNLOCK_END(th) \
+    gvl_acquire(th->vm, th); \
+    rb_thread_set_current(th); \
 } while(0)
 
 #ifdef __GNUC__
@@ -168,14 +167,13 @@ static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_regio
 #else
 #define only_if_constant(expr, notconst) notconst
 #endif
-#define BLOCKING_REGION(exec, ubf, ubfarg, fail_if_interrupted) do { \
-    rb_thread_t *__th = GET_THREAD(); \
+#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
     struct rb_blocking_region_buffer __region; \
-    if (blocking_region_begin(__th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
+    if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
 	/* always return true unless fail_if_interrupted */ \
 	!only_if_constant(fail_if_interrupted, TRUE)) { \
 	exec; \
-	blocking_region_end(__th, &__region); \
+	blocking_region_end(th, &__region); \
     }; \
 } while(0)
 
@@ -1399,7 +1397,7 @@ call_without_gvl(void *(*func)(void *), void *data1,
 	data2 = th;
     }
 
-    BLOCKING_REGION({
+    BLOCKING_REGION(th, {
 	val = func(data1);
 	saved_errno = errno;
     }, ubf, data2, fail_if_interrupted);
@@ -1527,10 +1525,10 @@ rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
 
     EC_PUSH_TAG(ec);
     if ((state = EC_EXEC_TAG()) == TAG_NONE) {
-	BLOCKING_REGION({
+	BLOCKING_REGION(wfd.th, {
 	    val = func(data1);
 	    saved_errno = errno;
-	}, ubf_select, rb_ec_thread_ptr(ec), FALSE);
+	}, ubf_select, wfd.th, FALSE);
     }
     EC_POP_TAG();
 
@@ -3847,7 +3845,7 @@ do_select(int n, rb_fdset_t *const readfds, rb_fdset_t *const writefds,
 	do {
 	    lerrno = 0;
 
-	    BLOCKING_REGION({
+	    BLOCKING_REGION(th, {
 		result = native_fd_select(n, readfds, writefds, exceptfds,
 					  timeval_for(timeout, tsp), th);
 		if (result < 0) lerrno = errno;
@@ -3988,7 +3986,7 @@ rb_wait_for_single_fd(int fd, int events, struct timeval *timeout)
     do {
 	fds.revents = 0;
 	lerrno = 0;
-	BLOCKING_REGION({
+	BLOCKING_REGION(th, {
 	    result = ppoll(&fds, 1, tsp, NULL);
 	    if (result < 0) lerrno = errno;
 	}, ubf_select, th, FALSE);
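
The hunks above change BLOCKING_REGION from performing its own GET_THREAD() to taking the thread as its first argument, so call sites that already have `th`, or can derive it once from an execution context, never repeat the lookup. Below is a minimal, self-contained sketch of that caller-side pattern, using toy stand-in types rather than Ruby's real rb_thread_t / rb_execution_context_t.

#include <errno.h>
#include <stdio.h>

/* Toy stand-ins for rb_thread_t and rb_execution_context_t; illustrative only. */
struct toy_thread { int id; };
struct toy_ec { struct toy_thread *thread_ptr; };

static void toy_region_begin(struct toy_thread *th) { printf("GVL released for thread %d\n", th->id); }
static void toy_region_end(struct toy_thread *th)   { printf("GVL re-acquired for thread %d\n", th->id); }

/* Same shape as the new BLOCKING_REGION(th, exec, ...): the caller supplies
 * the thread; the macro never looks it up. */
#define TOY_BLOCKING_REGION(th, exec) do { \
    toy_region_begin(th); \
    exec; \
    toy_region_end(th); \
} while (0)

/* A caller that only has an execution context derives the thread once at the
 * top and reuses it for the region (and anything else that needs it). */
static int blocking_call(struct toy_ec *ec)
{
    struct toy_thread *th = ec->thread_ptr;  /* single lookup */
    int saved_errno = 0;

    TOY_BLOCKING_REGION(th, {
        puts("  ...a blocking syscall would run here...");
        saved_errno = errno;
    });
    return saved_errno;
}

int main(void)
{
    struct toy_thread t = { 1 };
    struct toy_ec ec = { &t };
    return blocking_call(&ec);
}

This mirrors the commit message's point about future APIs: once `ec` or `th` is threaded through the call chain, the blocking-region machinery can simply consume it instead of re-deriving the current thread.
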
--- a/thread_pthread.c
+++ b/thread_pthread.c
@@ -1044,7 +1044,7 @@ native_sleep(rb_thread_t *th, struct timespec *timeout_rel)
 	timeout = native_cond_timeout(cond, *timeout_rel);
     }
 
-    GVL_UNLOCK_BEGIN();
+    GVL_UNLOCK_BEGIN(th);
     {
 	rb_native_mutex_lock(lock);
 	th->unblock.func = ubf_pthread_cond_signal;
@@ -1065,7 +1065,7 @@ native_sleep(rb_thread_t *th, struct timespec *timeout_rel)
 
 	rb_native_mutex_unlock(lock);
     }
-    GVL_UNLOCK_END();
+    GVL_UNLOCK_END(th);
 
     thread_debug("native_sleep done\n");
 }
--- a/thread_win32.c
+++ b/thread_win32.c
@@ -210,8 +210,9 @@ int
 rb_w32_wait_events(HANDLE *events, int num, DWORD timeout)
 {
     int ret;
+    rb_thread_t *th = GET_THREAD();
 
-    BLOCKING_REGION(ret = rb_w32_wait_events_blocking(events, num, timeout),
+    BLOCKING_REGION(th, ret = rb_w32_wait_events_blocking(events, num, timeout),
 		    ubf_handle, ruby_thread_from_native(), FALSE);
     return ret;
 }
@@ -264,8 +265,9 @@ int WINAPI
 rb_w32_Sleep(unsigned long msec)
 {
     int ret;
+    rb_thread_t *th = GET_THREAD();
 
-    BLOCKING_REGION(ret = rb_w32_sleep(msec),
+    BLOCKING_REGION(th, ret = rb_w32_sleep(msec),
 		    ubf_handle, ruby_thread_from_native(), FALSE);
     return ret;
 }
@@ -276,7 +278,7 @@ native_sleep(rb_thread_t *th, struct timespec *ts)
     const volatile DWORD msec = (ts) ?
 	(DWORD)(ts->tv_sec * 1000 + ts->tv_nsec / 1000000) : INFINITE;
 
-    GVL_UNLOCK_BEGIN();
+    GVL_UNLOCK_BEGIN(th);
     {
 	DWORD ret;
 
@@ -299,7 +301,7 @@ native_sleep(rb_thread_t *th, struct timespec *ts)
 	th->unblock.arg = 0;
 	rb_native_mutex_unlock(&th->interrupt_lock);
     }
-    GVL_UNLOCK_END();
+    GVL_UNLOCK_END(th);
 }
 
 void