dlm: Don't swamp the CPU with callbacks queued during recovery
[ Upstream commit 216f0efd19 ]
Before this patch, recovery would delay all callbacks by putting them
on a queue; afterward they were all requeued to the callback work queue
at once. This patch does the same thing, but takes a break after every
25 of them so it won't swamp the CPU at the expense of other RT
processes like corosync.
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
Parent: 0ca40937b6
Commit: ecaa109084

 fs/dlm/ast.c | 10 ++++++++++
 1 changed file with 10 additions and 0 deletions
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -290,6 +290,8 @@ void dlm_callback_suspend(struct dlm_ls *ls)
 		flush_workqueue(ls->ls_callback_wq);
 }
 
+#define MAX_CB_QUEUE 25
+
 void dlm_callback_resume(struct dlm_ls *ls)
 {
 	struct dlm_lkb *lkb, *safe;
@@ -300,15 +302,23 @@ void dlm_callback_resume(struct dlm_ls *ls)
 	if (!ls->ls_callback_wq)
 		return;
 
+more:
 	mutex_lock(&ls->ls_cb_mutex);
 	list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
 		list_del_init(&lkb->lkb_cb_list);
 		queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
 		count++;
+		if (count == MAX_CB_QUEUE)
+			break;
 	}
 	mutex_unlock(&ls->ls_cb_mutex);
 
 	if (count)
 		log_rinfo(ls, "dlm_callback_resume %d", count);
+	if (count == MAX_CB_QUEUE) {
+		count = 0;
+		cond_resched();
+		goto more;
+	}
 }
 
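For readers outside the kernel tree, here is a minimal userspace sketch
of the same batch-and-yield idea, assuming a simple singly linked queue.
The struct node type, dispatch(), and drain_queue() below are
hypothetical stand-ins for struct dlm_lkb, queue_work(), and
dlm_callback_resume(), and sched_yield() plays the role of
cond_resched(); this illustrates the technique, not the kernel code.

/*
 * Userspace sketch: drain a queue of pending callbacks, yielding the
 * CPU after every batch of 25 so other runnable tasks get a turn.
 */
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_CB_QUEUE 25		/* same batch size as the patch */

struct node {
	int id;
	struct node *next;
};

/* Dispatch one queued callback; here it just prints its id. */
static void dispatch(struct node *n)
{
	printf("callback %d\n", n->id);
	free(n);
}

/* Drain the whole queue, yielding after each full batch of 25. */
static void drain_queue(struct node **head)
{
	int count;

	do {
		count = 0;
		while (*head && count < MAX_CB_QUEUE) {
			struct node *n = *head;

			*head = n->next;
			dispatch(n);
			count++;
		}
		/* Only yield between full batches, as the patch does. */
		if (count == MAX_CB_QUEUE)
			sched_yield();
	} while (count == MAX_CB_QUEUE);
}

int main(void)
{
	struct node *head = NULL;
	int i;

	/* Queue 60 fake callbacks: two full batches plus a partial one. */
	for (i = 60; i >= 1; i--) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		n->next = head;
		head = n;
	}
	drain_queue(&head);
	return 0;
}

As in the patch, a short queue is flushed in a single pass with no
extra scheduling overhead; the yield-and-retry path only triggers when
a full batch of 25 was drained (including the harmless extra pass when
the queue empties at exactly 25, which the kernel version also makes).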