dlm: Don't swamp the CPU with callbacks queued during recovery
Before this patch, recovery delayed all callbacks and put them on a queue; afterward they were all requeued to the callback workqueue at once. This patch does the same thing, but takes a break after every 25 callbacks queued so it won't swamp the CPU at the expense of other RT processes such as corosync.

Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
commit 216f0efd19
parent 9de30f3f7f
 fs/dlm/ast.c | 10 ++++++++++
 1 file changed, 10 insertions(+)
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -292,6 +292,8 @@ void dlm_callback_suspend(struct dlm_ls *ls)
 		flush_workqueue(ls->ls_callback_wq);
 }
 
+#define MAX_CB_QUEUE 25
+
 void dlm_callback_resume(struct dlm_ls *ls)
 {
 	struct dlm_lkb *lkb, *safe;
@@ -302,15 +304,23 @@ void dlm_callback_resume(struct dlm_ls *ls)
 	if (!ls->ls_callback_wq)
 		return;
 
+more:
 	mutex_lock(&ls->ls_cb_mutex);
 	list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
 		list_del_init(&lkb->lkb_cb_list);
 		queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
 		count++;
+		if (count == MAX_CB_QUEUE)
+			break;
 	}
 	mutex_unlock(&ls->ls_cb_mutex);
 
 	if (count)
 		log_rinfo(ls, "dlm_callback_resume %d", count);
+	if (count == MAX_CB_QUEUE) {
+		count = 0;
+		cond_resched();
+		goto more;
+	}
 }
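The core idea of the patch is simply to drain the delayed-callback list in bounded batches and reschedule between batches. For illustration only, here is a minimal, self-contained userspace sketch of that pattern; the list type, the names pending_item and drain_pending, and the use of sched_yield() in place of the kernel's cond_resched() are hypothetical stand-ins, not part of the DLM code.

/*
 * Illustrative userspace sketch (not the kernel code): drain a list of
 * pending items in batches of 25, yielding the CPU between batches so
 * other processes are not starved.
 */
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_BATCH 25

struct pending_item {
	int id;
	struct pending_item *next;
};

static void drain_pending(struct pending_item **head)
{
	int count;

more:
	count = 0;
	while (*head) {
		struct pending_item *item = *head;

		*head = item->next;
		printf("dispatching item %d\n", item->id);
		free(item);

		/* Stop after a full batch so we don't hog the CPU. */
		if (++count == MAX_BATCH)
			break;
	}

	if (count == MAX_BATCH) {
		sched_yield();	/* stand-in for cond_resched() */
		goto more;
	}
}

int main(void)
{
	struct pending_item *head = NULL;
	int i;

	/* Build 60 pending items so more than one batch is needed. */
	for (i = 0; i < 60; i++) {
		struct pending_item *item = malloc(sizeof(*item));

		item->id = i;
		item->next = head;
		head = item;
	}

	drain_pending(&head);
	return 0;
}

As in the patch, the batch counter is only checked against the limit after the loop; hitting the limit means there may be more work left, so the draining resumes after giving other tasks a chance to run.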