ep_scan_ready_list(): prepare to split up
Take the stuff done before and after the callback into separate helpers.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
This commit is contained in:
Parent
bde03c4c1a
Commit
db502f8a3b
|
@ -561,28 +561,10 @@ static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
|
|||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
/**
|
||||
* ep_scan_ready_list - Scans the ready list in a way that makes possible for
|
||||
* the scan code, to call f_op->poll(). Also allows for
|
||||
* O(NumReady) performance.
|
||||
*
|
||||
* @ep: Pointer to the epoll private data structure.
|
||||
* @sproc: Pointer to the scan callback.
|
||||
* @priv: Private opaque data passed to the @sproc callback.
|
||||
* @depth: The current depth of recursive f_op->poll calls.
|
||||
* @ep_locked: caller already holds ep->mtx
|
||||
*
|
||||
* Returns: The same integer error code returned by the @sproc callback.
|
||||
*/
|
||||
static __poll_t ep_scan_ready_list(struct eventpoll *ep,
|
||||
__poll_t (*sproc)(struct eventpoll *,
|
||||
struct list_head *, void *),
|
||||
void *priv, int depth, bool ep_locked)
|
||||
static void ep_start_scan(struct eventpoll *ep,
|
||||
int depth, bool ep_locked,
|
||||
struct list_head *txlist)
|
||||
{
|
||||
__poll_t res;
|
||||
struct epitem *epi, *nepi;
|
||||
LIST_HEAD(txlist);
|
||||
|
||||
lockdep_assert_irqs_enabled();
|
||||
|
||||
/*
|
||||
|
@ -602,14 +584,16 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep,
|
|||
* in a lockless way.
|
||||
*/
|
||||
write_lock_irq(&ep->lock);
|
||||
list_splice_init(&ep->rdllist, &txlist);
|
||||
list_splice_init(&ep->rdllist, txlist);
|
||||
WRITE_ONCE(ep->ovflist, NULL);
|
||||
write_unlock_irq(&ep->lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Now call the callback function.
|
||||
*/
|
||||
res = (*sproc)(ep, &txlist, priv);
|
||||
static void ep_done_scan(struct eventpoll *ep,
|
||||
int depth, bool ep_locked,
|
||||
struct list_head *txlist)
|
||||
{
|
||||
struct epitem *epi, *nepi;
|
||||
|
||||
write_lock_irq(&ep->lock);
|
||||
/*
|
||||
|
@ -644,13 +628,38 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep,
|
|||
/*
|
||||
* Quickly re-inject items left on "txlist".
|
||||
*/
|
||||
list_splice(&txlist, &ep->rdllist);
|
||||
list_splice(txlist, &ep->rdllist);
|
||||
__pm_relax(ep->ws);
|
||||
write_unlock_irq(&ep->lock);
|
||||
|
||||
if (!ep_locked)
|
||||
mutex_unlock(&ep->mtx);
|
||||
}
|
||||
|
||||
/**
|
||||
* ep_scan_ready_list - Scans the ready list in a way that makes possible for
|
||||
* the scan code, to call f_op->poll(). Also allows for
|
||||
* O(NumReady) performance.
|
||||
*
|
||||
* @ep: Pointer to the epoll private data structure.
|
||||
* @sproc: Pointer to the scan callback.
|
||||
* @priv: Private opaque data passed to the @sproc callback.
|
||||
* @depth: The current depth of recursive f_op->poll calls.
|
||||
* @ep_locked: caller already holds ep->mtx
|
||||
*
|
||||
* Returns: The same integer error code returned by the @sproc callback.
|
||||
*/
|
||||
static __poll_t ep_scan_ready_list(struct eventpoll *ep,
|
||||
__poll_t (*sproc)(struct eventpoll *,
|
||||
struct list_head *, void *),
|
||||
void *priv, int depth, bool ep_locked)
|
||||
{
|
||||
__poll_t res;
|
||||
LIST_HEAD(txlist);
|
||||
|
||||
ep_start_scan(ep, depth, ep_locked, &txlist);
|
||||
res = (*sproc)(ep, &txlist, priv);
|
||||
ep_done_scan(ep, depth, ep_locked, &txlist);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in a new issue