mirror of https://gitee.com/openkylin/linux.git
epoll: simplify signal handling
Check signals before locking ep->lock, and immediately return -EINTR if
there is any signal pending. This saves a few loads, stores, and branches
from the hot path and simplifies the loop structure for follow up patches.

Link: https://lkml.kernel.org/r/20201106231635.3528496-3-soheil.kdev@gmail.com
Signed-off-by: Soheil Hassas Yeganeh <soheil@google.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Khazhismel Kumykov <khazhy@google.com>
Cc: Guantao Liu <guantaol@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 289caf5d8f
commit 2efdaf7660
@@ -1733,7 +1733,7 @@ static inline struct timespec64 ep_set_mstimeout(long ms)
 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		   int maxevents, long timeout)
 {
-	int res = 0, eavail, timed_out = 0;
+	int res, eavail, timed_out = 0;
 	u64 slack = 0;
 	wait_queue_entry_t wait;
 	ktime_t expires, *to = NULL;
@@ -1780,6 +1780,9 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 	ep_reset_busy_poll_napi_id(ep);
 
 	do {
+		if (signal_pending(current))
+			return -EINTR;
+
 		/*
 		 * Internally init_wait() uses autoremove_wake_function(),
 		 * thus wait entry is removed from the wait queue on each
@@ -1809,15 +1812,12 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		 * important.
 		 */
 		eavail = ep_events_available(ep);
-		if (!eavail) {
-			if (signal_pending(current))
-				res = -EINTR;
-			else
-				__add_wait_queue_exclusive(&ep->wq, &wait);
-		}
+		if (!eavail)
+			__add_wait_queue_exclusive(&ep->wq, &wait);
+
 		write_unlock_irq(&ep->lock);
 
-		if (!eavail && !res)
+		if (!eavail)
 			timed_out = !schedule_hrtimeout_range(to, slack,
 							      HRTIMER_MODE_ABS);
 
@@ -1853,14 +1853,14 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		 * finding more events available and fetching
 		 * repeatedly.
 		 */
-		res = -EINTR;
+		return -EINTR;
 	}
 	/*
 	 * Try to transfer events to user space. In case we get 0 events and
 	 * there's still timeout left over, we go trying again in search of
 	 * more luck.
	 */
-	if (!res && eavail &&
+	if (eavail &&
 	    !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
 		goto fetch_events;
 
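For context, below is a minimal userspace sketch (not part of this patch; a hypothetical standalone program, not kernel code) of the user-visible contract that ep_poll() implements: epoll_wait() fails with errno set to EINTR when a pending signal interrupts the wait. This patch only changes where the kernel performs that check (at the top of the wait loop, before taking ep->lock), not the contract itself.

/* Hypothetical standalone example; not taken from the kernel tree. */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>
#include <unistd.h>

static void on_alarm(int sig)
{
	(void)sig;	/* the handler only needs to interrupt epoll_wait() */
}

int main(void)
{
	int epfd = epoll_create1(0);
	if (epfd < 0) {
		perror("epoll_create1");
		return 1;
	}

	/* Arrange for SIGALRM to arrive while epoll_wait() is blocked. */
	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_alarm;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGALRM, &sa, NULL);
	alarm(1);

	/* No fds are registered, so this blocks until the signal arrives
	 * or the 5 second timeout expires. */
	struct epoll_event ev;
	int n = epoll_wait(epfd, &ev, 1, 5000);
	if (n < 0 && errno == EINTR)
		printf("epoll_wait() interrupted by a signal: %s\n",
		       strerror(EINTR));

	close(epfd);
	return 0;
}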