diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 72c82f2ea536..8ffd434676b7 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -51,7 +51,11 @@ static inline bool sk_can_busy_loop(const struct sock *sk)
 	return sk->sk_ll_usec && !signal_pending(current);
 }
 
-void sk_busy_loop(struct sock *sk, int nonblock);
+bool sk_busy_loop_end(void *p, unsigned long start_time);
+
+void napi_busy_loop(unsigned int napi_id,
+		    bool (*loop_end)(void *, unsigned long),
+		    void *loop_end_arg);
 
 #else /* CONFIG_NET_RX_BUSY_POLL */
 static inline unsigned long net_busy_loop_on(void)
@@ -64,10 +68,6 @@ static inline bool sk_can_busy_loop(struct sock *sk)
 	return false;
 }
 
-static inline void sk_busy_loop(struct sock *sk, int nonblock)
-{
-}
-
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
 static inline unsigned long busy_loop_current_time(void)
@@ -111,6 +111,16 @@ static inline bool sk_busy_loop_timeout(struct sock *sk,
 	return true;
 }
 
+static inline void sk_busy_loop(struct sock *sk, int nonblock)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	unsigned int napi_id = READ_ONCE(sk->sk_napi_id);
+
+	if (napi_id >= MIN_NAPI_ID)
+		napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk);
+#endif
+}
+
 /* used in the NIC receive handler to mark the skb */
 static inline void skb_mark_napi_id(struct sk_buff *skb,
 				    struct napi_struct *napi)
diff --git a/net/core/dev.c b/net/core/dev.c
index 2d1b5613b7fd..ef9fe60ee294 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5060,19 +5060,16 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
 	do_softirq();
 }
 
-void sk_busy_loop(struct sock *sk, int nonblock)
+void napi_busy_loop(unsigned int napi_id,
+		    bool (*loop_end)(void *, unsigned long),
+		    void *loop_end_arg)
 {
-	unsigned long start_time = nonblock ? 0 : busy_loop_current_time();
+	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
 	int (*napi_poll)(struct napi_struct *napi, int budget);
 	void *have_poll_lock = NULL;
 	struct napi_struct *napi;
-	unsigned int napi_id;
 
 restart:
-	napi_id = READ_ONCE(sk->sk_napi_id);
-	if (napi_id < MIN_NAPI_ID)
-		return;
-
 	napi_poll = NULL;
 
 	rcu_read_lock();
@@ -5106,12 +5103,11 @@ void sk_busy_loop(struct sock *sk, int nonblock)
 		trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
 count:
 		if (work > 0)
-			__NET_ADD_STATS(sock_net(sk),
+			__NET_ADD_STATS(dev_net(napi->dev),
 					LINUX_MIB_BUSYPOLLRXPACKETS, work);
 		local_bh_enable();
 
-		if (nonblock || !skb_queue_empty(&sk->sk_receive_queue) ||
-		    sk_busy_loop_timeout(sk, start_time))
+		if (!loop_end || loop_end(loop_end_arg, start_time))
 			break;
 
 		if (unlikely(need_resched())) {
@@ -5120,8 +5116,7 @@ void sk_busy_loop(struct sock *sk, int nonblock)
 			preempt_enable();
 			rcu_read_unlock();
 			cond_resched();
-			if (!skb_queue_empty(&sk->sk_receive_queue) ||
-			    sk_busy_loop_timeout(sk, start_time))
+			if (loop_end(loop_end_arg, start_time))
 				return;
 			goto restart;
 		}
@@ -5133,7 +5128,7 @@ void sk_busy_loop(struct sock *sk, int nonblock)
 out:
 	rcu_read_unlock();
 }
-EXPORT_SYMBOL(sk_busy_loop);
+EXPORT_SYMBOL(napi_busy_loop);
 
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 1b9030ee6f4b..4b762f2a3552 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3237,3 +3237,14 @@ static int __init proto_init(void)
 subsys_initcall(proto_init);
 
 #endif /* PROC_FS */
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+bool sk_busy_loop_end(void *p, unsigned long start_time)
+{
+	struct sock *sk = p;
+
+	return !skb_queue_empty(&sk->sk_receive_queue) ||
+	       sk_busy_loop_timeout(sk, start_time);
+}
+EXPORT_SYMBOL(sk_busy_loop_end);
+#endif /* CONFIG_NET_RX_BUSY_POLL */
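
With this change, busy polling no longer requires a socket: any in-kernel caller that knows a napi_id can drive napi_busy_loop() with its own termination callback, and passing a NULL loop_end (as the nonblocking sk_busy_loop() path now does) polls the NAPI context just once before returning. As a hypothetical illustration of the new interface, and not part of the patch itself, a caller with its own work source and time budget might look like the sketch below; struct my_poll_state, my_queue_has_work() and the other my_* names are invented for the example.

#include <linux/jiffies.h>	/* time_after() */
#include <net/busy_poll.h>	/* napi_busy_loop(), busy_loop_current_time(), MIN_NAPI_ID */

struct my_queue;					/* hypothetical work source */
bool my_queue_has_work(struct my_queue *q);		/* hypothetical helper */

struct my_poll_state {
	struct my_queue *q;
	unsigned long budget;	/* busy-poll budget, busy_loop_current_time() units */
};

static bool my_loop_end(void *p, unsigned long start_time)
{
	struct my_poll_state *st = p;

	/* Stop once work has arrived or the time budget is spent. */
	return my_queue_has_work(st->q) ||
	       time_after(busy_loop_current_time(), start_time + st->budget);
}

static void my_busy_poll(unsigned int napi_id, struct my_poll_state *st)
{
	/* Only IDs >= MIN_NAPI_ID identify a real NAPI context. */
	if (napi_id >= MIN_NAPI_ID)
		napi_busy_loop(napi_id, my_loop_end, st);
}

This mirrors what sk_busy_loop_end() does for sockets: the callback receives the start_time captured by napi_busy_loop() and decides both the "data arrived" and the "timed out" halves of the exit condition.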