netpoll: more efficient locking
Callers of netpoll_poll_lock() own NAPI_STATE_SCHED.
Callers of netpoll_poll_unlock() have BH blocked from the point
NAPI_STATE_SCHED is cleared until poll_lock is released.
We can avoid the spinlock which has no contention, and use cmpxchg()
on poll_owner which we need to set anyway.
This removes a possible lockdep violation after the cited commit,
since sk_busy_loop() re-enables BH before calling busy_poll_stop()
Fixes: 217f697436 ("net: busy-poll: allow preemption in sk_busy_loop()")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
  parent: 1629dd4f76
  commit: 89c4b442b7
@@ -316,7 +316,6 @@ struct napi_struct {
 	unsigned int		gro_count;
 	int			(*poll)(struct napi_struct *, int);
 #ifdef CONFIG_NETPOLL
-	spinlock_t		poll_lock;
 	int			poll_owner;
 #endif
 	struct net_device	*dev;
@@ -78,8 +78,11 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi)
 	struct net_device *dev = napi->dev;
 
 	if (dev && dev->npinfo) {
-		spin_lock(&napi->poll_lock);
-		napi->poll_owner = smp_processor_id();
+		int owner = smp_processor_id();
+
+		while (cmpxchg(&napi->poll_owner, -1, owner) != -1)
+			cpu_relax();
+
 		return napi;
 	}
 	return NULL;
@@ -89,10 +92,8 @@ static inline void netpoll_poll_unlock(void *have)
 {
 	struct napi_struct *napi = have;
 
-	if (napi) {
-		napi->poll_owner = -1;
-		spin_unlock(&napi->poll_lock);
-	}
+	if (napi)
+		smp_store_release(&napi->poll_owner, -1);
 }
 
 static inline bool netpoll_tx_running(struct net_device *dev)
@@ -5143,7 +5143,6 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
 	list_add(&napi->dev_list, &dev->napi_list);
 	napi->dev = dev;
 #ifdef CONFIG_NETPOLL
-	spin_lock_init(&napi->poll_lock);
 	napi->poll_owner = -1;
 #endif
 	set_bit(NAPI_STATE_SCHED, &napi->state);
@@ -171,12 +171,12 @@ static void poll_one_napi(struct napi_struct *napi)
 static void poll_napi(struct net_device *dev)
 {
 	struct napi_struct *napi;
+	int cpu = smp_processor_id();
 
 	list_for_each_entry(napi, &dev->napi_list, dev_list) {
-		if (napi->poll_owner != smp_processor_id() &&
-		    spin_trylock(&napi->poll_lock)) {
+		if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
 			poll_one_napi(napi);
-			spin_unlock(&napi->poll_lock);
+			smp_store_release(&napi->poll_owner, -1);
 		}
 	}
Loading…
Reference in New Issue