net: mvneta: Fix race condition during stopping

When stopping the port, the CPU notifier is still registered while the
mvneta_stop_dev() function calls mvneta_percpu_disable() on each CPU.
A new CPU could come online at that point, which would be racy.

This patch adds a flag that prevents the notifier code from running for
a new CPU while the port is stopping. It also uses the spinlock
introduced previously. To avoid a deadlock, the lock has been moved
outside the mvneta_percpu_elect() function.

Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Author: Gregory CLEMENT <gregory.clement@free-electrons.com>
Date: 2016-02-04 22:09:29 +01:00
Committer: David S. Miller <davem@davemloft.net>
Parent: 5888511ea0
Commit: 120cfa502c
1 changed file with 28 additions and 8 deletions
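
The pattern at the heart of this fix can be sketched outside the kernel. The
short userspace C example below is only an illustration, not mvneta code: a
pthread mutex stands in for the driver's spinlock, a plain function call
stands in for the CPU hotplug notifier, and struct port, cpu_online_notifier()
and port_stop() are hypothetical names. The stop path sets a "stopped" flag
under the lock before tearing anything down, and the notifier checks that flag
under the same lock and bails out instead of reconfiguring a port that is
going away.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct port {
	pthread_mutex_t lock;	/* plays the role of pp->lock */
	bool is_stopped;	/* plays the role of pp->is_stopped */
};

/* Stand-in for the CPU_ONLINE notifier path. */
static void cpu_online_notifier(struct port *pp, int cpu)
{
	pthread_mutex_lock(&pp->lock);
	if (pp->is_stopped) {
		/* Racing with port_stop(): do not configure the new CPU. */
		pthread_mutex_unlock(&pp->lock);
		return;
	}
	printf("configuring queues for CPU %d\n", cpu);
	pthread_mutex_unlock(&pp->lock);
}

/* Stand-in for the stop path: set the flag first, then tear down. */
static void port_stop(struct port *pp)
{
	pthread_mutex_lock(&pp->lock);
	pp->is_stopped = true;	/* notifiers now refuse new CPUs */
	/* ... stop the device and unregister the notifier here ... */
	pthread_mutex_unlock(&pp->lock);
}

int main(void)
{
	struct port pp = { PTHREAD_MUTEX_INITIALIZER, false };

	cpu_online_notifier(&pp, 1);	/* runs: the port is still up */
	port_stop(&pp);
	cpu_online_notifier(&pp, 2);	/* skipped: the port is stopping */
	return 0;
}

In the patch itself the same idea appears as pp->is_stopped guarded by
pp->lock. In addition, mvneta_percpu_elect() no longer takes the lock
internally; each caller (the CPU_DEAD notifier path and mvneta_config_rss())
wraps the call in spin_lock(&pp->lock)/spin_unlock(&pp->lock), which is how
the deadlock mentioned in the commit message is avoided.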

@@ -374,6 +374,7 @@ struct mvneta_port {
 	 * ensuring that the configuration remains coherent.
 	 */
 	spinlock_t lock;
+	bool is_stopped;
 
 	/* Core clock */
 	struct clk *clk;
@@ -2855,16 +2856,14 @@ static void mvneta_percpu_disable(void *arg)
 	disable_percpu_irq(pp->dev->irq);
 }
 
+/* Electing a CPU must be done in an atomic way: it should be done
+ * after or before the removal/insertion of a CPU and this function is
+ * not reentrant.
+ */
 static void mvneta_percpu_elect(struct mvneta_port *pp)
 {
 	int elected_cpu = 0, max_cpu, cpu, i = 0;
 
-	/* Electing a CPU must be done in an atomic way: it should be
-	 * done after or before the removal/insertion of a CPU and
-	 * this function is not reentrant.
-	 */
-	spin_lock(&pp->lock);
-
 	/* Use the cpu associated to the rxq when it is online, in all
 	 * the other cases, use the cpu 0 which can't be offline.
 	 */
@@ -2908,7 +2907,6 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
 		i++;
 	}
 
-	spin_unlock(&pp->lock);
 };
 
 static int mvneta_percpu_notifier(struct notifier_block *nfb,
@@ -2922,6 +2920,14 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
+		spin_lock(&pp->lock);
+		/* Configuring the driver for a new CPU while the
+		 * driver is stopping is racy, so just avoid it.
+		 */
+		if (pp->is_stopped) {
+			spin_unlock(&pp->lock);
+			break;
+		}
 		netif_tx_stop_all_queues(pp->dev);
 
 		/* We have to synchronise on tha napi of each CPU
@@ -2959,6 +2965,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
 			    MVNETA_CAUSE_LINK_CHANGE |
 			    MVNETA_CAUSE_PSC_SYNC_CHANGE);
 		netif_tx_start_all_queues(pp->dev);
+		spin_unlock(&pp->lock);
 		break;
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
@@ -2983,7 +2990,9 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		/* Check if a new CPU must be elected now this on is down */
+		spin_lock(&pp->lock);
 		mvneta_percpu_elect(pp);
+		spin_unlock(&pp->lock);
 		/* Unmask all ethernet port interrupts */
 		on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
 		mvreg_write(pp, MVNETA_INTR_MISC_MASK,
@@ -3027,7 +3036,7 @@ static int mvneta_open(struct net_device *dev)
 	 */
 	on_each_cpu(mvneta_percpu_enable, pp, true);
 
-
+	pp->is_stopped = false;
 	/* Register a CPU notifier to handle the case where our CPU
 	 * might be taken offline.
 	 */
@ -3060,9 +3069,18 @@ static int mvneta_stop(struct net_device *dev)
{ {
struct mvneta_port *pp = netdev_priv(dev); struct mvneta_port *pp = netdev_priv(dev);
/* Inform that we are stopping so we don't want to setup the
* driver for new CPUs in the notifiers
*/
spin_lock(&pp->lock);
pp->is_stopped = true;
mvneta_stop_dev(pp); mvneta_stop_dev(pp);
mvneta_mdio_remove(pp); mvneta_mdio_remove(pp);
unregister_cpu_notifier(&pp->cpu_notifier); unregister_cpu_notifier(&pp->cpu_notifier);
/* Now that the notifier are unregistered, we can release le
* lock
*/
spin_unlock(&pp->lock);
on_each_cpu(mvneta_percpu_disable, pp, true); on_each_cpu(mvneta_percpu_disable, pp, true);
free_percpu_irq(dev->irq, pp->ports); free_percpu_irq(dev->irq, pp->ports);
mvneta_cleanup_rxqs(pp); mvneta_cleanup_rxqs(pp);
@@ -3333,7 +3351,9 @@ static int mvneta_config_rss(struct mvneta_port *pp)
 	mvreg_write(pp, MVNETA_PORT_CONFIG, val);
 
 	/* Update the elected CPU matching the new rxq_def */
+	spin_lock(&pp->lock);
 	mvneta_percpu_elect(pp);
+	spin_unlock(&pp->lock);
 
 	/* We have to synchronise on the napi of each CPU */
 	for_each_online_cpu(cpu) {