sfc: Test all event queues in parallel

In case all event queues are broken for some reason, this means it
will only take about a second to check them all, rather than up to 32
seconds.  This may also speed up testing in the successful case.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
author Ben Hutchings <bhutchings@solarflare.com> 2012-02-28 20:40:54 +00:00
parent 93e5dfa59b
commit ed74f48087
1 changed file with 64 additions and 48 deletions
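
The patch below replaces the caller's per-channel loop (one full timeout per channel, up to EFX_MAX_CHANNELS of them) with a single pass: every event queue is kicked first, per-channel completion is tracked in the dma_pend/int_pend bitmaps, and one shared deadline with an exponentially doubling sleep covers them all. The standalone sketch below illustrates only that pattern; it is plain userspace C with made-up names and timings (MAX_QUEUES, DEADLINE_MS, queue_done()) standing in for EFX_MAX_CHANNELS, IRQ_TIMEOUT and the real hardware, and is not sfc driver code.

/*
 * Standalone illustration of the pattern introduced by this commit: kick
 * every queue up front, track completion per queue in one pending bitmap,
 * and poll with an exponentially growing sleep under a single shared
 * deadline.  All names and timings here (MAX_QUEUES, DEADLINE_MS,
 * queue_done()) are hypothetical stand-ins, not sfc driver code.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define MAX_QUEUES  32          /* stands in for EFX_MAX_CHANNELS */
#define DEADLINE_MS 1000L       /* stands in for IRQ_TIMEOUT (~1 s) */

static long elapsed_ms(const struct timespec *start)
{
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return (now.tv_sec - start->tv_sec) * 1000 +
               (now.tv_nsec - start->tv_nsec) / 1000000;
}

/* Fake hardware: pretend queue i completes roughly 10*i ms after the kick. */
static bool queue_done(unsigned int i, const struct timespec *start)
{
        return elapsed_ms(start) >= 10L * i;
}

int main(void)
{
        unsigned long pending = 0;      /* plays the role of dma_pend/int_pend */
        unsigned long wait_ms = 1;
        struct timespec start;
        unsigned int i;

        /* Same constraint as BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG). */
        _Static_assert(MAX_QUEUES <= sizeof(unsigned long) * CHAR_BIT,
                       "pending bitmap must fit in one word");

        clock_gettime(CLOCK_MONOTONIC, &start);

        /* "Kick" every queue at once and mark each one as pending. */
        for (i = 0; i < MAX_QUEUES; i++)
                pending |= 1UL << i;

        /* One shared deadline for the whole set instead of one per queue. */
        do {
                usleep(wait_ms * 1000);
                for (i = 0; i < MAX_QUEUES; i++)
                        if ((pending & (1UL << i)) && queue_done(i, &start))
                                pending &= ~(1UL << i); /* like clear_bit() */
                wait_ms *= 2;   /* same exponential back-off as the driver loop */
        } while (pending && elapsed_ms(&start) < DEADLINE_MS);

        printf("pending after test: %#lx (%s)\n", pending,
               pending ? "timed out" : "all queues passed");
        return pending ? 1 : 0;
}

With the old per-channel calls the worst case was one full timeout per channel back to back; with the shared deadline the whole test is bounded once, which is the "about a second" versus "up to 32 seconds" trade-off described in the commit message.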

@@ -178,69 +178,88 @@ static int efx_test_interrupts(struct efx_nic *efx,
 }
 
 /* Test generation and receipt of interrupting events */
-static int efx_test_eventq_irq(struct efx_channel *channel,
+static int efx_test_eventq_irq(struct efx_nic *efx,
                                struct efx_self_tests *tests)
 {
-        struct efx_nic *efx = channel->efx;
-        unsigned int read_ptr;
-        bool napi_ran, dma_seen, int_seen;
+        struct efx_channel *channel;
+        unsigned int read_ptr[EFX_MAX_CHANNELS];
+        unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
         unsigned long timeout, wait;
 
-        read_ptr = channel->eventq_read_ptr;
-        channel->last_irq_cpu = -1;
-        smp_wmb();
+        BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG);
 
-        efx_nic_generate_test_event(channel);
+        efx_for_each_channel(channel, efx) {
+                read_ptr[channel->channel] = channel->eventq_read_ptr;
+                set_bit(channel->channel, &dma_pend);
+                set_bit(channel->channel, &int_pend);
+                channel->last_irq_cpu = -1;
+                smp_wmb();
+                efx_nic_generate_test_event(channel);
+        }
 
         timeout = jiffies + IRQ_TIMEOUT;
         wait = 1;
 
-        /* Wait for arrival of interrupt.  NAPI processing may or may
+        /* Wait for arrival of interrupts.  NAPI processing may or may
          * not complete in time, but we can cope in any case.
          */
         do {
                 schedule_timeout_uninterruptible(wait);
 
-                napi_disable(&channel->napi_str);
-                if (channel->eventq_read_ptr != read_ptr) {
-                        napi_ran = true;
-                        dma_seen = true;
-                        int_seen = true;
-                } else {
-                        napi_ran = false;
-                        dma_seen = efx_nic_event_present(channel);
-                        int_seen = ACCESS_ONCE(channel->last_irq_cpu) >= 0;
+                efx_for_each_channel(channel, efx) {
+                        napi_disable(&channel->napi_str);
+                        if (channel->eventq_read_ptr !=
+                            read_ptr[channel->channel]) {
+                                set_bit(channel->channel, &napi_ran);
+                                clear_bit(channel->channel, &dma_pend);
+                                clear_bit(channel->channel, &int_pend);
+                        } else {
+                                if (efx_nic_event_present(channel))
+                                        clear_bit(channel->channel, &dma_pend);
+                                if (ACCESS_ONCE(channel->last_irq_cpu) >= 0)
+                                        clear_bit(channel->channel, &int_pend);
+                        }
+                        napi_enable(&channel->napi_str);
+                        efx_nic_eventq_read_ack(channel);
                 }
-                napi_enable(&channel->napi_str);
-                efx_nic_eventq_read_ack(channel);
 
                 wait *= 2;
-        } while (!(dma_seen && int_seen) && time_before(jiffies, timeout));
+        } while ((dma_pend || int_pend) && time_before(jiffies, timeout));
 
-        tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
-        tests->eventq_int[channel->channel] = int_seen ? 1 : -1;
+        efx_for_each_channel(channel, efx) {
+                bool dma_seen = !test_bit(channel->channel, &dma_pend);
+                bool int_seen = !test_bit(channel->channel, &int_pend);
 
-        if (dma_seen && int_seen) {
-                netif_dbg(efx, drv, efx->net_dev,
-                          "channel %d event queue passed (with%s NAPI)\n",
-                          channel->channel, napi_ran ? "" : "out");
-                return 0;
-        } else {
-                /* Report failure and whether either interrupt or DMA worked */
-                netif_err(efx, drv, efx->net_dev,
-                          "channel %d timed out waiting for event queue\n",
-                          channel->channel);
-                if (int_seen)
-                        netif_err(efx, drv, efx->net_dev,
-                                  "channel %d saw interrupt "
-                                  "during event queue test\n",
-                                  channel->channel);
-                if (dma_seen)
-                        netif_err(efx, drv, efx->net_dev,
-                                  "channel %d event was generated, but "
-                                  "failed to trigger an interrupt\n",
-                                  channel->channel);
-                return -ETIMEDOUT;
+                tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
+                tests->eventq_int[channel->channel] = int_seen ? 1 : -1;
+
+                if (dma_seen && int_seen) {
+                        netif_dbg(efx, drv, efx->net_dev,
+                                  "channel %d event queue passed (with%s NAPI)\n",
+                                  channel->channel,
+                                  test_bit(channel->channel, &napi_ran) ?
+                                  "" : "out");
+                } else {
+                        /* Report failure and whether either interrupt or DMA
+                         * worked
+                         */
+                        netif_err(efx, drv, efx->net_dev,
+                                  "channel %d timed out waiting for event queue\n",
+                                  channel->channel);
+                        if (int_seen)
+                                netif_err(efx, drv, efx->net_dev,
+                                          "channel %d saw interrupt "
+                                          "during event queue test\n",
+                                          channel->channel);
+                        if (dma_seen)
+                                netif_err(efx, drv, efx->net_dev,
+                                          "channel %d event was generated, but "
+                                          "failed to trigger an interrupt\n",
+                                          channel->channel);
+                }
         }
+
+        return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
 }
 
 static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
@@ -687,7 +706,6 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
         enum efx_loopback_mode loopback_mode = efx->loopback_mode;
         int phy_mode = efx->phy_mode;
         enum reset_type reset_method = RESET_TYPE_INVISIBLE;
-        struct efx_channel *channel;
         int rc_test = 0, rc_reset = 0, rc;
 
         /* Online (i.e. non-disruptive) testing
@@ -705,11 +723,9 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
         if (rc && !rc_test)
                 rc_test = rc;
 
-        efx_for_each_channel(channel, efx) {
-                rc = efx_test_eventq_irq(channel, tests);
-                if (rc && !rc_test)
-                        rc_test = rc;
-        }
+        rc = efx_test_eventq_irq(efx, tests);
+        if (rc && !rc_test)
+                rc_test = rc;
 
         if (rc_test)
                 return rc_test;