netvsc: eliminate per-device outstanding send counter

Since we now keep track of per-queue outstanding sends, we can avoid
one atomic update by removing the no-longer-needed per-device counter.

Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 46b4f7f5d1
parent 2289f0aa70
Author:    stephen hemminger, 2017-01-24 13:06:11 -08:00
Committer: David S. Miller
3 changed files with 36 additions and 34 deletions
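
As context for the diff below: instead of a single device-wide atomic that every transmit and completion path touches, each channel carries its own queue_sends counter, and the device-wide "have all sends drained?" question is answered only on the slow teardown path by scanning per-queue state. The following standalone sketch illustrates that scheme; it is a userspace approximation (C11 atomics standing in for the kernel's atomic_t, with illustrative struct and function names), not the driver code itself:

	#include <stdatomic.h>
	#include <stdbool.h>

	#define MAX_QUEUES 8

	struct chan {
		atomic_int queue_sends;		/* sends in flight on this queue */
	};

	struct dev {
		int num_chn;
		atomic_int num_outstanding_recvs;
		struct chan chan_table[MAX_QUEUES];
	};

	/* Transmit path: one atomic increment on the owning queue only,
	 * where the old scheme also bumped a device-wide counter. */
	static void send_start(struct dev *d, int q)
	{
		atomic_fetch_add(&d->chan_table[q].queue_sends, 1);
	}

	/* Completion path: decrement and return the new value so the
	 * caller can wake a drain waiter exactly when it hits zero. */
	static int send_done(struct dev *d, int q)
	{
		return atomic_fetch_sub(&d->chan_table[q].queue_sends, 1) - 1;
	}

	/* Teardown path: the device is idle only when receives have
	 * drained and every per-queue send counter reads zero. */
	static bool device_idle(struct dev *d)
	{
		if (atomic_load(&d->num_outstanding_recvs) > 0)
			return false;

		for (int i = 0; i < d->num_chn; i++)
			if (atomic_load(&d->chan_table[i].queue_sends) > 0)
				return false;

		return true;
	}

The trade-off is deliberate: the hot per-packet paths shed one atomic operation each, while the idle test, which runs only during destroy/halt, pays an O(num_chn) scan.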

drivers/net/hyperv/hyperv_net.h

@@ -729,7 +729,6 @@ struct netvsc_channel {
 struct netvsc_device {
 	u32 nvsp_version;
 
-	atomic_t num_outstanding_sends;
 	wait_queue_head_t wait_drain;
 	bool destroy;

drivers/net/hyperv/netvsc.c

@@ -90,6 +90,16 @@ static void free_netvsc_device(struct netvsc_device *nvdev)
 	kfree(nvdev);
 }
 
+static inline bool netvsc_channel_idle(const struct netvsc_device *net_device,
+				       u16 q_idx)
+{
+	const struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
+
+	return atomic_read(&net_device->num_outstanding_recvs) == 0 &&
+		atomic_read(&nvchan->queue_sends) == 0;
+}
+
 static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
 {
 	struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
@@ -100,22 +110,6 @@ static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
 	return net_device;
 }
 
-static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
-{
-	struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
-
-	if (!net_device)
-		goto get_in_err;
-
-	if (net_device->destroy &&
-	    atomic_read(&net_device->num_outstanding_sends) == 0 &&
-	    atomic_read(&net_device->num_outstanding_recvs) == 0)
-		net_device = NULL;
-
-get_in_err:
-	return net_device;
-}
-
 static void netvsc_destroy_buf(struct hv_device *device)
 {
 	struct nvsp_message *revoke_packet;
@@ -612,7 +606,6 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
 	struct net_device *ndev = hv_get_drvdata(device);
 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
 	struct vmbus_channel *channel = device->channel;
-	int num_outstanding_sends;
 	u16 q_idx = 0;
 	int queue_sends;
@@ -630,13 +623,10 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
 		dev_consume_skb_any(skb);
 	}
 
-	num_outstanding_sends =
-		atomic_dec_return(&net_device->num_outstanding_sends);
-
 	queue_sends =
 		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
 
-	if (net_device->destroy && num_outstanding_sends == 0)
+	if (net_device->destroy && queue_sends == 0)
 		wake_up(&net_device->wait_drain);
 
 	if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
@@ -823,15 +813,10 @@ static inline int netvsc_send_pkt(
 	}
 
 	if (ret == 0) {
-		atomic_inc(&net_device->num_outstanding_sends);
 		atomic_inc_return(&nvchan->queue_sends);
 
-		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
+		if (ring_avail < RING_AVAIL_PERCENT_LOWATER)
 			netif_tx_stop_queue(txq);
-
-			if (atomic_read(&nvchan->queue_sends) < 1)
-				netif_tx_wake_queue(txq);
-		}
 	} else if (ret == -EAGAIN) {
 		netif_tx_stop_queue(txq);
 		if (atomic_read(&nvchan->queue_sends) < 1) {
@@ -1259,11 +1244,14 @@ void netvsc_channel_cb(void *context)
 	else
 		device = channel->device_obj;
 
-	net_device = get_inbound_net_device(device);
-	if (!net_device)
+	ndev = hv_get_drvdata(device);
+	if (unlikely(!ndev))
 		return;
-	ndev = hv_get_drvdata(device);
+
+	net_device = net_device_to_netvsc_device(ndev);
+	if (unlikely(net_device->destroy) &&
+	    netvsc_channel_idle(net_device, q_idx))
+		return;
 
 	while ((desc = get_next_pkt_raw(channel)) != NULL) {
 		netvsc_process_raw_pkt(device, channel, net_device,

drivers/net/hyperv/rndis_filter.c

@@ -903,6 +903,23 @@ static int rndis_filter_init_device(struct rndis_device *dev)
 	return ret;
 }
 
+static bool netvsc_device_idle(const struct netvsc_device *nvdev)
+{
+	int i;
+
+	if (atomic_read(&nvdev->num_outstanding_recvs) > 0)
+		return false;
+
+	for (i = 0; i < nvdev->num_chn; i++) {
+		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
+
+		if (atomic_read(&nvchan->queue_sends) > 0)
+			return false;
+	}
+
+	return true;
+}
+
 static void rndis_filter_halt_device(struct rndis_device *dev)
 {
 	struct rndis_request *request;
@@ -933,9 +950,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
 	spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags);
 
 	/* Wait for all send completions */
-	wait_event(nvdev->wait_drain,
-		   atomic_read(&nvdev->num_outstanding_sends) == 0 &&
-		   atomic_read(&nvdev->num_outstanding_recvs) == 0);
+	wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));
 
 	if (request)
 		put_rndis_request(dev, request);