xen-netback: release pending index before pushing Tx responses

If the pending indexes are released /after/ pushing the Tx response
then a stale pending index may be used if a new Tx request is
immediately pushed by the frontend.  This may cause various WARNINGs or
BUGs if the stale pending index is actually still in use.

Fix this by releasing the pending index before pushing the Tx
response.

The full barrier for the pending ring update is not required since the
Tx response push already has a suitable write barrier.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David Vrabel 2015-02-24 11:17:59 +00:00 committed by David S. Miller
parent 41a50d621a
commit 7fbb9d8415
1 changed file with 21 additions and 8 deletions

View File

@ -655,9 +655,15 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
unsigned long flags; unsigned long flags;
do { do {
int notify;
spin_lock_irqsave(&queue->response_lock, flags); spin_lock_irqsave(&queue->response_lock, flags);
make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
spin_unlock_irqrestore(&queue->response_lock, flags); spin_unlock_irqrestore(&queue->response_lock, flags);
if (notify)
notify_remote_via_irq(queue->tx_irq);
if (cons == end) if (cons == end)
break; break;
txp = RING_GET_REQUEST(&queue->tx, cons++); txp = RING_GET_REQUEST(&queue->tx, cons++);
@ -1649,17 +1655,28 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
{ {
struct pending_tx_info *pending_tx_info; struct pending_tx_info *pending_tx_info;
pending_ring_idx_t index; pending_ring_idx_t index;
int notify;
unsigned long flags; unsigned long flags;
pending_tx_info = &queue->pending_tx_info[pending_idx]; pending_tx_info = &queue->pending_tx_info[pending_idx];
spin_lock_irqsave(&queue->response_lock, flags); spin_lock_irqsave(&queue->response_lock, flags);
make_tx_response(queue, &pending_tx_info->req, status); make_tx_response(queue, &pending_tx_info->req, status);
index = pending_index(queue->pending_prod);
/* Release the pending index before pushing the Tx response so
* its available before a new Tx request is pushed by the
* frontend.
*/
index = pending_index(queue->pending_prod++);
queue->pending_ring[index] = pending_idx; queue->pending_ring[index] = pending_idx;
/* TX shouldn't use the index before we give it back here */
mb(); RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
queue->pending_prod++;
spin_unlock_irqrestore(&queue->response_lock, flags); spin_unlock_irqrestore(&queue->response_lock, flags);
if (notify)
notify_remote_via_irq(queue->tx_irq);
} }
@ -1669,7 +1686,6 @@ static void make_tx_response(struct xenvif_queue *queue,
{ {
RING_IDX i = queue->tx.rsp_prod_pvt; RING_IDX i = queue->tx.rsp_prod_pvt;
struct xen_netif_tx_response *resp; struct xen_netif_tx_response *resp;
int notify;
resp = RING_GET_RESPONSE(&queue->tx, i); resp = RING_GET_RESPONSE(&queue->tx, i);
resp->id = txp->id; resp->id = txp->id;
@ -1679,9 +1695,6 @@ static void make_tx_response(struct xenvif_queue *queue,
RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
queue->tx.rsp_prod_pvt = ++i; queue->tx.rsp_prod_pvt = ++i;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
if (notify)
notify_remote_via_irq(queue->tx_irq);
} }
static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,