From 869b9b19b3affd81cee853d33c0b124797f3c387 Mon Sep 17 00:00:00 2001
From: Zoltan Kiss
Date: Mon, 24 Mar 2014 23:59:49 +0000
Subject: [PATCH] xen-netback: Stop using xenvif_tx_pending_slots_available

Since the early days, TX stops if there aren't enough free pending slots
to consume a maximum sized (slot-wise) packet. The reason for that is
probably to avoid the case where we don't have enough free pending slots
in the ring to finish the packet. But if we make sure that the pending
ring has the same size as the shared ring, that shouldn't really happen.
The frontend can only post packets which fit into the free space of the
shared ring. If a packet doesn't fit, the frontend has to stop, as it
can only increase req_prod when the whole packet fits onto the ring.

This patch removes this check, makes sure the two rings have the same
size, and removes a check from the zerocopy callback. As we no longer
stop the NAPI instance on this condition, we don't have to wake it up
when pending slots are freed.

Signed-off-by: Zoltan Kiss
Signed-off-by: David S. Miller
---
 drivers/net/xen-netback/common.h    |  8 +-------
 drivers/net/xen-netback/interface.c |  3 +--
 drivers/net/xen-netback/netback.c   | 13 ++-----------
 3 files changed, 4 insertions(+), 20 deletions(-)

diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 0355f8767e3b..89b2d429c440 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -81,7 +81,7 @@ struct xenvif_rx_meta {
 
 #define MAX_BUFFER_OFFSET PAGE_SIZE
 
-#define MAX_PENDING_REQS 256
+#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE
 
 /* It's possible for an skb to have a maximal number of frags
  * but still be less than MAX_BUFFER_OFFSET in size. Thus the
@@ -251,12 +251,6 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
 		vif->pending_prod + vif->pending_cons;
 }
 
-static inline bool xenvif_tx_pending_slots_available(struct xenvif *vif)
-{
-	return nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
-		< MAX_PENDING_REQS;
-}
-
 /* Callback from stack when TX packet can be released */
 void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
 
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 23bb2f4b18fe..e71fb1ac5c4d 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -88,8 +88,7 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
 		local_irq_save(flags);
 
 		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
-		if (!(more_to_do &&
-		      xenvif_tx_pending_slots_available(vif)))
+		if (!more_to_do)
 			__napi_complete(napi);
 
 		local_irq_restore(flags);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 1e4628724778..5d2dd1d54173 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1167,8 +1167,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
 	struct sk_buff *skb;
 	int ret;
 
-	while (xenvif_tx_pending_slots_available(vif) &&
-	       (skb_queue_len(&vif->tx_queue) < budget)) {
+	while (skb_queue_len(&vif->tx_queue) < budget) {
 		struct xen_netif_tx_request txreq;
 		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
 		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
@@ -1508,13 +1507,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
 		wake_up(&vif->dealloc_wq);
 	spin_unlock_irqrestore(&vif->callback_lock, flags);
 
-	if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx) &&
-	    xenvif_tx_pending_slots_available(vif)) {
-		local_bh_disable();
-		napi_schedule(&vif->napi);
-		local_bh_enable();
-	}
-
 	if (likely(zerocopy_success))
 		vif->tx_zerocopy_success++;
 	else
@@ -1706,8 +1698,7 @@ static inline int rx_work_todo(struct xenvif *vif)
 
 static inline int tx_work_todo(struct xenvif *vif)
 {
-	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
-	    xenvif_tx_pending_slots_available(vif))
+	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)))
 		return 1;
 
 	return 0;
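
To make the sizing argument in the commit message concrete, here is a
minimal, self-contained C sketch of the invariant the patch relies on.
This is not netback code: RING_SIZE, struct model, frontend_post() and
the other names are illustrative stand-ins, and sending the response is
folded into the same step as releasing the pending slot. Under those
assumptions it shows that once MAX_PENDING_REQS equals the shared ring
size, the backend can never run out of pending slots for requests the
frontend was allowed to post.

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins, not the real netback macros. */
#define RING_SIZE        256        /* shared TX ring slots */
#define MAX_PENDING_REQS RING_SIZE  /* pending ring sized to match */

struct model {
	unsigned int posted;    /* reqs the frontend has put on the ring */
	unsigned int consumed;  /* reqs the backend has taken off it */
	unsigned int released;  /* reqs responded to, pending slot freed */
};

/* Pending slots in use: consumed but not yet released. */
static unsigned int nr_pending_reqs(const struct model *m)
{
	return m->consumed - m->released;
}

/* Frontend: may only bump req_prod while the packet fits into the free
 * space of the shared ring, i.e. while fewer than RING_SIZE requests
 * are outstanding (posted but not yet responded to).
 */
static int frontend_post(struct model *m)
{
	if (m->posted - m->released >= RING_SIZE)
		return 0;	/* ring full: frontend has to stop */
	m->posted++;
	return 1;
}

/* Backend: consuming a request claims one pending slot. Because the
 * pending ring is as big as the shared ring, a free slot must exist.
 */
static void backend_consume(struct model *m)
{
	assert(m->consumed < m->posted);
	m->consumed++;
	assert(nr_pending_reqs(m) <= MAX_PENDING_REQS);
}

/* Backend: releasing a pending slot also sends the response that lets
 * the frontend reuse the corresponding ring space.
 */
static void backend_release(struct model *m)
{
	assert(m->released < m->consumed);
	m->released++;
}

int main(void)
{
	struct model m = { 0, 0, 0 };
	int i;

	/* Worst case: the frontend fills the ring completely and the
	 * backend consumes everything before releasing anything. Even
	 * then the pending ring cannot overflow.
	 */
	for (i = 0; i < RING_SIZE; i++)
		assert(frontend_post(&m));
	assert(!frontend_post(&m));	/* frontend must stop here */
	for (i = 0; i < RING_SIZE; i++)
		backend_consume(&m);

	printf("pending slots in use: %u of %u\n",
	       nr_pending_reqs(&m), MAX_PENDING_REQS);

	for (i = 0; i < RING_SIZE; i++)
		backend_release(&m);
	return 0;
}

All asserts pass: in the worst case exactly RING_SIZE pending slots are
in use, which is why sizing MAX_PENDING_REQS to the shared ring makes
the removed xenvif_tx_pending_slots_available() check unnecessary.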