mirror of https://gitee.com/openkylin/linux.git
virtio-net: support XDP when not more queues
The number of queues implemented by many virtio backends is limited, especially on machines with a large number of CPUs. In that case it is often impossible to allocate a separate queue for XDP_TX/XDP_REDIRECT, so XDP cannot be loaded at all, even when the program never uses XDP_TX/XDP_REDIRECT.

This patch allows XDP_TX/XDP_REDIRECT to run by reusing the existing SQs, with __netif_tx_lock() held, when there are not enough queues.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Reviewed-by: Dust Li <dust.li@linux.alibaba.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent 0bb3262c02
commit 97c2c69e19
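The mechanism, in brief, before the diff: when the device exposes more queue pairs than there are CPUs, every CPU keeps a dedicated XDP send queue and needs no lock; when it does not, CPUs share the regular send queues and each use is serialized with the txq lock. Below is a minimal user-space sketch of that selection rule; pick_sq() and the sample numbers are illustrative stand-ins that only mirror the driver's arithmetic, not driver code.

#include <stdio.h>

/* Standalone model of the sq-selection rule added by this patch. */
static unsigned int pick_sq(unsigned int curr_queue_pairs,
			    unsigned int xdp_queue_pairs,
			    unsigned int nr_cpu_ids,
			    unsigned int cpu, int *needs_lock)
{
	if (curr_queue_pairs > nr_cpu_ids) {
		/* Enough queues: each CPU owns a dedicated XDP sq, lockless. */
		*needs_lock = 0;
		return curr_queue_pairs - xdp_queue_pairs + cpu;
	}
	/* Too few queues: share the regular sqs under the txq lock. */
	*needs_lock = 1;
	return cpu % curr_queue_pairs;
}

int main(void)
{
	int lock;
	/* 8 CPUs but only 4 queue pairs, so xdp_queue_pairs fell back to 0:
	 * CPU 6 shares sq 2 and must take the lock. */
	unsigned int q = pick_sq(4, 0, 8, 6, &lock);
	printf("cpu 6 -> sq %u, needs_lock=%d\n", q, lock);
	return 0;
}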
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -195,6 +195,9 @@ struct virtnet_info {
 	/* # of XDP queue pairs currently used by the driver */
 	u16 xdp_queue_pairs;
 
+	/* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
+	bool xdp_enabled;
+
 	/* I like... big packets and I cannot lie! */
 	bool big_packets;
 
@@ -481,12 +484,41 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
 	return 0;
 }
 
-static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
-{
-	unsigned int qp;
-
-	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
-	return &vi->sq[qp];
-}
+/* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
+ * the current cpu, so it does not need to be locked.
+ *
+ * Here we use a macro instead of inline functions because we have to deal with
+ * three issues at the same time: 1. the choice of sq. 2. judge and execute the
+ * lock/unlock of txq 3. make sparse happy. It is difficult for two inline
+ * functions to perfectly solve these three problems at the same time.
+ */
+#define virtnet_xdp_get_sq(vi) ({ \
+	struct netdev_queue *txq; \
+	typeof(vi) v = (vi); \
+	unsigned int qp; \
+ \
+	if (v->curr_queue_pairs > nr_cpu_ids) { \
+		qp = v->curr_queue_pairs - v->xdp_queue_pairs; \
+		qp += smp_processor_id(); \
+		txq = netdev_get_tx_queue(v->dev, qp); \
+		__netif_tx_acquire(txq); \
+	} else { \
+		qp = smp_processor_id() % v->curr_queue_pairs; \
+		txq = netdev_get_tx_queue(v->dev, qp); \
+		__netif_tx_lock(txq, raw_smp_processor_id()); \
+	} \
+	v->sq + qp; \
+})
+
+#define virtnet_xdp_put_sq(vi, q) { \
+	struct netdev_queue *txq; \
+	typeof(vi) v = (vi); \
+ \
+	txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
+	if (v->curr_queue_pairs > nr_cpu_ids) \
+		__netif_tx_release(txq); \
+	else \
+		__netif_tx_unlock(txq); \
+}
 
 static int virtnet_xdp_xmit(struct net_device *dev,
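Two details of the macros above deserve a note. virtnet_xdp_get_sq() is a GNU C statement expression, so the block evaluates to its final expression (v->sq + qp), and the typeof(vi) temporary ensures the argument is evaluated exactly once; __netif_tx_acquire()/__netif_tx_release() on the unlocked path are runtime no-ops that keep the lock annotations balanced, which is the "make sparse happy" point in the comment. A tiny standalone demo of the statement-expression pattern (inc_once() is an illustrative macro, not from the kernel):

#include <stdio.h>

/* Same two GNU C extensions virtnet_xdp_get_sq() relies on: typeof to
 * evaluate the argument once, and ({ ... }) to return a value from a block. */
#define inc_once(x) ({ \
	typeof(x) v = (x); /* argument evaluated exactly once */ \
	v + 1; /* last expression is the value of the whole block */ \
})

int main(void)
{
	int i = 3;
	int r = inc_once(i++); /* side effect runs once: r == 4, i == 4 */
	printf("r=%d i=%d\n", r, i);
	return 0;
}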
@@ -512,7 +544,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 	if (!xdp_prog)
 		return -ENXIO;
 
-	sq = virtnet_xdp_sq(vi);
+	sq = virtnet_xdp_get_sq(vi);
 
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
 		ret = -EINVAL;
@@ -560,12 +592,13 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 	sq->stats.kicks += kicks;
 	u64_stats_update_end(&sq->stats.syncp);
 
+	virtnet_xdp_put_sq(vi, sq);
 	return ret;
 }
 
 static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
 {
-	return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
+	return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
 }
 
 /* We copy the packet for XDP in the following cases:
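Note why virtnet_get_headroom() switches tests here: with the fallback in place, vi->xdp_queue_pairs can legitimately be 0 while a program is attached, so keying on it would stop reserving VIRTIO_XDP_HEADROOM in exactly the case this patch enables. The new vi->xdp_enabled flag, set and cleared in virtnet_xdp_set() below, now tracks "a program is loaded" independently of how many dedicated queues were obtained.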
@@ -1458,12 +1491,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 		xdp_do_flush();
 
 	if (xdp_xmit & VIRTIO_XDP_TX) {
-		sq = virtnet_xdp_sq(vi);
+		sq = virtnet_xdp_get_sq(vi);
 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
 			u64_stats_update_begin(&sq->stats.syncp);
 			sq->stats.kicks++;
 			u64_stats_update_end(&sq->stats.syncp);
 		}
+		virtnet_xdp_put_sq(vi, sq);
 	}
 
 	return received;
@@ -2418,10 +2452,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 
 	/* XDP requires extra queues for XDP_TX */
 	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
-		NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available");
-		netdev_warn(dev, "request %i queues but max is %i\n",
+		netdev_warn(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
 			    curr_qp + xdp_qp, vi->max_queue_pairs);
-		return -ENOMEM;
+		xdp_qp = 0;
 	}
 
 	old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
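This hunk is the behavioral pivot of the patch: previously the attach hard-failed with -ENOMEM whenever the requested queues exceeded max_queue_pairs; now it warns and clears xdp_qp, letting the program load with XDP_TX/XDP_REDIRECT sharing the regular send queues under the txq lock. As an illustration (assuming the driver asks for one extra TX queue per CPU, which is how xdp_qp is sized earlier in this function), a 16-CPU guest whose backend exposes only 8 queue pairs can now load XDP in the slower locked mode instead of being rejected.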
@@ -2455,11 +2488,14 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 	vi->xdp_queue_pairs = xdp_qp;
 
 	if (prog) {
+		vi->xdp_enabled = true;
 		for (i = 0; i < vi->max_queue_pairs; i++) {
 			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
 			if (i == 0 && !old_prog)
 				virtnet_clear_guest_offloads(vi);
 		}
+	} else {
+		vi->xdp_enabled = false;
 	}
 
 	for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -2527,7 +2563,7 @@ static int virtnet_set_features(struct net_device *dev,
 	int err;
 
 	if ((dev->features ^ features) & NETIF_F_LRO) {
-		if (vi->xdp_queue_pairs)
+		if (vi->xdp_enabled)
 			return -EBUSY;
 
 		if (features & NETIF_F_LRO)