Merge branch 'xsk-improvements-to-RX-queue-check-and-replace'
Jakub Kicinski says: ==================== xsk: improvements to RX queue check and replace First 3 patches of my recent RFC. The first one makes the check against real_num_rx_queues slightly more reliable, while the latter two redefine XDP_QUERY_XSK_UMEM slightly to disallow replacing UMEM in the driver at the stack level. I'm not sure where this lies on the bpf vs net trees scale, but there should be no conflicts with either tree. ==================== Acked-by: Björn Töpel <bjorn.topel@intel.com> Acked-by: Daniel Borkmann <daniel@iogearbox.net> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
6293e4d674
|
@ -872,10 +872,10 @@ struct netdev_bpf {
|
|||
struct {
|
||||
struct bpf_offloaded_map *offmap;
|
||||
};
|
||||
/* XDP_SETUP_XSK_UMEM */
|
||||
/* XDP_QUERY_XSK_UMEM, XDP_SETUP_XSK_UMEM */
|
||||
struct {
|
||||
struct xdp_umem *umem;
|
||||
u16 queue_id;
|
||||
struct xdp_umem *umem; /* out for query*/
|
||||
u16 queue_id; /* in for query */
|
||||
} xsk;
|
||||
};
|
||||
};
|
||||
|
@ -3431,8 +3431,9 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
|
|||
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
|
||||
#else
|
||||
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
|
||||
unsigned int rxq)
|
||||
unsigned int rxqs)
|
||||
{
|
||||
dev->real_num_rx_queues = rxqs;
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
@ -3567,6 +3568,7 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
|
|||
int fd, u32 flags);
|
||||
u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
|
||||
enum bpf_netdev_command cmd);
|
||||
int xdp_umem_query(struct net_device *dev, u16 queue_id);
|
||||
|
||||
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
|
||||
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
|
||||
|
|
|
@ -11,6 +11,8 @@
|
|||
#include <linux/slab.h>
|
||||
#include <linux/bpf.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
|
||||
#include "xdp_umem.h"
|
||||
#include "xsk_queue.h"
|
||||
|
@ -40,6 +42,21 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
|
|||
}
|
||||
}
|
||||
|
||||
int xdp_umem_query(struct net_device *dev, u16 queue_id)
|
||||
{
|
||||
struct netdev_bpf bpf;
|
||||
|
||||
ASSERT_RTNL();
|
||||
|
||||
memset(&bpf, 0, sizeof(bpf));
|
||||
bpf.command = XDP_QUERY_XSK_UMEM;
|
||||
bpf.xsk.queue_id = queue_id;
|
||||
|
||||
if (!dev->netdev_ops->ndo_bpf)
|
||||
return 0;
|
||||
return dev->netdev_ops->ndo_bpf(dev, &bpf) ?: !!bpf.xsk.umem;
|
||||
}
|
||||
|
||||
int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
|
||||
u32 queue_id, u16 flags)
|
||||
{
|
||||
|
@ -56,41 +73,36 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
|
|||
if (force_copy)
|
||||
return 0;
|
||||
|
||||
dev_hold(dev);
|
||||
if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
|
||||
return force_zc ? -ENOTSUPP : 0; /* fail or fallback */
|
||||
|
||||
if (dev->netdev_ops->ndo_bpf && dev->netdev_ops->ndo_xsk_async_xmit) {
|
||||
bpf.command = XDP_QUERY_XSK_UMEM;
|
||||
bpf.command = XDP_QUERY_XSK_UMEM;
|
||||
|
||||
rtnl_lock();
|
||||
err = dev->netdev_ops->ndo_bpf(dev, &bpf);
|
||||
rtnl_unlock();
|
||||
|
||||
if (err) {
|
||||
dev_put(dev);
|
||||
return force_zc ? -ENOTSUPP : 0;
|
||||
}
|
||||
|
||||
bpf.command = XDP_SETUP_XSK_UMEM;
|
||||
bpf.xsk.umem = umem;
|
||||
bpf.xsk.queue_id = queue_id;
|
||||
|
||||
rtnl_lock();
|
||||
err = dev->netdev_ops->ndo_bpf(dev, &bpf);
|
||||
rtnl_unlock();
|
||||
|
||||
if (err) {
|
||||
dev_put(dev);
|
||||
return force_zc ? err : 0; /* fail or fallback */
|
||||
}
|
||||
|
||||
umem->dev = dev;
|
||||
umem->queue_id = queue_id;
|
||||
umem->zc = true;
|
||||
return 0;
|
||||
rtnl_lock();
|
||||
err = xdp_umem_query(dev, queue_id);
|
||||
if (err) {
|
||||
err = err < 0 ? -ENOTSUPP : -EBUSY;
|
||||
goto err_rtnl_unlock;
|
||||
}
|
||||
|
||||
dev_put(dev);
|
||||
return force_zc ? -ENOTSUPP : 0; /* fail or fallback */
|
||||
bpf.command = XDP_SETUP_XSK_UMEM;
|
||||
bpf.xsk.umem = umem;
|
||||
bpf.xsk.queue_id = queue_id;
|
||||
|
||||
err = dev->netdev_ops->ndo_bpf(dev, &bpf);
|
||||
if (err)
|
||||
goto err_rtnl_unlock;
|
||||
rtnl_unlock();
|
||||
|
||||
dev_hold(dev);
|
||||
umem->dev = dev;
|
||||
umem->queue_id = queue_id;
|
||||
umem->zc = true;
|
||||
return 0;
|
||||
|
||||
err_rtnl_unlock:
|
||||
rtnl_unlock();
|
||||
return force_zc ? err : 0; /* fail or fallback */
|
||||
}
|
||||
|
||||
static void xdp_umem_clear_dev(struct xdp_umem *umem)
|
||||
|
|
Loading…
Reference in New Issue