mirror of https://gitee.com/openkylin/linux.git
tun: convert to use generic xdp_frame and xdp_return_frame API
The tuntap driver invented its own driver-specific way of queuing XDP packets, by storing the xdp_buff information at the top of the XDP frame data. Convert it over to use the more generic xdp_frame structure. The main problem with the in-driver method is that the xdp_rxq_info pointer cannot be trusted/used when dequeueing the frame.

V3: Remove check based on feedback from Jason

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent c0048cff8a
commit 1ffcbc8537
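For context (not part of the diff): the generic xdp_frame keeps its metadata in the packet's own headroom and carries a copy of the memory-model info, so a frame can be freed long after the xdp_rxq_info that produced it has gone away. A rough sketch of the layout, paraphrased from include/net/xdp.h of this era and shown for illustration only (it assumes the usual kernel types):

/* Sketch of the xdp_frame introduced by this series. The metadata is
 * stored in the packet's own headroom; mem is a snapshot of the rxq's
 * memory-model info, which stays valid on a remote CPU even though the
 * xdp_rxq_info itself is only valid during NAPI/enqueue time.
 */
struct xdp_frame {
	void *data;			/* start of packet data */
	u16 len;			/* packet length */
	u16 headroom;			/* headroom left in front of data */
	u16 metasize;			/* XDP metadata size, if any */
	struct xdp_mem_info mem;	/* how to free the data area later */
};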
@@ -248,11 +248,11 @@ struct veth {
 	__be16 h_vlan_TCI;
 };
 
-bool tun_is_xdp_buff(void *ptr)
+bool tun_is_xdp_frame(void *ptr)
 {
 	return (unsigned long)ptr & TUN_XDP_FLAG;
 }
-EXPORT_SYMBOL(tun_is_xdp_buff);
+EXPORT_SYMBOL(tun_is_xdp_frame);
 
 void *tun_xdp_to_ptr(void *ptr)
 {
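The rename above is purely mechanical: tun keeps telling sk_buff entries and XDP entries apart by tagging the low bit of the pointer stored in the tx_ring (TUN_XDP_FLAG is 0x1UL in tun.c). A self-contained sketch of that tagging scheme; the helper names here are illustrative stand-ins for tun_xdp_to_ptr()/tun_ptr_to_xdp()/tun_is_xdp_frame():

#include <stdio.h>

/* Same idea as tun.c: abuse the (always zero) low bit of an aligned
 * pointer to mark ring entries that carry an XDP frame rather than an
 * sk_buff.
 */
#define TUN_XDP_FLAG 0x1UL

static void *xdp_to_ptr(void *frame)
{
	return (void *)((unsigned long)frame | TUN_XDP_FLAG);	/* tag */
}

static void *ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);	/* untag */
}

static int is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & TUN_XDP_FLAG;
}

int main(void)
{
	long frame_storage;			/* stand-in for an xdp_frame */
	void *tagged = xdp_to_ptr(&frame_storage);

	printf("tagged entry is xdp frame: %d\n", is_xdp_frame(tagged));
	printf("untagged pointer matches:  %d\n",
	       ptr_to_xdp(tagged) == (void *)&frame_storage);
	return 0;
}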
@@ -660,10 +660,10 @@ void tun_ptr_free(void *ptr)
 {
 	if (!ptr)
 		return;
-	if (tun_is_xdp_buff(ptr)) {
-		struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+	if (tun_is_xdp_frame(ptr)) {
+		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
 
-		put_page(virt_to_head_page(xdp->data));
+		xdp_return_frame(xdpf->data, &xdpf->mem);
 	} else {
 		__skb_array_destroy_skb(ptr);
 	}
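Instead of tun hard-coding put_page() on the head page, freeing now goes through xdp_return_frame(), which picks the release method from the xdp_mem_info captured in the frame. A rough, assumption-laden sketch of the idea behind that helper (compare net/core/xdp.c of this era; the _sketch name is mine, kernel headers are assumed and error handling is omitted):

/* Illustrative only: release the data area according to the recorded
 * memory model instead of letting each driver guess.
 */
void xdp_return_frame_sketch(void *data, struct xdp_mem_info *mem)
{
	switch (mem->type) {
	case MEM_TYPE_PAGE_SHARED:	/* page-fragment based allocators */
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:	/* one packet per order-0 page */
		put_page(virt_to_page(data));
		break;
	default:
		break;
	}
}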
@@ -1298,17 +1298,14 @@ static const struct net_device_ops tun_netdev_ops = {
 static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
 {
 	struct tun_struct *tun = netdev_priv(dev);
-	struct xdp_buff *buff = xdp->data_hard_start;
-	int headroom = xdp->data - xdp->data_hard_start;
+	struct xdp_frame *frame;
 	struct tun_file *tfile;
 	u32 numqueues;
 	int ret = 0;
 
-	/* Assure headroom is available and buff is properly aligned */
-	if (unlikely(headroom < sizeof(*xdp) || tun_is_xdp_buff(xdp)))
-		return -ENOSPC;
-
-	*buff = *xdp;
+	frame = convert_to_xdp_frame(xdp);
+	if (unlikely(!frame))
+		return -EOVERFLOW;
 
 	rcu_read_lock();
 
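The open-coded headroom check and the *buff = *xdp copy disappear because convert_to_xdp_frame() does the equivalent work: it verifies there is room in the headroom, writes the struct xdp_frame metadata there, and returns NULL on failure, which tun_xdp_xmit() now maps to -EOVERFLOW. A simplified sketch of that conversion (paraphrased from the era's include/net/xdp.h; the _sketch suffix marks it as illustrative, metasize handling is left out, kernel types are assumed):

static inline struct xdp_frame *convert_to_xdp_frame_sketch(struct xdp_buff *xdp)
{
	struct xdp_frame *frame;
	int headroom = xdp->data - xdp->data_hard_start;

	/* Assure there is room in the headroom for the metadata */
	if (headroom < (int)sizeof(*frame))
		return NULL;

	frame = xdp->data_hard_start;		/* metadata lives in the headroom */
	frame->data = xdp->data;
	frame->len = xdp->data_end - xdp->data;
	frame->headroom = headroom - sizeof(*frame);
	frame->mem = xdp->rxq->mem;		/* snapshot before rxq goes away */
	return frame;
}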
@@ -1323,7 +1320,7 @@ static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
 	/* Encode the XDP flag into lowest bit for consumer to differ
 	 * XDP buffer from sk_buff.
 	 */
-	if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(buff))) {
+	if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(frame))) {
 		this_cpu_inc(tun->pcpu_stats->tx_dropped);
 		ret = -ENOSPC;
 	}
@@ -2001,11 +1998,11 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
 
 static ssize_t tun_put_user_xdp(struct tun_struct *tun,
 				struct tun_file *tfile,
-				struct xdp_buff *xdp,
+				struct xdp_frame *xdp_frame,
 				struct iov_iter *iter)
 {
 	int vnet_hdr_sz = 0;
-	size_t size = xdp->data_end - xdp->data;
+	size_t size = xdp_frame->len;
 	struct tun_pcpu_stats *stats;
 	size_t ret;
 
@@ -2021,7 +2018,7 @@ static ssize_t tun_put_user_xdp(struct tun_struct *tun,
 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
 	}
 
-	ret = copy_to_iter(xdp->data, size, iter) + vnet_hdr_sz;
+	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
 
 	stats = get_cpu_ptr(tun->pcpu_stats);
 	u64_stats_update_begin(&stats->syncp);
@@ -2189,11 +2186,11 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
 		return err;
 	}
 
-	if (tun_is_xdp_buff(ptr)) {
-		struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+	if (tun_is_xdp_frame(ptr)) {
+		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
 
-		ret = tun_put_user_xdp(tun, tfile, xdp, to);
-		put_page(virt_to_head_page(xdp->data));
+		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
+		xdp_return_frame(xdpf->data, &xdpf->mem);
 	} else {
 		struct sk_buff *skb = ptr;
 
@@ -2432,10 +2429,10 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
 static int tun_ptr_peek_len(void *ptr)
 {
 	if (likely(ptr)) {
-		if (tun_is_xdp_buff(ptr)) {
-			struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+		if (tun_is_xdp_frame(ptr)) {
+			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
 
-			return xdp->data_end - xdp->data;
+			return xdpf->len;
 		}
 		return __skb_array_len_with_tag(ptr);
 	} else {
@@ -32,6 +32,7 @@
 #include <linux/skbuff.h>
 
 #include <net/sock.h>
+#include <net/xdp.h>
 
 #include "vhost.h"
 
@@ -181,10 +182,10 @@ static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
 
 static int vhost_net_buf_peek_len(void *ptr)
 {
-	if (tun_is_xdp_buff(ptr)) {
-		struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+	if (tun_is_xdp_frame(ptr)) {
+		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
 
-		return xdp->data_end - xdp->data;
+		return xdpf->len;
 	}
 
 	return __skb_array_len_with_tag(ptr);
@@ -22,7 +22,7 @@
 #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
 struct socket *tun_get_socket(struct file *);
 struct ptr_ring *tun_get_tx_ring(struct file *file);
-bool tun_is_xdp_buff(void *ptr);
+bool tun_is_xdp_frame(void *ptr);
 void *tun_xdp_to_ptr(void *ptr);
 void *tun_ptr_to_xdp(void *ptr);
 void tun_ptr_free(void *ptr);
@@ -39,7 +39,7 @@ static inline struct ptr_ring *tun_get_tx_ring(struct file *f)
 {
 	return ERR_PTR(-EINVAL);
 }
-static inline bool tun_is_xdp_buff(void *ptr)
+static inline bool tun_is_xdp_frame(void *ptr)
 {
 	return false;
 }