2019-05-19 21:51:43 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2011-03-15 08:06:18 +08:00
|
|
|
/*
|
|
|
|
* Xenbus code for netif backend
|
|
|
|
*
|
|
|
|
* Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
|
|
|
|
* Copyright (C) 2005 XenSource Ltd
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "common.h"
|
2014-06-04 17:30:42 +08:00
|
|
|
#include <linux/vmalloc.h>
|
|
|
|
#include <linux/rtnetlink.h>
|
2011-03-15 08:06:18 +08:00
|
|
|
|
2016-05-13 16:37:26 +08:00
|
|
|
static int connect_data_rings(struct backend_info *be,
|
|
|
|
struct xenvif_queue *queue);
|
2014-06-04 17:30:42 +08:00
|
|
|
static void connect(struct backend_info *be);
|
|
|
|
static int read_xenbus_vif_flags(struct backend_info *be);
|
2014-11-24 18:58:00 +08:00
|
|
|
static int backend_create_xenvif(struct backend_info *be);
|
2011-03-15 08:06:18 +08:00
|
|
|
static void unregister_hotplug_status_watch(struct backend_info *be);
|
2015-03-19 18:05:42 +08:00
|
|
|
static void xen_unregister_watchers(struct xenvif *vif);
|
2013-10-07 20:55:19 +08:00
|
|
|
static void set_backend_state(struct backend_info *be,
|
|
|
|
enum xenbus_state state);
|
2011-03-15 08:06:18 +08:00
|
|
|
|
2014-07-09 02:49:14 +08:00
|
|
|
#ifdef CONFIG_DEBUG_FS
|
|
|
|
struct dentry *xen_netback_dbg_root = NULL;
|
|
|
|
|
|
|
|
/* seq_file show handler for the per-queue "io_ring_qN" debugfs file.
 *
 * Dumps the state of the queue's TX and RX shared rings (producer/
 * consumer/event indices, with deltas relative to rsp_prod), the
 * pending/dealloc bookkeeping, NAPI and credit-scheduler state, and
 * the internal RX queue.  Rings whose shared page is not mapped
 * (sring == NULL) are skipped.  Always returns 0.
 */
static int xenvif_read_io_ring(struct seq_file *m, void *v)
{
	struct xenvif_queue *queue = m->private;
	struct xen_netif_tx_back_ring *tx_ring = &queue->tx;
	struct xen_netif_rx_back_ring *rx_ring = &queue->rx;
	struct netdev_queue *dev_queue;

	if (tx_ring->sring) {
		struct xen_netif_tx_sring *sring = tx_ring->sring;

		seq_printf(m, "Queue %d\nTX: nr_ents %u\n", queue->id,
			   tx_ring->nr_ents);
		seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
			   sring->req_prod,
			   sring->req_prod - sring->rsp_prod,
			   tx_ring->req_cons,
			   tx_ring->req_cons - sring->rsp_prod,
			   sring->req_event,
			   sring->req_event - sring->rsp_prod);
		seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n",
			   sring->rsp_prod,
			   tx_ring->rsp_prod_pvt,
			   tx_ring->rsp_prod_pvt - sring->rsp_prod,
			   sring->rsp_event,
			   sring->rsp_event - sring->rsp_prod);
		seq_printf(m, "pending prod %u pending cons %u nr_pending_reqs %u\n",
			   queue->pending_prod,
			   queue->pending_cons,
			   nr_pending_reqs(queue));
		seq_printf(m, "dealloc prod %u dealloc cons %u dealloc_queue %u\n\n",
			   queue->dealloc_prod,
			   queue->dealloc_cons,
			   queue->dealloc_prod - queue->dealloc_cons);
	}

	if (rx_ring->sring) {
		struct xen_netif_rx_sring *sring = rx_ring->sring;

		seq_printf(m, "RX: nr_ents %u\n", rx_ring->nr_ents);
		seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
			   sring->req_prod,
			   sring->req_prod - sring->rsp_prod,
			   rx_ring->req_cons,
			   rx_ring->req_cons - sring->rsp_prod,
			   sring->req_event,
			   sring->req_event - sring->rsp_prod);
		seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n\n",
			   sring->rsp_prod,
			   rx_ring->rsp_prod_pvt,
			   rx_ring->rsp_prod_pvt - sring->rsp_prod,
			   sring->rsp_event,
			   sring->rsp_event - sring->rsp_prod);
	}

	seq_printf(m, "NAPI state: %lx NAPI weight: %d TX queue len %u\n"
		   "Credit timer_pending: %d, credit: %lu, usec: %lu\n"
		   "remaining: %lu, expires: %lu, now: %lu\n",
		   queue->napi.state, queue->napi.weight,
		   skb_queue_len(&queue->tx_queue),
		   timer_pending(&queue->credit_timeout),
		   queue->credit_bytes,
		   queue->credit_usec,
		   queue->remaining_credit,
		   queue->credit_timeout.expires,
		   jiffies);

	dev_queue = netdev_get_tx_queue(queue->vif->dev, queue->id);

	seq_printf(m, "\nRx internal queue: len %u max %u pkts %u %s\n",
		   queue->rx_queue_len, queue->rx_queue_max,
		   skb_queue_len(&queue->rx_queue),
		   netif_tx_queue_stopped(dev_queue) ? "stopped" : "running");

	return 0;
}
|
|
|
|
|
|
|
|
#define XENVIF_KICK_STR "kick"
#define BUFFER_SIZE 32

/* debugfs write handler for the per-queue "io_ring_qN" file.
 *
 * The only recognised command is "kick", which simulates an interrupt
 * on the queue via xenvif_interrupt().  Partial writes (*ppos != 0)
 * are refused and input that would not fit the local buffer returns
 * -ENOSPC.  Unknown commands log a warning and return -EINVAL.
 */
static ssize_t
xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count,
		     loff_t *ppos)
{
	struct xenvif_queue *queue =
		((struct seq_file *)filp->private_data)->private;
	int len;
	char write[BUFFER_SIZE];

	/* don't allow partial writes and check the length */
	if (*ppos != 0)
		return 0;
	if (count >= sizeof(write))
		return -ENOSPC;

	len = simple_write_to_buffer(write,
				     sizeof(write) - 1,
				     ppos,
				     buf,
				     count);
	if (len < 0)
		return len;

	/* NUL-terminate so the strncmp() below stays inside the buffer */
	write[len] = '\0';

	if (!strncmp(write, XENVIF_KICK_STR, sizeof(XENVIF_KICK_STR) - 1))
		xenvif_interrupt(0, (void *)queue);
	else {
		pr_warn("Unknown command to io_ring_q%d. Available: kick\n",
			queue->id);
		count = -EINVAL;
	}
	return count;
}
|
|
|
|
|
2016-10-10 16:30:53 +08:00
|
|
|
static int xenvif_io_ring_open(struct inode *inode, struct file *filp)
|
2014-07-09 02:49:14 +08:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
void *queue = NULL;
|
|
|
|
|
|
|
|
if (inode->i_private)
|
|
|
|
queue = inode->i_private;
|
|
|
|
ret = single_open(filp, xenvif_read_io_ring, queue);
|
|
|
|
filp->f_mode |= FMODE_PWRITE;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* File operations for the per-queue "io_ring_qN" debugfs entries:
 * reads dump ring state through seq_file, writes accept the "kick"
 * command handled by xenvif_write_io_ring().
 */
static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
	.owner = THIS_MODULE,
	.open = xenvif_io_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = xenvif_write_io_ring,
};
|
|
|
|
|
2018-12-10 23:53:29 +08:00
|
|
|
static int xenvif_ctrl_show(struct seq_file *m, void *v)
|
2016-10-10 16:30:53 +08:00
|
|
|
{
|
|
|
|
struct xenvif *vif = m->private;
|
|
|
|
|
|
|
|
xenvif_dump_hash_info(vif, m);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2018-12-10 23:53:29 +08:00
|
|
|
DEFINE_SHOW_ATTRIBUTE(xenvif_ctrl);
|
2016-10-10 16:30:53 +08:00
|
|
|
|
2014-08-12 18:59:30 +08:00
|
|
|
static void xenvif_debugfs_addif(struct xenvif *vif)
|
2014-07-09 02:49:14 +08:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
vif->xenvif_dbg_root = debugfs_create_dir(vif->dev->name,
|
|
|
|
xen_netback_dbg_root);
|
2019-08-10 18:31:08 +08:00
|
|
|
for (i = 0; i < vif->num_queues; ++i) {
|
|
|
|
char filename[sizeof("io_ring_q") + 4];
|
|
|
|
|
|
|
|
snprintf(filename, sizeof(filename), "io_ring_q%d", i);
|
|
|
|
debugfs_create_file(filename, 0600, vif->xenvif_dbg_root,
|
|
|
|
&vif->queues[i],
|
|
|
|
&xenvif_dbg_io_ring_ops_fops);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (vif->ctrl_irq)
|
|
|
|
debugfs_create_file("ctrl", 0400, vif->xenvif_dbg_root, vif,
|
|
|
|
&xenvif_ctrl_fops);
|
2014-07-09 02:49:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Remove this vif's debugfs directory and everything inside it.
 * debugfs_remove_recursive() accepts NULL, so this is safe even if
 * xenvif_debugfs_addif() was never called.
 */
static void xenvif_debugfs_delif(struct xenvif *vif)
{
	debugfs_remove_recursive(vif->xenvif_dbg_root);
	vif->xenvif_dbg_root = NULL;
}
|
|
|
|
#endif /* CONFIG_DEBUG_FS */
|
|
|
|
|
2011-03-15 08:06:18 +08:00
|
|
|
/*
|
|
|
|
* Handle the creation of the hotplug script environment. We add the script
|
|
|
|
* and vif variables to the environment, for the benefit of the vif-* hotplug
|
|
|
|
* scripts.
|
|
|
|
*/
|
|
|
|
static int netback_uevent(struct xenbus_device *xdev,
|
|
|
|
struct kobj_uevent_env *env)
|
|
|
|
{
|
|
|
|
struct backend_info *be = dev_get_drvdata(&xdev->dev);
|
|
|
|
|
2015-06-01 18:30:24 +08:00
|
|
|
if (!be)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (add_uevent_var(env, "script=%s", be->hotplug_script))
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
if (!be->vif)
|
2011-03-15 08:06:18 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
return add_uevent_var(env, "vif=%s", be->vif->dev->name);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-11-24 18:58:00 +08:00
|
|
|
/* Create the xenvif for this backend if one does not already exist.
 *
 * Reads the domain-assigned "handle" from the backend's xenstore
 * directory, allocates the vif, links vif and backend together, and
 * announces the device online with a KOBJ_ONLINE uevent (which fires
 * the hotplug script).  Returns 0 on success or if a vif already
 * exists, a negative errno otherwise (after reporting the failure
 * through xenbus_dev_fatal()).
 */
static int backend_create_xenvif(struct backend_info *be)
{
	int err;
	long handle;
	struct xenbus_device *dev = be->dev;
	struct xenvif *vif;

	if (be->vif != NULL)
		return 0;

	err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
	if (err != 1) {
		xenbus_dev_fatal(dev, err, "reading handle");
		/* xenbus_scanf() returns the number of matched items;
		 * a non-negative short count means malformed input.
		 */
		return (err < 0) ? err : -EINVAL;
	}

	vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
	if (IS_ERR(vif)) {
		err = PTR_ERR(vif);
		xenbus_dev_fatal(dev, err, "creating interface");
		return err;
	}
	be->vif = vif;
	vif->be = be;

	kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
	return 0;
}
|
|
|
|
|
2013-09-26 19:09:52 +08:00
|
|
|
/* Tear down the vif's data and control paths.
 *
 * Ordering matters here: watches are removed and the data path
 * disconnected before num_queues is zeroed, and synchronize_net()
 * must complete before the queue structures are freed so that no
 * in-flight handler can still reference them.
 */
static void backend_disconnect(struct backend_info *be)
{
	struct xenvif *vif = be->vif;

	if (vif) {
		/* snapshot the count before it is zeroed below */
		unsigned int num_queues = vif->num_queues;
		unsigned int queue_index;

		xen_unregister_watchers(vif);
#ifdef CONFIG_DEBUG_FS
		xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
		xenvif_disconnect_data(vif);

		/* At this point some of the handlers may still be active
		 * so we need to have additional synchronization here.
		 */
		vif->num_queues = 0;
		synchronize_net();

		for (queue_index = 0; queue_index < num_queues; ++queue_index)
			xenvif_deinit_queue(&vif->queues[queue_index]);

		vfree(vif->queues);
		vif->queues = NULL;

		xenvif_disconnect_ctrl(vif);
	}
}
|
|
|
|
|
2013-09-26 19:09:52 +08:00
|
|
|
static void backend_connect(struct backend_info *be)
|
2013-09-18 00:46:08 +08:00
|
|
|
{
|
2013-09-26 19:09:52 +08:00
|
|
|
if (be->vif)
|
|
|
|
connect(be);
|
|
|
|
}
|
2013-09-18 00:46:08 +08:00
|
|
|
|
2013-09-26 19:09:52 +08:00
|
|
|
static inline void backend_switch_state(struct backend_info *be,
|
|
|
|
enum xenbus_state state)
|
|
|
|
{
|
|
|
|
struct xenbus_device *dev = be->dev;
|
|
|
|
|
|
|
|
pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state));
|
|
|
|
be->state = state;
|
|
|
|
|
|
|
|
/* If we are waiting for a hotplug script then defer the
|
|
|
|
* actual xenbus state change.
|
|
|
|
*/
|
|
|
|
if (!be->have_hotplug_status_watch)
|
|
|
|
xenbus_switch_state(dev, state);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Handle backend state transitions:
|
|
|
|
*
|
2016-09-15 23:10:46 +08:00
|
|
|
* The backend state starts in Initialising and the following transitions are
|
2013-09-26 19:09:52 +08:00
|
|
|
* allowed.
|
|
|
|
*
|
2016-09-15 23:10:46 +08:00
|
|
|
* Initialising -> InitWait -> Connected
|
|
|
|
* \
|
|
|
|
* \ ^ \ |
|
|
|
|
* \ | \ |
|
|
|
|
* \ | \ |
|
|
|
|
* \ | \ |
|
|
|
|
* \ | \ |
|
|
|
|
* \ | \ |
|
|
|
|
* V | V V
|
2013-09-26 19:09:52 +08:00
|
|
|
*
|
2016-09-15 23:10:46 +08:00
|
|
|
* Closed <-> Closing
|
2013-09-26 19:09:52 +08:00
|
|
|
*
|
|
|
|
* The state argument specifies the eventual state of the backend and the
|
|
|
|
* function transitions to that state via the shortest path.
|
|
|
|
*/
|
|
|
|
/* Drive the backend from its current state to the requested one, one
 * hop at a time.  Each outer-switch case chooses the next state on the
 * shortest allowed path (see the transition diagram above); any pair
 * not covered there is a driver bug, hence the BUG() defaults.
 */
static void set_backend_state(struct backend_info *be,
			      enum xenbus_state state)
{
	while (be->state != state) {
		switch (be->state) {
		case XenbusStateInitialising:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
			case XenbusStateClosing:
				backend_switch_state(be, XenbusStateInitWait);
				break;
			case XenbusStateClosed:
				backend_switch_state(be, XenbusStateClosed);
				break;
			default:
				BUG();
			}
			break;
		case XenbusStateClosed:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
				backend_switch_state(be, XenbusStateInitWait);
				break;
			case XenbusStateClosing:
				backend_switch_state(be, XenbusStateClosing);
				break;
			default:
				BUG();
			}
			break;
		case XenbusStateInitWait:
			switch (state) {
			case XenbusStateConnected:
				/* the only transition that actually brings
				 * the data path up
				 */
				backend_connect(be);
				backend_switch_state(be, XenbusStateConnected);
				break;
			case XenbusStateClosing:
			case XenbusStateClosed:
				backend_switch_state(be, XenbusStateClosing);
				break;
			default:
				BUG();
			}
			break;
		case XenbusStateConnected:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateClosing:
			case XenbusStateClosed:
				/* leaving Connected always tears the data
				 * path down first
				 */
				backend_disconnect(be);
				backend_switch_state(be, XenbusStateClosing);
				break;
			default:
				BUG();
			}
			break;
		case XenbusStateClosing:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
			case XenbusStateClosed:
				backend_switch_state(be, XenbusStateClosed);
				break;
			default:
				BUG();
			}
			break;
		default:
			BUG();
		}
	}
}
|
|
|
|
|
2020-06-29 21:13:29 +08:00
|
|
|
static void read_xenbus_frontend_xdp(struct backend_info *be,
|
|
|
|
struct xenbus_device *dev)
|
|
|
|
{
|
|
|
|
struct xenvif *vif = be->vif;
|
|
|
|
u16 headroom;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = xenbus_scanf(XBT_NIL, dev->otherend,
|
|
|
|
"xdp-headroom", "%hu", &headroom);
|
|
|
|
if (err != 1) {
|
|
|
|
vif->xdp_headroom = 0;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (headroom > XEN_NETIF_MAX_XDP_HEADROOM)
|
|
|
|
headroom = XEN_NETIF_MAX_XDP_HEADROOM;
|
|
|
|
vif->xdp_headroom = headroom;
|
|
|
|
}
|
|
|
|
|
2021-01-16 04:09:00 +08:00
|
|
|
/*
|
2011-03-15 08:06:18 +08:00
|
|
|
* Callback received when the frontend's state changes.
|
|
|
|
*/
|
|
|
|
static void frontend_changed(struct xenbus_device *dev,
			     enum xenbus_state frontend_state)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state));

	be->frontend_state = frontend_state;

	switch (frontend_state) {
	case XenbusStateInitialising:
		set_backend_state(be, XenbusStateInitWait);
		break;

	case XenbusStateInitialised:
		break;

	case XenbusStateConnected:
		set_backend_state(be, XenbusStateConnected);
		break;

	case XenbusStateReconfiguring:
		/* frontend renegotiating XDP headroom; acknowledge by
		 * moving straight to Reconfigured
		 */
		read_xenbus_frontend_xdp(be, dev);
		xenbus_switch_state(dev, XenbusStateReconfigured);
		break;

	case XenbusStateClosing:
		set_backend_state(be, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		set_backend_state(be, XenbusStateClosed);
		/* keep the device around for a possible reconnect while
		 * it is still marked online in xenstore
		 */
		if (xenbus_dev_is_online(dev))
			break;
		fallthrough; /* if not online */
	case XenbusStateUnknown:
		set_backend_state(be, XenbusStateClosed);
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}
|
|
|
|
|
|
|
|
|
|
|
|
static void xen_net_read_rate(struct xenbus_device *dev,
|
|
|
|
unsigned long *bytes, unsigned long *usec)
|
|
|
|
{
|
|
|
|
char *s, *e;
|
|
|
|
unsigned long b, u;
|
|
|
|
char *ratestr;
|
|
|
|
|
|
|
|
/* Default to unlimited bandwidth. */
|
|
|
|
*bytes = ~0UL;
|
|
|
|
*usec = 0;
|
|
|
|
|
|
|
|
ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL);
|
|
|
|
if (IS_ERR(ratestr))
|
|
|
|
return;
|
|
|
|
|
|
|
|
s = ratestr;
|
|
|
|
b = simple_strtoul(s, &e, 10);
|
|
|
|
if ((s == e) || (*e != ','))
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
s = e + 1;
|
|
|
|
u = simple_strtoul(s, &e, 10);
|
|
|
|
if ((s == e) || (*e != '\0'))
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
*bytes = b;
|
|
|
|
*usec = u;
|
|
|
|
|
|
|
|
kfree(ratestr);
|
|
|
|
return;
|
|
|
|
|
|
|
|
fail:
|
|
|
|
pr_warn("Failed to parse network rate limit. Traffic unlimited.\n");
|
|
|
|
kfree(ratestr);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Parse the "mac" xenstore node ("xx:xx:xx:xx:xx:xx") into mac[].
 * Returns 0 on success, the xenbus_read() error if the node is
 * unreadable, or -ENOENT on a malformed address.
 */
static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *macstr, *cur, *end;
	int i;

	macstr = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	cur = macstr;
	for (i = 0; i < ETH_ALEN; i++) {
		/* octets are ':'-separated; the last one ends the string */
		char sep = (i == ETH_ALEN - 1) ? '\0' : ':';

		mac[i] = simple_strtoul(cur, &end, 16);
		if (end == cur || *end != sep) {
			kfree(macstr);
			return -ENOENT;
		}
		cur = end + 1;
	}

	kfree(macstr);
	return 0;
}
|
|
|
|
|
2015-03-19 18:05:42 +08:00
|
|
|
/* Watch callback for the backend's "rate" node: re-read the credit
 * parameters and apply them to every queue of the vif.
 */
static void xen_net_rate_changed(struct xenbus_watch *watch,
				 const char *path, const char *token)
{
	struct xenvif *vif = container_of(watch, struct xenvif, credit_watch);
	struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
	unsigned long credit_bytes;
	unsigned long credit_usec;
	unsigned int queue_index;

	xen_net_read_rate(dev, &credit_bytes, &credit_usec);
	for (queue_index = 0; queue_index < vif->num_queues; queue_index++) {
		struct xenvif_queue *queue = &vif->queues[queue_index];

		queue->credit_bytes = credit_bytes;
		queue->credit_usec = credit_usec;
		/* Clamp accumulated credit to the new ceiling unless the
		 * credit timer is about to refill it anyway.
		 */
		if (!mod_timer_pending(&queue->credit_timeout, jiffies) &&
			queue->remaining_credit > queue->credit_bytes) {
			queue->remaining_credit = queue->credit_bytes;
		}
	}
}
|
|
|
|
|
2016-02-02 19:55:05 +08:00
|
|
|
/* Register a xenbus watch on the backend's "rate" node so rate-limit
 * changes take effect at runtime (see xen_net_rate_changed()).
 * Returns 0 on success, -EADDRINUSE if a watch is already registered,
 * -ENOMEM on allocation failure, or the register_xenbus_watch() error.
 */
static int xen_register_credit_watch(struct xenbus_device *dev,
				     struct xenvif *vif)
{
	int err = 0;
	char *node;
	unsigned maxlen = strlen(dev->nodename) + sizeof("/rate");

	if (vif->credit_watch.node)
		return -EADDRINUSE;

	node = kmalloc(maxlen, GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	snprintf(node, maxlen, "%s/rate", dev->nodename);
	vif->credit_watch.node = node;
	/* no will_handle filter: every event is queued (XSA-349 API) */
	vif->credit_watch.will_handle = NULL;
	vif->credit_watch.callback = xen_net_rate_changed;
	err = register_xenbus_watch(&vif->credit_watch);
	if (err) {
		pr_err("Failed to set watcher %s\n", vif->credit_watch.node);
		kfree(node);
		/* reset the watch so a later registration can retry */
		vif->credit_watch.node = NULL;
		vif->credit_watch.will_handle = NULL;
		vif->credit_watch.callback = NULL;
	}
	return err;
}
|
|
|
|
|
2016-02-02 19:55:05 +08:00
|
|
|
static void xen_unregister_credit_watch(struct xenvif *vif)
|
2015-03-19 18:05:42 +08:00
|
|
|
{
|
|
|
|
if (vif->credit_watch.node) {
|
|
|
|
unregister_xenbus_watch(&vif->credit_watch);
|
|
|
|
kfree(vif->credit_watch.node);
|
|
|
|
vif->credit_watch.node = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-02-02 19:55:05 +08:00
|
|
|
static void xen_mcast_ctrl_changed(struct xenbus_watch *watch,
|
2017-02-09 21:39:57 +08:00
|
|
|
const char *path, const char *token)
|
2016-02-02 19:55:05 +08:00
|
|
|
{
|
|
|
|
struct xenvif *vif = container_of(watch, struct xenvif,
|
|
|
|
mcast_ctrl_watch);
|
|
|
|
struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
|
|
|
|
|
2016-10-31 21:58:41 +08:00
|
|
|
vif->multicast_control = !!xenbus_read_unsigned(dev->otherend,
|
|
|
|
"request-multicast-control", 0);
|
2016-02-02 19:55:05 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Register a watch on the frontend's "request-multicast-control" node
 * so the guest can toggle multicast control at runtime (see
 * xen_mcast_ctrl_changed()).  Returns 0 on success, -EADDRINUSE if
 * already registered, -ENOMEM on allocation failure, or the
 * register_xenbus_watch() error.
 */
static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
					 struct xenvif *vif)
{
	int err = 0;
	char *node;
	unsigned maxlen = strlen(dev->otherend) +
		sizeof("/request-multicast-control");

	if (vif->mcast_ctrl_watch.node) {
		pr_err_ratelimited("Watch is already registered\n");
		return -EADDRINUSE;
	}

	node = kmalloc(maxlen, GFP_KERNEL);
	if (!node) {
		pr_err("Failed to allocate memory for watch\n");
		return -ENOMEM;
	}
	snprintf(node, maxlen, "%s/request-multicast-control",
		 dev->otherend);
	vif->mcast_ctrl_watch.node = node;
	/* no will_handle filter: every event is queued (XSA-349 API) */
	vif->mcast_ctrl_watch.will_handle = NULL;
	vif->mcast_ctrl_watch.callback = xen_mcast_ctrl_changed;
	err = register_xenbus_watch(&vif->mcast_ctrl_watch);
	if (err) {
		pr_err("Failed to set watcher %s\n",
		       vif->mcast_ctrl_watch.node);
		kfree(node);
		/* reset the watch so a later registration can retry */
		vif->mcast_ctrl_watch.node = NULL;
		vif->mcast_ctrl_watch.will_handle = NULL;
		vif->mcast_ctrl_watch.callback = NULL;
	}
	return err;
}
|
|
|
|
|
|
|
|
static void xen_unregister_mcast_ctrl_watch(struct xenvif *vif)
|
|
|
|
{
|
|
|
|
if (vif->mcast_ctrl_watch.node) {
|
|
|
|
unregister_xenbus_watch(&vif->mcast_ctrl_watch);
|
|
|
|
kfree(vif->mcast_ctrl_watch.node);
|
|
|
|
vif->mcast_ctrl_watch.node = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Register all runtime xenstore watches for this vif (rate limit and
 * multicast control).  Failures are reported inside the helpers.
 */
static void xen_register_watchers(struct xenbus_device *dev,
				  struct xenvif *vif)
{
	xen_register_credit_watch(dev, vif);
	xen_register_mcast_ctrl_watch(dev, vif);
}
|
|
|
|
|
|
|
|
/* Tear down the vif's runtime watches (reverse order of registration). */
static void xen_unregister_watchers(struct xenvif *vif)
{
	xen_unregister_mcast_ctrl_watch(vif);
	xen_unregister_credit_watch(vif);
}
|
|
|
|
|
2011-03-15 08:06:18 +08:00
|
|
|
static void unregister_hotplug_status_watch(struct backend_info *be)
|
|
|
|
{
|
|
|
|
if (be->have_hotplug_status_watch) {
|
|
|
|
unregister_xenbus_watch(&be->hotplug_status_watch);
|
|
|
|
kfree(be->hotplug_status_watch.node);
|
|
|
|
}
|
|
|
|
be->have_hotplug_status_watch = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Watch callback for the backend's "hotplug-status" node.
 *
 * When the hotplug script writes "connected", complete whatever xenbus
 * state change backend_switch_state() deferred, then drop the watch
 * and the node itself (it has served its purpose).
 */
static void hotplug_status_changed(struct xenbus_watch *watch,
				   const char *path,
				   const char *token)
{
	struct backend_info *be = container_of(watch,
					       struct backend_info,
					       hotplug_status_watch);
	char *str;
	unsigned int len;

	str = xenbus_read(XBT_NIL, be->dev->nodename, "hotplug-status", &len);
	if (IS_ERR(str))
		return;
	if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) {
		/* Complete any pending state change */
		xenbus_switch_state(be->dev, be->state);

		/* Not interested in this watch anymore. */
		unregister_hotplug_status_watch(be);
		xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
	}
	kfree(str);
}
|
|
|
|
|
2016-05-13 16:37:26 +08:00
|
|
|
/* Map the frontend's optional control ring and bind its event channel.
 *
 * A missing "ctrl-ring-ref" is not an error — older frontends have no
 * control ring — so that case returns 0.  Once the ring reference is
 * present, a missing event channel or a failed mapping is fatal and is
 * reported through xenbus_dev_fatal().
 */
static int connect_ctrl_ring(struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xenvif *vif = be->vif;
	unsigned int val;
	grant_ref_t ring_ref;
	unsigned int evtchn;
	int err;

	err = xenbus_scanf(XBT_NIL, dev->otherend,
			   "ctrl-ring-ref", "%u", &val);
	if (err < 0)
		goto done; /* The frontend does not have a control ring */

	ring_ref = val;

	err = xenbus_scanf(XBT_NIL, dev->otherend,
			   "event-channel-ctrl", "%u", &val);
	if (err < 0) {
		xenbus_dev_fatal(dev, err,
				 "reading %s/event-channel-ctrl",
				 dev->otherend);
		goto fail;
	}

	evtchn = val;

	err = xenvif_connect_ctrl(vif, ring_ref, evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "mapping shared-frame %u port %u",
				 ring_ref, evtchn);
		goto fail;
	}

done:
	return 0;

fail:
	return err;
}
|
|
|
|
|
/* Bring the backend fully up once the frontend has published its
 * configuration: read the requested queue count, MAC and rate limits,
 * connect the optional control ring, allocate and connect every data
 * queue, and finally turn the carrier on and (re)arm the hotplug-status
 * watch. On any failure the device is marked fatal via xenbus_dev_fatal()
 * and all partially-initialised state is torn down before returning.
 */
static void connect(struct backend_info *be)
{
	int err;
	struct xenbus_device *dev = be->dev;
	unsigned long credit_bytes, credit_usec;
	unsigned int queue_index;
	unsigned int requested_num_queues;
	struct xenvif_queue *queue;

	/* Check whether the frontend requested multiple queues
	 * and read the number requested.
	 */
	requested_num_queues = xenbus_read_unsigned(dev->otherend,
					"multi-queue-num-queues", 1);
	if (requested_num_queues > xenvif_max_queues) {
		/* buggy or malicious guest */
		xenbus_dev_fatal(dev, -EINVAL,
				 "guest requested %u queues, exceeding the maximum of %u.",
				 requested_num_queues, xenvif_max_queues);
		return;
	}

	err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		return;
	}

	/* Credit (rate-limit) parameters are shared by all queues. */
	xen_net_read_rate(dev, &credit_bytes, &credit_usec);
	/* Drop any watchers from a previous connect before re-registering,
	 * so a reconnect does not leave stale watches behind.
	 */
	xen_unregister_watchers(be->vif);
	xen_register_watchers(dev, be->vif);
	read_xenbus_vif_flags(be);

	err = connect_ctrl_ring(be);
	if (err) {
		xenbus_dev_fatal(dev, err, "connecting control ring");
		return;
	}

	/* Use the number of queues requested by the frontend */
	be->vif->queues = vzalloc(array_size(requested_num_queues,
					     sizeof(struct xenvif_queue)));
	if (!be->vif->queues) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating queues");
		return;
	}

	be->vif->num_queues = requested_num_queues;
	be->vif->stalled_queues = requested_num_queues;

	for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
		queue = &be->vif->queues[queue_index];
		queue->vif = be->vif;
		queue->id = queue_index;
		snprintf(queue->name, sizeof(queue->name), "%s-q%u",
				be->vif->dev->name, queue->id);

		err = xenvif_init_queue(queue);
		if (err) {
			/* xenvif_init_queue() cleans up after itself on
			 * failure, but we need to clean up any previously
			 * initialised queues. Set num_queues to i so that
			 * earlier queues can be destroyed using the regular
			 * disconnect logic.
			 */
			be->vif->num_queues = queue_index;
			goto err;
		}

		queue->credit_bytes = credit_bytes;
		queue->remaining_credit = credit_bytes;
		queue->credit_usec = credit_usec;

		err = connect_data_rings(be, queue);
		if (err) {
			/* connect_data_rings() cleans up after itself on
			 * failure, but we need to clean up after
			 * xenvif_init_queue() here, and also clean up any
			 * previously initialised queues.
			 */
			xenvif_deinit_queue(queue);
			be->vif->num_queues = queue_index;
			goto err;
		}
	}

#ifdef CONFIG_DEBUG_FS
	xenvif_debugfs_addif(be->vif);
#endif /* CONFIG_DEBUG_FS */

	/* Initialisation completed, tell core driver the number of
	 * active queues.
	 */
	rtnl_lock();
	netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
	netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues);
	rtnl_unlock();

	xenvif_carrier_on(be->vif);

	/* Re-arm the hotplug-status watch; failure to register it is
	 * non-fatal (we simply won't track the key).
	 */
	unregister_hotplug_status_watch(be);
	err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
				   hotplug_status_changed,
				   "%s/%s", dev->nodename, "hotplug-status");
	if (!err)
		be->have_hotplug_status_watch = 1;

	netif_tx_wake_all_queues(be->vif->dev);

	return;

err:
	/* num_queues was set to the count of fully-connected queues above,
	 * so disconnect/deinit below only touch initialised entries.
	 */
	if (be->vif->num_queues > 0)
		xenvif_disconnect_data(be->vif); /* Clean up existing queues */
	for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
		xenvif_deinit_queue(&be->vif->queues[queue_index]);
	vfree(be->vif->queues);
	be->vif->queues = NULL;
	be->vif->num_queues = 0;
	xenvif_disconnect_ctrl(be->vif);
	return;
}
/* Read one queue's TX/RX ring references and event channel(s) from the
 * frontend's xenstore area and map them via xenvif_connect_data().
 *
 * Returns 0 on success or a negative errno (also reported through
 * xenbus_dev_fatal()) on failure. Cleans up only its own allocation
 * (xspath); the caller tears down the queue itself.
 */
static int connect_data_rings(struct backend_info *be,
			      struct xenvif_queue *queue)
{
	struct xenbus_device *dev = be->dev;
	unsigned int num_queues = queue->vif->num_queues;
	unsigned long tx_ring_ref, rx_ring_ref;
	unsigned int tx_evtchn, rx_evtchn;
	int err;
	char *xspath;
	size_t xspathsize;
	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */

	/* If the frontend requested 1 queue, or we have fallen back
	 * to single queue due to lack of frontend support for multi-
	 * queue, expect the remaining XenStore keys in the toplevel
	 * directory. Otherwise, expect them in a subdirectory called
	 * queue-N.
	 */
	if (num_queues == 1) {
		xspath = kzalloc(strlen(dev->otherend) + 1, GFP_KERNEL);
		if (!xspath) {
			xenbus_dev_fatal(dev, -ENOMEM,
					 "reading ring references");
			return -ENOMEM;
		}
		strcpy(xspath, dev->otherend);
	} else {
		xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
		xspath = kzalloc(xspathsize, GFP_KERNEL);
		if (!xspath) {
			xenbus_dev_fatal(dev, -ENOMEM,
					 "reading ring references");
			return -ENOMEM;
		}
		snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend,
			 queue->id);
	}

	/* Both ring references are mandatory. */
	err = xenbus_gather(XBT_NIL, xspath,
			    "tx-ring-ref", "%lu", &tx_ring_ref,
			    "rx-ring-ref", "%lu", &rx_ring_ref, NULL);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "reading %s/ring-ref",
				 xspath);
		goto err;
	}

	/* Try split event channels first, then single event channel. */
	err = xenbus_gather(XBT_NIL, xspath,
			    "event-channel-tx", "%u", &tx_evtchn,
			    "event-channel-rx", "%u", &rx_evtchn, NULL);
	if (err < 0) {
		err = xenbus_scanf(XBT_NIL, xspath,
				   "event-channel", "%u", &tx_evtchn);
		if (err < 0) {
			xenbus_dev_fatal(dev, err,
					 "reading %s/event-channel(-tx/rx)",
					 xspath);
			goto err;
		}
		/* Single channel: use it for both TX and RX. */
		rx_evtchn = tx_evtchn;
	}

	/* Map the shared frame, irq etc. */
	err = xenvif_connect_data(queue, tx_ring_ref, rx_ring_ref,
				  tx_evtchn, rx_evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "mapping shared-frames %lu/%lu port tx %u rx %u",
				 tx_ring_ref, rx_ring_ref,
				 tx_evtchn, rx_evtchn);
		goto err;
	}

	err = 0;
err: /* Regular return falls through with err == 0 */
	kfree(xspath);
	return err;
}
/* Read the frontend's advertised feature flags from xenstore and apply
 * them to the vif (SG, GSO v4/v6, checksum offloads, XDP headroom).
 *
 * Returns 0 on success, a negative errno on xenstore read failure, or
 * -EOPNOTSUPP if the frontend does not request the rx-copy path (the
 * only receive path this backend supports).
 */
static int read_xenbus_vif_flags(struct backend_info *be)
{
	struct xenvif *vif = be->vif;
	struct xenbus_device *dev = be->dev;
	unsigned int rx_copy;
	int err;

	err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
			   &rx_copy);
	if (err == -ENOENT) {
		/* Missing key is treated as "not requested", not an error. */
		err = 0;
		rx_copy = 0;
	}
	if (err < 0) {
		xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
				 dev->otherend);
		return err;
	}
	if (!rx_copy)
		return -EOPNOTSUPP;

	if (!xenbus_read_unsigned(dev->otherend, "feature-rx-notify", 0)) {
		/* - Reduce drain timeout to poll more frequently for
		 *   Rx requests.
		 * - Disable Rx stall detection.
		 */
		be->vif->drain_timeout = msecs_to_jiffies(30);
		be->vif->stall_timeout = 0;
	}

	vif->can_sg = !!xenbus_read_unsigned(dev->otherend, "feature-sg", 0);

	vif->gso_mask = 0;

	if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv4", 0))
		vif->gso_mask |= GSO_BIT(TCPV4);

	if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv6", 0))
		vif->gso_mask |= GSO_BIT(TCPV6);

	/* IPv4 checksum offload is opt-out; IPv6 is opt-in. */
	vif->ip_csum = !xenbus_read_unsigned(dev->otherend,
					     "feature-no-csum-offload", 0);

	vif->ipv6_csum = !!xenbus_read_unsigned(dev->otherend,
						"feature-ipv6-csum-offload", 0);

	read_xenbus_frontend_xdp(be, dev);

	return 0;
}
/* Device teardown: undo everything netback_probe()/connect() set up.
 * Order matters: drop the hotplug watch first, then disconnect and free
 * the vif (emitting KOBJ_OFFLINE so hotplug scripts run), and only then
 * release the backend_info itself.
 */
static int netback_remove(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	unregister_hotplug_status_watch(be);
	if (be->vif) {
		kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
		backend_disconnect(be);
		xenvif_free(be->vif);
		be->vif = NULL;
	}
	kfree(be->hotplug_script);
	kfree(be);
	/* Clear drvdata so later callbacks cannot see the freed struct. */
	dev_set_drvdata(&dev->dev, NULL);
	return 0;
}
/*
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and switch to InitWait.
 */
static int netback_probe(struct xenbus_device *dev,
			 const struct xenbus_device_id *id)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;
	int sg;
	const char *script;
	struct backend_info *be = kzalloc(sizeof(*be), GFP_KERNEL);

	if (!be) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating backend structure");
		return -ENOMEM;
	}

	be->dev = dev;
	dev_set_drvdata(&dev->dev, be);

	sg = 1;

	/* Advertise our features in a single xenstore transaction,
	 * retrying from scratch whenever the transaction races
	 * (-EAGAIN). On any write failure, abort and report via the
	 * "message" label protocol below.
	 */
	do {
		err = xenbus_transaction_start(&xbt);
		if (err) {
			xenbus_dev_fatal(dev, err, "starting transaction");
			goto fail;
		}

		err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
		if (err) {
			message = "writing feature-sg";
			goto abort_transaction;
		}

		err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
				    "%d", sg);
		if (err) {
			message = "writing feature-gso-tcpv4";
			goto abort_transaction;
		}

		err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
				    "%d", sg);
		if (err) {
			message = "writing feature-gso-tcpv6";
			goto abort_transaction;
		}

		/* We support partial checksum setup for IPv6 packets */
		err = xenbus_printf(xbt, dev->nodename,
				    "feature-ipv6-csum-offload",
				    "%d", 1);
		if (err) {
			message = "writing feature-ipv6-csum-offload";
			goto abort_transaction;
		}

		/* We support rx-copy path. */
		err = xenbus_printf(xbt, dev->nodename,
				    "feature-rx-copy", "%d", 1);
		if (err) {
			message = "writing feature-rx-copy";
			goto abort_transaction;
		}

		/* we can adjust a headroom for netfront XDP processing */
		err = xenbus_printf(xbt, dev->nodename,
				    "feature-xdp-headroom", "%d",
				    provides_xdp_headroom);
		if (err) {
			message = "writing feature-xdp-headroom";
			goto abort_transaction;
		}

		/* We don't support rx-flip path (except old guests who
		 * don't grok this feature flag).
		 */
		err = xenbus_printf(xbt, dev->nodename,
				    "feature-rx-flip", "%d", 0);
		if (err) {
			message = "writing feature-rx-flip";
			goto abort_transaction;
		}

		/* We support dynamic multicast-control. */
		err = xenbus_printf(xbt, dev->nodename,
				    "feature-multicast-control", "%d", 1);
		if (err) {
			message = "writing feature-multicast-control";
			goto abort_transaction;
		}

		err = xenbus_printf(xbt, dev->nodename,
				    "feature-dynamic-multicast-control",
				    "%d", 1);
		if (err) {
			message = "writing feature-dynamic-multicast-control";
			goto abort_transaction;
		}

		err = xenbus_transaction_end(xbt, 0);
	} while (err == -EAGAIN);

	if (err) {
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto fail;
	}

	/* Split event channels support, this is optional so it is not
	 * put inside the above loop.
	 */
	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "feature-split-event-channels",
			    "%u", separate_tx_rx_irq);
	if (err)
		pr_debug("Error writing feature-split-event-channels\n");

	/* Multi-queue support: This is an optional feature. */
	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "multi-queue-max-queues", "%u", xenvif_max_queues);
	if (err)
		pr_debug("Error writing multi-queue-max-queues\n");

	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "feature-ctrl-ring",
			    "%u", true);
	if (err)
		pr_debug("Error writing feature-ctrl-ring\n");

	backend_switch_state(be, XenbusStateInitWait);

	/* Remember the hotplug script path; freed in netback_remove(). */
	script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
	if (IS_ERR(script)) {
		err = PTR_ERR(script);
		xenbus_dev_fatal(dev, err, "reading script");
		goto fail;
	}

	be->hotplug_script = script;

	/* This kicks hotplug scripts, so do it immediately. */
	err = backend_create_xenvif(be);
	if (err)
		goto fail;

	return 0;

abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
fail:
	pr_debug("failed\n");
	/* netback_remove() tolerates a partially-initialised be. */
	netback_remove(dev);
	return err;
}
/* Xenbus device IDs this backend binds to; terminated by an empty entry. */
static const struct xenbus_device_id netback_ids[] = {
	{ "vif" },
	{ "" }
};
/* Xenbus backend driver operations for "vif" devices. */
static struct xenbus_driver netback_driver = {
	.ids = netback_ids,
	.probe = netback_probe,
	.remove = netback_remove,
	.uevent = netback_uevent,
	.otherend_changed = frontend_changed,
	/* Allow the toolstack to rebind a still-present device. */
	.allow_rebind = true,
};
/* Register the netback driver with the xenbus backend core.
 * Returns 0 on success or a negative errno from registration.
 */
int xenvif_xenbus_init(void)
{
	return xenbus_register_backend(&netback_driver);
}
2013-05-17 07:26:11 +08:00
|
|
|
|
|
|
|
void xenvif_xenbus_fini(void)
|
|
|
|
{
|
|
|
|
return xenbus_unregister_driver(&netback_driver);
|
|
|
|
}
|