2011-03-15 08:06:18 +08:00
|
|
|
/*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License version 2
|
|
|
|
* as published by the Free Software Foundation; or, when distributed
|
|
|
|
* separately from the Linux kernel or incorporated into other
|
|
|
|
* software packages, subject to the following license:
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
|
* of this source file (the "Software"), to deal in the Software without
|
|
|
|
* restriction, including without limitation the rights to use, copy, modify,
|
|
|
|
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
|
|
|
|
* and to permit persons to whom the Software is furnished to do so, subject to
|
|
|
|
* the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice shall be included in
|
|
|
|
* all copies or substantial portions of the Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
|
|
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
|
|
* IN THE SOFTWARE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef __XEN_NETBACK__COMMON_H__
|
|
|
|
#define __XEN_NETBACK__COMMON_H__
|
|
|
|
|
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
|
|
|
|
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/ip.h>
|
|
|
|
#include <linux/in.h>
|
|
|
|
#include <linux/io.h>
|
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <linux/etherdevice.h>
|
|
|
|
#include <linux/wait.h>
|
|
|
|
#include <linux/sched.h>
|
|
|
|
|
|
|
|
#include <xen/interface/io/netif.h>
|
|
|
|
#include <xen/interface/grant_table.h>
|
|
|
|
#include <xen/grant_table.h>
|
|
|
|
#include <xen/xenbus.h>
|
|
|
|
|
2013-08-26 19:59:38 +08:00
|
|
|
typedef unsigned int pending_ring_idx_t;
|
|
|
|
#define INVALID_PENDING_RING_IDX (~0U)
|
|
|
|
|
|
|
|
/* For the head field in pending_tx_info: it is used to indicate
 * whether this tx info is the head of one or more coalesced requests.
 *
 * When head != INVALID_PENDING_RING_IDX, it means the start of a new
 * tx requests queue and the end of previous queue.
 *
 * An example sequence of head fields (I = INVALID_PENDING_RING_IDX):
 *
 * ...|0 I I I|5 I|9 I I I|...
 * -->|<-INUSE----------------
 *
 * After consuming the first slot(s) we have:
 *
 * ...|V V V V|5 I|9 I I I|...
 * -----FREE->|<-INUSE--------
 *
 * where V stands for "valid pending ring index". Any number other
 * than INVALID_PENDING_RING_IDX is OK. These entries are considered
 * free and can contain any number other than
 * INVALID_PENDING_RING_IDX. In practice we use 0.
 *
 * The in use non-INVALID_PENDING_RING_IDX (say 0, 5 and 9 in the
 * above example) number is the index into pending_tx_info and
 * mmap_pages arrays.
 */
/* State kept per in-flight (possibly coalesced) guest TX request. */
struct pending_tx_info {
	struct xen_netif_tx_request req; /* coalesced tx request */
	pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
				  * if it is head of one or more tx
				  * reqs
				  */
};
|
|
|
|
|
|
|
|
#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
|
|
|
|
#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
|
|
|
|
|
|
|
|
/* Per-buffer metadata accumulated while building guest RX responses. */
struct xenvif_rx_meta {
	int id;       /* NOTE(review): presumably the RX request id echoed
		       * back in the response -- confirm against netback.c
		       */
	int size;     /* bytes placed in this buffer */
	int gso_type; /* XEN_NETIF_GSO_TYPE_* value (see GSO_BIT below) */
	int gso_size; /* GSO segment size advertised to the frontend */
};
|
|
|
|
|
2013-10-17 00:50:32 +08:00
|
|
|
#define GSO_BIT(type) \
|
|
|
|
(1 << XEN_NETIF_GSO_TYPE_ ## type)
|
|
|
|
|
2013-08-26 19:59:38 +08:00
|
|
|
/* Discriminate from any valid pending_idx value. */
|
|
|
|
#define INVALID_PENDING_IDX 0xFFFF
|
|
|
|
|
|
|
|
#define MAX_BUFFER_OFFSET PAGE_SIZE
|
|
|
|
|
|
|
|
#define MAX_PENDING_REQS 256
|
2011-03-15 08:06:18 +08:00
|
|
|
|
|
|
|
/* Per-interface state for one backend virtual network device. */
struct xenvif {
	/* Unique identifier for this interface. */
	domid_t domid;
	unsigned int handle;

	/* Use NAPI for guest TX */
	struct napi_struct napi;
	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
	unsigned int tx_irq;
	/* Only used when feature-split-event-channels = 1 */
	char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
	struct xen_netif_tx_back_ring tx;
	struct sk_buff_head tx_queue;
	/* Backing pages for in-flight TX requests, indexed by pending idx. */
	struct page *mmap_pages[MAX_PENDING_REQS];
	/* Producer/consumer indices into pending_ring (free-slot ring). */
	pending_ring_idx_t pending_prod;
	pending_ring_idx_t pending_cons;
	u16 pending_ring[MAX_PENDING_REQS];
	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];

	/* Coalescing tx requests before copying makes number of grant
	 * copy ops greater or equal to number of slots required. In
	 * worst case a tx request consumes 2 gnttab_copy.
	 */
	struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];

	/* Use kthread for guest RX */
	struct task_struct *task;
	wait_queue_head_t wq;
	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
	unsigned int rx_irq;
	/* Only used when feature-split-event-channels = 1 */
	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
	struct xen_netif_rx_back_ring rx;
	struct sk_buff_head rx_queue;

	/* Set when the RX interrupt is triggered by the frontend.
	 * The worker thread may need to wake the queue.
	 */
	bool rx_event;

	/* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
	 * head/fragment page uses 2 copy operations because it
	 * straddles two buffers in the frontend.
	 */
	struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
	struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];

	/* Frontend's MAC address. */
	u8 fe_dev_addr[6];

	/* Frontend feature information. */
	int gso_mask;        /* GSO_BIT() types the frontend accepts */
	int gso_prefix_mask; /* GSO types using the prefix protocol */

	u8 can_sg:1;
	u8 ip_csum:1;
	u8 ipv6_csum:1;

	/* Internal feature information. */
	u8 can_queue:1; /* can queue packets for receiver? */

	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
	unsigned long credit_bytes;
	unsigned long credit_usec;
	unsigned long remaining_credit;
	struct timer_list credit_timeout;
	u64 credit_window_start;

	/* Statistics */
	unsigned long rx_gso_checksum_fixup;

	/* Miscellaneous private stuff. */
	struct net_device *dev;
};
|
|
|
|
|
2011-09-29 23:53:31 +08:00
|
|
|
static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
|
|
|
|
{
|
|
|
|
return to_xenbus_device(vif->dev->dev.parent);
|
|
|
|
}
|
|
|
|
|
2011-03-15 08:06:18 +08:00
|
|
|
struct xenvif *xenvif_alloc(struct device *parent,
|
|
|
|
domid_t domid,
|
|
|
|
unsigned int handle);
|
|
|
|
|
|
|
|
int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
|
2013-05-22 14:34:45 +08:00
|
|
|
unsigned long rx_ring_ref, unsigned int tx_evtchn,
|
|
|
|
unsigned int rx_evtchn);
|
2011-03-15 08:06:18 +08:00
|
|
|
void xenvif_disconnect(struct xenvif *vif);
|
2013-09-18 00:46:08 +08:00
|
|
|
void xenvif_free(struct xenvif *vif);
|
2011-03-15 08:06:18 +08:00
|
|
|
|
|
|
|
int xenvif_xenbus_init(void);
|
2013-05-17 07:26:11 +08:00
|
|
|
void xenvif_xenbus_fini(void);
|
2011-03-15 08:06:18 +08:00
|
|
|
|
|
|
|
int xenvif_schedulable(struct xenvif *vif);
|
|
|
|
|
2013-08-26 19:59:39 +08:00
|
|
|
int xenvif_must_stop_queue(struct xenvif *vif);
|
2011-03-15 08:06:18 +08:00
|
|
|
|
|
|
|
/* (Un)Map communication rings. */
|
2013-08-26 19:59:39 +08:00
|
|
|
void xenvif_unmap_frontend_rings(struct xenvif *vif);
|
|
|
|
int xenvif_map_frontend_rings(struct xenvif *vif,
|
|
|
|
grant_ref_t tx_ring_ref,
|
|
|
|
grant_ref_t rx_ring_ref);
|
2011-03-15 08:06:18 +08:00
|
|
|
|
|
|
|
/* Check for SKBs from frontend and schedule backend processing */
|
2013-08-26 19:59:39 +08:00
|
|
|
void xenvif_check_rx_xenvif(struct xenvif *vif);
|
2011-03-15 08:06:18 +08:00
|
|
|
|
2013-02-07 07:41:35 +08:00
|
|
|
/* Prevent the device from generating any further traffic. */
|
|
|
|
void xenvif_carrier_off(struct xenvif *vif);
|
|
|
|
|
2013-08-26 19:59:39 +08:00
|
|
|
int xenvif_tx_action(struct xenvif *vif, int budget);
|
2013-08-26 19:59:38 +08:00
|
|
|
|
2013-08-26 19:59:39 +08:00
|
|
|
int xenvif_kthread(void *data);
|
|
|
|
void xenvif_kick_thread(struct xenvif *vif);
|
|
|
|
|
|
|
|
/* Determine whether the needed number of slots (req) are available,
|
|
|
|
* and set req_event if not.
|
|
|
|
*/
|
|
|
|
bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
|
|
|
|
|
|
|
|
void xenvif_stop_queue(struct xenvif *vif);
|
2013-08-26 19:59:38 +08:00
|
|
|
|
2013-05-22 14:34:45 +08:00
|
|
|
extern bool separate_tx_rx_irq;
|
|
|
|
|
2011-03-15 08:06:18 +08:00
|
|
|
#endif /* __XEN_NETBACK__COMMON_H__ */
|