Merge branch 'netback'

Ian Campbell says:

====================
The Xen netback implementation contains two flaws that can allow a
guest to cause a DoS in the backend domain, potentially affecting
other domains in the system.

CVE-2013-0216 is a failure to sanity-check the ring producer/consumer
pointers, which can allow a guest to make netback loop for an
extended period, preventing other work from occurring.
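
For illustration only, here is a minimal standalone sketch of the invariant
involved; tx_ring_is_sane is an invented name, and only RING_IDX and the
producer/consumer roles come from Xen's ring protocol. The indices are
free-running unsigned counters, so on an honest ring the producer can never
be more than one ring's worth of entries ahead of the consumer, and a single
unsigned comparison catches a bogus req_prod (the actual check lands in
xen_netbk_tx_build_gops below):

    /* Sketch, not netback code: the shape of the added sanity check. */
    typedef unsigned int RING_IDX;  /* free-running index, as in Xen's ring.h */

    static int tx_ring_is_sane(RING_IDX req_prod, RING_IDX req_cons,
                               unsigned int ring_size)
    {
        /* Unsigned subtraction copes with wrap-around.  A req_prod more
         * than ring_size ahead of req_cons is impossible on an honest
         * ring, so it must be treated as a fatal guest error. */
        return req_prod - req_cons <= ring_size;
    }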

CVE-2013-0217 is a guest-triggerable memory leak on an error path.
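
The shape of the fix is the classic unwind-on-failure idiom. Below is a
self-contained sketch; slots[], setup_all() and the malloc()/free() calls
are illustrative stand-ins, while the real code releases grant-copy slots
via xen_netbk_idx_release() with XEN_NETIF_RSP_ERROR (see the
xen_netbk_get_requests hunk below):

    #include <stdlib.h>

    #define NR_SLOTS 8
    static void *slots[NR_SLOTS];

    static int setup_all(int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            slots[i] = malloc(4096);  /* stands in for per-slot page allocation */
            if (!slots[i])
                goto err;             /* previously: bail out and leak */
        }
        return 0;

    err:
        /* Walk back over everything already claimed instead of
         * orphaning it on the error path. */
        while (i-- > 0) {
            free(slots[i]);
            slots[i] = NULL;
        }
        return -1;
    }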

The following series contains the fixes for these issues, as previously
included in Xen Security Advisory 39:
http://lists.xen.org/archives/html/xen-announce/2013-02/msg00001.html

Changes in v2:
 - Typo and block comment format fixes
 - Added stable Cc
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 0c35565b46
Author: David S. Miller <davem@davemloft.net>
Date:   2013-02-07 23:31:47 -05:00

3 changed files with 88 additions and 53 deletions

drivers/net/xen-netback/common.h

@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
 /* Notify xenvif that ring now has space to send an skb to the frontend */
 void xenvif_notify_tx_completion(struct xenvif *vif);
 
+/* Prevent the device from generating any further traffic. */
+void xenvif_carrier_off(struct xenvif *vif);
+
 /* Returns number of ring slots required to send an skb to the frontend */
 unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);

drivers/net/xen-netback/interface.c

@@ -343,17 +343,22 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 	return err;
 }
 
-void xenvif_disconnect(struct xenvif *vif)
+void xenvif_carrier_off(struct xenvif *vif)
 {
 	struct net_device *dev = vif->dev;
-	if (netif_carrier_ok(dev)) {
-		rtnl_lock();
-		netif_carrier_off(dev); /* discard queued packets */
-		if (netif_running(dev))
-			xenvif_down(vif);
-		rtnl_unlock();
-		xenvif_put(vif);
-	}
+
+	rtnl_lock();
+	netif_carrier_off(dev); /* discard queued packets */
+	if (netif_running(dev))
+		xenvif_down(vif);
+	rtnl_unlock();
+	xenvif_put(vif);
+}
+
+void xenvif_disconnect(struct xenvif *vif)
+{
+	if (netif_carrier_ok(vif->dev))
+		xenvif_carrier_off(vif);
 
 	atomic_dec(&vif->refcnt);
 	wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);

drivers/net/xen-netback/netback.c

@@ -147,7 +147,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
 	atomic_dec(&netbk->netfront_count);
 }
 
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+				  u8 status);
 static void make_tx_response(struct xenvif *vif,
 			     struct xen_netif_tx_request *txp,
 			     s8       st);
@@ -879,7 +880,7 @@ static void netbk_tx_err(struct xenvif *vif,
 
 	do {
 		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-		if (cons >= end)
+		if (cons == end)
 			break;
 		txp = RING_GET_REQUEST(&vif->tx, cons++);
 	} while (1);
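
A note on the ">=" to "==" change above: cons and end are free-running
RING_IDX counters, so when a batch of requests straddles the 2^32 wrap
point, end compares numerically smaller than cons, "cons >= end" is true on
the very first pass, and the loop would stop after a single error response,
stranding the rest of the batch. A standalone demonstration of the wrap
case (the counter and increment stand in for make_tx_response() and
RING_GET_REQUEST(); the values are illustrative):

    #include <stdio.h>

    typedef unsigned int RING_IDX;  /* free-running, wraps at 2^32 */

    int main(void)
    {
        RING_IDX cons = 0xfffffffeu;  /* consumer just below the wrap */
        RING_IDX end = cons + 4;      /* wraps to 2, numerically below cons */
        int responses = 0;

        do {
            responses++;              /* stands in for make_tx_response() */
            if (cons == end)          /* "cons >= end" is true immediately */
                break;
            cons++;                   /* stands in for RING_GET_REQUEST() */
        } while (1);

        /* Prints 5 (the failed request plus the four that follow it);
         * with ">=" it would print 1 and leave the rest unanswered. */
        printf("errored %d requests\n", responses);
        return 0;
    }
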
@@ -888,6 +889,13 @@ static void netbk_tx_err(struct xenvif *vif,
 	xenvif_put(vif);
 }
 
+static void netbk_fatal_tx_err(struct xenvif *vif)
+{
+	netdev_err(vif->dev, "fatal error; disabling device\n");
+	xenvif_carrier_off(vif);
+	xenvif_put(vif);
+}
+
 static int netbk_count_requests(struct xenvif *vif,
 				struct xen_netif_tx_request *first,
 				struct xen_netif_tx_request *txp,
@@ -901,19 +909,22 @@ static int netbk_count_requests(struct xenvif *vif,
 
 	do {
 		if (frags >= work_to_do) {
-			netdev_dbg(vif->dev, "Need more frags\n");
+			netdev_err(vif->dev, "Need more frags\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
 		if (unlikely(frags >= MAX_SKB_FRAGS)) {
-			netdev_dbg(vif->dev, "Too many frags\n");
+			netdev_err(vif->dev, "Too many frags\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
 		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
 		       sizeof(*txp));
 		if (txp->size > first->size) {
-			netdev_dbg(vif->dev, "Frags galore\n");
+			netdev_err(vif->dev, "Frag is bigger than frame.\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
@@ -921,8 +932,9 @@ static int netbk_count_requests(struct xenvif *vif,
 		frags++;
 
 		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
-			netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
+			netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
 				   txp->offset, txp->size);
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 	} while ((txp++)->flags & XEN_NETTXF_more_data);
@@ -966,7 +978,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 		pending_idx = netbk->pending_ring[index];
 		page = xen_netbk_alloc_page(netbk, skb, pending_idx);
 		if (!page)
-			return NULL;
+			goto err;
 
 		gop->source.u.ref = txp->gref;
 		gop->source.domid = vif->domid;
@@ -988,6 +1000,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 	}
 
 	return gop;
+err:
+	/* Unwind, freeing all pages and sending error responses. */
+	while (i-- > start) {
+		xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
+				      XEN_NETIF_RSP_ERROR);
+	}
+	/* The head too, if necessary. */
+	if (start)
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+	return NULL;
 }
 
 static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
@@ -996,30 +1019,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 {
 	struct gnttab_copy *gop = *gopp;
 	u16 pending_idx = *((u16 *)skb->data);
-	struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
-	struct xenvif *vif = pending_tx_info[pending_idx].vif;
-	struct xen_netif_tx_request *txp;
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	int nr_frags = shinfo->nr_frags;
 	int i, err, start;
 
 	/* Check status of header. */
 	err = gop->status;
-	if (unlikely(err)) {
-		pending_ring_idx_t index;
-		index = pending_index(netbk->pending_prod++);
-		txp = &pending_tx_info[pending_idx].req;
-		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-		netbk->pending_ring[index] = pending_idx;
-		xenvif_put(vif);
-	}
+	if (unlikely(err))
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
 
 	/* Skip first skb fragment if it is on same page as header fragment. */
 	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
 	for (i = start; i < nr_frags; i++) {
 		int j, newerr;
-		pending_ring_idx_t index;
 
 		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
@@ -1028,16 +1041,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 		if (likely(!newerr)) {
 			/* Had a previous error? Invalidate this fragment. */
 			if (unlikely(err))
-				xen_netbk_idx_release(netbk, pending_idx);
+				xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 			continue;
 		}
 
 		/* Error on this fragment: respond to client with an error. */
-		txp = &netbk->pending_tx_info[pending_idx].req;
-		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-		index = pending_index(netbk->pending_prod++);
-		netbk->pending_ring[index] = pending_idx;
-		xenvif_put(vif);
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
 
 		/* Not the first error? Preceding frags already invalidated. */
 		if (err)
@@ -1045,10 +1054,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 
 		/* First error: invalidate header and preceding fragments. */
 		pending_idx = *((u16 *)skb->data);
-		xen_netbk_idx_release(netbk, pending_idx);
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 		for (j = start; j < i; j++) {
 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-			xen_netbk_idx_release(netbk, pending_idx);
+			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 		}
 
 		/* Remember the error: invalidate all subsequent fragments. */
@@ -1082,7 +1091,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
 
 		/* Take an extra reference to offset xen_netbk_idx_release */
 		get_page(netbk->mmap_pages[pending_idx]);
-		xen_netbk_idx_release(netbk, pending_idx);
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 	}
 }
@@ -1095,7 +1104,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 
 	do {
 		if (unlikely(work_to_do-- <= 0)) {
-			netdev_dbg(vif->dev, "Missing extra info\n");
+			netdev_err(vif->dev, "Missing extra info\n");
+			netbk_fatal_tx_err(vif);
 			return -EBADR;
 		}
@@ -1104,8 +1114,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 		if (unlikely(!extra.type ||
 			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 			vif->tx.req_cons = ++cons;
-			netdev_dbg(vif->dev,
+			netdev_err(vif->dev,
 				   "Invalid extra type: %d\n", extra.type);
+			netbk_fatal_tx_err(vif);
 			return -EINVAL;
 		}
@@ -1121,13 +1132,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
 			     struct xen_netif_extra_info *gso)
 {
 	if (!gso->u.gso.size) {
-		netdev_dbg(vif->dev, "GSO size must not be zero.\n");
+		netdev_err(vif->dev, "GSO size must not be zero.\n");
+		netbk_fatal_tx_err(vif);
 		return -EINVAL;
 	}
 
 	/* Currently only TCPv4 S.O. is supported. */
 	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
-		netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+		netbk_fatal_tx_err(vif);
 		return -EINVAL;
 	}
@@ -1264,9 +1277,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
 		/* Get a netif from the list with work to do. */
 		vif = poll_net_schedule_list(netbk);
+		/* This can sometimes happen because the test of
+		 * list_empty(net_schedule_list) at the top of the
+		 * loop is unlocked. Just go back and have another
+		 * look.
+		 */
 		if (!vif)
 			continue;
 
+		if (vif->tx.sring->req_prod - vif->tx.req_cons >
+		    XEN_NETIF_TX_RING_SIZE) {
+			netdev_err(vif->dev,
+				   "Impossible number of requests. "
+				   "req_prod %d, req_cons %d, size %ld\n",
+				   vif->tx.sring->req_prod, vif->tx.req_cons,
+				   XEN_NETIF_TX_RING_SIZE);
+			netbk_fatal_tx_err(vif);
+			continue;
+		}
+
 		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
 		if (!work_to_do) {
 			xenvif_put(vif);
@@ -1294,17 +1323,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 			work_to_do = xen_netbk_get_extras(vif, extras,
 							  work_to_do);
 			idx = vif->tx.req_cons;
-			if (unlikely(work_to_do < 0)) {
-				netbk_tx_err(vif, &txreq, idx);
+			if (unlikely(work_to_do < 0))
 				continue;
-			}
 		}
 
 		ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
-		if (unlikely(ret < 0)) {
-			netbk_tx_err(vif, &txreq, idx - ret);
+		if (unlikely(ret < 0))
 			continue;
-		}
 
 		idx += ret;
 
 		if (unlikely(txreq.size < ETH_HLEN)) {
@@ -1316,11 +1342,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
 		/* No crossing a page as the payload mustn't fragment. */
 		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
-			netdev_dbg(vif->dev,
+			netdev_err(vif->dev,
 				   "txreq.offset: %x, size: %u, end: %lu\n",
 				   txreq.offset, txreq.size,
 				   (txreq.offset&~PAGE_MASK) + txreq.size);
-			netbk_tx_err(vif, &txreq, idx);
+			netbk_fatal_tx_err(vif);
 			continue;
 		}
@@ -1348,8 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
 
 			if (netbk_set_skb_gso(vif, skb, gso)) {
+				/* Failure in netbk_set_skb_gso is fatal. */
 				kfree_skb(skb);
-				netbk_tx_err(vif, &txreq, idx);
 				continue;
 			}
 		}
@@ -1448,7 +1474,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
 			txp->size -= data_len;
 		} else {
 			/* Schedule a response immediately. */
-			xen_netbk_idx_release(netbk, pending_idx);
+			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 		}
 
 		if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1500,7 +1526,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)
 	xen_netbk_tx_submit(netbk);
 }
 
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+				  u8 status)
 {
 	struct xenvif *vif;
 	struct pending_tx_info *pending_tx_info;
@@ -1514,7 +1541,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
 
 	vif = pending_tx_info->vif;
 
-	make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
+	make_tx_response(vif, &pending_tx_info->req, status);
 
 	index = pending_index(netbk->pending_prod++);
 	netbk->pending_ring[index] = pending_idx;