virtio-net: fix use after unmap/free for sg

When mergeable buffer is enabled, we try to set the num_buffers after
the virtqueue elem has been unmapped. This will lead to several issues,
e.g. a use-after-free when the descriptor has an address which belongs
to a non-direct-access region. In this case we use a bounce buffer
that is allocated during address_space_map() and freed during
address_space_unmap().

Fix this by storing the elems temporarily in an array and delaying the
unmap until after we set num_buffers.

This addresses CVE-2021-3748.

Reported-by: Alexander Bulekov <alxndr@bu.edu>
Fixes: fbe78f4f55 ("virtio-net support")
Cc: qemu-stable@nongnu.org
Signed-off-by: Jason Wang <jasowang@redhat.com>
This commit is contained in:
Jason Wang 2021-09-02 13:44:12 +08:00
parent 080832e4f4
commit bedd7e93d0
1 changed file with 32 additions and 7 deletions

View File

@ -1746,10 +1746,13 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
VirtIONet *n = qemu_get_nic_opaque(nc); VirtIONet *n = qemu_get_nic_opaque(nc);
VirtIONetQueue *q = virtio_net_get_subqueue(nc); VirtIONetQueue *q = virtio_net_get_subqueue(nc);
VirtIODevice *vdev = VIRTIO_DEVICE(n); VirtIODevice *vdev = VIRTIO_DEVICE(n);
VirtQueueElement *elems[VIRTQUEUE_MAX_SIZE];
size_t lens[VIRTQUEUE_MAX_SIZE];
struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE]; struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
struct virtio_net_hdr_mrg_rxbuf mhdr; struct virtio_net_hdr_mrg_rxbuf mhdr;
unsigned mhdr_cnt = 0; unsigned mhdr_cnt = 0;
size_t offset, i, guest_offset; size_t offset, i, guest_offset, j;
ssize_t err;
if (!virtio_net_can_receive(nc)) { if (!virtio_net_can_receive(nc)) {
return -1; return -1;
@ -1780,6 +1783,12 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
total = 0; total = 0;
if (i == VIRTQUEUE_MAX_SIZE) {
virtio_error(vdev, "virtio-net unexpected long buffer chain");
err = size;
goto err;
}
elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement)); elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
if (!elem) { if (!elem) {
if (i) { if (i) {
@ -1791,7 +1800,8 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
n->guest_hdr_len, n->host_hdr_len, n->guest_hdr_len, n->host_hdr_len,
vdev->guest_features); vdev->guest_features);
} }
return -1; err = -1;
goto err;
} }
if (elem->in_num < 1) { if (elem->in_num < 1) {
@ -1799,7 +1809,8 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
"virtio-net receive queue contains no in buffers"); "virtio-net receive queue contains no in buffers");
virtqueue_detach_element(q->rx_vq, elem, 0); virtqueue_detach_element(q->rx_vq, elem, 0);
g_free(elem); g_free(elem);
return -1; err = -1;
goto err;
} }
sg = elem->in_sg; sg = elem->in_sg;
@ -1836,12 +1847,13 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
if (!n->mergeable_rx_bufs && offset < size) { if (!n->mergeable_rx_bufs && offset < size) {
virtqueue_unpop(q->rx_vq, elem, total); virtqueue_unpop(q->rx_vq, elem, total);
g_free(elem); g_free(elem);
return size; err = size;
goto err;
} }
/* signal other side */ elems[i] = elem;
virtqueue_fill(q->rx_vq, elem, total, i++); lens[i] = total;
g_free(elem); i++;
} }
if (mhdr_cnt) { if (mhdr_cnt) {
@ -1851,10 +1863,23 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
&mhdr.num_buffers, sizeof mhdr.num_buffers); &mhdr.num_buffers, sizeof mhdr.num_buffers);
} }
for (j = 0; j < i; j++) {
/* signal other side */
virtqueue_fill(q->rx_vq, elems[j], lens[j], j);
g_free(elems[j]);
}
virtqueue_flush(q->rx_vq, i); virtqueue_flush(q->rx_vq, i);
virtio_notify(vdev, q->rx_vq); virtio_notify(vdev, q->rx_vq);
return size; return size;
err:
for (j = 0; j < i; j++) {
g_free(elems[j]);
}
return err;
} }
static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf, static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,