KVM: coalesced_mmio: add bounds checking

The first/last indexes are typically shared with a user app.
The app can change the 'last' index that the kernel uses
to store the next result.  This change sanity checks the index
before using it for writing to a potentially arbitrary address.

This fixes CVE-2019-14821.

Cc: stable@vger.kernel.org
Fixes: 5f94c1741b ("KVM: Add coalesced MMIO support (common part)")
Signed-off-by: Matt Delco <delco@chromium.org>
Signed-off-by: Jim Mattson <jmattson@google.com>
Reported-by: syzbot+983c866c3dd6efa3662a@syzkaller.appspotmail.com
[Use READ_ONCE. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
Matt Delco 2019-09-16 14:16:54 -07:00 committed by Paolo Bonzini
parent a9c20bb020
commit b60fe990c6
1 changed file with 11 additions and 8 deletions

View File

@@ -40,7 +40,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
 	return 1;
 }
 
-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
+static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
 {
 	struct kvm_coalesced_mmio_ring *ring;
 	unsigned avail;
@@ -52,7 +52,7 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
 	 * there is always one unused entry in the buffer
 	 */
 	ring = dev->kvm->coalesced_mmio_ring;
-	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
+	avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
 	if (avail == 0) {
 		/* full */
 		return 0;
@@ -67,25 +67,28 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
 {
 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
+	__u32 insert;
 
 	if (!coalesced_mmio_in_range(dev, addr, len))
 		return -EOPNOTSUPP;
 
 	spin_lock(&dev->kvm->ring_lock);
 
-	if (!coalesced_mmio_has_room(dev)) {
+	insert = READ_ONCE(ring->last);
+	if (!coalesced_mmio_has_room(dev, insert) ||
+	    insert >= KVM_COALESCED_MMIO_MAX) {
 		spin_unlock(&dev->kvm->ring_lock);
 		return -EOPNOTSUPP;
 	}
 
 	/* copy data in first free entry of the ring */
 
-	ring->coalesced_mmio[ring->last].phys_addr = addr;
-	ring->coalesced_mmio[ring->last].len = len;
-	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
-	ring->coalesced_mmio[ring->last].pio = dev->zone.pio;
+	ring->coalesced_mmio[insert].phys_addr = addr;
+	ring->coalesced_mmio[insert].len = len;
+	memcpy(ring->coalesced_mmio[insert].data, val, len);
+	ring->coalesced_mmio[insert].pio = dev->zone.pio;
 	smp_wmb();
-	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
+	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
 	spin_unlock(&dev->kvm->ring_lock);
 	return 0;
 }