xen-netback: validate queue numbers in xenvif_set_hash_mapping()

Checking them before the grant copy means nothing as to the validity of
the incoming request. As we shouldn't make the new data live before
having validated it, introduce a second instance of the mapping array.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Authored by Jan Beulich on 2018-09-25 02:13:01 -06:00; committed by David S. Miller
parent 780e83c259
commit 22f9cde340
3 changed files with 18 additions and 8 deletions
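
Before the per-file diffs, here is a minimal standalone sketch of the pattern the patch introduces: keep two instances of the mapping table, write the incoming data into the inactive copy, validate it there, and only flip the selector once every entry has been checked. This is not the kernel code; struct demo_hash, demo_set_mapping, MAP_SIZE and NUM_QUEUES are invented for illustration, and the grant copy is replaced by a plain memcpy of caller-supplied data.

/*
 * Minimal sketch of the validate-then-publish double-buffer pattern
 * used by the patch. All names here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAP_SIZE   8
#define NUM_QUEUES 4

struct demo_hash {
	bool sel;                        /* which copy is currently live */
	unsigned mapping[2][MAP_SIZE];
};

/* Returns 0 on success, -1 if any entry names an invalid queue. */
static int demo_set_mapping(struct demo_hash *h, const unsigned *src, size_t len)
{
	unsigned *shadow = h->mapping[!h->sel];

	if (len > MAP_SIZE)
		return -1;

	/* Start from the currently live data, then overlay the update. */
	memcpy(shadow, h->mapping[h->sel], sizeof(h->mapping[0]));
	memcpy(shadow, src, len * sizeof(*src));

	/* Validate the shadow copy; the live copy is still untouched. */
	for (size_t i = 0; i < len; i++)
		if (shadow[i] >= NUM_QUEUES)
			return -1;

	h->sel = !h->sel;                /* publish only after validation */
	return 0;
}

int main(void)
{
	struct demo_hash h = { 0 };
	unsigned good[] = { 0, 1, 2, 3 };
	unsigned bad[]  = { 0, 1, 99, 3 };

	printf("good update: %d\n", demo_set_mapping(&h, good, 4)); /* 0 */
	printf("bad update:  %d\n", demo_set_mapping(&h, bad, 4));  /* -1 */
	/* The failed update left the live copy holding the good data. */
	printf("live[2] = %u\n", h.mapping[h.sel][2]);              /* 2 */
	return 0;
}

The point of the second array is the same as in the patch: a request that fails validation leaves the live mapping, and therefore queue selection, completely untouched.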

--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h

@@ -241,8 +241,9 @@ struct xenvif_hash_cache {
 struct xenvif_hash {
 	unsigned int alg;
 	u32 flags;
+	bool mapping_sel;
 	u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
-	u32 mapping[XEN_NETBK_MAX_HASH_MAPPING_SIZE];
+	u32 mapping[2][XEN_NETBK_MAX_HASH_MAPPING_SIZE];
 	unsigned int size;
 	struct xenvif_hash_cache cache;
 };

--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c

@@ -324,7 +324,8 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
 		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
 
 	vif->hash.size = size;
-	memset(vif->hash.mapping, 0, sizeof(u32) * size);
+	memset(vif->hash.mapping[vif->hash.mapping_sel], 0,
+	       sizeof(u32) * size);
 
 	return XEN_NETIF_CTRL_STATUS_SUCCESS;
 }
@@ -332,7 +333,7 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
 u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
 			    u32 off)
 {
-	u32 *mapping = vif->hash.mapping;
+	u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel];
 	struct gnttab_copy copy_op = {
 		.source.u.ref = gref,
 		.source.domid = vif->domid,
@@ -348,9 +349,8 @@ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
 	copy_op.dest.u.gmfn = virt_to_gfn(mapping + off);
 	copy_op.dest.offset = xen_offset_in_page(mapping + off);
 
-	while (len-- != 0)
-		if (mapping[off++] >= vif->num_queues)
-			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+	memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel],
+	       vif->hash.size * sizeof(*mapping));
 
 	if (copy_op.len != 0) {
 		gnttab_batch_copy(&copy_op, 1);
@@ -359,6 +359,12 @@ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
 			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
 	}
 
+	while (len-- != 0)
+		if (mapping[off++] >= vif->num_queues)
+			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+	vif->hash.mapping_sel = !vif->hash.mapping_sel;
+
 	return XEN_NETIF_CTRL_STATUS_SUCCESS;
 }
@@ -410,6 +416,8 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
 	}
 
 	if (vif->hash.size != 0) {
+		const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel];
+
 		seq_puts(m, "\nHash Mapping:\n");
 
 		for (i = 0; i < vif->hash.size; ) {
@@ -422,7 +430,7 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
 		seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);
 
 		for (j = 0; j < n; j++, i++)
-			seq_printf(m, "%4u ", vif->hash.mapping[i]);
+			seq_printf(m, "%4u ", mapping[i]);
 
 		seq_puts(m, "\n");
 	}

--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c

@@ -162,7 +162,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
 	if (size == 0)
 		return skb_get_hash_raw(skb) % dev->real_num_tx_queues;
 
-	return vif->hash.mapping[skb_get_hash_raw(skb) % size];
+	return vif->hash.mapping[vif->hash.mapping_sel]
+				[skb_get_hash_raw(skb) % size];
 }
 
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)