irqchip/gic-v3-its: Make vlpi_lock a spinlock
The VLPI map is currently protected by a mutex, and that's a bad idea as this lock can be taken in non-preemptible contexts. Convert it to a raw spinlock, and make the memory allocation of the VLPI map atomic accordingly.

Reported-by: Heyi Guo <guoheyi@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20191108165805.3071-12-maz@kernel.org
commit 11635fa26d
parent 046b5054f5
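For context, a minimal sketch of the pattern this patch moves to: a raw spinlock guarding a small per-device map, with the map allocated under the lock using GFP_ATOMIC, since sleeping is not allowed while a raw spinlock is held (GFP_KERNEL may sleep). The names below (demo_map, demo_map_populate) are illustrative only, not the driver's actual structures or code.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_map {
	raw_spinlock_t	lock;		/* was: struct mutex lock; */
	int		nr_entries;
	u16		*entries;
};

static void demo_map_init(struct demo_map *m, int nr_entries)
{
	raw_spin_lock_init(&m->lock);	/* was: mutex_init(&m->lock); */
	m->nr_entries = nr_entries;
	m->entries = NULL;
}

static int demo_map_populate(struct demo_map *m)
{
	int ret = 0;

	raw_spin_lock(&m->lock);	/* was: mutex_lock(&m->lock); */

	if (!m->entries) {
		/* Cannot sleep under a raw spinlock: GFP_ATOMIC, not GFP_KERNEL */
		m->entries = kcalloc(m->nr_entries, sizeof(*m->entries),
				     GFP_ATOMIC);
		if (!m->entries)
			ret = -ENOMEM;
	}

	raw_spin_unlock(&m->lock);	/* was: mutex_unlock(&m->lock); */
	return ret;
}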
|
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -132,7 +132,7 @@ struct event_lpi_map {
 	u16			*col_map;
 	irq_hw_number_t		lpi_base;
 	int			nr_lpis;
-	struct mutex		vlpi_lock;
+	raw_spinlock_t		vlpi_lock;
 	struct its_vm		*vm;
 	struct its_vlpi_map	*vlpi_maps;
 	int			nr_vlpis;
@@ -1436,13 +1436,13 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
 	if (!info->map)
 		return -EINVAL;
 
-	mutex_lock(&its_dev->event_map.vlpi_lock);
+	raw_spin_lock(&its_dev->event_map.vlpi_lock);
 
 	if (!its_dev->event_map.vm) {
 		struct its_vlpi_map *maps;
 
 		maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
-			       GFP_KERNEL);
+			       GFP_ATOMIC);
 		if (!maps) {
 			ret = -ENOMEM;
 			goto out;
@@ -1485,7 +1485,7 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
 	}
 
 out:
-	mutex_unlock(&its_dev->event_map.vlpi_lock);
+	raw_spin_unlock(&its_dev->event_map.vlpi_lock);
 	return ret;
 }
 
@@ -1495,7 +1495,7 @@ static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
 	struct its_vlpi_map *map;
 	int ret = 0;
 
-	mutex_lock(&its_dev->event_map.vlpi_lock);
+	raw_spin_lock(&its_dev->event_map.vlpi_lock);
 
 	map = get_vlpi_map(d);
 
@@ -1508,7 +1508,7 @@ static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
 	*info->map = *map;
 
 out:
-	mutex_unlock(&its_dev->event_map.vlpi_lock);
+	raw_spin_unlock(&its_dev->event_map.vlpi_lock);
 	return ret;
 }
 
@@ -1518,7 +1518,7 @@ static int its_vlpi_unmap(struct irq_data *d)
 	u32 event = its_get_event_id(d);
 	int ret = 0;
 
-	mutex_lock(&its_dev->event_map.vlpi_lock);
+	raw_spin_lock(&its_dev->event_map.vlpi_lock);
 
 	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
 		ret = -EINVAL;
@@ -1548,7 +1548,7 @@ static int its_vlpi_unmap(struct irq_data *d)
 	}
 
 out:
-	mutex_unlock(&its_dev->event_map.vlpi_lock);
+	raw_spin_unlock(&its_dev->event_map.vlpi_lock);
 	return ret;
 }
 
@@ -2608,7 +2608,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
 	dev->event_map.col_map = col_map;
 	dev->event_map.lpi_base = lpi_base;
 	dev->event_map.nr_lpis = nr_lpis;
-	mutex_init(&dev->event_map.vlpi_lock);
+	raw_spin_lock_init(&dev->event_map.vlpi_lock);
 	dev->device_id = dev_id;
 	INIT_LIST_HEAD(&dev->entry);
 