xen: bug fixes for 4.2-rc5

- Don't lose interrupts when offlining CPUs.
- Fix gntdev oops during unmap.
- Drop the balloon lock occasionally to allow domain create/destroy.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJVwNFHAAoJEFxbo/MsZsTRZ0oH/1PBpbABz2iK7SZc75paORkm
BwVlSTn1Z6ftmRjC8r5BS5KHOrQRsDf8cZnDptCIWpm+uWTXAfeZP5HaH1bA6qMy
d+T7d9QMlC5nsCxebfduXYl4AHl7fupblBi3y8CmrJdVW0aASyL7roAtkSS23rXl
ND3288juhI6E0Y5kch3b7yip5vjJtKFR7Mw3RkAZO5ihdx30NCYMdqBRBFcKT0Tp
s901VXo+87ZSutY12BcToqCwr9E0y2oBdkDIQ5Q7rUlqKM1ifLVOYbx4sxXvNQtk
3WYcAMuBrwfcBP00xYHt18ozEaJxQ2bTOokhZ//2p6LLnhzo0ZiZcxNRX3jda8A=
=niy/
-----END PGP SIGNATURE-----

Merge tag 'for-linus-4.2-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen bug fixes from David Vrabel:

 - don't lose interrupts when offlining CPUs

 - fix gntdev oops during unmap

 - drop the balloon lock occasionally to allow domain create/destroy

* tag 'for-linus-4.2-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/events/fifo: Handle linked events when closing a port
  xen: release lock occasionally during ballooning
  xen/gntdevt: Fix race condition in gntdev_release()
commit 1ddc6dd855
drivers/xen/balloon.c
@@ -472,7 +472,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 }
 
 /*
- * We avoid multiple worker processes conflicting via the balloon mutex.
+ * As this is a work item it is guaranteed to run as a single instance only.
  * We may of course race updates of the target counts (which are protected
  * by the balloon lock), or with changes to the Xen hard limit, but we will
  * recover from these in time.
@@ -482,9 +482,10 @@ static void balloon_process(struct work_struct *work)
         enum bp_state state = BP_DONE;
         long credit;
 
-        mutex_lock(&balloon_mutex);
 
         do {
+                mutex_lock(&balloon_mutex);
+
                 credit = current_credit();
 
                 if (credit > 0) {
@@ -499,17 +500,15 @@ static void balloon_process(struct work_struct *work)
 
                 state = update_schedule(state);
 
-#ifndef CONFIG_PREEMPT
-                if (need_resched())
-                        schedule();
-#endif
+                mutex_unlock(&balloon_mutex);
+
+                cond_resched();
+
         } while (credit && state == BP_DONE);
 
         /* Schedule more work if there is some still to be done. */
         if (state == BP_EAGAIN)
                 schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
-
-        mutex_unlock(&balloon_mutex);
 }
 
 /* Resets the Xen limit, sets new target, and kicks off processing. */
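A note on the balloon_process() change above: instead of holding balloon_mutex across the whole balloon run, the worker now takes and drops the mutex once per loop iteration and calls cond_resched(), so other paths that need the mutex (domain create/destroy, per the pull summary) can get in between chunks. Below is a rough userspace analogue of that locking pattern built on pthreads; every name in it is made up for illustration, and it is a sketch of the idea, not the kernel code.

/* Hypothetical sketch: re-acquire the lock per chunk of work and yield in
 * between, so another thread that needs the same lock ("admin", standing in
 * for domain create/destroy) is not blocked for the whole run. */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static long credit = 1000000;          /* work left to do, protected by lock */

static void *worker(void *arg)
{
        long chunk;

        (void)arg;
        do {
                pthread_mutex_lock(&lock);   /* like mutex_lock() inside the loop */
                chunk = credit > 1000 ? 1000 : credit;
                credit -= chunk;             /* process one chunk under the lock */
                pthread_mutex_unlock(&lock); /* drop the lock before yielding */

                sched_yield();               /* stand-in for cond_resched() */
        } while (chunk > 0);

        return NULL;
}

static void *admin(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);           /* can now interleave with the worker */
        printf("admin saw %ld credit left\n", credit);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t w, a;

        pthread_create(&w, NULL, worker, NULL);
        pthread_create(&a, NULL, admin, NULL);
        pthread_join(w, NULL);
        pthread_join(a, NULL);
        return 0;
}

The point of the pattern is simply that the lock is never held across the yield, which is what lets the other waiter make progress mid-run.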
drivers/xen/events/events_base.c
@@ -452,10 +452,12 @@ static void xen_free_irq(unsigned irq)
         irq_free_desc(irq);
 }
 
-static void xen_evtchn_close(unsigned int port)
+static void xen_evtchn_close(unsigned int port, unsigned int cpu)
 {
         struct evtchn_close close;
 
+        xen_evtchn_op_close(port, cpu);
+
         close.port = port;
         if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                 BUG();
@@ -544,7 +546,7 @@ static unsigned int __startup_pirq(unsigned int irq)
 
 err:
         pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
-        xen_evtchn_close(evtchn);
+        xen_evtchn_close(evtchn, NR_CPUS);
         return 0;
 }
 
@@ -565,7 +567,7 @@ static void shutdown_pirq(struct irq_data *data)
                 return;
 
         mask_evtchn(evtchn);
-        xen_evtchn_close(evtchn);
+        xen_evtchn_close(evtchn, cpu_from_evtchn(evtchn));
         xen_irq_info_cleanup(info);
 }
 
@@ -609,7 +611,7 @@ static void __unbind_from_irq(unsigned int irq)
         if (VALID_EVTCHN(evtchn)) {
                 unsigned int cpu = cpu_from_irq(irq);
 
-                xen_evtchn_close(evtchn);
+                xen_evtchn_close(evtchn, cpu);
 
                 switch (type_from_irq(irq)) {
                 case IRQT_VIRQ:
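The hunks above thread the CPU an event channel was bound to down into xen_evtchn_close(), which now lets the backend quiesce delivery before the port is actually closed; the __startup_pirq() error path passes NR_CPUS because no CPU binding exists yet. A minimal sketch of that ordering follows; the helper names and the NO_CPU constant are hypothetical, with plain functions standing in for the backend hook and the hypercall.

/* Hypothetical sketch: quiesce on the owning CPU first, close second. */
#include <stdio.h>

#define NO_CPU (~0u)                     /* stand-in for the NR_CPUS case */

static void backend_quiesce(unsigned int port, unsigned int cpu)
{
        if (cpu == NO_CPU)               /* nothing to drain, e.g. error path */
                return;
        printf("quiesce port %u on cpu %u\n", port, cpu);
}

static void hypervisor_close(unsigned int port)
{
        printf("close port %u\n", port); /* models the EVTCHNOP_close hypercall */
}

static void evtchn_close_demo(unsigned int port, unsigned int cpu)
{
        backend_quiesce(port, cpu);      /* must run while the port is still valid */
        hypervisor_close(port);
}

int main(void)
{
        evtchn_close_demo(3, 1);         /* normal unbind: owning CPU known */
        evtchn_close_demo(4, NO_CPU);    /* startup error path: never bound */
        return 0;
}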
drivers/xen/events/events_fifo.c
@@ -255,6 +255,12 @@ static void evtchn_fifo_unmask(unsigned port)
         }
 }
 
+static bool evtchn_fifo_is_linked(unsigned port)
+{
+        event_word_t *word = event_word_from_port(port);
+        return sync_test_bit(EVTCHN_FIFO_BIT(LINKED, word), BM(word));
+}
+
 static uint32_t clear_linked(volatile event_word_t *word)
 {
         event_word_t new, old, w;
@@ -281,7 +287,8 @@ static void handle_irq_for_port(unsigned port)
 
 static void consume_one_event(unsigned cpu,
                               struct evtchn_fifo_control_block *control_block,
-                              unsigned priority, unsigned long *ready)
+                              unsigned priority, unsigned long *ready,
+                              bool drop)
 {
         struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
         uint32_t head;
@@ -313,13 +320,15 @@ static void consume_one_event(unsigned cpu,
         if (head == 0)
                 clear_bit(priority, ready);
 
-        if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port))
-                handle_irq_for_port(port);
+        if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
+                if (likely(!drop))
+                        handle_irq_for_port(port);
+        }
 
         q->head[priority] = head;
 }
 
-static void evtchn_fifo_handle_events(unsigned cpu)
+static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
 {
         struct evtchn_fifo_control_block *control_block;
         unsigned long ready;
@@ -331,11 +340,16 @@ static void evtchn_fifo_handle_events(unsigned cpu)
 
         while (ready) {
                 q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
-                consume_one_event(cpu, control_block, q, &ready);
+                consume_one_event(cpu, control_block, q, &ready, drop);
                 ready |= xchg(&control_block->ready, 0);
         }
 }
 
+static void evtchn_fifo_handle_events(unsigned cpu)
+{
+        __evtchn_fifo_handle_events(cpu, false);
+}
+
 static void evtchn_fifo_resume(void)
 {
         unsigned cpu;
@@ -371,6 +385,26 @@ static void evtchn_fifo_resume(void)
         event_array_pages = 0;
 }
 
+static void evtchn_fifo_close(unsigned port, unsigned int cpu)
+{
+        if (cpu == NR_CPUS)
+                return;
+
+        get_online_cpus();
+        if (cpu_online(cpu)) {
+                if (WARN_ON(irqs_disabled()))
+                        goto out;
+
+                while (evtchn_fifo_is_linked(port))
+                        cpu_relax();
+        } else {
+                __evtchn_fifo_handle_events(cpu, true);
+        }
+
+out:
+        put_online_cpus();
+}
+
 static const struct evtchn_ops evtchn_ops_fifo = {
         .max_channels      = evtchn_fifo_max_channels,
         .nr_channels       = evtchn_fifo_nr_channels,
@@ -384,6 +418,7 @@ static const struct evtchn_ops evtchn_ops_fifo = {
         .unmask            = evtchn_fifo_unmask,
         .handle_events     = evtchn_fifo_handle_events,
         .resume            = evtchn_fifo_resume,
+        .close             = evtchn_fifo_close,
 };
 
 static int evtchn_fifo_alloc_control_block(unsigned cpu)
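The new evtchn_fifo_close() above handles two cases: if the CPU that owns the event's queue is online, it spins until the LINKED bit clears (that CPU will consume the event normally); if the CPU is offline, it drains the queue locally with drop set, discarding the events. Below is a small userspace model of that decision using C11 atomics; all names are hypothetical and the single atomic word merely models one event word, not the real FIFO ABI.

/* Hypothetical model of the two close cases: spin while linked vs. drain. */
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define LINKED_BIT (1u << 0)

static _Atomic unsigned int event_word = LINKED_BIT;  /* one pending, linked event */
static bool owner_cpu_online = false;                 /* models cpu_online(cpu) */

static void drain_queue_dropping_events(void)
{
        /* models __evtchn_fifo_handle_events(cpu, true): unlink, do not deliver */
        atomic_fetch_and(&event_word, ~LINKED_BIT);
}

static void fifo_close_demo(void)
{
        if (owner_cpu_online) {
                while (atomic_load(&event_word) & LINKED_BIT)  /* like evtchn_fifo_is_linked() */
                        sched_yield();                         /* stand-in for cpu_relax() */
        } else {
                drain_queue_dropping_events();
        }
}

int main(void)
{
        fifo_close_demo();
        printf("linked after close: %u\n", atomic_load(&event_word) & LINKED_BIT);
        return 0;
}

Either way, the port is no longer linked on an offlined CPU's queue by the time it is closed, which is the "don't lose interrupts when offlining CPUs" item from the pull summary.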
drivers/xen/events/events_internal.h
@@ -68,6 +68,7 @@ struct evtchn_ops {
         bool (*test_and_set_mask)(unsigned port);
         void (*mask)(unsigned port);
         void (*unmask)(unsigned port);
+        void (*close)(unsigned port, unsigned cpu);
 
         void (*handle_events)(unsigned cpu);
         void (*resume)(void);
@@ -145,6 +146,12 @@ static inline void xen_evtchn_resume(void)
                 evtchn_ops->resume();
 }
 
+static inline void xen_evtchn_op_close(unsigned port, unsigned cpu)
+{
+        if (evtchn_ops->close)
+                return evtchn_ops->close(port, cpu);
+}
+
 void xen_evtchn_2l_init(void);
 int xen_evtchn_fifo_init(void);
 
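The header change above adds an optional .close hook to struct evtchn_ops plus a xen_evtchn_op_close() wrapper that only calls the hook when the backend provides one; a backend that leaves it NULL (only the FIFO backend sets it in this series) is simply a no-op. A standalone sketch of that optional-callback pattern follows; the demo_* names are invented and do not match the kernel structures.

/* Hypothetical sketch of the optional-callback pattern. */
#include <stdio.h>

struct demo_evtchn_ops {
        void (*close)(unsigned int port, unsigned int cpu);  /* optional */
};

static void fifo_close(unsigned int port, unsigned int cpu)
{
        printf("fifo backend: close port %u (cpu %u)\n", port, cpu);
}

static const struct demo_evtchn_ops fifo_ops = { .close = fifo_close };
static const struct demo_evtchn_ops twolevel_ops = { .close = NULL };

static void op_close(const struct demo_evtchn_ops *ops, unsigned int port,
                     unsigned int cpu)
{
        if (ops->close)                  /* mirrors the check in xen_evtchn_op_close() */
                ops->close(port, cpu);
}

int main(void)
{
        op_close(&fifo_ops, 3, 1);
        op_close(&twolevel_ops, 3, 1);   /* quietly a no-op */
        return 0;
}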
drivers/xen/gntdev.c
@@ -568,12 +568,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
 
         pr_debug("priv %p\n", priv);
 
+        mutex_lock(&priv->lock);
         while (!list_empty(&priv->maps)) {
                 map = list_entry(priv->maps.next, struct grant_map, next);
                 list_del(&map->next);
                 gntdev_put_map(NULL /* already removed */, map);
         }
         WARN_ON(!list_empty(&priv->freeable_maps));
+        mutex_unlock(&priv->lock);
 
         if (use_ptemod)
                 mmu_notifier_unregister(&priv->mn, priv->mm);
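The gntdev hunk above makes gntdev_release() empty the maps list under priv->lock, the same mutex the other paths take, which closes the race behind the "gntdev oops during unmap" item in the pull summary. A simplified userspace sketch of tearing a list down only while holding that shared lock; the demo_* names are hypothetical and this is not the driver code.

/* Hypothetical sketch: empty the list only while holding the shared lock. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_map {
        struct demo_map *next;
        int id;
};

struct demo_priv {
        pthread_mutex_t lock;            /* plays the role of priv->lock */
        struct demo_map *maps;
};

static void release_all(struct demo_priv *priv)
{
        pthread_mutex_lock(&priv->lock);
        while (priv->maps) {
                struct demo_map *map = priv->maps;

                priv->maps = map->next;  /* like list_del(&map->next) */
                free(map);               /* like gntdev_put_map() dropping the last ref */
        }
        pthread_mutex_unlock(&priv->lock);
}

int main(void)
{
        static struct demo_priv priv = { PTHREAD_MUTEX_INITIALIZER, NULL };

        for (int i = 0; i < 3; i++) {
                struct demo_map *m = malloc(sizeof(*m));

                if (!m)
                        break;
                m->id = i;
                m->next = priv.maps;
                priv.maps = m;
        }
        release_all(&priv);
        printf("all maps released\n");
        return 0;
}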