perf/x86/intel/rapl: Make PMU lock raw
This lock is taken in hard interrupt context even on Preempt-RT. Make it raw so RT does not have to patch it.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andi Kleen <andi.kleen@intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Harish Chegondi <harish.chegondi@intel.com>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/20160222221012.669411833@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 7162b8fea6
commit a208749c64
@@ -120,7 +120,7 @@ static struct perf_pmu_events_attr event_attr_##v = { \
 };
 
 struct rapl_pmu {
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	int n_active;
 	struct list_head active_list;
 	struct pmu *pmu;
@@ -210,12 +210,12 @@ static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
 	if (!pmu->n_active)
 		return HRTIMER_NORESTART;
 
-	spin_lock_irqsave(&pmu->lock, flags);
+	raw_spin_lock_irqsave(&pmu->lock, flags);
 
 	list_for_each_entry(event, &pmu->active_list, active_entry)
 		rapl_event_update(event);
 
-	spin_unlock_irqrestore(&pmu->lock, flags);
+	raw_spin_unlock_irqrestore(&pmu->lock, flags);
 
 	hrtimer_forward_now(hrtimer, pmu->timer_interval);
 
@@ -252,9 +252,9 @@ static void rapl_pmu_event_start(struct perf_event *event, int mode)
 	struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
 	unsigned long flags;
 
-	spin_lock_irqsave(&pmu->lock, flags);
+	raw_spin_lock_irqsave(&pmu->lock, flags);
 	__rapl_pmu_event_start(pmu, event);
-	spin_unlock_irqrestore(&pmu->lock, flags);
+	raw_spin_unlock_irqrestore(&pmu->lock, flags);
 }
 
 static void rapl_pmu_event_stop(struct perf_event *event, int mode)
@@ -263,7 +263,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pmu->lock, flags);
+	raw_spin_lock_irqsave(&pmu->lock, flags);
 
 	/* mark event as deactivated and stopped */
 	if (!(hwc->state & PERF_HES_STOPPED)) {
@@ -288,7 +288,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
 		hwc->state |= PERF_HES_UPTODATE;
 	}
 
-	spin_unlock_irqrestore(&pmu->lock, flags);
+	raw_spin_unlock_irqrestore(&pmu->lock, flags);
 }
 
 static int rapl_pmu_event_add(struct perf_event *event, int mode)
@@ -297,14 +297,14 @@ static int rapl_pmu_event_add(struct perf_event *event, int mode)
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pmu->lock, flags);
+	raw_spin_lock_irqsave(&pmu->lock, flags);
 
 	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
 
 	if (mode & PERF_EF_START)
 		__rapl_pmu_event_start(pmu, event);
 
-	spin_unlock_irqrestore(&pmu->lock, flags);
+	raw_spin_unlock_irqrestore(&pmu->lock, flags);
 
 	return 0;
 }
@@ -567,7 +567,7 @@ static int rapl_cpu_prepare(int cpu)
 	pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
 	if (!pmu)
 		return -1;
-	spin_lock_init(&pmu->lock);
+	raw_spin_lock_init(&pmu->lock);
 
 	INIT_LIST_HEAD(&pmu->active_list);
 
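For context, here is a minimal, self-contained sketch of the pattern the hunks above apply. The names example_pmu, example_pmu_init and example_update are illustrative only and do not appear in the patched file; see the diff for the real rapl code.

/*
 * On PREEMPT_RT a spinlock_t becomes a sleeping lock, which must not be
 * acquired in hard interrupt context (e.g. from an hrtimer handler). A
 * raw_spinlock_t keeps spinning with interrupts disabled in every
 * configuration, which is why the patch converts the PMU lock.
 */
#include <linux/spinlock.h>

struct example_pmu {			/* illustrative, not the real struct rapl_pmu */
	raw_spinlock_t lock;		/* was: spinlock_t lock; */
	int n_active;
};

static void example_pmu_init(struct example_pmu *pmu)
{
	raw_spin_lock_init(&pmu->lock);	/* was: spin_lock_init() */
	pmu->n_active = 0;
}

/* Safe to call from hard interrupt context, even on PREEMPT_RT. */
static void example_update(struct example_pmu *pmu)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu->lock, flags);
	pmu->n_active++;
	raw_spin_unlock_irqrestore(&pmu->lock, flags);
}

The trade-off is that raw spinlock critical sections stay non-preemptible even on RT, so they must remain short; the sections touched by this patch only update counters and walk the small per-PMU active list.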