mirror of https://gitee.com/openkylin/linux.git
oprofile, x86: Convert memory allocation to static array
On -rt, allocators don't work from atomic context any more, and the
maximum size of the array is known at compile time.

Call trace on a -rt kernel:

oprofile: using NMI interrupt.
BUG: sleeping function called from invalid context at kernel/rtmutex.c:645
in_atomic(): 1, irqs_disabled(): 1, pid: 0, name: kworker/0:1
Pid: 0, comm: kworker/0:1 Tainted: G C 3.0.0-rt3-patser+ #39
Call Trace:
 <IRQ>  [<ffffffff8103fc0a>] __might_sleep+0xca/0xf0
 [<ffffffff8160d424>] rt_spin_lock+0x24/0x40
 [<ffffffff811476c7>] __kmalloc+0xc7/0x370
 [<ffffffffa0275c85>] ? ppro_setup_ctrs+0x215/0x260 [oprofile]
 [<ffffffffa0273de0>] ? oprofile_cpu_notifier+0x60/0x60 [oprofile]
 [<ffffffffa0275c85>] ppro_setup_ctrs+0x215/0x260 [oprofile]
 [<ffffffffa0273de0>] ? oprofile_cpu_notifier+0x60/0x60 [oprofile]
 [<ffffffffa0273de0>] ? oprofile_cpu_notifier+0x60/0x60 [oprofile]
 [<ffffffffa0273ea4>] nmi_cpu_setup+0xc4/0x110 [oprofile]
 [<ffffffff81094455>] generic_smp_call_function_interrupt+0x95/0x190
 [<ffffffff8101df77>] smp_call_function_interrupt+0x27/0x40
 [<ffffffff81615093>] call_function_interrupt+0x13/0x20
 <EOI>  [<ffffffff8131d0c4>] ? plist_check_head+0x54/0xc0
 [<ffffffff81371fe8>] ? intel_idle+0xc8/0x120
 [<ffffffff81371fc7>] ? intel_idle+0xa7/0x120
 [<ffffffff814a57b0>] cpuidle_idle_call+0xb0/0x230
 [<ffffffff810011db>] cpu_idle+0x8b/0xe0
 [<ffffffff815fc82f>] start_secondary+0x1d3/0x1d8

Signed-off-by: Maarten Lankhorst <m.b.lankhorst@gmail.com>
Signed-off-by: Robert Richter <robert.richter@amd.com>
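The patch removes the kzalloc(..., GFP_ATOMIC) call reached from the NMI/IPI setup path in the trace and replaces the reset_value pointer with an array sized by OP_MAX_COUNTER. The stand-alone C sketch below is an editor's illustration of that general pattern (a bounded allocation turned into a static array so no allocator runs in a context that must not sleep); MAX_COUNTERS, setup_counters(), and the demo values are hypothetical names, not kernel identifiers.

/*
 * Editor's sketch, not kernel code: models the conversion from a
 * dynamically allocated buffer (which would require an allocator call
 * in a context that must not sleep) to a static array whose maximum
 * size is known at compile time.  All names here are hypothetical.
 */
#include <stdio.h>
#include <inttypes.h>
#include <string.h>

#define MAX_COUNTERS 8          /* compile-time upper bound, like OP_MAX_COUNTER */

/* Storage always exists: no allocation, no failure path. */
static uint64_t reset_value[MAX_COUNTERS];
static int num_counters = 4;    /* runtime count, always <= MAX_COUNTERS */

/* Imagine this runs where sleeping is forbidden (NMI/IPI in the kernel case). */
static void setup_counters(void)
{
	/* Before the conversion an allocation would happen here and, on
	 * -rt, could sleep inside the allocator.  With the static array
	 * we only (re)initialize existing storage. */
	memset(reset_value, 0, sizeof(reset_value));

	for (int i = 0; i < num_counters; ++i)
		reset_value[i] = 100000 + i;   /* arbitrary demo reset periods */
}

int main(void)
{
	setup_counters();
	for (int i = 0; i < num_counters; ++i)
		printf("counter %d: reset_value = %" PRIu64 "\n", i, reset_value[i]);
	return 0;
}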
parent 4d4abdcb1d
commit 1d12d35284
arch/x86/oprofile/op_model_ppro.c

@@ -28,7 +28,7 @@ static int counter_width = 32;
 
 #define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL<<32)|(1ULL<<21))
 
-static u64 *reset_value;
+static u64 reset_value[OP_MAX_COUNTER];
 
 static void ppro_shutdown(struct op_msrs const * const msrs)
 {
@@ -40,10 +40,6 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
 		release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
 		release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
 	}
-	if (reset_value) {
-		kfree(reset_value);
-		reset_value = NULL;
-	}
 }
 
 static int ppro_fill_in_addresses(struct op_msrs * const msrs)
@@ -79,13 +75,6 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
 	u64 val;
 	int i;
 
-	if (!reset_value) {
-		reset_value = kzalloc(sizeof(reset_value[0]) * num_counters,
-					GFP_ATOMIC);
-		if (!reset_value)
-			return;
-	}
-
 	if (cpu_has_arch_perfmon) {
 		union cpuid10_eax eax;
 		eax.full = cpuid_eax(0xa);
@@ -141,13 +130,6 @@ static int ppro_check_ctrs(struct pt_regs * const regs,
 	u64 val;
 	int i;
 
-	/*
-	 * This can happen if perf counters are in use when
-	 * we steal the die notifier NMI.
-	 */
-	if (unlikely(!reset_value))
-		goto out;
-
 	for (i = 0; i < num_counters; ++i) {
 		if (!reset_value[i])
 			continue;
@@ -179,8 +161,6 @@ static void ppro_start(struct op_msrs const * const msrs)
 	u64 val;
 	int i;
 
-	if (!reset_value)
-		return;
 	for (i = 0; i < num_counters; ++i) {
 		if (reset_value[i]) {
 			rdmsrl(msrs->controls[i].addr, val);
@@ -196,8 +176,6 @@ static void ppro_stop(struct op_msrs const * const msrs)
 	u64 val;
 	int i;
 
-	if (!reset_value)
-		return;
 	for (i = 0; i < num_counters; ++i) {
 		if (!reset_value[i])
 			continue;