perf_counter, x86: rename bitmasks to ->used_mask and ->active_mask

Standardize on explicitly mentioning '_mask' in fields that
are not plain flags but masks. This avoids typos like:

       if (cpuc->used)

(which could easily slip through review unnoticed), while a typo
that looks like this:

       if (cpuc->used_mask)

is more likely to get noticed during review.

[ Impact: cleanup ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1241016956-24648-1-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 43f6201a22
parent ab7ef2e50a
Author:    Robert Richter, 2009-04-29 16:55:56 +02:00
Committer: Ingo Molnar

1 file changed, 14 insertions(+), 14 deletions(-)
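For illustration, a minimal userspace sketch (hypothetical names, not part of the patch) of the review hazard the rename addresses: a bitmask field named just 'used' reads like a boolean flag, so a bare truth test compiles and looks plausible, whereas with the '_mask' suffix the same bare test stands out and a per-bit test_bit()-style access becomes the natural spelling:

	/*
	 * Hypothetical userspace sketch -- not part of the patch -- showing why
	 * the '_mask' suffix matters.  A field named just 'used' invites a bare
	 * boolean test, which compiles but only asks "is any bit set?"; the
	 * '_mask' name pushes readers (and reviewers) toward per-bit queries.
	 */
	#include <stdio.h>

	#define IDX_MAX 4

	struct cpu_hw_counters_sketch {
		unsigned long used_mask;	/* one bit per hardware counter */
	};

	/* crude stand-in for the kernel's test_bit() */
	static int sketch_test_bit(int nr, const unsigned long *addr)
	{
		return (int)((*addr >> nr) & 1UL);
	}

	int main(void)
	{
		/* counters 0 and 2 taken */
		struct cpu_hw_counters_sketch cpuc = { .used_mask = 0x5UL };
		int idx;

		/* the suspicious whole-mask test the commit message warns about */
		if (cpuc.used_mask)
			printf("some counter is in use (whole-mask test)\n");

		/* the intended per-bit usage */
		for (idx = 0; idx < IDX_MAX; idx++)
			printf("counter %d: %s\n", idx,
			       sketch_test_bit(idx, &cpuc.used_mask) ? "used" : "free");

		return 0;
	}

In the patch itself the fields stay unsigned long arrays sized with BITS_TO_LONGS() and are only ever accessed through the bitops helpers; only the field names change.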


@@ -28,8 +28,8 @@ static u64 perf_counter_mask __read_mostly;
 
 struct cpu_hw_counters {
 	struct perf_counter	*counters[X86_PMC_IDX_MAX];
-	unsigned long		used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
-	unsigned long		active[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+	unsigned long		used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 	unsigned long		interrupts;
 	u64			throttle_ctrl;
 	int			enabled;
@@ -332,7 +332,7 @@ static u64 amd_pmu_save_disable_all(void)
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		u64 val;
 
-		if (!test_bit(idx, cpuc->active))
+		if (!test_bit(idx, cpuc->active_mask))
 			continue;
 		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
 		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
@@ -373,7 +373,7 @@ static void amd_pmu_restore_all(u64 ctrl)
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		u64 val;
 
-		if (!test_bit(idx, cpuc->active))
+		if (!test_bit(idx, cpuc->active_mask))
 			continue;
 		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
 		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
@@ -576,7 +576,7 @@ static int x86_pmu_enable(struct perf_counter *counter)
 		 * Try to get the fixed counter, if that is already taken
 		 * then try to get a generic counter:
 		 */
-		if (test_and_set_bit(idx, cpuc->used))
+		if (test_and_set_bit(idx, cpuc->used_mask))
 			goto try_generic;
 
 		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
@@ -590,14 +590,14 @@ static int x86_pmu_enable(struct perf_counter *counter)
 	} else {
 		idx = hwc->idx;
 		/* Try to get the previous generic counter again */
-		if (test_and_set_bit(idx, cpuc->used)) {
+		if (test_and_set_bit(idx, cpuc->used_mask)) {
 try_generic:
-			idx = find_first_zero_bit(cpuc->used,
+			idx = find_first_zero_bit(cpuc->used_mask,
 						  x86_pmu.num_counters);
 			if (idx == x86_pmu.num_counters)
 				return -EAGAIN;
 
-			set_bit(idx, cpuc->used);
+			set_bit(idx, cpuc->used_mask);
 			hwc->idx = idx;
 		}
 		hwc->config_base = x86_pmu.eventsel;
@@ -609,7 +609,7 @@ static int x86_pmu_enable(struct perf_counter *counter)
 	x86_pmu.disable(hwc, idx);
 
 	cpuc->counters[idx] = counter;
-	set_bit(idx, cpuc->active);
+	set_bit(idx, cpuc->active_mask);
 
 	x86_perf_counter_set_period(counter, hwc, idx);
 	x86_pmu.enable(hwc, idx);
@@ -643,7 +643,7 @@ void perf_counter_print_debug(void)
 		pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
 		pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
 	}
-	pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used);
+	pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask);
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
@@ -677,7 +677,7 @@ static void x86_pmu_disable(struct perf_counter *counter)
 	 * Must be done before we disable, otherwise the nmi handler
 	 * could reenable again:
 	 */
-	clear_bit(idx, cpuc->active);
+	clear_bit(idx, cpuc->active_mask);
 	x86_pmu.disable(hwc, idx);
 
 	/*
@@ -692,7 +692,7 @@ static void x86_pmu_disable(struct perf_counter *counter)
 	 */
 	x86_perf_counter_update(counter, hwc, idx);
 	cpuc->counters[idx] = NULL;
-	clear_bit(idx, cpuc->used);
+	clear_bit(idx, cpuc->used_mask);
 }
 
 /*
@@ -741,7 +741,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 		struct perf_counter *counter = cpuc->counters[bit];
 
 		clear_bit(bit, (unsigned long *) &status);
-		if (!test_bit(bit, cpuc->active))
+		if (!test_bit(bit, cpuc->active_mask))
 			continue;
 
 		intel_pmu_save_and_restart(counter);
@@ -779,7 +779,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
 
 	++cpuc->interrupts;
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-		if (!test_bit(idx, cpuc->active))
+		if (!test_bit(idx, cpuc->active_mask))
 			continue;
 		counter = cpuc->counters[idx];
 		hwc = &counter->hw;