MIPS: perf: Cleanup formatting in arch/mips/kernel/perf_event.c
Get rid of a bunch of useless inline declarations, and join a bunch of
improperly split lines.

Signed-off-by: David Daney <david.daney@cavium.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
To: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/2793/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
commit 4409af37b8
parent 4d36f59d87
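Before the hunks themselves, here is a minimal sketch of the two patterns the patch removes. The helper below is a stand-in, not code from the patch: the name and the extra shift parameter are invented so the fragment compiles on its own.

/* Before: a redundant "inline" on a static helper, and a definition
 * split across several lines.
 */
static inline unsigned int
counters_to_per_cpu_old(unsigned int counters,
                        unsigned int shift)
{
        return counters >> shift;
}

/* After: drop the "inline" hint (the compiler already inlines small
 * static functions where it helps) and join the return type, name and
 * parameter list onto one line.
 */
static unsigned int counters_to_per_cpu_new(unsigned int counters,
                                            unsigned int shift)
{
        return counters >> shift;
}

The usual rationale for this kind of cleanup: "inline" in a .c file rarely changes the code generated for helpers this small, and keeping the return type and function name on a single line makes definitions easier to grep for.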
@@ -118,10 +118,9 @@ struct mips_pmu {
 
 static const struct mips_pmu *mipspmu;
 
-static int
-mipspmu_event_set_period(struct perf_event *event,
-                        struct hw_perf_event *hwc,
-                        int idx)
+static int mipspmu_event_set_period(struct perf_event *event,
+                                    struct hw_perf_event *hwc,
+                                    int idx)
 {
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         s64 left = local64_read(&hwc->period_left);
@@ -162,8 +161,8 @@ mipspmu_event_set_period(struct perf_event *event,
 }
 
 static void mipspmu_event_update(struct perf_event *event,
-                        struct hw_perf_event *hwc,
-                        int idx)
+                                 struct hw_perf_event *hwc,
+                                 int idx)
 {
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         unsigned long flags;
@@ -420,8 +419,7 @@ static struct pmu pmu = {
         .read           = mipspmu_read,
 };
 
-static inline unsigned int
-mipspmu_perf_event_encode(const struct mips_perf_event *pev)
+static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
 {
         /*
          * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
@@ -437,8 +435,7 @@ mipspmu_perf_event_encode(const struct mips_perf_event *pev)
 #endif
 }
 
-static const struct mips_perf_event *
-mipspmu_map_general_event(int idx)
+static const struct mips_perf_event *mipspmu_map_general_event(int idx)
 {
         const struct mips_perf_event *pev;
 
@@ -449,8 +446,7 @@ mipspmu_map_general_event(int idx)
         return pev;
 }
 
-static const struct mips_perf_event *
-mipspmu_map_cache_event(u64 config)
+static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
 {
         unsigned int cache_type, cache_op, cache_result;
         const struct mips_perf_event *pev;
@@ -513,9 +509,9 @@ static int validate_group(struct perf_event *event)
 }
 
 /* This is needed by specific irq handlers in perf_event_*.c */
-static void
-handle_associated_event(struct cpu_hw_events *cpuc,
-        int idx, struct perf_sample_data *data, struct pt_regs *regs)
+static void handle_associated_event(struct cpu_hw_events *cpuc,
+                                    int idx, struct perf_sample_data *data,
+                                    struct pt_regs *regs)
 {
         struct perf_event *event = cpuc->events[idx];
         struct hw_perf_event *hwc = &event->hw;
@@ -49,37 +49,32 @@ static int cpu_has_mipsmt_pertccounters;
 #endif
 
 /* Copied from op_model_mipsxx.c */
-static inline unsigned int vpe_shift(void)
+static unsigned int vpe_shift(void)
 {
         if (num_possible_cpus() > 1)
                 return 1;
 
         return 0;
 }
-#else /* !CONFIG_MIPS_MT_SMP */
-#define vpe_id()        0
-
-static inline unsigned int vpe_shift(void)
-{
-        return 0;
-}
-#endif /* CONFIG_MIPS_MT_SMP */
 
-static inline unsigned int
-counters_total_to_per_cpu(unsigned int counters)
+static unsigned int counters_total_to_per_cpu(unsigned int counters)
 {
         return counters >> vpe_shift();
 }
 
-static inline unsigned int
-counters_per_cpu_to_total(unsigned int counters)
+static unsigned int counters_per_cpu_to_total(unsigned int counters)
 {
         return counters << vpe_shift();
 }
 
+#else /* !CONFIG_MIPS_MT_SMP */
+#define vpe_id()        0
+
+#endif /* CONFIG_MIPS_MT_SMP */
+
 #define __define_perf_accessors(r, n, np) \
 \
-static inline unsigned int r_c0_ ## r ## n(void) \
+static unsigned int r_c0_ ## r ## n(void) \
 { \
         unsigned int cpu = vpe_id(); \
 \
@@ -94,7 +89,7 @@ static inline unsigned int r_c0_ ## r ## n(void) \
         return 0; \
 } \
 \
-static inline void w_c0_ ## r ## n(unsigned int value) \
+static void w_c0_ ## r ## n(unsigned int value) \
 { \
         unsigned int cpu = vpe_id(); \
 \
@@ -121,7 +116,7 @@ __define_perf_accessors(perfctrl, 1, 3)
 __define_perf_accessors(perfctrl, 2, 0)
 __define_perf_accessors(perfctrl, 3, 1)
 
-static inline int __n_counters(void)
+static int __n_counters(void)
 {
         if (!(read_c0_config1() & M_CONFIG1_PC))
                 return 0;
@@ -135,7 +130,7 @@ static inline int __n_counters(void)
         return 4;
 }
 
-static inline int n_counters(void)
+static int n_counters(void)
 {
         int counters;
 
@@ -175,8 +170,7 @@ static void reset_counters(void *arg)
         }
 }
 
-static inline u64
-mipsxx_pmu_read_counter(unsigned int idx)
+static u64 mipsxx_pmu_read_counter(unsigned int idx)
 {
         switch (idx) {
         case 0:
@@ -193,8 +187,7 @@ mipsxx_pmu_read_counter(unsigned int idx)
         }
 }
 
-static inline void
-mipsxx_pmu_write_counter(unsigned int idx, u64 val)
+static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
 {
         switch (idx) {
         case 0:
@@ -212,8 +205,7 @@ mipsxx_pmu_write_counter(unsigned int idx, u64 val)
         }
 }
 
-static inline unsigned int
-mipsxx_pmu_read_control(unsigned int idx)
+static unsigned int mipsxx_pmu_read_control(unsigned int idx)
 {
         switch (idx) {
         case 0:
@@ -230,8 +222,7 @@ mipsxx_pmu_read_control(unsigned int idx)
         }
 }
 
-static inline void
-mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
+static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
 {
         switch (idx) {
         case 0:
@@ -511,9 +502,8 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map
 };
 
 #ifdef CONFIG_MIPS_MT_SMP
-static void
-check_and_calc_range(struct perf_event *event,
-                        const struct mips_perf_event *pev)
+static void check_and_calc_range(struct perf_event *event,
+                                 const struct mips_perf_event *pev)
 {
         struct hw_perf_event *hwc = &event->hw;
 
@@ -536,9 +526,8 @@ check_and_calc_range(struct perf_event *event,
                 hwc->config_base |= M_TC_EN_ALL;
 }
 #else
-static void
-check_and_calc_range(struct perf_event *event,
-                        const struct mips_perf_event *pev)
+static void check_and_calc_range(struct perf_event *event,
+                                 const struct mips_perf_event *pev)
 {
 }
 #endif
@@ -733,8 +722,7 @@ static int mipsxx_pmu_handle_shared_irq(void)
         return handled;
 }
 
-static irqreturn_t
-mipsxx_pmu_handle_irq(int irq, void *dev)
+static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
 {
         return mipsxx_pmu_handle_shared_irq();
 }
@@ -766,9 +754,8 @@ static void mipsxx_pmu_stop(void)
 #endif
 }
 
-static int
-mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
-                        struct hw_perf_event *hwc)
+static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
+                                    struct hw_perf_event *hwc)
 {
         int i;
 
@@ -797,8 +784,7 @@ mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
         return -EAGAIN;
 }
 
-static void
-mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
+static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
 {
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         unsigned long flags;
@@ -816,8 +802,7 @@ mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
         local_irq_restore(flags);
 }
 
-static void
-mipsxx_pmu_disable_event(int idx)
+static void mipsxx_pmu_disable_event(int idx)
 {
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         unsigned long flags;
@@ -892,8 +877,7 @@ mipsxx_pmu_disable_event(int idx)
  * then 128 needs to be added to 15 as the input for the event config,
  * i.e., 143 (0x8F) to be used.
  */
-static const struct mips_perf_event *
-mipsxx_pmu_map_raw_event(u64 config)
+static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 {
         unsigned int raw_id = config & 0xff;
         unsigned int base_id = raw_id & 0x7f;