mirror of https://gitee.com/openkylin/linux.git
perf/x86/intel/uncore: Clean up hardware on exit
When tearing down the boxes nothing undoes the hardware state which was
set up by box->init_box(). Add a box->exit_box() callback and implement
it for the uncores which have an init_box() callback.

This misses the cleanup in the error exit paths, but I cannot be
bothered to implement it before cleaning up the rest of the driver,
which makes that task way simpler.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andi Kleen <andi.kleen@intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Harish Chegondi <harish.chegondi@intel.com>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/20160222221011.023930023@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
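As an illustration only (not part of the commit), the following minimal user-space C sketch shows the init_box()/exit_box() pairing the patch introduces: teardown calls an exit hook that reverses whatever the init hook programmed, guarded by an "initiated" flag so the exit is skipped for boxes that were never initialized. All demo_* names are invented for the example; the real code uses struct intel_uncore_ops and uncore_box_exit(), shown in the diff below.

/*
 * Minimal user-space sketch of the init_box()/exit_box() pairing added by
 * this patch.  The demo_* names are invented for illustration; the real
 * code lives in struct intel_uncore_ops and uncore_box_init()/_exit().
 */
#include <stdio.h>

struct demo_box;

struct demo_ops {
	void (*init_box)(struct demo_box *box);
	void (*exit_box)(struct demo_box *box);	/* new: undoes init_box() */
};

struct demo_box {
	const struct demo_ops *ops;
	unsigned long flags;		/* bit 0 ~ "initiated" */
};

static void demo_box_init(struct demo_box *box)
{
	if (box->ops->init_box)
		box->ops->init_box(box);
	box->flags |= 1UL;
}

/* Mirrors uncore_box_exit(): call exit_box() only if init_box() ran. */
static void demo_box_exit(struct demo_box *box)
{
	if (box->flags & 1UL) {
		box->flags &= ~1UL;
		if (box->ops->exit_box)
			box->ops->exit_box(box);
	}
}

static void demo_hw_enable(struct demo_box *box)
{
	(void)box;
	puts("write global control MSR: enable counters");
}

static void demo_hw_disable(struct demo_box *box)
{
	(void)box;
	puts("write global control MSR: 0 (hardware state undone)");
}

static const struct demo_ops demo_msr_ops = {
	.init_box = demo_hw_enable,
	.exit_box = demo_hw_disable,
};

int main(void)
{
	struct demo_box box = { .ops = &demo_msr_ops, .flags = 0 };

	demo_box_init(&box);
	demo_box_exit(&box);	/* teardown now reverses the init */
	return 0;
}

The flag guard mirrors the test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags) check in uncore_box_exit(), which keeps teardown safe even for boxes freed before init_box() ever ran.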
parent 83f8ebd2eb
commit a46195f178
@@ -937,6 +937,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
 		raw_spin_lock(&uncore_box_lock);
 		list_del(&box->list);
 		raw_spin_unlock(&uncore_box_lock);
+		uncore_box_exit(box);
 		kfree(box);
 	}
 	return ret;

@@ -982,6 +983,7 @@ static void uncore_pci_remove(struct pci_dev *pdev)
 	}
 
 	WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
+	uncore_box_exit(box);
 	kfree(box);
 
 	if (last_box)

@@ -1091,8 +1093,10 @@ static void uncore_cpu_dying(int cpu)
 			pmu = &type->pmus[j];
 			box = *per_cpu_ptr(pmu->box, cpu);
 			*per_cpu_ptr(pmu->box, cpu) = NULL;
-			if (box && atomic_dec_and_test(&box->refcnt))
+			if (box && atomic_dec_and_test(&box->refcnt)) {
 				list_add(&box->list, &boxes_to_free);
+				uncore_box_exit(box);
+			}
 		}
 	}
 }
@@ -61,6 +61,7 @@ struct intel_uncore_type {
 
 struct intel_uncore_ops {
 	void (*init_box)(struct intel_uncore_box *);
+	void (*exit_box)(struct intel_uncore_box *);
 	void (*disable_box)(struct intel_uncore_box *);
 	void (*enable_box)(struct intel_uncore_box *);
 	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);

@@ -306,6 +307,14 @@ static inline void uncore_box_init(struct intel_uncore_box *box)
 	}
 }
 
+static inline void uncore_box_exit(struct intel_uncore_box *box)
+{
+	if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
+		if (box->pmu->type->ops->exit_box)
+			box->pmu->type->ops->exit_box(box);
+	}
+}
+
 static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
 {
 	return (box->phys_id < 0);
@@ -201,6 +201,11 @@ static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
 	wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
 }
 
+static void nhmex_uncore_msr_exit_box(struct intel_uncore_box *box)
+{
+	wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, 0);
+}
+
 static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
 {
 	unsigned msr = uncore_msr_box_ctl(box);

@@ -250,6 +255,7 @@ static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct p
 
 #define NHMEX_UNCORE_OPS_COMMON_INIT() \
 	.init_box = nhmex_uncore_msr_init_box, \
+	.exit_box = nhmex_uncore_msr_exit_box, \
 	.disable_box = nhmex_uncore_msr_disable_box, \
 	.enable_box = nhmex_uncore_msr_enable_box, \
 	.disable_event = nhmex_uncore_msr_disable_event, \
@@ -95,6 +95,12 @@ static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
 	}
 }
 
+static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
+{
+	if (box->pmu->pmu_idx == 0)
+		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
+}
+
 static struct uncore_event_desc snb_uncore_events[] = {
 	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
 	{ /* end: all zeroes */ },

@@ -116,6 +122,7 @@ static struct attribute_group snb_uncore_format_group = {
 
 static struct intel_uncore_ops snb_uncore_msr_ops = {
 	.init_box = snb_uncore_msr_init_box,
+	.exit_box = snb_uncore_msr_exit_box,
 	.disable_event = snb_uncore_msr_disable_event,
 	.enable_event = snb_uncore_msr_enable_event,
 	.read_counter = uncore_msr_read_counter,

@@ -231,6 +238,11 @@ static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
 	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
 }
 
+static void snb_uncore_imc_exit_box(struct intel_uncore_box *box)
+{
+	iounmap(box->io_addr);
+}
+
 static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
 {}
 

@@ -458,6 +470,7 @@ static struct pmu snb_uncore_imc_pmu = {
 
 static struct intel_uncore_ops snb_uncore_imc_ops = {
 	.init_box = snb_uncore_imc_init_box,
+	.exit_box = snb_uncore_imc_exit_box,
 	.enable_box = snb_uncore_imc_enable_box,
 	.disable_box = snb_uncore_imc_disable_box,
 	.disable_event = snb_uncore_imc_disable_event,