perf/x86/intel/uncore: Support MMIO type uncore blocks
A new MMIO type uncore box is introduced on the Snow Ridge server. The
counters of an MMIO type uncore box can only be accessed via MMIO.

Add a new uncore type, uncore_mmio_uncores, for MMIO type uncore blocks.

Support MMIO type uncore blocks in CPU hot plug. The MMIO space has to be
mapped/unmapped for the first/last CPU. The context also needs to be
migrated if the bound CPU changes.

Add mmio_init() to initialize and register PMUs for MMIO type uncore
blocks.

Add a helper to calculate the box_ctl address. The helpers which
calculate ctl/ctr can be shared with PCI type uncore blocks.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: acme@kernel.org
Cc: eranian@google.com
Link: https://lkml.kernel.org/r/1556672028-119221-5-git-send-email-kan.liang@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 3da04b8a00
parent c8872d90e0
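The "map/unmap for the first/last CPU" step mentioned in the changelog is not part of this diff; it is delegated to per-model init_box/exit_box callbacks. Below is a rough, hypothetical sketch of what such a callback pair could look like; the example_* names and the EXAMPLE_UNCORE_MMIO_* constants are placeholders introduced here for illustration, not code from this series.

	#include <linux/io.h>	/* ioremap(), iounmap() */

	/*
	 * Illustrative sketch only: map the register block of an MMIO-type box
	 * when the first owning CPU brings it up, and unmap it when the last
	 * owning CPU goes away.  The physical base and mapping size below are
	 * placeholders for the model-specific details a real driver would use.
	 */
	#define EXAMPLE_UNCORE_MMIO_BASE	0xfeb00000UL	/* placeholder, not a real address */
	#define EXAMPLE_UNCORE_MMIO_SIZE	0x1000		/* placeholder size */

	static void example_uncore_mmio_init_box(struct intel_uncore_box *box)
	{
		/* Make the box's PMON registers reachable through box->io_addr. */
		box->io_addr = ioremap(EXAMPLE_UNCORE_MMIO_BASE,
				       EXAMPLE_UNCORE_MMIO_SIZE);
	}

	static void example_uncore_mmio_exit_box(struct intel_uncore_box *box)
	{
		/* Drop the mapping once no CPU references the box anymore. */
		if (box->io_addr)
			iounmap(box->io_addr);
	}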
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -8,6 +8,7 @@
 static struct intel_uncore_type *empty_uncore[] = { NULL, };
 struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
 struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
+struct intel_uncore_type **uncore_mmio_uncores = empty_uncore;
 
 static bool pcidrv_registered;
 struct pci_driver *uncore_pci_driver;
@@ -1178,12 +1179,14 @@ static int uncore_event_cpu_offline(unsigned int cpu)
 	target = -1;
 
 	uncore_change_context(uncore_msr_uncores, cpu, target);
+	uncore_change_context(uncore_mmio_uncores, cpu, target);
 	uncore_change_context(uncore_pci_uncores, cpu, target);
 
 unref:
 	/* Clear the references */
 	die = topology_logical_die_id(cpu);
 	uncore_box_unref(uncore_msr_uncores, die);
+	uncore_box_unref(uncore_mmio_uncores, die);
 	return 0;
 }
 
@@ -1252,12 +1255,13 @@ static int uncore_box_ref(struct intel_uncore_type **types,
 
 static int uncore_event_cpu_online(unsigned int cpu)
 {
-	int ret, die, target;
+	int die, target, msr_ret, mmio_ret;
 
 	die = topology_logical_die_id(cpu);
-	ret = uncore_box_ref(uncore_msr_uncores, die, cpu);
-	if (ret)
-		return ret;
+	msr_ret = uncore_box_ref(uncore_msr_uncores, die, cpu);
+	mmio_ret = uncore_box_ref(uncore_mmio_uncores, die, cpu);
+	if (msr_ret && mmio_ret)
+		return -ENOMEM;
 
 	/*
 	 * Check if there is an online cpu in the package
@@ -1269,7 +1273,10 @@ static int uncore_event_cpu_online(unsigned int cpu)
 
 	cpumask_set_cpu(cpu, &uncore_cpu_mask);
 
-	uncore_change_context(uncore_msr_uncores, -1, cpu);
+	if (!msr_ret)
+		uncore_change_context(uncore_msr_uncores, -1, cpu);
+	if (!mmio_ret)
+		uncore_change_context(uncore_mmio_uncores, -1, cpu);
 	uncore_change_context(uncore_pci_uncores, -1, cpu);
 	return 0;
 }
@@ -1317,12 +1324,35 @@ static int __init uncore_cpu_init(void)
 	return ret;
 }
 
+static int __init uncore_mmio_init(void)
+{
+	struct intel_uncore_type **types = uncore_mmio_uncores;
+	int ret;
+
+	ret = uncore_types_init(types, true);
+	if (ret)
+		goto err;
+
+	for (; *types; types++) {
+		ret = type_pmu_register(*types);
+		if (ret)
+			goto err;
+	}
+	return 0;
+err:
+	uncore_types_exit(uncore_mmio_uncores);
+	uncore_mmio_uncores = empty_uncore;
+	return ret;
+}
+
+
 #define X86_UNCORE_MODEL_MATCH(model, init)	\
 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }
 
 struct intel_uncore_init_fun {
 	void	(*cpu_init)(void);
 	int	(*pci_init)(void);
+	void	(*mmio_init)(void);
 };
 
 static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
@@ -1437,7 +1467,7 @@ static int __init intel_uncore_init(void)
 {
 	const struct x86_cpu_id *id;
 	struct intel_uncore_init_fun *uncore_init;
-	int pret = 0, cret = 0, ret;
+	int pret = 0, cret = 0, mret = 0, ret;
 
 	id = x86_match_cpu(intel_uncore_match);
 	if (!id)
@@ -1460,7 +1490,12 @@ static int __init intel_uncore_init(void)
 		cret = uncore_cpu_init();
 	}
 
-	if (cret && pret)
+	if (uncore_init->mmio_init) {
+		uncore_init->mmio_init();
+		mret = uncore_mmio_init();
+	}
+
+	if (cret && pret && mret)
 		return -ENODEV;
 
 	/* Install hotplug callbacks to setup the targets for each package */
@@ -1474,6 +1509,7 @@ static int __init intel_uncore_init(void)
 
 err:
 	uncore_types_exit(uncore_msr_uncores);
+	uncore_types_exit(uncore_mmio_uncores);
 	uncore_pci_exit();
 	return ret;
 }
@@ -1483,6 +1519,7 @@ static void __exit intel_uncore_exit(void)
 {
 	cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
 	uncore_types_exit(uncore_msr_uncores);
+	uncore_types_exit(uncore_mmio_uncores);
 	uncore_pci_exit();
 }
 module_exit(intel_uncore_exit);

--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -56,7 +56,10 @@ struct intel_uncore_type {
 	unsigned fixed_ctr;
 	unsigned fixed_ctl;
 	unsigned box_ctl;
-	unsigned msr_offset;
+	union {
+		unsigned msr_offset;
+		unsigned mmio_offset;
+	};
 	unsigned num_shared_regs:8;
 	unsigned single_fixed:1;
 	unsigned pair_ctr_ctl:1;
@@ -190,6 +193,13 @@ static inline bool uncore_pmc_freerunning(int idx)
 	return idx == UNCORE_PMC_IDX_FREERUNNING;
 }
 
+static inline
+unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box)
+{
+	return box->pmu->type->box_ctl +
+	       box->pmu->type->mmio_offset * box->pmu->pmu_idx;
+}
+
 static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
 {
 	return box->pmu->type->box_ctl;
@@ -330,7 +340,7 @@ unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
 static inline
 unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
 {
-	if (box->pci_dev)
+	if (box->pci_dev || box->io_addr)
 		return uncore_pci_fixed_ctl(box);
 	else
 		return uncore_msr_fixed_ctl(box);
@@ -339,7 +349,7 @@ unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
 static inline
 unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
 {
-	if (box->pci_dev)
+	if (box->pci_dev || box->io_addr)
 		return uncore_pci_fixed_ctr(box);
 	else
 		return uncore_msr_fixed_ctr(box);
@@ -348,7 +358,7 @@ unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
 static inline
 unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
 {
-	if (box->pci_dev)
+	if (box->pci_dev || box->io_addr)
 		return uncore_pci_event_ctl(box, idx);
 	else
 		return uncore_msr_event_ctl(box, idx);
@@ -357,7 +367,7 @@ unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
 static inline
 unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
 {
-	if (box->pci_dev)
+	if (box->pci_dev || box->io_addr)
 		return uncore_pci_perf_ctr(box, idx);
 	else
 		return uncore_msr_perf_ctr(box, idx);
@@ -507,6 +517,7 @@ u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
 
 extern struct intel_uncore_type **uncore_msr_uncores;
 extern struct intel_uncore_type **uncore_pci_uncores;
+extern struct intel_uncore_type **uncore_mmio_uncores;
 extern struct pci_driver *uncore_pci_driver;
 extern raw_spinlock_t pci2phy_map_lock;
 extern struct list_head pci2phy_map_head;
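With mmio_offset sharing storage with msr_offset, uncore_mmio_box_ctl() gives each box the offset of its control register, and the ctl/ctr helpers above fall through to the PCI-style variants whenever box->io_addr is set. As a hypothetical illustration of how a counter read could be built on top of these helpers (not part of this patch; the example_ name is a placeholder):

	#include <linux/io.h>		/* readq() */
	#include <linux/perf_event.h>

	static u64 example_uncore_mmio_read_counter(struct intel_uncore_box *box,
						    struct perf_event *event)
	{
		if (!box->io_addr)
			return 0;

		/*
		 * box->io_addr is non-NULL, so uncore_perf_ctr() resolves to the
		 * PCI/MMIO-style offset of counter hw.idx inside the mapped block.
		 */
		return readq(box->io_addr + uncore_perf_ctr(box, event->hw.idx));
	}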