Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
 "Misc fixes: two Intel uncore driver fixes, a CPU-hotplug fix and a
  build dependencies fix"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/intel/uncore: Fix boot crash on SBOX PMU on Haswell-EP
  perf/x86/intel/uncore: Fix IRP uncore register offsets on Haswell EP
  perf: Fix corruption of sibling list with hotplug
  perf/x86: Fix embarrasing typo
commit 13f5004c94
@@ -144,7 +144,7 @@ config INSTRUCTION_DECODER
 
 config PERF_EVENTS_INTEL_UNCORE
         def_bool y
-        depends on PERF_EVENTS && SUP_SUP_INTEL && PCI
+        depends on PERF_EVENTS && CPU_SUP_INTEL && PCI
 
 config OUTPUT_FORMAT
         string
@@ -486,14 +486,17 @@ static struct attribute_group snbep_uncore_qpi_format_group = {
         .attrs = snbep_uncore_qpi_formats_attr,
 };
 
-#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()              \
-        .init_box       = snbep_uncore_msr_init_box,    \
+#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()            \
         .disable_box    = snbep_uncore_msr_disable_box, \
         .enable_box     = snbep_uncore_msr_enable_box,  \
         .disable_event  = snbep_uncore_msr_disable_event, \
         .enable_event   = snbep_uncore_msr_enable_event,  \
         .read_counter   = uncore_msr_read_counter
 
+#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()              \
+        __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),           \
+        .init_box       = snbep_uncore_msr_init_box     \
+
 static struct intel_uncore_ops snbep_uncore_msr_ops = {
         SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
 };
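The macro split above exists so that a box type can reuse the common MSR callbacks while supplying its own .init_box (the Haswell-EP SBOX does exactly that further down). Below is a minimal sketch of the same pattern outside the kernel, using hypothetical names rather than the driver's API:

/* Sketch only: hypothetical ops structure and callbacks. */
struct box_ops {
        void (*init_box)(void);
        void (*enable_box)(void);
        void (*disable_box)(void);
};

static void common_init(void)    { }
static void common_enable(void)  { }
static void common_disable(void) { }
static void special_init(void)   { }    /* box-specific replacement */

/* Everything except .init_box ... */
#define __COMMON_OPS_INIT()                     \
        .enable_box     = common_enable,        \
        .disable_box    = common_disable

/* ... and the full set for boxes that want the default init. */
#define COMMON_OPS_INIT()                       \
        __COMMON_OPS_INIT(),                    \
        .init_box       = common_init

static struct box_ops default_ops = { COMMON_OPS_INIT() };

static struct box_ops special_ops = {
        __COMMON_OPS_INIT(),
        .init_box = special_init,               /* override just this hook */
};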
@@ -1919,6 +1922,30 @@ static struct intel_uncore_type hswep_uncore_cbox = {
         .format_group           = &hswep_uncore_cbox_format_group,
 };
 
+/*
+ * Write SBOX Initialization register bit by bit to avoid spurious #GPs
+ */
+static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
+{
+        unsigned msr = uncore_msr_box_ctl(box);
+
+        if (msr) {
+                u64 init = SNBEP_PMON_BOX_CTL_INT;
+                u64 flags = 0;
+                int i;
+
+                for_each_set_bit(i, (unsigned long *)&init, 64) {
+                        flags |= (1ULL << i);
+                        wrmsrl(msr, flags);
+                }
+        }
+}
+
+static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
+        __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
+        .init_box               = hswep_uncore_sbox_msr_init_box
+};
+
 static struct attribute *hswep_uncore_sbox_formats_attr[] = {
         &format_attr_event.attr,
         &format_attr_umask.attr,
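The init helper added above programs the SBOX box-control register one bit at a time: each pass of for_each_set_bit() ORs the next set bit of SNBEP_PMON_BOX_CTL_INT into the accumulated value and rewrites the MSR, which is what the comment means by writing the register bit by bit to avoid spurious #GPs. Here is a standalone sketch of that accumulate-and-write loop, with a hypothetical write_msr() logging stub in place of wrmsrl() and placeholder register/mask values:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for wrmsrl(): just logs each write. */
static void write_msr(unsigned int msr, uint64_t val)
{
        printf("msr 0x%x <- 0x%llx\n", msr, (unsigned long long)val);
}

/* Accumulate the set bits of 'init' one at a time, rewriting the
 * register after each new bit, mirroring the loop in the hunk above. */
static void init_box_bit_by_bit(unsigned int msr, uint64_t init)
{
        uint64_t flags = 0;

        for (int i = 0; i < 64; i++) {
                if (!(init & (1ULL << i)))
                        continue;
                flags |= 1ULL << i;
                write_msr(msr, flags);
        }
}

int main(void)
{
        /* Placeholder MSR number and init mask, not the real values. */
        init_box_bit_by_bit(0x100, 0x103);
        return 0;
}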
@@ -1944,7 +1971,7 @@ static struct intel_uncore_type hswep_uncore_sbox = {
         .event_mask             = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
         .box_ctl                = HSWEP_S0_MSR_PMON_BOX_CTL,
         .msr_offset             = HSWEP_SBOX_MSR_OFFSET,
-        .ops                    = &snbep_uncore_msr_ops,
+        .ops                    = &hswep_uncore_sbox_msr_ops,
         .format_group           = &hswep_uncore_sbox_format_group,
 };
 
@@ -2025,13 +2052,27 @@ static struct intel_uncore_type hswep_uncore_imc = {
         SNBEP_UNCORE_PCI_COMMON_INIT(),
 };
 
+static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
+
+static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
+{
+        struct pci_dev *pdev = box->pci_dev;
+        struct hw_perf_event *hwc = &event->hw;
+        u64 count = 0;
+
+        pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
+        pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
+
+        return count;
+}
+
 static struct intel_uncore_ops hswep_uncore_irp_ops = {
         .init_box       = snbep_uncore_pci_init_box,
         .disable_box    = snbep_uncore_pci_disable_box,
         .enable_box     = snbep_uncore_pci_enable_box,
         .disable_event  = ivbep_uncore_irp_disable_event,
         .enable_event   = ivbep_uncore_irp_enable_event,
-        .read_counter   = ivbep_uncore_irp_read_counter,
+        .read_counter   = hswep_uncore_irp_read_counter,
 };
 
 static struct intel_uncore_type hswep_uncore_irp = {
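The new hswep_uncore_irp_read_counter() above assembles a 64-bit count from two 32-bit PCI config-space reads: the low dword at the per-counter offset (0xa0/0xa8/0xb0/0xb8 on Haswell-EP IRP) and the high dword at offset + 4, stored into the two halves of a u64. A minimal sketch of the same assembly follows, with a hypothetical read_config32() stub in place of pci_read_config_dword() and an explicit shift-and-or instead of the pointer arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Per-counter IRP register offsets, as in the hunk above. */
static const unsigned int irp_ctrs[] = { 0xa0, 0xa8, 0xb0, 0xb8 };

/* Hypothetical stand-in for pci_read_config_dword(): returns canned
 * dwords so the sketch runs without hardware. */
static uint32_t read_config32(unsigned int offset)
{
        return (offset & 0x4) ? 0x00000001u : 0x89abcdefu;
}

/* Low dword lives at the counter offset, high dword at offset + 4;
 * combine them into one 64-bit count. */
static uint64_t read_irp_counter(int idx)
{
        uint64_t lo = read_config32(irp_ctrs[idx]);
        uint64_t hi = read_config32(irp_ctrs[idx] + 4);

        return (hi << 32) | lo;
}

int main(void)
{
        printf("counter 0 = 0x%llx\n",
               (unsigned long long)read_irp_counter(0));
        return 0;
}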
@@ -1562,8 +1562,10 @@ static void perf_remove_from_context(struct perf_event *event, bool detach_group
 
         if (!task) {
                 /*
-                 * Per cpu events are removed via an smp call and
-                 * the removal is always successful.
+                 * Per cpu events are removed via an smp call. The removal can
+                 * fail if the CPU is currently offline, but in that case we
+                 * already called __perf_remove_from_context from
+                 * perf_event_exit_cpu.
                  */
                 cpu_function_call(event->cpu, __perf_remove_from_context, &re);
                 return;
@@ -8117,7 +8119,7 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
 
 static void __perf_event_exit_context(void *__info)
 {
-        struct remove_event re = { .detach_group = false };
+        struct remove_event re = { .detach_group = true };
         struct perf_event_context *ctx = __info;
 
         perf_pmu_rotate_stop(ctx->pmu);