Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
 "Six fixes for bugs that were found via fuzzing, and a trivial
  hw-enablement patch for AMD Family-17h CPU PMUs"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/intel/uncore: Allow only a single PMU/box within an events group
  perf/x86/intel: Cure bogus unwind from PEBS entries
  perf/x86: Restore TASK_SIZE check on frame pointer
  perf/core: Fix address filter parser
  perf/x86: Add perf support for AMD family-17h processors
  perf/x86/uncore: Fix crash by removing bogus event_list[] handling for SNB client uncore IMC
  perf/core: Do not set cpuctx->cgrp for unscheduled cgroups
commit ded9b5dd20
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -662,7 +662,13 @@ static int __init amd_core_pmu_init(void)
 		pr_cont("Fam15h ");
 		x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
 		break;
-
+	case 0x17:
+		pr_cont("Fam17h ");
+		/*
+		 * In family 17h, there are no event constraints in the PMC hardware.
+		 * We fallback to using default amd_get_event_constraints.
+		 */
+		break;
 	default:
 		pr_err("core perfctr but no constraints; unknown hardware!\n");
 		return -ENODEV;
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2352,7 +2352,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
 		frame.next_frame     = 0;
 		frame.return_address = 0;
 
-		if (!access_ok(VERIFY_READ, fp, 8))
+		if (!valid_user_frame(fp, sizeof(frame)))
 			break;
 
 		bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
@@ -2362,9 +2362,6 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
 		if (bytes != 0)
 			break;
 
-		if (!valid_user_frame(fp, sizeof(frame)))
-			break;
-
 		perf_callchain_store(entry, cs_base + frame.return_address);
 		fp = compat_ptr(ss_base + frame.next_frame);
 	}
@@ -2413,7 +2410,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 		frame.next_frame     = NULL;
 		frame.return_address = 0;
 
-		if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
+		if (!valid_user_frame(fp, sizeof(frame)))
 			break;
 
 		bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
@@ -2423,9 +2420,6 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 		if (bytes != 0)
 			break;
 
-		if (!valid_user_frame(fp, sizeof(frame)))
-			break;
-
 		perf_callchain_store(entry, frame.return_address);
 		fp = (void __user *)frame.next_frame;
 	}
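The two core.c hunks above hoist the frame sanity check ahead of the NMI-safe copy and switch it from access_ok() to valid_user_frame(), which (per the commit title) restores the TASK_SIZE bound on the frame pointer. A minimal userspace model of that bound, assuming an illustrative x86-64 TASK_SIZE value; the demo_* names are hypothetical and not part of this diff:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the x86-64 user/kernel split; the kernel
 * uses the current task's TASK_SIZE. */
#define DEMO_TASK_SIZE 0x00007ffffffff000ULL

/* Model of the restored check: the whole frame must sit below
 * TASK_SIZE and the range must not wrap around. */
static bool demo_valid_user_frame(uint64_t fp, uint64_t size)
{
	return fp + size >= fp && fp + size <= DEMO_TASK_SIZE;
}

int main(void)
{
	/* a plausible user stack slot passes ... */
	printf("%d\n", demo_valid_user_frame(0x00007ffc12345670ULL, 16));
	/* ... a kernel address (e.g. a stale or fuzzed fp) does not */
	printf("%d\n", demo_valid_user_frame(0xffff880012345678ULL, 16));
	return 0;
}

In both the old and new code a rejected fp only terminates the unwind loop; moving the check before __copy_from_user_nmi() rejects out-of-range pointers before any copy is attempted.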
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1108,20 +1108,20 @@ static void setup_pebs_sample_data(struct perf_event *event,
 	}
 
 	/*
-	 * We use the interrupt regs as a base because the PEBS record
-	 * does not contain a full regs set, specifically it seems to
-	 * lack segment descriptors, which get used by things like
-	 * user_mode().
+	 * We use the interrupt regs as a base because the PEBS record does not
+	 * contain a full regs set, specifically it seems to lack segment
+	 * descriptors, which get used by things like user_mode().
 	 *
-	 * In the simple case fix up only the IP and BP,SP regs, for
-	 * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
-	 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
+	 * In the simple case fix up only the IP for PERF_SAMPLE_IP.
+	 *
+	 * We must however always use BP,SP from iregs for the unwinder to stay
+	 * sane; the record BP,SP can point into thin air when the record is
+	 * from a previous PMI context or an (I)RET happend between the record
+	 * and PMI.
 	 */
 	*regs = *iregs;
 	regs->flags = pebs->flags;
 	set_linear_ip(regs, pebs->ip);
-	regs->bp = pebs->bp;
-	regs->sp = pebs->sp;
 
 	if (sample_type & PERF_SAMPLE_REGS_INTR) {
 		regs->ax = pebs->ax;
@@ -1130,10 +1130,21 @@ static void setup_pebs_sample_data(struct perf_event *event,
 		regs->dx = pebs->dx;
 		regs->si = pebs->si;
 		regs->di = pebs->di;
-		regs->bp = pebs->bp;
-		regs->sp = pebs->sp;
 
-		regs->flags = pebs->flags;
+		/*
+		 * Per the above; only set BP,SP if we don't need callchains.
+		 *
+		 * XXX: does this make sense?
+		 */
+		if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
+			regs->bp = pebs->bp;
+			regs->sp = pebs->sp;
+		}
+
+		/*
+		 * Preserve PERF_EFLAGS_VM from set_linear_ip().
+		 */
+		regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM);
 #ifndef CONFIG_X86_32
 		regs->r8 = pebs->r8;
 		regs->r9 = pebs->r9;
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -319,9 +319,9 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
  */
 static int uncore_pmu_event_init(struct perf_event *event);
 
-static bool is_uncore_event(struct perf_event *event)
+static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
 {
-	return event->pmu->event_init == uncore_pmu_event_init;
+	return &box->pmu->pmu == event->pmu;
 }
 
 static int
@@ -340,7 +340,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
 
 	n = box->n_events;
 
-	if (is_uncore_event(leader)) {
+	if (is_box_event(box, leader)) {
 		box->event_list[n] = leader;
 		n++;
 	}
@@ -349,7 +349,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
 		return n;
 
 	list_for_each_entry(event, &leader->sibling_list, group_entry) {
-		if (!is_uncore_event(event) ||
+		if (!is_box_event(box, event) ||
 		    event->state <= PERF_EVENT_STATE_OFF)
 			continue;
 
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -490,24 +490,12 @@ static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
 
 	snb_uncore_imc_event_start(event, 0);
 
-	box->n_events++;
-
 	return 0;
 }
 
 static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
 {
-	struct intel_uncore_box *box = uncore_event_to_box(event);
-	int i;
-
 	snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
-
-	for (i = 0; i < box->n_events; i++) {
-		if (event == box->event_list[i]) {
-			--box->n_events;
-			break;
-		}
-	}
 }
 
 int snb_pci2phy_map_init(int devid)
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -113,7 +113,7 @@ struct debug_store {
 * Per register state.
 */
 struct er_account {
-	raw_spinlock_t		lock;	/* per-core: protect structure */
+	raw_spinlock_t      lock;	/* per-core: protect structure */
 	u64                 config;	/* extra MSR config */
 	u64                 reg;	/* extra MSR number */
 	atomic_t            ref;	/* reference count */
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -902,6 +902,17 @@ list_update_cgroup_event(struct perf_event *event,
 	 * this will always be called from the right CPU.
 	 */
 	cpuctx = __get_cpu_context(ctx);
+
+	/* Only set/clear cpuctx->cgrp if current task uses event->cgrp. */
+	if (perf_cgroup_from_task(current, ctx) != event->cgrp) {
+		/*
+		 * We are removing the last cpu event in this context.
+		 * If that event is not active in this cpu, cpuctx->cgrp
+		 * should've been cleared by perf_cgroup_switch.
+		 */
+		WARN_ON_ONCE(!add && cpuctx->cgrp);
+		return;
+	}
 	cpuctx->cgrp = add ? event->cgrp : NULL;
 }
 
@@ -8018,6 +8029,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
  * if <size> is not specified, the range is treated as a single address.
  */
 enum {
+	IF_ACT_NONE = -1,
 	IF_ACT_FILTER,
 	IF_ACT_START,
 	IF_ACT_STOP,
@@ -8041,6 +8053,7 @@ static const match_table_t if_tokens = {
 	{ IF_SRC_KERNEL,	"%u/%u" },
 	{ IF_SRC_FILEADDR,	"%u@%s" },
 	{ IF_SRC_KERNELADDR,	"%u" },
+	{ IF_ACT_NONE,		NULL },
 };
 
 /*
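Context for the last two hunks: a match_table_t is scanned entry by entry until a NULL pattern is found, so if_tokens needs the IF_ACT_NONE terminator to give unmatched filter strings somewhere to stop. A small self-contained model of that contract; the pattern matching is reduced to strcmp() and the demo_* names and enum values are illustrative, not the kernel's:

#include <stdio.h>
#include <string.h>

/* illustrative subset; the real tokens live in kernel/events/core.c */
enum { IF_ACT_NONE = -1, IF_SRC_KERNEL, IF_SRC_FILEADDR, IF_SRC_KERNELADDR };

struct demo_token {
	int token;
	const char *pattern;
};

static const struct demo_token demo_if_tokens[] = {
	{ IF_SRC_KERNEL,     "%u/%u" },
	{ IF_SRC_FILEADDR,   "%u@%s" },
	{ IF_SRC_KERNELADDR, "%u"    },
	{ IF_ACT_NONE,       NULL    },	/* terminator: the scan stops here */
};

/* Simplified scan loop; the kernel's match_token() does real pattern
 * matching. Without the NULL entry, unmatched input would walk past
 * the end of the array. */
static int demo_match_token(const char *s)
{
	const struct demo_token *t;

	for (t = demo_if_tokens; t->pattern; t++)
		if (!strcmp(s, t->pattern))
			return t->token;
	return IF_ACT_NONE;
}

int main(void)
{
	printf("%d\n", demo_match_token("%u"));	/* 2 */
	printf("%d\n", demo_match_token("bogus"));	/* -1, not an out-of-bounds read */
	return 0;
}

That runaway scan on unmatched input is what the added { IF_ACT_NONE, NULL } entry guards against.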