mirror of https://gitee.com/openkylin/linux.git
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
 "Misc fixes: PMU driver corner cases, tooling fixes, and an 'AUX'
  (Intel PT) race related core fix"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/intel/cqm: Do not access cpu_data() from CPU_UP_PREPARE handler
  perf/x86/intel: Fix memory leak on hot-plug allocation fail
  perf: Fix PERF_EVENT_IOC_PERIOD migration race
  perf: Fix double-free of the AUX buffer
  perf: Fix fasync handling on inherited events
  perf tools: Fix test build error when bindir contains double slash
  perf stat: Fix transaction length metrics
  perf: Fix running time accounting
commit b25c6cee55
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2534,7 +2534,7 @@ static int intel_pmu_cpu_prepare(int cpu)
 	if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
 		cpuc->shared_regs = allocate_shared_regs(cpu);
 		if (!cpuc->shared_regs)
-			return NOTIFY_BAD;
+			goto err;
 	}
 
 	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
@@ -2542,18 +2542,27 @@ static int intel_pmu_cpu_prepare(int cpu)
 
 		cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
 		if (!cpuc->constraint_list)
-			return NOTIFY_BAD;
+			goto err_shared_regs;
 
 		cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
-		if (!cpuc->excl_cntrs) {
-			kfree(cpuc->constraint_list);
-			kfree(cpuc->shared_regs);
-			return NOTIFY_BAD;
-		}
+		if (!cpuc->excl_cntrs)
+			goto err_constraint_list;
+
 		cpuc->excl_thread_id = 0;
 	}
 
 	return NOTIFY_OK;
+
+err_constraint_list:
+	kfree(cpuc->constraint_list);
+	cpuc->constraint_list = NULL;
+
+err_shared_regs:
+	kfree(cpuc->shared_regs);
+	cpuc->shared_regs = NULL;
+
+err:
+	return NOTIFY_BAD;
 }
 
 static void intel_pmu_cpu_starting(int cpu)
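The error paths above follow the kernel's standard goto-unwind idiom: each later failure jumps to a label that frees everything allocated before it, in reverse order, and NULLs the pointers so a subsequent teardown cannot double-free. A minimal userspace sketch of the same shape, with illustrative names (not kernel code):

	#include <stdlib.h>

	struct ctx {
		void *regs;	/* stands in for cpuc->shared_regs     */
		void *list;	/* stands in for cpuc->constraint_list */
	};

	/* Allocate both resources or none: unwind in reverse order on failure. */
	static int ctx_init(struct ctx *c)
	{
		c->regs = malloc(64);
		if (!c->regs)
			goto err;

		c->list = malloc(64);
		if (!c->list)
			goto err_regs;

		return 0;

	err_regs:
		free(c->regs);
		c->regs = NULL;	/* NULL the pointer: a repeat free() is then a no-op */
	err:
		return -1;
	}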
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -1255,7 +1255,7 @@ static inline void cqm_pick_event_reader(int cpu)
 	cpumask_set_cpu(cpu, &cqm_cpumask);
 }
 
-static void intel_cqm_cpu_prepare(unsigned int cpu)
+static void intel_cqm_cpu_starting(unsigned int cpu)
 {
 	struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1296,13 +1296,11 @@ static int intel_cqm_cpu_notifier(struct notifier_block *nb,
 	unsigned int cpu = (unsigned long)hcpu;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		intel_cqm_cpu_prepare(cpu);
-		break;
 	case CPU_DOWN_PREPARE:
 		intel_cqm_cpu_exit(cpu);
 		break;
 	case CPU_STARTING:
+		intel_cqm_cpu_starting(cpu);
 		cqm_pick_event_reader(cpu);
 		break;
 	}
@@ -1373,7 +1371,7 @@ static int __init intel_cqm_init(void)
 		goto out;
 
 	for_each_online_cpu(i) {
-		intel_cqm_cpu_prepare(i);
+		intel_cqm_cpu_starting(i);
 		cqm_pick_event_reader(i);
 	}
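The CQM rename works because of hotplug-callback ordering: CPU_UP_PREPARE runs on a control CPU before the new CPU has booted, so cpu_data(cpu) is not populated yet, while CPU_STARTING runs on the incoming CPU after that state exists. A toy userspace analogy of the two phases, using a thread standing in for the incoming CPU (all names invented for illustration):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* "prepare" runs on the control thread before the worker exists;
	 * "starting" runs on the worker itself, once its per-thread state
	 * (the analogue of cpu_data()) is initialized. */
	struct worker {
		pthread_t thread;
		int per_cpu_ready;	/* analogue of cpu_data(cpu) being populated */
		void *resources;	/* safe to allocate in the prepare phase     */
	};

	static void *worker_main(void *arg)
	{
		struct worker *w = arg;

		w->per_cpu_ready = 1;	/* bring-up: per-CPU state now valid */
		/* "starting"-phase work belongs here, not in prepare. */
		printf("starting: per_cpu_ready=%d\n", w->per_cpu_ready);
		return NULL;
	}

	int main(void)
	{
		struct worker w = { 0 };

		/* "prepare" phase: allocate only; per-thread state is not valid yet. */
		w.resources = malloc(64);

		pthread_create(&w.thread, NULL, worker_main, &w);
		pthread_join(w.thread, NULL);
		free(w.resources);
		return 0;
	}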
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1868,8 +1868,6 @@ event_sched_in(struct perf_event *event,
 
 	perf_pmu_disable(event->pmu);
 
-	event->tstamp_running += tstamp - event->tstamp_stopped;
-
 	perf_set_shadow_time(event, ctx, tstamp);
 
 	perf_log_itrace_start(event);
@@ -1881,6 +1879,8 @@ event_sched_in(struct perf_event *event,
 		goto out;
 	}
 
+	event->tstamp_running += tstamp - event->tstamp_stopped;
+
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
 	if (!ctx->nr_active++)
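The running-time fix matters because event->pmu->add() can fail, and the old placement credited tstamp_running to an event that never went on the PMU. The skew is user-visible via the time_enabled/time_running read format, which tools also use to scale multiplexed counts (estimated count = value * enabled / running). A standard perf_event_open reader showing those fields (ordinary API usage, not taken from this patch):

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	/* Layout returned by read() when both time fields are requested. */
	struct read_format {
		uint64_t value;
		uint64_t time_enabled;
		uint64_t time_running;
	};

	int main(void)
	{
		struct perf_event_attr attr;
		struct read_format rf;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.size = sizeof(attr);
		attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
				   PERF_FORMAT_TOTAL_TIME_RUNNING;

		/* Count this process's instructions on any CPU. */
		fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			return 1;

		/* ... workload ... */

		if (read(fd, &rf, sizeof(rf)) == sizeof(rf))
			printf("count=%llu enabled=%llu running=%llu\n",
			       (unsigned long long)rf.value,
			       (unsigned long long)rf.time_enabled,
			       (unsigned long long)rf.time_running);
		close(fd);
		return 0;
	}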
@@ -3958,28 +3958,21 @@ static void perf_event_for_each(struct perf_event *event,
 		perf_event_for_each_child(sibling, func);
 }
 
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
-{
-	struct perf_event_context *ctx = event->ctx;
-	int ret = 0, active;
+struct period_event {
+	struct perf_event *event;
 	u64 value;
+};
 
-	if (!is_sampling_event(event))
-		return -EINVAL;
+static int __perf_event_period(void *info)
+{
+	struct period_event *pe = info;
+	struct perf_event *event = pe->event;
+	struct perf_event_context *ctx = event->ctx;
+	u64 value = pe->value;
+	bool active;
 
-	if (copy_from_user(&value, arg, sizeof(value)))
-		return -EFAULT;
-
-	if (!value)
-		return -EINVAL;
-
-	raw_spin_lock_irq(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	if (event->attr.freq) {
-		if (value > sysctl_perf_event_sample_rate) {
-			ret = -EINVAL;
-			goto unlock;
-		}
-
 		event->attr.sample_freq = value;
 	} else {
 		event->attr.sample_period = value;
@@ -3998,11 +3991,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 		event->pmu->start(event, PERF_EF_RELOAD);
 		perf_pmu_enable(ctx->pmu);
 	}
-	raw_spin_unlock_irq(&ctx->lock);
+	raw_spin_unlock(&ctx->lock);
+
+	return 0;
+}
+
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
+{
+	struct period_event pe = { .event = event, };
+	struct perf_event_context *ctx = event->ctx;
+	struct task_struct *task;
+	u64 value;
+
+	if (!is_sampling_event(event))
+		return -EINVAL;
+
+	if (copy_from_user(&value, arg, sizeof(value)))
+		return -EFAULT;
+
+	if (!value)
+		return -EINVAL;
+
+	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+		return -EINVAL;
+
+	task = ctx->task;
+	pe.value = value;
+
+	if (!task) {
+		cpu_function_call(event->cpu, __perf_event_period, &pe);
+		return 0;
+	}
+
+retry:
+	if (!task_function_call(task, __perf_event_period, &pe))
+		return 0;
+
+	raw_spin_lock_irq(&ctx->lock);
+	if (ctx->is_active) {
+		raw_spin_unlock_irq(&ctx->lock);
+		task = ctx->task;
+		goto retry;
+	}
+
+	__perf_event_period(&pe);
+	raw_spin_unlock_irq(&ctx->lock);
 
-unlock:
-	return ret;
+	return 0;
 }
 
 static const struct file_operations perf_fops;
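The migration race: perf_event_period() used to update the period under ctx->lock from whatever CPU issued the ioctl, while the event could concurrently migrate to and start on another CPU, so the update could land in a context that was no longer current. The rewrite pushes the work through cpu_function_call()/task_function_call() so that __perf_event_period() runs where the context is active, retrying if the cross-call misses while the task context is active. The user-space interface is unchanged; for reference, a minimal call site (assumes fd is a sampling event from perf_event_open):

	#include <linux/perf_event.h>
	#include <sys/ioctl.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Change the sampling period of an already-open perf event fd. */
	static int set_period(int fd, uint64_t period)
	{
		/* The ioctl takes a pointer to the new period (u64). */
		if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &period) < 0) {
			perror("PERF_EVENT_IOC_PERIOD");
			return -1;
		}
		return 0;
	}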
@@ -4740,12 +4775,20 @@ static const struct file_operations perf_fops = {
  * to user-space before waking everybody up.
  */
 
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+	/* only the parent has fasync state */
+	if (event->parent)
+		event = event->parent;
+	return &event->fasync;
+}
+
 void perf_event_wakeup(struct perf_event *event)
 {
 	ring_buffer_wakeup(event);
 
 	if (event->pending_kill) {
-		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
+		kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
 		event->pending_kill = 0;
 	}
 }
@@ -6124,7 +6167,7 @@ static int __perf_event_overflow(struct perf_event *event,
 	else
 		perf_event_output(event, data, regs);
 
-	if (event->fasync && event->pending_kill) {
+	if (*perf_event_fasync(event) && event->pending_kill) {
 		event->pending_wakeup = 1;
 		irq_work_queue(&event->pending);
 	}
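The fasync fix addresses inherited events: fasync state is attached to the file, and only the parent event owns a file, so a child event's overflow used to test its own (always empty) event->fasync and never queue the wakeup. perf_event_fasync() redirects both the overflow-time check and kill_fasync() to the parent. For context, the user-space side that arms this path is plain fcntl-based SIGIO setup (standard API, not part of the patch):

	#include <fcntl.h>
	#include <unistd.h>

	/* Ask the kernel to send SIGIO to this process on perf fd wakeups. */
	static int setup_sigio(int fd)
	{
		if (fcntl(fd, F_SETOWN, getpid()) < 0)
			return -1;
		/* O_ASYNC is what registers the fasync entry this fix routes to. */
		return fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
	}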
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -559,11 +559,13 @@ static void __rb_free_aux(struct ring_buffer *rb)
 		rb->aux_priv = NULL;
 	}
 
-	for (pg = 0; pg < rb->aux_nr_pages; pg++)
-		rb_free_aux_page(rb, pg);
+	if (rb->aux_nr_pages) {
+		for (pg = 0; pg < rb->aux_nr_pages; pg++)
+			rb_free_aux_page(rb, pg);
 
-	kfree(rb->aux_pages);
-	rb->aux_nr_pages = 0;
+		kfree(rb->aux_pages);
+		rb->aux_nr_pages = 0;
+	}
 }
 
 void rb_free_aux(struct ring_buffer *rb)
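The AUX double-free occurred because __rb_free_aux() can be reached twice on teardown; the second pass re-walked aux_pages after they had already been freed. Gating the loop on aux_nr_pages and zeroing it afterwards makes the function idempotent. The same defensive shape in plain C, with invented names:

	#include <stdlib.h>

	struct aux_buf {
		void **pages;
		int    nr_pages;
	};

	/* Idempotent teardown: a second call finds nr_pages == 0 and does nothing. */
	static void aux_buf_free(struct aux_buf *b)
	{
		if (b->nr_pages) {
			for (int i = 0; i < b->nr_pages; i++)
				free(b->pages[i]);
			free(b->pages);
			b->pages = NULL;
			b->nr_pages = 0;
		}
	}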
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -638,7 +638,7 @@ ifndef DESTDIR
 prefix ?= $(HOME)
 endif
 bindir_relative = bin
-bindir = $(prefix)/$(bindir_relative)
+bindir = $(abspath $(prefix)/$(bindir_relative))
 mandir = share/man
 infodir = share/info
 perfexecdir = libexec/perf-core
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -85,7 +85,7 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
 	else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
 		update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
 	else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
-		update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
+		update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]);
 	else if (perf_stat_evsel__is(counter, TRANSACTION_START))
 		update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
 	else if (perf_stat_evsel__is(counter, ELISION_START))
@@ -398,20 +398,18 @@ void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
 			" #   %5.2f%% aborted cycles         ",
 			100.0 * ((total2-avg) / total));
 	} else if (perf_stat_evsel__is(evsel, TRANSACTION_START) &&
-		   avg > 0 &&
 		   runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
 		total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
 
-		if (total)
+		if (avg)
 			ratio = total / avg;
 
 		fprintf(out, " # %8.0f cycles / transaction   ", ratio);
 	} else if (perf_stat_evsel__is(evsel, ELISION_START) &&
-		   avg > 0 &&
 		   runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
 		total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
 
-		if (total)
+		if (avg)
 			ratio = total / avg;
 
 		fprintf(out, " # %8.0f cycles / elision       ", ratio);
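Two problems in one fix: CYCLES_IN_TX counts were being accumulated into the TRANSACTION_START bucket (clobbering it), and the ratio guarded the dividend (total) rather than the divisor (avg). The intended arithmetic as a standalone sketch (names invented; the real code reads these values out of the runtime_*_stats arrays):

	#include <stdio.h>

	/*
	 * cycles per transaction = avg(CYCLES_IN_TX) / avg(TRANSACTION_START).
	 * Guard the divisor; the old "if (total)" test protected nothing.
	 */
	static double cycles_per_transaction(double cycles_in_tx, double tx_starts)
	{
		double ratio = 0.0;

		if (tx_starts)
			ratio = cycles_in_tx / tx_starts;
		return ratio;
	}

	int main(void)
	{
		printf("%8.0f cycles / transaction\n",
		       cycles_per_transaction(1.2e9, 3.0e5));
		return 0;
	}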