Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
 "This tree includes various fixes"

Ingo really needs to improve on the whole "explain git pull" part.
"Various fixes" indeed.

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/hwpb: Invoke __perf_event_disable() if interrupts are already disabled
  perf/x86: Enable Intel Cedarview Atom suppport
  perf_event: Switch to internal refcount, fix race with close()
  oprofile, s390: Fix uninitialized memory access when writing to oprofilefs
  perf/x86: Fix microcode revision check for SNB-PEBS
commit 7ef6e97380
@@ -169,7 +169,7 @@ static ssize_t hw_interval_write(struct file *file, char const __user *buf,
 	if (*offset)
 		return -EINVAL;
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
-	if (retval)
+	if (retval <= 0)
 		return retval;
 	if (val < oprofile_min_interval)
 		oprofile_hw_interval = oprofile_min_interval;

@@ -212,7 +212,7 @@ static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf,
 		return -EINVAL;
 
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
-	if (retval)
+	if (retval <= 0)
 		return retval;
 	if (val != 0)
 		return -EINVAL;

@@ -243,7 +243,7 @@ static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf,
 		return -EINVAL;
 
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
-	if (retval)
+	if (retval <= 0)
 		return retval;
 
 	if (val != 0 && val != 1)

@@ -278,7 +278,7 @@ static ssize_t hwsampler_user_write(struct file *file, char const __user *buf,
 		return -EINVAL;
 
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
-	if (retval)
+	if (retval <= 0)
 		return retval;
 
 	if (val != 0 && val != 1)

@@ -317,7 +317,7 @@ static ssize_t timer_enabled_write(struct file *file, char const __user *buf,
 		return -EINVAL;
 
 	retval = oprofilefs_ulong_from_user(&val, buf, count);
-	if (retval)
+	if (retval <= 0)
 		return retval;
 
 	if (val != 0 && val != 1)
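
All five oprofilefs write handlers above share one pattern, and one bug:
oprofilefs_ulong_from_user() leaves *val untouched for a zero-length write, and
the old "if (retval)" test only caught errors, so a zero-byte write fell through
and the handler consumed an uninitialized val. The companion hunk of the same
fix (in drivers/oprofile/oprofilefs.c, not shown in this excerpt) makes the
helper return the number of bytes consumed on success, so "if (retval <= 0)"
now bails out for both errors and empty writes. A minimal userspace model of
the bug and the fix; every name here is an illustrative stand-in, not the
kernel's:

/*
 * ulong_from_user() mimics oprofilefs_ulong_from_user() after the fix:
 * bytes consumed on success, 0 for a zero-length write (leaving *val
 * untouched), negative errno on error.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

static ssize_t ulong_from_user(unsigned long *val, const char *buf, size_t count)
{
	char tmp[32];

	if (count == 0)
		return 0;		/* *val deliberately left untouched */
	if (count >= sizeof(tmp))
		return -EINVAL;
	memcpy(tmp, buf, count);
	tmp[count] = '\0';
	*val = strtoul(tmp, NULL, 0);
	return (ssize_t)count;
}

static ssize_t interval_write(const char *buf, size_t count)
{
	unsigned long val;	/* uninitialized until ulong_from_user() succeeds */
	ssize_t retval = ulong_from_user(&val, buf, count);

	/* The old test was "if (retval)": a zero-length write returned 0,
	 * fell through, and read the uninitialized val below. */
	if (retval <= 0)
		return retval;

	printf("interval set to %lu\n", val);
	return retval;
}

int main(void)
{
	interval_write("100", 3);	/* prints: interval set to 100 */
	interval_write("", 0);		/* previously read garbage; now a no-op */
	return 0;
}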

@@ -2008,6 +2008,7 @@ __init int intel_pmu_init(void)
 		break;
 
 	case 28: /* Atom */
+	case 54: /* Cedariew */
 		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 

@@ -686,7 +686,8 @@ void intel_pmu_lbr_init_atom(void)
 	 * to have an operational LBR which can freeze
 	 * on PMU interrupt
 	 */
-	if (boot_cpu_data.x86_mask < 10) {
+	if (boot_cpu_data.x86_model == 28
+	    && boot_cpu_data.x86_mask < 10) {
 		pr_cont("LBR disabled due to erratum");
 		return;
 	}
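
The two x86 hunks above are the Cedarview enablement: model 54 reuses the
model-28 Atom event tables (the "Cedariew" spelling is upstream's). That means
intel_pmu_lbr_init_atom() now also runs on Cedarview, so the LBR erratum check
can no longer test the stepping alone; only early-stepping model-28 parts carry
the LBR-freeze erratum. A compilable sketch of the gating logic, assuming the
3.x-era cpuinfo fields in which x86_mask held the stepping:

#include <stdio.h>

struct cpuinfo { unsigned char x86_model, x86_mask; };

static int lbr_usable(const struct cpuinfo *c)
{
	/* Only early-stepping model-28 Atoms have the LBR-freeze erratum;
	 * Cedarview (model 54) must not be caught by the stepping test alone. */
	return !(c->x86_model == 28 && c->x86_mask < 10);
}

int main(void)
{
	struct cpuinfo pineview = { 28, 2 }, cedarview = { 54, 1 };

	printf("model 28 stepping 2: LBR %s\n", lbr_usable(&pineview) ? "on" : "off");
	printf("model 54 stepping 1: LBR %s\n", lbr_usable(&cedarview) ? "on" : "off");
	return 0;
}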

@@ -225,6 +225,9 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
 	if (do_microcode_update(buf, len) == 0)
 		ret = (ssize_t)len;
 
+	if (ret > 0)
+		perf_check_microcode();
+
 	mutex_unlock(&microcode_mutex);
 	put_online_cpus();
 
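
This hunk is the SNB-PEBS fix: PEBS on Sandy Bridge is only trusted above a
minimum microcode revision, so a successful late microcode load has to re-run
the perf check that otherwise happens only at boot, which is what the added
perf_check_microcode() call does. A toy model of the recheck-after-update
pattern; the revision numbers are made up for illustration:

#include <stdbool.h>
#include <stdio.h>

static unsigned int ucode_rev = 0x17;	/* illustrative revisions, not real */
static bool pebs_ok;

static void check_microcode(void)
{
	/* The feature is only safe past some minimum firmware revision. */
	pebs_ok = ucode_rev >= 0x28;
	printf("rev %#x: PEBS %s\n", ucode_rev, pebs_ok ? "enabled" : "disabled");
}

static int do_microcode_update(unsigned int new_rev)
{
	ucode_rev = new_rev;
	return 0;			/* 0 == success, as in the hunk above */
}

int main(void)
{
	check_microcode();		/* boot-time check: disabled */
	if (do_microcode_update(0x29) == 0)
		check_microcode();	/* re-check after update: enabled */
	return 0;
}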

@@ -926,7 +926,7 @@ struct perf_event {
 	struct hw_perf_event		hw;
 
 	struct perf_event_context	*ctx;
-	struct file			*filp;
+	atomic_long_t			refcount;
 
 	/*
 	 * These accumulate total time (in nanoseconds) that children

@@ -1296,6 +1296,7 @@ extern int perf_swevent_get_recursion_context(void);
 extern void perf_swevent_put_recursion_context(int rctx);
 extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
+extern int __perf_event_disable(void *info);
 extern void perf_event_task_tick(void);
 #else
 static inline void

@@ -1334,6 +1335,7 @@ static inline int perf_swevent_get_recursion_context(void)	{ return -1; }
 static inline void perf_swevent_put_recursion_context(int rctx)	{ }
 static inline void perf_event_enable(struct perf_event *event)		{ }
 static inline void perf_event_disable(struct perf_event *event)	{ }
+static inline int __perf_event_disable(void *info)			{ return -1; }
 static inline void perf_event_task_tick(void)				{ }
 #endif
 
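
The two header hunks mirror each other: when CONFIG_PERF_EVENTS is off, every
exported function gets a static inline stub so callers compile unchanged, and
the newly exported __perf_event_disable() follows suit with a stub that returns
-1, as the hunk shows. A minimal illustration of the config-stub idiom outside
the kernel; FEATURE_ON is a made-up stand-in for CONFIG_PERF_EVENTS:

#include <stdio.h>

#ifdef FEATURE_ON
static int feature_disable(void *info)		/* the real implementation */
{
	printf("disabling %s\n", (const char *)info);
	return 0;
}
#else
/* Stub: same signature, trivial body, so call sites need no #ifdefs. */
static inline int feature_disable(void *info) { (void)info; return -1; }
#endif

int main(void)
{
	return feature_disable("event") == 0 ? 0 : 1;
}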

@@ -1253,7 +1253,7 @@ static void perf_remove_from_context(struct perf_event *event)
 /*
  * Cross CPU call to disable a performance event
  */
-static int __perf_event_disable(void *info)
+int __perf_event_disable(void *info)
 {
 	struct perf_event *event = info;
 	struct perf_event_context *ctx = event->ctx;

@@ -2935,12 +2935,12 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 /*
  * Called when the last reference to the file is gone.
  */
-static int perf_release(struct inode *inode, struct file *file)
+static void put_event(struct perf_event *event)
 {
-	struct perf_event *event = file->private_data;
 	struct task_struct *owner;
 
-	file->private_data = NULL;
+	if (!atomic_long_dec_and_test(&event->refcount))
+		return;
 
 	rcu_read_lock();
 	owner = ACCESS_ONCE(event->owner);

@@ -2975,7 +2975,13 @@ static int perf_release(struct inode *inode, struct file *file)
 		put_task_struct(owner);
 	}
 
-	return perf_event_release_kernel(event);
+	perf_event_release_kernel(event);
+}
+
+static int perf_release(struct inode *inode, struct file *file)
+{
+	put_event(file->private_data);
+	return 0;
 }
 
 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
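
The hunks above are the heart of the close() race fix: an event's lifetime no
longer hangs off its struct file (the old event->filp and f_count games) but
off an internal refcount that perf_event_alloc() sets to 1 (see the hunk at
-5950 below), put_event() drops, and the inheritance path acquires only via
inc_not_zero, so a parent that is already being torn down cannot be revived.
A self-contained userspace model of that scheme, using C11 atomics in place of
the kernel's atomic_long_t:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct event {
	atomic_long refcount;
	const char *name;
};

static struct event *event_alloc(const char *name)
{
	struct event *e = malloc(sizeof(*e));

	atomic_init(&e->refcount, 1);	/* cf. perf_event_alloc() */
	e->name = name;
	return e;
}

static void put_event(struct event *e)
{
	if (atomic_fetch_sub(&e->refcount, 1) != 1)
		return;			/* not the last reference */
	printf("freeing %s\n", e->name);
	free(e);
}

/* Take a reference only if the object is still live (inc_not_zero). */
static int get_event_unless_zero(struct event *e)
{
	long v = atomic_load(&e->refcount);

	while (v != 0) {
		if (atomic_compare_exchange_weak(&e->refcount, &v, v + 1))
			return 1;
	}
	return 0;			/* already on its way to destruction */
}

int main(void)
{
	struct event *parent = event_alloc("parent");

	if (get_event_unless_zero(parent))	/* cf. inherit_event() */
		put_event(parent);		/* child exit drops its ref */
	put_event(parent);			/* last reference: frees */
	return 0;
}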

@@ -3227,7 +3233,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 
 static const struct file_operations perf_fops;
 
-static struct perf_event *perf_fget_light(int fd, int *fput_needed)
+static struct file *perf_fget_light(int fd, int *fput_needed)
 {
 	struct file *file;
 

@@ -3241,7 +3247,7 @@ static struct perf_event *perf_fget_light(int fd, int *fput_needed)
 		return ERR_PTR(-EBADF);
 	}
 
-	return file->private_data;
+	return file;
 }
 
 static int perf_event_set_output(struct perf_event *event,

@@ -3273,19 +3279,21 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 	case PERF_EVENT_IOC_SET_OUTPUT:
 	{
+		struct file *output_file = NULL;
 		struct perf_event *output_event = NULL;
 		int fput_needed = 0;
 		int ret;
 
 		if (arg != -1) {
-			output_event = perf_fget_light(arg, &fput_needed);
-			if (IS_ERR(output_event))
-				return PTR_ERR(output_event);
+			output_file = perf_fget_light(arg, &fput_needed);
+			if (IS_ERR(output_file))
+				return PTR_ERR(output_file);
+			output_event = output_file->private_data;
 		}
 
 		ret = perf_event_set_output(event, output_event);
 		if (output_event)
-			fput_light(output_event->filp, fput_needed);
+			fput_light(output_file, fput_needed);
 
 		return ret;
 	}
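
perf_fget_light() now returns the struct file itself, so callers (this ioctl,
and perf_event_open below) borrow the file reference for the duration of the
call and drop it with fput_light(), instead of pinning the event through
output_event->filp. A toy model of that borrowed-reference shape, with a fake
fd table standing in for the kernel's; nothing here is kernel API:

#include <stdio.h>

struct file { void *private_data; };
struct event { const char *name; };

static struct event ev = { "cycles" };
static struct file fd_table[8] = { [3] = { &ev } };	/* fd 3 is "perf" */

/* cf. perf_fget_light(): hand back the file, not its private_data, so the
 * caller decides how long the reference (here: the slot) stays borrowed. */
static struct file *fget_light(int fd)
{
	if (fd < 0 || fd >= 8 || !fd_table[fd].private_data)
		return NULL;			/* kernel: ERR_PTR(-EBADF) */
	return &fd_table[fd];
}

int main(void)
{
	struct file *f = fget_light(3);

	if (f) {
		struct event *e = f->private_data;	/* borrow for the call */
		printf("output redirected to %s\n", e->name);
	}						/* kernel: fput_light() */
	return 0;
}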

@@ -5950,6 +5958,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
 	mutex_init(&event->mmap_mutex);
 
+	atomic_long_set(&event->refcount, 1);
 	event->cpu		= cpu;
 	event->attr		= *attr;
 	event->group_leader	= group_leader;

@@ -6260,12 +6269,12 @@ SYSCALL_DEFINE5(perf_event_open,
 		return event_fd;
 
 	if (group_fd != -1) {
-		group_leader = perf_fget_light(group_fd, &fput_needed);
-		if (IS_ERR(group_leader)) {
-			err = PTR_ERR(group_leader);
+		group_file = perf_fget_light(group_fd, &fput_needed);
+		if (IS_ERR(group_file)) {
+			err = PTR_ERR(group_file);
 			goto err_fd;
 		}
-		group_file = group_leader->filp;
+		group_leader = group_file->private_data;
 		if (flags & PERF_FLAG_FD_OUTPUT)
 			output_event = group_leader;
 		if (flags & PERF_FLAG_FD_NO_GROUP)

@@ -6402,7 +6411,6 @@ SYSCALL_DEFINE5(perf_event_open,
 		put_ctx(gctx);
 	}
 
-	event->filp = event_file;
 	WARN_ON_ONCE(ctx->parent_ctx);
 	mutex_lock(&ctx->mutex);
 

@@ -6496,7 +6504,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 		goto err_free;
 	}
 
-	event->filp = NULL;
 	WARN_ON_ONCE(ctx->parent_ctx);
 	mutex_lock(&ctx->mutex);
 	perf_install_in_context(ctx, event, cpu);

@@ -6578,7 +6585,7 @@ static void sync_child_event(struct perf_event *child_event,
 	 * Release the parent event, if this was the last
 	 * reference to it.
 	 */
-	fput(parent_event->filp);
+	put_event(parent_event);
 }
 
 static void

@@ -6654,9 +6661,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	 *
 	 *   __perf_event_exit_task()
 	 *     sync_child_event()
-	 *       fput(parent_event->filp)
-	 *         perf_release()
-	 *           mutex_lock(&ctx->mutex)
+	 *       put_event()
+	 *         mutex_lock(&ctx->mutex)
 	 *
 	 * But since its the parent context it won't be the same instance.
 	 */

@@ -6724,7 +6730,7 @@ static void perf_free_event(struct perf_event *event,
 	list_del_init(&event->child_list);
 	mutex_unlock(&parent->child_mutex);
 
-	fput(parent->filp);
+	put_event(parent);
 
 	perf_group_detach(event);
 	list_del_event(event, ctx);

@@ -6804,6 +6810,12 @@ inherit_event(struct perf_event *parent_event,
 					   NULL, NULL);
 	if (IS_ERR(child_event))
 		return child_event;
+
+	if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
+		free_event(child_event);
+		return NULL;
+	}
+
 	get_ctx(child_ctx);
 
 	/*

@@ -6844,14 +6856,6 @@ inherit_event(struct perf_event *parent_event,
 	add_event_to_ctx(child_event, child_ctx);
 	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
 
-	/*
-	 * Get a reference to the parent filp - we will fput it
-	 * when the child event exits. This is safe to do because
-	 * we are in the parent and we know that the filp still
-	 * exists and has a nonzero count:
-	 */
-	atomic_long_inc(&parent_event->filp->f_count);
-
 	/*
 	 * Link this into the parent event's child list
 	 */

@@ -453,7 +453,16 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att
 	int old_type = bp->attr.bp_type;
 	int err = 0;
 
-	perf_event_disable(bp);
+	/*
+	 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
+	 * will not be possible to raise IPIs that invoke __perf_event_disable.
+	 * So call the function directly after making sure we are targeting the
+	 * current task.
+	 */
+	if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
+		__perf_event_disable(bp);
+	else
+		perf_event_disable(bp);
 
 	bp->attr.bp_addr = attr->bp_addr;
 	bp->attr.bp_type = attr->bp_type;
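
This last hunk is the shortlog's "__perf_event_disable() if interrupts are
already disabled" fix: perf_event_disable() normally reaches the event's CPU
via an IPI, and raising an IPI with IRQs off risks deadlock, so when the
breakpoint belongs to the current task the cross-CPU callback is simply run in
place, which is why core.c exports __perf_event_disable() above. A userspace
model of the dispatch rule; every name here is a stand-in, not kernel API:

#include <stdbool.h>
#include <stdio.h>

static bool irqs_off;			/* stands in for irqs_disabled() */

static int __event_disable(void *info)	/* normally runs via cross-CPU call */
{
	printf("disabled %s in place\n", (const char *)info);
	return 0;
}

static void event_disable_via_ipi(void *info)
{
	/* Raising an IPI with interrupts disabled can deadlock: the sender
	 * may spin waiting for an ack the target can never deliver. */
	printf("disabled %s via cross-CPU call\n", (const char *)info);
}

static void modify_breakpoint(void *bp, bool target_is_current)
{
	if (irqs_off && target_is_current)
		__event_disable(bp);	/* no IPI needed for ourselves */
	else
		event_disable_via_ipi(bp);
}

int main(void)
{
	modify_breakpoint("bp0", true);		/* IRQs on: IPI path */
	irqs_off = true;
	modify_breakpoint("bp0", true);		/* IRQs off: direct call */
	return 0;
}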