perf: Fix PERF_EVENT_IOC_PERIOD migration race
I ran the perf fuzzer, which triggered some WARN()s due to trying to
stop/restart an event on the wrong CPU.
Use the normal IPI pattern to ensure we run the code on the correct CPU.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: bad7192b84 ("perf: Fix PERF_EVENT_IOC_PERIOD to force-reset the period")
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent ee9397a6fb
commit c7999c6f3f
1 changed file with 55 additions and 20 deletions
@@ -3958,28 +3958,21 @@ static void perf_event_for_each(struct perf_event *event,
 		perf_event_for_each_child(sibling, func);
 }
 
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
-{
-	struct perf_event_context *ctx = event->ctx;
-	int ret = 0, active;
+struct period_event {
+	struct perf_event *event;
 	u64 value;
+};
 
-	if (!is_sampling_event(event))
-		return -EINVAL;
-
-	if (copy_from_user(&value, arg, sizeof(value)))
-		return -EFAULT;
-
-	if (!value)
-		return -EINVAL;
+static int __perf_event_period(void *info)
+{
+	struct period_event *pe = info;
+	struct perf_event *event = pe->event;
+	struct perf_event_context *ctx = event->ctx;
+	u64 value = pe->value;
+	bool active;
 
-	raw_spin_lock_irq(&ctx->lock);
+	raw_spin_lock(&ctx->lock);
 	if (event->attr.freq) {
-		if (value > sysctl_perf_event_sample_rate) {
-			ret = -EINVAL;
-			goto unlock;
-		}
-
 		event->attr.sample_freq = value;
 	} else {
 		event->attr.sample_period = value;
@@ -3998,11 +3991,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 		event->pmu->start(event, PERF_EF_RELOAD);
 		perf_pmu_enable(ctx->pmu);
 	}
+	raw_spin_unlock(&ctx->lock);
 
-unlock:
+	return 0;
+}
+
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
+{
+	struct period_event pe = { .event = event, };
+	struct perf_event_context *ctx = event->ctx;
+	struct task_struct *task;
+	u64 value;
+
+	if (!is_sampling_event(event))
+		return -EINVAL;
+
+	if (copy_from_user(&value, arg, sizeof(value)))
+		return -EFAULT;
+
+	if (!value)
+		return -EINVAL;
+
+	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+		return -EINVAL;
+
+	task = ctx->task;
+	pe.value = value;
+
+	if (!task) {
+		cpu_function_call(event->cpu, __perf_event_period, &pe);
+		return 0;
+	}
+
+retry:
+	if (!task_function_call(task, __perf_event_period, &pe))
+		return 0;
+
+	raw_spin_lock_irq(&ctx->lock);
+	if (ctx->is_active) {
+		raw_spin_unlock_irq(&ctx->lock);
+		task = ctx->task;
+		goto retry;
+	}
+
+	__perf_event_period(&pe);
 	raw_spin_unlock_irq(&ctx->lock);
 
-	return ret;
+	return 0;
 }
 
 static const struct file_operations perf_fops;
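For reference, below is a minimal user-space sketch (not part of the commit) of the ioctl path this patch fixes: a sampling event is opened with perf_event_open() and its period is later re-programmed via PERF_EVENT_IOC_PERIOD while the event may be running on another CPU. The event type and period values are arbitrary.

/*
 * Hypothetical user-space sketch, not part of this patch: open a sampling
 * event on the current task and re-program its period via
 * PERF_EVENT_IOC_PERIOD.  Event type and period values are arbitrary.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t period;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 1000000;
	attr.disabled = 1;

	/* Sampling event on the current task, any CPU. */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	/* Re-program the period while the event may be active elsewhere. */
	period = 100000;
	if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &period))
		perror("PERF_EVENT_IOC_PERIOD");

	close(fd);
	return 0;
}

With the patch applied, such an update is routed through cpu_function_call()/task_function_call() so __perf_event_period() runs on the CPU where the event is active, rather than stopping/restarting the event from the wrong CPU.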