coresight: tmc-etr: Refactor function tmc_etr_setup_perf_buf()
Refactor tmc_etr_setup_perf_buf() so that it deals only with the high-level etr_perf_buffer, leaving the allocation of the backend buffer (i.e. etr_buf) to another function. That way the backend buffer allocation function can decide whether to reuse an existing buffer (CPU-wide trace scenarios) or simply create a new one.

Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
Tested-by: Leo Yan <leo.yan@linaro.org>
Tested-by: Robert Walker <robert.walker@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 855ab61c16
parent a0f08a6a9f
1 changed file with 30 additions and 9 deletions
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -1160,29 +1160,24 @@ out:
 }
 
 /*
- * tmc_etr_setup_perf_buf: Allocate ETR buffer for use by perf.
+ * alloc_etr_buf: Allocate ETR buffer for use by perf.
  * The size of the hardware buffer is dependent on the size configured
  * via sysfs and the perf ring buffer size. We prefer to allocate the
  * largest possible size, scaling down the size by half until it
  * reaches a minimum limit (1M), beyond which we give up.
  */
-static struct etr_perf_buffer *
-tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
-		       int nr_pages, void **pages, bool snapshot)
+static struct etr_buf *
+alloc_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
+	      int nr_pages, void **pages, bool snapshot)
 {
 	int node, cpu = event->cpu;
 	struct etr_buf *etr_buf;
-	struct etr_perf_buffer *etr_perf;
 	unsigned long size;
 
 	if (cpu == -1)
 		cpu = smp_processor_id();
 	node = cpu_to_node(cpu);
 
-	etr_perf = kzalloc_node(sizeof(*etr_perf), GFP_KERNEL, node);
-	if (!etr_perf)
-		return ERR_PTR(-ENOMEM);
-
 	/*
 	 * Try to match the perf ring buffer size if it is larger
 	 * than the size requested via sysfs.
@@ -1206,6 +1201,32 @@ tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
 		size /= 2;
 	} while (size >= TMC_ETR_PERF_MIN_BUF_SIZE);
 
+	return ERR_PTR(-ENOMEM);
+
+done:
+	return etr_buf;
+}
+
+static struct etr_perf_buffer *
+tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
+		       int nr_pages, void **pages, bool snapshot)
+{
+	int node, cpu = event->cpu;
+	struct etr_buf *etr_buf;
+	struct etr_perf_buffer *etr_perf;
+
+	if (cpu == -1)
+		cpu = smp_processor_id();
+	node = cpu_to_node(cpu);
+
+	etr_perf = kzalloc_node(sizeof(*etr_perf), GFP_KERNEL, node);
+	if (!etr_perf)
+		return ERR_PTR(-ENOMEM);
+
+	etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
+	if (!IS_ERR(etr_buf))
+		goto done;
+
 	kfree(etr_perf);
 	return ERR_PTR(-ENOMEM);
 
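To illustrate the direction the commit message points at (letting the backend allocation path hand back an existing buffer for CPU-wide trace sessions instead of always allocating a fresh one), here is a minimal, standalone sketch of that decision. It is plain user-space C, not kernel code, and every name in it (sink_model, cached_buf, reuse_or_alloc_buf, ...) is a hypothetical stand-in rather than part of this commit or of the coresight driver API.

/*
 * Standalone user-space model of the buffer-reuse idea: a per-sink cache
 * holds one buffer; CPU-wide sessions share it via a refcount, while a
 * per-thread session always gets a private buffer. All names here are
 * hypothetical and only illustrate the concept hinted at in the commit log.
 */
#include <stdio.h>
#include <stdlib.h>

struct etr_buf_model {
	unsigned long size;
	int refcount;
};

struct sink_model {
	unsigned long configured_size;
	struct etr_buf_model *cached_buf;	/* shared by CPU-wide sessions */
};

static struct etr_buf_model *alloc_buf(unsigned long size)
{
	struct etr_buf_model *buf = calloc(1, sizeof(*buf));

	if (!buf)
		return NULL;
	buf->size = size;
	buf->refcount = 1;
	return buf;
}

/*
 * cpu == -1 models a per-thread session; cpu >= 0 models one event of a
 * CPU-wide session, where every per-CPU event wants the same sink buffer.
 */
static struct etr_buf_model *reuse_or_alloc_buf(struct sink_model *sink, int cpu)
{
	struct etr_buf_model *buf;

	if (cpu >= 0 && sink->cached_buf) {
		sink->cached_buf->refcount++;	/* reuse the existing buffer */
		return sink->cached_buf;
	}

	buf = alloc_buf(sink->configured_size);
	if (buf && cpu >= 0)
		sink->cached_buf = buf;		/* first CPU-wide event caches it */
	return buf;
}

int main(void)
{
	struct sink_model sink = { .configured_size = 1024 * 1024, .cached_buf = NULL };
	struct etr_buf_model *a = reuse_or_alloc_buf(&sink, 0);	/* CPU 0: allocates */
	struct etr_buf_model *b = reuse_or_alloc_buf(&sink, 1);	/* CPU 1: reuses a */
	struct etr_buf_model *c = reuse_or_alloc_buf(&sink, -1);	/* per-thread: private */

	if (!a || !b || !c)
		return 1;

	printf("shared: %d refcount: %d private: %d\n",
	       a == b, a->refcount, c != a);

	free(c);
	free(a);	/* a and b point at the same allocation */
	return 0;
}

In this model the first per-CPU event of a CPU-wide session caches its buffer on the sink and later events take a reference to it, while a per-thread event (cpu == -1) always gets a private buffer. The commit above only carves out the allocation helper so that a decision like this has a single place to live.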