Diffstat (limited to 'drivers/perf/arm_pmu.c')
 drivers/perf/arm_pmu.c | 36 ++++++++++--------------------------
 1 file changed, 10 insertions(+), 26 deletions(-)
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 3f07df5a7e95..9b593f985805 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -514,9 +514,6 @@ static int armpmu_event_init(struct perf_event *event)
 	if (has_branch_stack(event))
 		return -EOPNOTSUPP;
 
-	if (armpmu->map_event(event) == -ENOENT)
-		return -ENOENT;
-
 	return __hw_perf_event_init(event);
 }
 
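The mapping check removed above is redundant rather than lost: __hw_perf_event_init() performs the same map_event() lookup itself and fails the initialisation when the event has no mapping. A minimal sketch of that path, assuming the usual shape of this function in arm_pmu.c (abridged, not verbatim):

	static int __hw_perf_event_init(struct perf_event *event)
	{
		struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
		int mapping;

		/* The same lookup the deleted armpmu_event_init() check did. */
		mapping = armpmu->map_event(event);
		if (mapping < 0) {
			pr_debug("event %x:%llx not supported\n",
				 event->attr.type, event->attr.config);
			return mapping;
		}

		/* ... counter and config setup elided ... */
		return 0;
	}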
@@ -550,15 +547,14 @@ static void armpmu_disable(struct pmu *pmu)
  * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
  * the same microarchitecture.
  */
-static int armpmu_filter_match(struct perf_event *event)
+static bool armpmu_filter(struct pmu *pmu, int cpu)
 {
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	unsigned int cpu = smp_processor_id();
-	int ret;
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
+	bool ret;
 
 	ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus);
-	if (ret && armpmu->filter_match)
-		return armpmu->filter_match(event);
+	if (ret && armpmu->filter)
+		return armpmu->filter(pmu, cpu);
 
 	return ret;
 }
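With the new signature, armpmu_filter() answers "can this PMU count on this CPU?" for the core directly, instead of being asked per-event through filter_match. A rough sketch of the core-side consumer, assuming a helper of this shape in kernel/events/core.c (hypothetical, for illustration only):

	/* Hypothetical: an event stays schedulable on this CPU if its PMU
	 * has no filter, or its filter accepts the current CPU. */
	static bool pmu_filter_match(struct perf_event *event)
	{
		struct pmu *pmu = event->pmu;

		if (!pmu->filter)
			return true;

		return pmu->filter(pmu, smp_processor_id());
	}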
@@ -861,16 +857,16 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 				    &cpu_pmu->node);
 }
 
-static struct arm_pmu *__armpmu_alloc(gfp_t flags)
+struct arm_pmu *armpmu_alloc(void)
 {
 	struct arm_pmu *pmu;
 	int cpu;
 
-	pmu = kzalloc(sizeof(*pmu), flags);
+	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
 	if (!pmu)
 		goto out;
 
-	pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags);
+	pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, GFP_KERNEL);
 	if (!pmu->hw_events) {
 		pr_info("failed to allocate per-cpu PMU data.\n");
 		goto out_free_pmu;
@@ -885,14 +881,13 @@ static struct arm_pmu *__armpmu_alloc(gfp_t flags)
 		.start		= armpmu_start,
 		.stop		= armpmu_stop,
 		.read		= armpmu_read,
-		.filter_match	= armpmu_filter_match,
+		.filter		= armpmu_filter,
 		.attr_groups	= pmu->attr_groups,
 		/*
 		 * This is a CPU PMU potentially in a heterogeneous
 		 * configuration (e.g. big.LITTLE). This is not an uncore PMU,
 		 * and we have taken ctx sharing into account (e.g. with our
-		 * pmu::filter_match callback and pmu::event_init group
-		 * validation).
+		 * pmu::filter callback and pmu::event_init group validation).
 		 */
 		.capabilities	= PERF_PMU_CAP_HETEROGENEOUS_CPUS | PERF_PMU_CAP_EXTENDED_REGS,
 	};
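A back-end that needs more than the supported_cpus test can still install its own hook: armpmu_filter() above falls through to armpmu->filter, which now takes the same (pmu, cpu) pair as the core callback. A hypothetical driver-level filter (the driver and its policy are made up for illustration):

	/* Hypothetical: restrict scheduling to the first supported CPU,
	 * e.g. for a counter bank shared across the cluster. */
	static bool my_pmu_filter(struct pmu *pmu, int cpu)
	{
		struct arm_pmu *armpmu = to_arm_pmu(pmu);

		return cpu == cpumask_first(&armpmu->supported_cpus);
	}

Such a driver would assign cpu_pmu->filter = my_pmu_filter; before registering the PMU.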
@@ -916,17 +911,6 @@ out:
 	return NULL;
 }
 
-struct arm_pmu *armpmu_alloc(void)
-{
-	return __armpmu_alloc(GFP_KERNEL);
-}
-
-struct arm_pmu *armpmu_alloc_atomic(void)
-{
-	return __armpmu_alloc(GFP_ATOMIC);
-}
-
-
 void armpmu_free(struct arm_pmu *pmu)
 {
 	free_percpu(pmu->hw_events);
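With the GFP_ATOMIC variant gone, armpmu_alloc() may sleep, so every remaining caller must run in process context (e.g. a probe routine). A hypothetical caller, with made-up names, to show the intended usage:

	/* Hypothetical probe: process context, so the sleeping
	 * GFP_KERNEL allocation inside armpmu_alloc() is fine. */
	static int my_pmu_device_probe(struct platform_device *pdev)
	{
		struct arm_pmu *pmu = armpmu_alloc();

		if (!pmu)
			return -ENOMEM;

		platform_set_drvdata(pdev, pmu);
		return 0;
	}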