Diffstat (limited to 'arch/powerpc/perf/imc-pmu.c')
-rw-r--r--  arch/powerpc/perf/imc-pmu.c | 178
1 file changed, 94 insertions(+), 84 deletions(-)
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index d7532e7b9ab5..6954636b16d1 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -40,6 +40,7 @@ static struct imc_pmu *core_imc_pmu;
/* Thread IMC data structures and variables */
static DEFINE_PER_CPU(u64 *, thread_imc_mem);
+static struct imc_pmu *thread_imc_pmu;
static int thread_imc_mem_size;
struct imc_pmu *imc_event_to_pmu(struct perf_event *event)
@@ -866,59 +867,6 @@ static int thread_imc_cpu_init(void)
ppc_thread_imc_cpu_offline);
}
-void thread_imc_pmu_sched_task(struct perf_event_context *ctx,
- bool sched_in)
-{
- int core_id;
- struct imc_pmu_ref *ref;
-
- if (!is_core_imc_mem_inited(smp_processor_id()))
- return;
-
- core_id = smp_processor_id() / threads_per_core;
- /*
- * imc pmus are enabled only when it is used.
- * See if this is triggered for the first time.
- * If yes, take the mutex lock and enable the counters.
- * If not, just increment the count in ref count struct.
- */
- ref = &core_imc_refc[core_id];
- if (!ref)
- return;
-
- if (sched_in) {
- mutex_lock(&ref->lock);
- if (ref->refc == 0) {
- if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
- get_hard_smp_processor_id(smp_processor_id()))) {
- mutex_unlock(&ref->lock);
- pr_err("thread-imc: Unable to start the counter\
- for core %d\n", core_id);
- return;
- }
- }
- ++ref->refc;
- mutex_unlock(&ref->lock);
- } else {
- mutex_lock(&ref->lock);
- ref->refc--;
- if (ref->refc == 0) {
- if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
- get_hard_smp_processor_id(smp_processor_id()))) {
- mutex_unlock(&ref->lock);
- pr_err("thread-imc: Unable to stop the counters\
- for core %d\n", core_id);
- return;
- }
- } else if (ref->refc < 0) {
- ref->refc = 0;
- }
- mutex_unlock(&ref->lock);
- }
-
- return;
-}
-
static int thread_imc_event_init(struct perf_event *event)
{
u32 config = event->attr.config;
@@ -1045,22 +993,70 @@ static int imc_event_add(struct perf_event *event, int flags)
static int thread_imc_event_add(struct perf_event *event, int flags)
{
+ int core_id;
+ struct imc_pmu_ref *ref;
+
if (flags & PERF_EF_START)
imc_event_start(event, flags);
- /* Enable the sched_task to start the engine */
- perf_sched_cb_inc(event->ctx->pmu);
+ if (!is_core_imc_mem_inited(smp_processor_id()))
+ return -EINVAL;
+
+ core_id = smp_processor_id() / threads_per_core;
+ /*
+ * imc pmus are enabled only when it is used.
+ * See if this is triggered for the first time.
+ * If yes, take the mutex lock and enable the counters.
+ * If not, just increment the count in ref count struct.
+ */
+ ref = &core_imc_refc[core_id];
+ if (!ref)
+ return -EINVAL;
+
+ mutex_lock(&ref->lock);
+ if (ref->refc == 0) {
+ if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
+ get_hard_smp_processor_id(smp_processor_id()))) {
+ mutex_unlock(&ref->lock);
+ pr_err("thread-imc: Unable to start the counter\
+ for core %d\n", core_id);
+ return -EINVAL;
+ }
+ }
+ ++ref->refc;
+ mutex_unlock(&ref->lock);
return 0;
}
static void thread_imc_event_del(struct perf_event *event, int flags)
{
+
+ int core_id;
+ struct imc_pmu_ref *ref;
+
/*
* Take a snapshot and calculate the delta and update
* the event counter values.
*/
imc_event_update(event);
- perf_sched_cb_dec(event->ctx->pmu);
+
+ core_id = smp_processor_id() / threads_per_core;
+ ref = &core_imc_refc[core_id];
+
+ mutex_lock(&ref->lock);
+ ref->refc--;
+ if (ref->refc == 0) {
+ if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
+ get_hard_smp_processor_id(smp_processor_id()))) {
+ mutex_unlock(&ref->lock);
+ pr_err("thread-imc: Unable to stop the counters\
+ for core %d\n", core_id);
+ return;
+ }
+ } else if (ref->refc < 0) {
+ ref->refc = 0;
+ }
+ mutex_unlock(&ref->lock);
}
/* update_pmu_ops : Populate the appropriate operations for "pmu" */
@@ -1086,7 +1082,6 @@ static int update_pmu_ops(struct imc_pmu *pmu)
break;
case IMC_DOMAIN_THREAD:
pmu->pmu.event_init = thread_imc_event_init;
- pmu->pmu.sched_task = thread_imc_pmu_sched_task;
pmu->pmu.add = thread_imc_event_add;
pmu->pmu.del = thread_imc_event_del;
pmu->pmu.start_txn = thread_imc_pmu_start_txn;
@@ -1146,14 +1141,14 @@ static int init_nest_pmu_ref(void)
static void cleanup_all_core_imc_memory(void)
{
- int i, nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core);
+ int i, nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
struct imc_mem_info *ptr = core_imc_pmu->mem_info;
int size = core_imc_pmu->counter_mem_size;
/* mem_info will never be NULL */
for (i = 0; i < nr_cores; i++) {
if (ptr[i].vbase)
- free_pages((u64)ptr->vbase, get_order(size));
+ free_pages((u64)ptr[i].vbase, get_order(size));
}
kfree(ptr);
@@ -1191,7 +1186,6 @@ static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
if (pmu_ptr->attr_groups[IMC_EVENT_ATTR])
kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
- kfree(pmu_ptr);
}
/*
@@ -1208,6 +1202,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
kfree(nest_imc_refc);
kfree(per_nest_pmu_arr);
+ per_nest_pmu_arr = NULL;
}
if (nest_pmus > 0)
@@ -1228,6 +1223,16 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
}
}
+/*
+ * Function to unregister thread-imc if core-imc
+ * is not registered.
+ */
+void unregister_thread_imc(void)
+{
+ imc_common_cpuhp_mem_free(thread_imc_pmu);
+ imc_common_mem_free(thread_imc_pmu);
+ perf_pmu_unregister(&thread_imc_pmu->pmu);
+}
/*
* imc_mem_init : Function to support memory allocation for core imc.
@@ -1236,7 +1241,7 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
int pmu_index)
{
const char *s;
- int nr_cores, cpu, res;
+ int nr_cores, cpu, res = -ENOMEM;
if (of_property_read_string(parent, "name", &s))
return -ENODEV;
@@ -1246,7 +1251,7 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
/* Update the pmu name */
pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s_imc", "nest_", s);
if (!pmu_ptr->pmu.name)
- return -ENOMEM;
+ goto err;
/* Needed for hotplug/migration */
if (!per_nest_pmu_arr) {
@@ -1254,7 +1259,7 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
sizeof(struct imc_pmu *),
GFP_KERNEL);
if (!per_nest_pmu_arr)
- return -ENOMEM;
+ goto err;
}
per_nest_pmu_arr[pmu_index] = pmu_ptr;
break;
@@ -1262,21 +1267,21 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
/* Update the pmu name */
pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
if (!pmu_ptr->pmu.name)
- return -ENOMEM;
+ goto err;
- nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core);
+ nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info),
GFP_KERNEL);
if (!pmu_ptr->mem_info)
- return -ENOMEM;
+ goto err;
core_imc_refc = kcalloc(nr_cores, sizeof(struct imc_pmu_ref),
GFP_KERNEL);
if (!core_imc_refc) {
kfree(pmu_ptr->mem_info);
- return -ENOMEM;
+ goto err;
}
core_imc_pmu = pmu_ptr;
@@ -1285,23 +1290,26 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
/* Update the pmu name */
pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
if (!pmu_ptr->pmu.name)
- return -ENOMEM;
+ goto err;
thread_imc_mem_size = pmu_ptr->counter_mem_size;
for_each_online_cpu(cpu) {
res = thread_imc_mem_alloc(cpu, pmu_ptr->counter_mem_size);
if (res) {
cleanup_all_thread_imc_memory();
- return res;
+ goto err;
}
}
+ thread_imc_pmu = pmu_ptr;
break;
default:
return -EINVAL;
}
return 0;
+err:
+ return res;
}
/*
@@ -1319,10 +1327,8 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
int ret;
ret = imc_mem_init(pmu_ptr, parent, pmu_idx);
- if (ret) {
- imc_common_mem_free(pmu_ptr);
- return ret;
- }
+ if (ret)
+ goto err_free_mem;
switch (pmu_ptr->domain) {
case IMC_DOMAIN_NEST:
@@ -1337,7 +1343,9 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
ret = init_nest_pmu_ref();
if (ret) {
mutex_unlock(&nest_init_lock);
- goto err_free;
+ kfree(per_nest_pmu_arr);
+ per_nest_pmu_arr = NULL;
+ goto err_free_mem;
}
/* Register for cpu hotplug notification. */
ret = nest_pmu_cpumask_init();
@@ -1345,7 +1353,8 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
mutex_unlock(&nest_init_lock);
kfree(nest_imc_refc);
kfree(per_nest_pmu_arr);
- goto err_free;
+ per_nest_pmu_arr = NULL;
+ goto err_free_mem;
}
}
nest_pmus++;
@@ -1355,7 +1364,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
ret = core_imc_pmu_cpumask_init();
if (ret) {
cleanup_all_core_imc_memory();
- return ret;
+ goto err_free_mem;
}
break;
@@ -1363,33 +1372,34 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
ret = thread_imc_cpu_init();
if (ret) {
cleanup_all_thread_imc_memory();
- return ret;
+ goto err_free_mem;
}
break;
default:
- return -1; /* Unknown domain */
+ return -EINVAL; /* Unknown domain */
}
ret = update_events_in_group(parent, pmu_ptr);
if (ret)
- goto err_free;
+ goto err_free_cpuhp_mem;
ret = update_pmu_ops(pmu_ptr);
if (ret)
- goto err_free;
+ goto err_free_cpuhp_mem;
ret = perf_pmu_register(&pmu_ptr->pmu, pmu_ptr->pmu.name, -1);
if (ret)
- goto err_free;
+ goto err_free_cpuhp_mem;
- pr_info("%s performance monitor hardware support registered\n",
+ pr_debug("%s performance monitor hardware support registered\n",
pmu_ptr->pmu.name);
return 0;
-err_free:
- imc_common_mem_free(pmu_ptr);
+err_free_cpuhp_mem:
imc_common_cpuhp_mem_free(pmu_ptr);
+err_free_mem:
+ imc_common_mem_free(pmu_ptr);
return ret;
}