author     Kan Liang <kan.liang@linux.intel.com>    2024-10-10 07:26:03 -0700
committer  Peter Zijlstra <peterz@infradead.org>    2024-10-30 22:42:18 +0100
commit     9b99d65c0bb4e37013bc2ec9c32b78c5751ff952 (patch)
tree       2b1e27d1d6c68a7fd6962aa3690f41bc82945570
parent     4d756095d3994cb41393817dc696b458938a6bd0 (diff)
perf/x86/rapl: Move the pmu allocation out of CPU hotplug
There is extra code in the CPU hotplug function to allocate rapl pmus, which makes it hard to apply the generic PMU hotplug support. Since the rapl pmus can be allocated upfront for each die/socket, this code does not need to live in the CPU hotplug function. Move it to init_rapl_pmus() and allocate a PMU for each possible die/socket.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Oliver Sang <oliver.sang@intel.com>
Link: https://lore.kernel.org/r/20241010142604.770192-1-kan.liang@linux.intel.com
-rw-r--r--  arch/x86/events/rapl.c  44
1 file changed, 30 insertions, 14 deletions
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index a481a939862e..86cbee1a8bf0 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -602,19 +602,8 @@ static int rapl_cpu_online(unsigned int cpu)
         struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
         int target;
 
-        if (!pmu) {
-                pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
-                if (!pmu)
-                        return -ENOMEM;
-
-                raw_spin_lock_init(&pmu->lock);
-                INIT_LIST_HEAD(&pmu->active_list);
-                pmu->pmu = &rapl_pmus->pmu;
-                pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
-                rapl_hrtimer_init(pmu);
-
-                rapl_pmus->pmus[rapl_pmu_idx] = pmu;
-        }
+        if (!pmu)
+                return -ENOMEM;
 
         /*
          * Check if there is an online cpu in the package which collects rapl
@@ -707,6 +696,32 @@ static const struct attribute_group *rapl_attr_update[] = {
         NULL,
 };
 
+static int __init init_rapl_pmu(void)
+{
+        struct rapl_pmu *pmu;
+        int idx;
+
+        for (idx = 0; idx < rapl_pmus->nr_rapl_pmu; idx++) {
+                pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
+                if (!pmu)
+                        goto free;
+
+                raw_spin_lock_init(&pmu->lock);
+                INIT_LIST_HEAD(&pmu->active_list);
+                pmu->pmu = &rapl_pmus->pmu;
+                pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+                rapl_hrtimer_init(pmu);
+
+                rapl_pmus->pmus[idx] = pmu;
+        }
+
+        return 0;
+free:
+        for (; idx > 0; idx--)
+                kfree(rapl_pmus->pmus[idx - 1]);
+        return -ENOMEM;
+}
+
 static int __init init_rapl_pmus(void)
 {
         int nr_rapl_pmu = topology_max_packages();
@@ -730,7 +745,8 @@ static int __init init_rapl_pmus(void)
         rapl_pmus->pmu.read = rapl_pmu_event_read;
         rapl_pmus->pmu.module = THIS_MODULE;
         rapl_pmus->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
-        return 0;
+
+        return init_rapl_pmu();
 }
 
 static struct rapl_model model_snb = {
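
For illustration, below is a minimal standalone C sketch of the pattern the patch applies, not the kernel code itself: every per-package object is allocated upfront at init time, with the same unwind-on-failure shape as init_rapl_pmu(), so the CPU online path only looks the object up and can no longer fail with -ENOMEM. All names here (demo_pmu, demo_init, demo_cpu_online, NR_PACKAGES) are invented for the example.

#include <stdio.h>
#include <stdlib.h>

#define NR_PACKAGES 4                   /* stand-in for topology_max_packages() */

struct demo_pmu {
        int package_id;                 /* which package this object serves */
};

static struct demo_pmu *demo_pmus[NR_PACKAGES];

/* Allocate one object per possible package upfront; unwind on failure. */
static int demo_init(void)
{
        int idx;

        for (idx = 0; idx < NR_PACKAGES; idx++) {
                demo_pmus[idx] = calloc(1, sizeof(*demo_pmus[idx]));
                if (!demo_pmus[idx])
                        goto free;
                demo_pmus[idx]->package_id = idx;
        }
        return 0;

free:
        /* Free only what was already allocated, newest first. */
        for (; idx > 0; idx--)
                free(demo_pmus[idx - 1]);
        return -1;
}

/* The "CPU online" path merely checks that its per-package object exists. */
static int demo_cpu_online(int cpu, int package)
{
        if (!demo_pmus[package])
                return -1;
        printf("cpu %d uses the pmu of package %d\n", cpu, package);
        return 0;
}

int main(void)
{
        if (demo_init())
                return 1;
        demo_cpu_online(0, 0);
        demo_cpu_online(7, 3);
        return 0;
}

Because the allocation happens once in the init path, the hotplug callback is reduced to bookkeeping, which is what makes it possible to apply the generic PMU hotplug support mentioned in the changelog.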