[PATCH V2 05/15] power/cppc_cpufreq: validate lcore role in cpufreq API
Huisong Li
lihuisong at huawei.com
Thu May 7 04:42:20 CEST 2026
Currently, the cppc_cpufreq driver only checks if the lcore_id is
within the RTE_MAX_LCORE range, but fails to verify if the core is
actually active or managed by the application. This lacks sufficient
validation.
This patch adds an lcore role check to the cpufreq-related APIs.
Although service cores do not typically invoke these APIs, they may
operate in polling states where power management is required.
To maintain compatibility with applications using service cores, the
validation logic now explicitly allows both ROLE_RTE and ROLE_SERVICE.
Signed-off-by: Huisong Li <lihuisong at huawei.com>
---
drivers/power/cppc/cppc_cpufreq.c | 56 +++++++++++++++----------------
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/drivers/power/cppc/cppc_cpufreq.c b/drivers/power/cppc/cppc_cpufreq.c
index 3cd4165c83..7e1298110a 100644
--- a/drivers/power/cppc/cppc_cpufreq.c
+++ b/drivers/power/cppc/cppc_cpufreq.c
@@ -346,9 +346,8 @@ power_cppc_cpufreq_init(unsigned int lcore_id)
return -1;
}
- if (lcore_id >= RTE_MAX_LCORE) {
- POWER_LOG(ERR, "Lcore id %u can not exceeds %u",
- lcore_id, RTE_MAX_LCORE - 1U);
+ if (!rte_lcore_is_eal_managed(lcore_id)) {
+ POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id);
return -1;
}
@@ -429,11 +428,11 @@ power_cppc_cpufreq_exit(unsigned int lcore_id)
struct cppc_power_info *pi;
uint32_t exp_state;
- if (lcore_id >= RTE_MAX_LCORE) {
- POWER_LOG(ERR, "Lcore id %u can not exceeds %u",
- lcore_id, RTE_MAX_LCORE - 1U);
+ if (!rte_lcore_is_eal_managed(lcore_id)) {
+ POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id);
return -1;
}
+
pi = &lcore_power_info[lcore_id];
exp_state = POWER_USED;
/* The power in use state works as a guard variable between
@@ -479,8 +478,8 @@ power_cppc_cpufreq_freqs(unsigned int lcore_id, uint32_t *freqs, uint32_t num)
{
struct cppc_power_info *pi;
- if (lcore_id >= RTE_MAX_LCORE) {
- POWER_LOG(ERR, "Invalid lcore ID");
+ if (!rte_lcore_is_eal_managed(lcore_id)) {
+ POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id);
return 0;
}
@@ -502,8 +501,8 @@ power_cppc_cpufreq_freqs(unsigned int lcore_id, uint32_t *freqs, uint32_t num)
uint32_t
power_cppc_cpufreq_get_freq(unsigned int lcore_id)
{
- if (lcore_id >= RTE_MAX_LCORE) {
- POWER_LOG(ERR, "Invalid lcore ID");
+ if (!rte_lcore_is_eal_managed(lcore_id)) {
+ POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id);
return RTE_POWER_INVALID_FREQ_INDEX;
}
@@ -513,8 +512,8 @@ power_cppc_cpufreq_get_freq(unsigned int lcore_id)
int
power_cppc_cpufreq_set_freq(unsigned int lcore_id, uint32_t index)
{
- if (lcore_id >= RTE_MAX_LCORE) {
- POWER_LOG(ERR, "Invalid lcore ID");
+ if (!rte_lcore_is_eal_managed(lcore_id)) {
+ POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id);
return -1;
}
@@ -526,8 +525,8 @@ power_cppc_cpufreq_freq_down(unsigned int lcore_id)
{
struct cppc_power_info *pi;
- if (lcore_id >= RTE_MAX_LCORE) {
- POWER_LOG(ERR, "Invalid lcore ID");
+ if (!rte_lcore_is_eal_managed(lcore_id)) {
+ POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id);
return -1;
}
@@ -544,8 +543,8 @@ power_cppc_cpufreq_freq_up(unsigned int lcore_id)
{
struct cppc_power_info *pi;
- if (lcore_id >= RTE_MAX_LCORE) {
- POWER_LOG(ERR, "Invalid lcore ID");
+ if (!rte_lcore_is_eal_managed(lcore_id)) {
+ POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id);
return -1;
}
@@ -561,8 +560,8 @@ power_cppc_cpufreq_freq_up(unsigned int lcore_id)
int
power_cppc_cpufreq_freq_max(unsigned int lcore_id)
{
- if (lcore_id >= RTE_MAX_LCORE) {
- POWER_LOG(ERR, "Invalid lcore ID");
+ if (!rte_lcore_is_eal_managed(lcore_id)) {
+ POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id);
return -1;
}
@@ -585,8 +584,8 @@ power_cppc_cpufreq_freq_min(unsigned int lcore_id)
{
struct cppc_power_info *pi;
- if (lcore_id >= RTE_MAX_LCORE) {
- POWER_LOG(ERR, "Invalid lcore ID");
+ if (!rte_lcore_is_eal_managed(lcore_id)) {
+ POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id);
return -1;
}
@@ -601,8 +600,8 @@ power_cppc_turbo_status(unsigned int lcore_id)
{
struct cppc_power_info *pi;
- if (lcore_id >= RTE_MAX_LCORE) {
- POWER_LOG(ERR, "Invalid lcore ID");
+ if (!rte_lcore_is_eal_managed(lcore_id)) {
+ POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id);
return -1;
}
@@ -616,8 +615,8 @@ power_cppc_enable_turbo(unsigned int lcore_id)
{
struct cppc_power_info *pi;
- if (lcore_id >= RTE_MAX_LCORE) {
- POWER_LOG(ERR, "Invalid lcore ID");
+ if (!rte_lcore_is_eal_managed(lcore_id)) {
+ POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id);
return -1;
}
@@ -652,8 +651,8 @@ power_cppc_disable_turbo(unsigned int lcore_id)
{
struct cppc_power_info *pi;
- if (lcore_id >= RTE_MAX_LCORE) {
- POWER_LOG(ERR, "Invalid lcore ID");
+ if (!rte_lcore_is_eal_managed(lcore_id)) {
+ POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id);
return -1;
}
@@ -680,10 +679,11 @@ power_cppc_get_capabilities(unsigned int lcore_id,
{
struct cppc_power_info *pi;
- if (lcore_id >= RTE_MAX_LCORE) {
- POWER_LOG(ERR, "Invalid lcore ID");
+ if (!rte_lcore_is_eal_managed(lcore_id)) {
+ POWER_LOG(ERR, "lcore id %u is invalid.", lcore_id);
return -1;
}
+
if (caps == NULL) {
POWER_LOG(ERR, "Invalid argument");
return -1;
--
2.33.0
More information about the dev
mailing list