[v2] x86/resctrl: Fix WARN in get_domain_from_cpu()
Commit Message
reset_all_ctrls() and resctrl_arch_update_domains() use on_each_cpu_mask()
to call rdt_ctrl_update() on potentially one CPU from each domain.
But this means rdt_ctrl_update() needs to figure out which domain to
apply changes to. Doing so requires a search of all domains in a resource,
which can only be done safely if cpus_lock is held. Both callers do hold
this lock, but there isn't a way for a function called on another CPU
via IPI to verify this.
Fix by adding the target domain to the msr_param structure and passing an
array with CDP_NUM_TYPES entries. Then call smp_call_function_single()
separately for each domain.
Change the low-level cat_wrmsr(), mba_wrmsr_intel(), and mba_wrmsr_amd()
functions to take just an msr_param structure, since it contains the
rdt_resource and rdt_domain information.
Signed-off-by: Tony Luck <tony.luck@intel.com>
---
Changes since v1:
* Avoid double IPI to the same core when CDP is enabled.
* Don't pass the resource and domain to functions that can
get these from the msr_param structure.
* Clean up some fir tree (variable declaration order) issues in functions
that I changed.
arch/x86/kernel/cpu/resctrl/internal.h | 4 +-
arch/x86/kernel/cpu/resctrl/core.c | 56 +++++++++------------
arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 59 +++++++----------------
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 23 ++++-----
4 files changed, 50 insertions(+), 92 deletions(-)
Comments
Hi Tony,
On 2/21/2024 11:31 AM, Tony Luck wrote:
> reset_all_ctrls() and resctrl_arch_update_domains() use on_each_cpu_mask()
> to call rdt_ctrl_update() on potentially one CPU from each domain.
>
> But this means rdt_ctrl_update() needs to figure out which domain to
> apply changes to. Doing so requires a search of all domains in a resource,
> which can only be done safely if cpus_lock is held. Both callers do hold
> this lock, but there isn't a way for a function called on another CPU
> via IPI to verify this.
>
> Fix by adding the target domain to the msr_param structure and passing an
> array with CDP_NUM_TYPES entries. Then calling for each domain separately
> using smp_call_function_single()
This work contains no changes to get_domain_from_cpu(). I expect the WARN
within it to be removed as intended with [1] and then this work can build
on that without urgency. As I understand, to support the stated goal of this
work, I expect get_domain_from_cpu() in the end to not have any WARN or
IS_ENABLED checks, but just a lockdep_assert_cpus_held().
Do you have different expectations?
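That is, something like this minimal sketch (untested, just to illustrate
the end state I expect, assuming the list walk itself stays unchanged):

struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
{
	struct rdt_domain *d;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	list_for_each_entry(d, &r->domains, list) {
		/* Find the domain that contains this CPU */
		if (cpumask_test_cpu(cpu, &d->cpu_mask))
			return d;
	}

	return NULL;
}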
> Change the low level cat_wrmsr(), mba_wrmsr_intel(), and mba_wrmsr_amd()
> functions to just take a msr_param structure since it contains the
> rdt_resource and rdt_domain information.
Could moving the rdt_domain into msr_param be done in a separate patch?
...
> diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> index 7997b47743a2..09f6e624f1bb 100644
> --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> @@ -272,22 +272,6 @@ static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
> }
> }
>
> -static bool apply_config(struct rdt_hw_domain *hw_dom,
> - struct resctrl_staged_config *cfg, u32 idx,
> - cpumask_var_t cpu_mask)
> -{
> - struct rdt_domain *dom = &hw_dom->d_resctrl;
> -
> - if (cfg->new_ctrl != hw_dom->ctrl_val[idx]) {
> - cpumask_set_cpu(cpumask_any(&dom->cpu_mask), cpu_mask);
> - hw_dom->ctrl_val[idx] = cfg->new_ctrl;
> -
> - return true;
> - }
> -
> - return false;
> -}
> -
> int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
> u32 closid, enum resctrl_conf_type t, u32 cfg_val)
> {
> @@ -304,59 +288,50 @@ int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
> msr_param.res = r;
> msr_param.low = idx;
> msr_param.high = idx + 1;
> - hw_res->msr_update(d, &msr_param, r);
> + hw_res->msr_update(&msr_param);
>
Is this missing setting the domain in msr_param?
> return 0;
> }
>
> int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
> {
> + struct msr_param msr_param[CDP_NUM_TYPES];
> struct resctrl_staged_config *cfg;
> struct rdt_hw_domain *hw_dom;
> - struct msr_param msr_param;
> enum resctrl_conf_type t;
> - cpumask_var_t cpu_mask;
> struct rdt_domain *d;
> + bool need_update;
> + int cpu;
> u32 idx;
>
> /* Walking r->domains, ensure it can't race with cpuhp */
> lockdep_assert_cpus_held();
>
> - if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
> - return -ENOMEM;
> -
> - msr_param.res = NULL;
> + memset(msr_param, 0, sizeof(msr_param));
> list_for_each_entry(d, &r->domains, list) {
> hw_dom = resctrl_to_arch_dom(d);
> + need_update = false;
> for (t = 0; t < CDP_NUM_TYPES; t++) {
> cfg = &hw_dom->d_resctrl.staged_config[t];
> if (!cfg->have_new_ctrl)
> continue;
>
> idx = get_config_index(closid, t);
> - if (!apply_config(hw_dom, cfg, idx, cpu_mask))
> + if (cfg->new_ctrl == hw_dom->ctrl_val[idx])
> continue;
> -
> - if (!msr_param.res) {
> - msr_param.low = idx;
> - msr_param.high = msr_param.low + 1;
> - msr_param.res = r;
> - } else {
> - msr_param.low = min(msr_param.low, idx);
> - msr_param.high = max(msr_param.high, idx + 1);
> - }
> + hw_dom->ctrl_val[idx] = cfg->new_ctrl;
> + cpu = cpumask_any(&d->cpu_mask);
> +
> + msr_param[t].low = idx;
> + msr_param[t].high = msr_param[t].low + 1;
> + msr_param[t].res = r;
> + msr_param[t].dom = d;
> + need_update = true;
> }
> + if (need_update)
> + smp_call_function_single(cpu, rdt_ctrl_update, &msr_param, 1);
It is not clear to me why it is necessary to pass this additional data. Why not
just use the original mechanism of letting the low and high of msr_param span the
multiple indices that need updating? There can still be a "need_update" but it
can be set when msr_param gets its first data. Any other index needing updating
can just update low/high and a single msr_param can be used.
Reinette
[1] https://lore.kernel.org/lkml/20240221122306.633273-1-james.morse@arm.com/
On Wed, Feb 21, 2024 at 02:59:43PM -0800, Reinette Chatre wrote:
> Hi Tony,
>
> On 2/21/2024 11:31 AM, Tony Luck wrote:
> > reset_all_ctrls() and resctrl_arch_update_domains() use on_each_cpu_mask()
> > to call rdt_ctrl_update() on potentially one CPU from each domain.
> >
> > But this means rdt_ctrl_update() needs to figure out which domain to
> > apply changes to. Doing so requires a search of all domains in a resource,
> > which can only be done safely if cpus_lock is held. Both callers do hold
> > this lock, but there isn't a way for a function called on another CPU
> > via IPI to verify this.
> >
> > Fix by adding the target domain to the msr_param structure and passing an
> > array with CDP_NUM_TYPES entries. Then calling for each domain separately
> > using smp_call_function_single()
>
> This work contains no changes to get_domain_from_cpu(). I expect the WARN
> within it to be removed as intended with [1] and then this work can build
> on that without urgency. As I understand, to support the stated goal of this
> work, I expect get_domain_from_cpu() in the end to not have any WARN or
> IS_ENABLED checks, but just a lockdep_assert_cpus_held().
>
> Do you have different expectations?
Same expectations. Boris should apply the simple fix (delete the WARN
that is giving a false positive) for this current cycle.
If there is support for my patch (with changes/fixes you point out
below), then it could be added in the future and get_domain_from_cpu()
can use lockdep_assert_cpus_held().
> > Change the low level cat_wrmsr(), mba_wrmsr_intel(), and mba_wrmsr_amd()
> > functions to just take a msr_param structure since it contains the
> > rdt_resource and rdt_domain information.
>
> Could moving the rdt_domain into msr_param be done in a separate patch?
I can break it into more pieces if there is enthusiasm to apply it.
> ...
>
> > diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> > index 7997b47743a2..09f6e624f1bb 100644
> > --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> > +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> > @@ -272,22 +272,6 @@ static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
> > }
> > }
> >
> > -static bool apply_config(struct rdt_hw_domain *hw_dom,
> > - struct resctrl_staged_config *cfg, u32 idx,
> > - cpumask_var_t cpu_mask)
> > -{
> > - struct rdt_domain *dom = &hw_dom->d_resctrl;
> > -
> > - if (cfg->new_ctrl != hw_dom->ctrl_val[idx]) {
> > - cpumask_set_cpu(cpumask_any(&dom->cpu_mask), cpu_mask);
> > - hw_dom->ctrl_val[idx] = cfg->new_ctrl;
> > -
> > - return true;
> > - }
> > -
> > - return false;
> > -}
> > -
> > int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
> > u32 closid, enum resctrl_conf_type t, u32 cfg_val)
> > {
> > @@ -304,59 +288,50 @@ int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
> > msr_param.res = r;
> > msr_param.low = idx;
> > msr_param.high = idx + 1;
> > - hw_res->msr_update(d, &msr_param, r);
> > + hw_res->msr_update(&msr_param);
> >
>
> Is this missing setting the domain in msr_param?
Indeed yes.
> > return 0;
> > }
> >
> > int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
> > {
> > + struct msr_param msr_param[CDP_NUM_TYPES];
> > struct resctrl_staged_config *cfg;
> > struct rdt_hw_domain *hw_dom;
> > - struct msr_param msr_param;
> > enum resctrl_conf_type t;
> > - cpumask_var_t cpu_mask;
> > struct rdt_domain *d;
> > + bool need_update;
> > + int cpu;
> > u32 idx;
> >
> > /* Walking r->domains, ensure it can't race with cpuhp */
> > lockdep_assert_cpus_held();
> >
> > - if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
> > - return -ENOMEM;
> > -
> > - msr_param.res = NULL;
> > + memset(msr_param, 0, sizeof(msr_param));
> > list_for_each_entry(d, &r->domains, list) {
> > hw_dom = resctrl_to_arch_dom(d);
> > + need_update = false;
> > for (t = 0; t < CDP_NUM_TYPES; t++) {
> > cfg = &hw_dom->d_resctrl.staged_config[t];
> > if (!cfg->have_new_ctrl)
> > continue;
> >
> > idx = get_config_index(closid, t);
> > - if (!apply_config(hw_dom, cfg, idx, cpu_mask))
> > + if (cfg->new_ctrl == hw_dom->ctrl_val[idx])
> > continue;
> > -
> > - if (!msr_param.res) {
> > - msr_param.low = idx;
> > - msr_param.high = msr_param.low + 1;
> > - msr_param.res = r;
> > - } else {
> > - msr_param.low = min(msr_param.low, idx);
> > - msr_param.high = max(msr_param.high, idx + 1);
> > - }
> > + hw_dom->ctrl_val[idx] = cfg->new_ctrl;
> > + cpu = cpumask_any(&d->cpu_mask);
> > +
> > + msr_param[t].low = idx;
> > + msr_param[t].high = msr_param[t].low + 1;
> > + msr_param[t].res = r;
> > + msr_param[t].dom = d;
> > + need_update = true;
> > }
> > + if (need_update)
> > + smp_call_function_single(cpu, rdt_ctrl_update, &msr_param, 1);
>
> It is not clear to me why it is needed to pass this additional data. Why not
> just use the original mechanism of letting the low and high of msr_param span the
> multiple indices that need updating? There can still be a "need_update" but it
> can be set when msr_param gets its first data. Any other index needing updating
> can just update low/high and a single msr_param can be used.
For some reason this morning I thought that the domain needed to be
different. It isn't, so keeping the code that just adjusts the range
of MSRs will work just fine.
The "need_update" variable isn't required. I will move the
msr_param.res = NULL;
inside the for-each-domain loop, and use a non-NULL res value to
decide whether to IPI a CPU.
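Roughly like this (untested sketch, so details may still shift in v3):

	list_for_each_entry(d, &r->domains, list) {
		hw_dom = resctrl_to_arch_dom(d);
		msr_param.res = NULL;
		for (t = 0; t < CDP_NUM_TYPES; t++) {
			cfg = &hw_dom->d_resctrl.staged_config[t];
			if (!cfg->have_new_ctrl)
				continue;

			idx = get_config_index(closid, t);
			if (cfg->new_ctrl == hw_dom->ctrl_val[idx])
				continue;
			hw_dom->ctrl_val[idx] = cfg->new_ctrl;

			/* First change in this domain sets up msr_param */
			if (!msr_param.res) {
				msr_param.low = idx;
				msr_param.high = idx + 1;
				msr_param.res = r;
				msr_param.dom = d;
			} else {
				msr_param.low = min(msr_param.low, idx);
				msr_param.high = max(msr_param.high, idx + 1);
			}
		}
		/* Non-NULL res means at least one MSR needs updating */
		if (msr_param.res)
			smp_call_function_single(cpumask_any(&d->cpu_mask),
						 rdt_ctrl_update, &msr_param, 1);
	}

That also means rdt_ctrl_update() can go back to taking a single
msr_param instead of the CDP_NUM_TYPES array.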
-Tony
Hi Tony,
Is sending new versions of a patch series in reply to the previous
version a new custom? I am finding the SNC thread [1] to have become
a maze and now this thread is headed in the same direction. My understanding
of the custom (supported by [2]) is to send new versions as a new thread.
This thread complicates things even more by mixing versions of different
features in the same email thread.
Reinette
[1] https://lore.kernel.org/lkml/20230713163207.219710-1-tony.luck@intel.com/
[2] https://www.kernel.org/doc/html/latest/process/submitting-patches.html#explicit-in-reply-to-headers
> Is sending new versions of patch series in response to the previous
> versions a new custom? I am finding the SNC thread [1] to have become
> a maze and now this thread is headed in the same direction. My understanding
> of custom (supported by [2]) is to send new versions as a new thread.
> This thread even complicates it more by mixing versions of different
> features in the same email thread.
Reinette,
Not new for me. I've always (tried) to link everything together.
But thanks for this link:
[2] https://www.kernel.org/doc/html/latest/process/submitting-patches.html#explicit-in-reply-to-headers
I see that this isn't desired. I'll switch over to adding a Link: URL in the cover
letter going forward.[1]
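Something like this in the cover letter (the message-id here is just a
placeholder to show the form):

Link: https://lore.kernel.org/r/<previous-version-message-id>/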
-Tony
[1] I'm going to need a v4 if only to s/Simply/Simplify/ in the subject of part 2 :-(
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -383,6 +383,7 @@ static inline struct rdt_hw_domain *resctrl_to_arch_dom(struct rdt_domain *r)
*/
struct msr_param {
struct rdt_resource *res;
+ struct rdt_domain *dom;
u32 low;
u32 high;
};
@@ -442,8 +443,7 @@ struct rdt_hw_resource {
struct rdt_resource r_resctrl;
u32 num_closid;
unsigned int msr_base;
- void (*msr_update) (struct rdt_domain *d, struct msr_param *m,
- struct rdt_resource *r);
+ void (*msr_update)(struct msr_param *m);
unsigned int mon_scale;
unsigned int mbm_width;
unsigned int mbm_cfg_mask;
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -56,14 +56,9 @@ int max_name_width, max_data_width;
*/
bool rdt_alloc_capable;
-static void
-mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
- struct rdt_resource *r);
-static void
-cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
-static void
-mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
- struct rdt_resource *r);
+static void mba_wrmsr_intel(struct msr_param *m);
+static void cat_wrmsr(struct msr_param *m);
+static void mba_wrmsr_amd(struct msr_param *m);
#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].r_resctrl.domains)
@@ -309,12 +304,11 @@ static void rdt_get_cdp_l2_config(void)
rdt_get_cdp_config(RDT_RESOURCE_L2);
}
-static void
-mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
+static void mba_wrmsr_amd(struct msr_param *m)
{
+ struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
+ struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(m->dom);
unsigned int i;
- struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
- struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
for (i = m->low; i < m->high; i++)
wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
@@ -334,25 +328,22 @@ static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
return r->default_ctrl;
}
-static void
-mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
- struct rdt_resource *r)
+static void mba_wrmsr_intel(struct msr_param *m)
{
+ struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
+ struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(m->dom);
unsigned int i;
- struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
- struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
/* Write the delay values for mba. */
for (i = m->low; i < m->high; i++)
- wrmsrl(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], r));
+ wrmsrl(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], m->res));
}
-static void
-cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
+static void cat_wrmsr(struct msr_param *m)
{
+ struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
+ struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(m->dom);
unsigned int i;
- struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
- struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
for (i = m->low; i < m->high; i++)
wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
@@ -387,19 +378,14 @@ u32 resctrl_arch_get_num_closid(struct rdt_resource *r)
void rdt_ctrl_update(void *arg)
{
+ struct rdt_hw_resource *hw_res;
struct msr_param *m = arg;
- struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
- struct rdt_resource *r = m->res;
- int cpu = smp_processor_id();
- struct rdt_domain *d;
+ int t;
- d = get_domain_from_cpu(cpu, r);
- if (d) {
- hw_res->msr_update(d, m, r);
- return;
- }
- pr_warn_once("cpu %d not found in any domain for resource %s\n",
- cpu, r->name);
+ hw_res = resctrl_to_arch_res(m->res);
+ for (t = 0; t < CDP_NUM_TYPES; t++)
+ if (m[t].dom)
+ hw_res->msr_update(m + t);
}
/*
@@ -472,9 +458,11 @@ static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
hw_dom->ctrl_val = dc;
setup_default_ctrlval(r, dc);
+ m.res = r;
+ m.dom = d;
m.low = 0;
m.high = hw_res->num_closid;
- hw_res->msr_update(d, &m, r);
+ hw_res->msr_update(&m);
return 0;
}
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index 7997b47743a2..09f6e624f1bb 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -272,22 +272,6 @@ static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
}
}
-static bool apply_config(struct rdt_hw_domain *hw_dom,
- struct resctrl_staged_config *cfg, u32 idx,
- cpumask_var_t cpu_mask)
-{
- struct rdt_domain *dom = &hw_dom->d_resctrl;
-
- if (cfg->new_ctrl != hw_dom->ctrl_val[idx]) {
- cpumask_set_cpu(cpumask_any(&dom->cpu_mask), cpu_mask);
- hw_dom->ctrl_val[idx] = cfg->new_ctrl;
-
- return true;
- }
-
- return false;
-}
-
int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
u32 closid, enum resctrl_conf_type t, u32 cfg_val)
{
@@ -304,59 +288,50 @@ int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
msr_param.res = r;
msr_param.low = idx;
msr_param.high = idx + 1;
- hw_res->msr_update(d, &msr_param, r);
+ hw_res->msr_update(&msr_param);
return 0;
}
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
{
+ struct msr_param msr_param[CDP_NUM_TYPES];
struct resctrl_staged_config *cfg;
struct rdt_hw_domain *hw_dom;
- struct msr_param msr_param;
enum resctrl_conf_type t;
- cpumask_var_t cpu_mask;
struct rdt_domain *d;
+ bool need_update;
+ int cpu;
u32 idx;
/* Walking r->domains, ensure it can't race with cpuhp */
lockdep_assert_cpus_held();
- if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
- return -ENOMEM;
-
- msr_param.res = NULL;
+ memset(msr_param, 0, sizeof(msr_param));
list_for_each_entry(d, &r->domains, list) {
hw_dom = resctrl_to_arch_dom(d);
+ need_update = false;
for (t = 0; t < CDP_NUM_TYPES; t++) {
cfg = &hw_dom->d_resctrl.staged_config[t];
if (!cfg->have_new_ctrl)
continue;
idx = get_config_index(closid, t);
- if (!apply_config(hw_dom, cfg, idx, cpu_mask))
+ if (cfg->new_ctrl == hw_dom->ctrl_val[idx])
continue;
-
- if (!msr_param.res) {
- msr_param.low = idx;
- msr_param.high = msr_param.low + 1;
- msr_param.res = r;
- } else {
- msr_param.low = min(msr_param.low, idx);
- msr_param.high = max(msr_param.high, idx + 1);
- }
+ hw_dom->ctrl_val[idx] = cfg->new_ctrl;
+ cpu = cpumask_any(&d->cpu_mask);
+
+ msr_param[t].low = idx;
+ msr_param[t].high = msr_param[t].low + 1;
+ msr_param[t].res = r;
+ msr_param[t].dom = d;
+ need_update = true;
}
+ if (need_update)
+ smp_call_function_single(cpu, rdt_ctrl_update, &msr_param, 1);
}
- if (cpumask_empty(cpu_mask))
- goto done;
-
- /* Update resource control msr on all the CPUs. */
- on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1);
-
-done:
- free_cpumask_var(cpu_mask);
-
return 0;
}
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -2811,21 +2811,19 @@ static int rdt_init_fs_context(struct fs_context *fc)
static int reset_all_ctrls(struct rdt_resource *r)
{
struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
+ struct msr_param msr_param[CDP_NUM_TYPES];
struct rdt_hw_domain *hw_dom;
- struct msr_param msr_param;
- cpumask_var_t cpu_mask;
struct rdt_domain *d;
+ int cpu;
int i;
/* Walking r->domains, ensure it can't race with cpuhp */
lockdep_assert_cpus_held();
- if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
- return -ENOMEM;
-
- msr_param.res = r;
- msr_param.low = 0;
- msr_param.high = hw_res->num_closid;
+ memset(msr_param, 0, sizeof(msr_param));
+ msr_param[0].res = r;
+ msr_param[0].low = 0;
+ msr_param[0].high = hw_res->num_closid;
/*
* Disable resource control for this resource by setting all
@@ -2834,17 +2832,14 @@ static int reset_all_ctrls(struct rdt_resource *r)
*/
list_for_each_entry(d, &r->domains, list) {
hw_dom = resctrl_to_arch_dom(d);
- cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
+ cpu = cpumask_any(&d->cpu_mask);
for (i = 0; i < hw_res->num_closid; i++)
hw_dom->ctrl_val[i] = r->default_ctrl;
+ msr_param[0].dom = d;
+ smp_call_function_single(cpu, rdt_ctrl_update, &msr_param, 1);
}
- /* Update CBM on all the CPUs in cpu_mask */
- on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1);
-
- free_cpumask_var(cpu_mask);
-
return 0;
}