[RFC,2/3] sched/cpuset: Keep track of SCHED_DEADLINE tasks in cpusets

Message ID 20230315121812.206079-3-juri.lelli@redhat.com
State New
Series sched/deadline: cpuset: Rework DEADLINE bandwidth restoration

Commit Message

Juri Lelli March 15, 2023, 12:18 p.m. UTC
  Qais reported that iterating over all tasks when rebuilding root domains
to find out which ones are DEADLINE and need their bandwidth correctly
restored on such root domains can be a costly operation (10+ ms delays
on suspend-resume).

To fix the problem, keep track of the number of DEADLINE tasks belonging
to each cpuset and then use this information (in a follow-up patch) to only
perform the above iteration if DEADLINE tasks are actually present in
the cpuset for which a corresponding root domain is being rebuilt.

Reported-by: Qais Yousef <qyousef@layalina.io>
Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
---
 include/linux/cpuset.h |  4 ++++
 kernel/cgroup/cgroup.c |  4 ++++
 kernel/cgroup/cpuset.c | 25 +++++++++++++++++++++++++
 kernel/sched/core.c    | 10 ++++++++++
 4 files changed, 43 insertions(+)
  

Comments

Qais Yousef March 15, 2023, 2:49 p.m. UTC | #1
On 03/15/23 12:18, Juri Lelli wrote:
> Qais reported that iterating over all tasks when rebuilding root domains
> for finding out which ones are DEADLINE and need their bandwidth
> correctly restored on such root domains can be a costly operation (10+
> ms delays on suspend-resume).
> 
> To fix the problem keep track of the number of DEADLINE tasks belonging
> to each cpuset and then use this information (followup patch) to only
> perform the above iteration if DEADLINE tasks are actually present in
> the cpuset for which a corresponding root domain is being rebuilt.
> 
> Reported-by: Qais Yousef <qyousef@layalina.io>
> Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
> ---
>  include/linux/cpuset.h |  4 ++++
>  kernel/cgroup/cgroup.c |  4 ++++
>  kernel/cgroup/cpuset.c | 25 +++++++++++++++++++++++++
>  kernel/sched/core.c    | 10 ++++++++++
>  4 files changed, 43 insertions(+)
> 
> diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
> index 355f796c5f07..0348dba5680e 100644
> --- a/include/linux/cpuset.h
> +++ b/include/linux/cpuset.h
> @@ -71,6 +71,8 @@ extern void cpuset_init_smp(void);
>  extern void cpuset_force_rebuild(void);
>  extern void cpuset_update_active_cpus(void);
>  extern void cpuset_wait_for_hotplug(void);
> +extern void inc_dl_tasks_cs(struct task_struct *task);
> +extern void dec_dl_tasks_cs(struct task_struct *task);
>  extern void cpuset_lock(void);
>  extern void cpuset_unlock(void);
>  extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
> @@ -196,6 +198,8 @@ static inline void cpuset_update_active_cpus(void)
>  
>  static inline void cpuset_wait_for_hotplug(void) { }
>  
> +static inline void inc_dl_tasks_cs(struct task_struct *task) { }
> +static inline void dec_dl_tasks_cs(struct task_struct *task) { }
>  static inline void cpuset_lock(void) { }
>  static inline void cpuset_unlock(void) { }
>  
> diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
> index c099cf3fa02d..357925e1e4af 100644
> --- a/kernel/cgroup/cgroup.c
> +++ b/kernel/cgroup/cgroup.c
> @@ -57,6 +57,7 @@
>  #include <linux/file.h>
>  #include <linux/fs_parser.h>
>  #include <linux/sched/cputime.h>
> +#include <linux/sched/deadline.h>
>  #include <linux/psi.h>
>  #include <net/sock.h>
>  
> @@ -6673,6 +6674,9 @@ void cgroup_exit(struct task_struct *tsk)
>  	list_add_tail(&tsk->cg_list, &cset->dying_tasks);
>  	cset->nr_tasks--;
>  
> +	if (dl_task(tsk))
> +		dec_dl_tasks_cs(tsk);
> +
>  	WARN_ON_ONCE(cgroup_task_frozen(tsk));
>  	if (unlikely(!(tsk->flags & PF_KTHREAD) &&
>  		     test_bit(CGRP_FREEZE, &task_dfl_cgroup(tsk)->flags)))
> diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
> index 8d82d66d432b..57bc60112618 100644
> --- a/kernel/cgroup/cpuset.c
> +++ b/kernel/cgroup/cpuset.c
> @@ -193,6 +193,12 @@ struct cpuset {
>  	int use_parent_ecpus;
>  	int child_ecpus_count;
>  
> +	/*
> +	 * number of SCHED_DEADLINE tasks attached to this cpuset, so that we
> +	 * know when to rebuild associated root domain bandwidth information.
> +	 */
> +	int nr_deadline_tasks;
> +
>  	/* Invalid partition error code, not lock protected */
>  	enum prs_errcode prs_err;
>  
> @@ -245,6 +251,20 @@ static inline struct cpuset *parent_cs(struct cpuset *cs)
>  	return css_cs(cs->css.parent);
>  }
>  
> +void inc_dl_tasks_cs(struct task_struct *p)
> +{
> +	struct cpuset *cs = task_cs(p);

nit:

I *think* task_cs() assumes rcu_read_lock() is held, right?

Would it make sense to WARN_ON(!rcu_read_lock_held()) to at least
annotate the deps?

Or maybe task_cs() should do that..
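
As a minimal sketch of what such an annotation could look like (placing the
check in the helper rather than in task_cs() itself is purely illustrative):

/* Illustrative only: assert the RCU dependency before dereferencing the cpuset */
void inc_dl_tasks_cs(struct task_struct *p)
{
	struct cpuset *cs;

	WARN_ON_ONCE(!rcu_read_lock_held());

	cs = task_cs(p);
	cs->nr_deadline_tasks++;
}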

> +
> +	cs->nr_deadline_tasks++;
> +}
> +
> +void dec_dl_tasks_cs(struct task_struct *p)
> +{
> +	struct cpuset *cs = task_cs(p);

nit: ditto

> +
> +	cs->nr_deadline_tasks--;
> +}
> +
>  /* bits in struct cpuset flags field */
>  typedef enum {
>  	CS_ONLINE,
> @@ -2472,6 +2492,11 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
>  		ret = security_task_setscheduler(task);
>  		if (ret)
>  			goto out_unlock;
> +
> +		if (dl_task(task)) {
> +			cs->nr_deadline_tasks++;
> +			cpuset_attach_old_cs->nr_deadline_tasks--;
> +		}
>  	}
>  
>  	/*
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 5902cbb5e751..d586a8440348 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -7683,6 +7683,16 @@ static int __sched_setscheduler(struct task_struct *p,
>  		goto unlock;
>  	}
>  
> +	/*
> +	 * In case a task is setscheduled to SCHED_DEADLINE, or if a task is
> +	 * moved to a different sched policy, we need to keep track of that on
> +	 * its cpuset (for correct bandwidth tracking).
> +	 */
> +	if (dl_policy(policy) && !dl_task(p))
> +		inc_dl_tasks_cs(p);
> +	else if (dl_task(p) && !dl_policy(policy))
> +		dec_dl_tasks_cs(p);
> +

Would it be better to use switched_to_dl()/switched_from_dl() instead to
inc/dec_dl_tasks_cs()?
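
For reference, a rough sketch of that alternative, hooking the counters from
the DL class callbacks in kernel/sched/deadline.c (exact placement inside the
existing bodies is illustrative):

static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
	inc_dl_tasks_cs(p);
	/* ... existing switched_to_dl() body ... */
}

static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
	dec_dl_tasks_cs(p);
	/* ... existing switched_from_dl() body ... */
}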


Thanks!

--
Qais Yousef

>  	p->sched_reset_on_fork = reset_on_fork;
>  	oldprio = p->prio;
>  
> -- 
> 2.39.2
>
  
Waiman Long March 15, 2023, 3:46 p.m. UTC | #2
On 3/15/23 08:18, Juri Lelli wrote:
> Qais reported that iterating over all tasks when rebuilding root domains
> for finding out which ones are DEADLINE and need their bandwidth
> correctly restored on such root domains can be a costly operation (10+
> ms delays on suspend-resume).
>
> To fix the problem keep track of the number of DEADLINE tasks belonging
> to each cpuset and then use this information (followup patch) to only
> perform the above iteration if DEADLINE tasks are actually present in
> the cpuset for which a corresponding root domain is being rebuilt.
>
> Reported-by: Qais Yousef <qyousef@layalina.io>
> Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
> ---
>   include/linux/cpuset.h |  4 ++++
>   kernel/cgroup/cgroup.c |  4 ++++
>   kernel/cgroup/cpuset.c | 25 +++++++++++++++++++++++++
>   kernel/sched/core.c    | 10 ++++++++++
>   4 files changed, 43 insertions(+)
>
> diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
> index 355f796c5f07..0348dba5680e 100644
> --- a/include/linux/cpuset.h
> +++ b/include/linux/cpuset.h
> @@ -71,6 +71,8 @@ extern void cpuset_init_smp(void);
>   extern void cpuset_force_rebuild(void);
>   extern void cpuset_update_active_cpus(void);
>   extern void cpuset_wait_for_hotplug(void);
> +extern void inc_dl_tasks_cs(struct task_struct *task);
> +extern void dec_dl_tasks_cs(struct task_struct *task);
>   extern void cpuset_lock(void);
>   extern void cpuset_unlock(void);
>   extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
> @@ -196,6 +198,8 @@ static inline void cpuset_update_active_cpus(void)
>   
>   static inline void cpuset_wait_for_hotplug(void) { }
>   
> +static inline void inc_dl_tasks_cs(struct task_struct *task) { }
> +static inline void dec_dl_tasks_cs(struct task_struct *task) { }
>   static inline void cpuset_lock(void) { }
>   static inline void cpuset_unlock(void) { }
>   
> diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
> index c099cf3fa02d..357925e1e4af 100644
> --- a/kernel/cgroup/cgroup.c
> +++ b/kernel/cgroup/cgroup.c
> @@ -57,6 +57,7 @@
>   #include <linux/file.h>
>   #include <linux/fs_parser.h>
>   #include <linux/sched/cputime.h>
> +#include <linux/sched/deadline.h>
>   #include <linux/psi.h>
>   #include <net/sock.h>
>   
> @@ -6673,6 +6674,9 @@ void cgroup_exit(struct task_struct *tsk)
>   	list_add_tail(&tsk->cg_list, &cset->dying_tasks);
>   	cset->nr_tasks--;
>   
> +	if (dl_task(tsk))
> +		dec_dl_tasks_cs(tsk);
> +
>   	WARN_ON_ONCE(cgroup_task_frozen(tsk));
>   	if (unlikely(!(tsk->flags & PF_KTHREAD) &&
>   		     test_bit(CGRP_FREEZE, &task_dfl_cgroup(tsk)->flags)))
> diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
> index 8d82d66d432b..57bc60112618 100644
> --- a/kernel/cgroup/cpuset.c
> +++ b/kernel/cgroup/cpuset.c
> @@ -193,6 +193,12 @@ struct cpuset {
>   	int use_parent_ecpus;
>   	int child_ecpus_count;
>   
> +	/*
> +	 * number of SCHED_DEADLINE tasks attached to this cpuset, so that we
> +	 * know when to rebuild associated root domain bandwidth information.
> +	 */
> +	int nr_deadline_tasks;
> +
>   	/* Invalid partition error code, not lock protected */
>   	enum prs_errcode prs_err;
>   
> @@ -245,6 +251,20 @@ static inline struct cpuset *parent_cs(struct cpuset *cs)
>   	return css_cs(cs->css.parent);
>   }
>   
> +void inc_dl_tasks_cs(struct task_struct *p)
> +{
> +	struct cpuset *cs = task_cs(p);
> +
> +	cs->nr_deadline_tasks++;
> +}
> +
> +void dec_dl_tasks_cs(struct task_struct *p)
> +{
> +	struct cpuset *cs = task_cs(p);
> +
> +	cs->nr_deadline_tasks--;
> +}
> +
>   /* bits in struct cpuset flags field */
>   typedef enum {
>   	CS_ONLINE,
> @@ -2472,6 +2492,11 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
>   		ret = security_task_setscheduler(task);
>   		if (ret)
>   			goto out_unlock;
> +
> +		if (dl_task(task)) {
> +			cs->nr_deadline_tasks++;
> +			cpuset_attach_old_cs->nr_deadline_tasks--;
> +		}
>   	}

Any one of the tasks in the cpuset can cause the test to fail and abort 
the attachment. I would suggest that you keep a deadline task transfer 
count in the loop and then update cs and cpuset_attach_old_cs only 
after all the tasks have been iterated successfully.
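
In sketch form, that deferred accounting could look roughly like this
(dl_xfer is a made-up local name; surrounding context elided):

static int cpuset_can_attach(struct cgroup_taskset *tset)
{
	int dl_xfer = 0;	/* hypothetical local transfer count */

	/* ... */
	cgroup_taskset_for_each(task, css, tset) {
		ret = task_can_attach(task, cs->effective_cpus);
		if (ret)
			goto out_unlock;
		ret = security_task_setscheduler(task);
		if (ret)
			goto out_unlock;

		if (dl_task(task))
			dl_xfer++;
	}

	/* every task passed the checks: only now touch both cpusets */
	cs->nr_deadline_tasks += dl_xfer;
	cpuset_attach_old_cs->nr_deadline_tasks -= dl_xfer;
	/* ... */
}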

Cheers,
Longman
  
Juri Lelli March 15, 2023, 5:14 p.m. UTC | #3
On 15/03/23 11:46, Waiman Long wrote:
> 
> On 3/15/23 08:18, Juri Lelli wrote:
> > Qais reported that iterating over all tasks when rebuilding root domains
> > for finding out which ones are DEADLINE and need their bandwidth
> > correctly restored on such root domains can be a costly operation (10+
> > ms delays on suspend-resume).
> > 
> > To fix the problem keep track of the number of DEADLINE tasks belonging
> > to each cpuset and then use this information (followup patch) to only
> > perform the above iteration if DEADLINE tasks are actually present in
> > the cpuset for which a corresponding root domain is being rebuilt.
> > 
> > Reported-by: Qais Yousef <qyousef@layalina.io>
> > Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
> > ---
> >   include/linux/cpuset.h |  4 ++++
> >   kernel/cgroup/cgroup.c |  4 ++++
> >   kernel/cgroup/cpuset.c | 25 +++++++++++++++++++++++++
> >   kernel/sched/core.c    | 10 ++++++++++
> >   4 files changed, 43 insertions(+)
> > 
> > diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
> > index 355f796c5f07..0348dba5680e 100644
> > --- a/include/linux/cpuset.h
> > +++ b/include/linux/cpuset.h
> > @@ -71,6 +71,8 @@ extern void cpuset_init_smp(void);
> >   extern void cpuset_force_rebuild(void);
> >   extern void cpuset_update_active_cpus(void);
> >   extern void cpuset_wait_for_hotplug(void);
> > +extern void inc_dl_tasks_cs(struct task_struct *task);
> > +extern void dec_dl_tasks_cs(struct task_struct *task);
> >   extern void cpuset_lock(void);
> >   extern void cpuset_unlock(void);
> >   extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
> > @@ -196,6 +198,8 @@ static inline void cpuset_update_active_cpus(void)
> >   static inline void cpuset_wait_for_hotplug(void) { }
> > +static inline void inc_dl_tasks_cs(struct task_struct *task) { }
> > +static inline void dec_dl_tasks_cs(struct task_struct *task) { }
> >   static inline void cpuset_lock(void) { }
> >   static inline void cpuset_unlock(void) { }
> > diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
> > index c099cf3fa02d..357925e1e4af 100644
> > --- a/kernel/cgroup/cgroup.c
> > +++ b/kernel/cgroup/cgroup.c
> > @@ -57,6 +57,7 @@
> >   #include <linux/file.h>
> >   #include <linux/fs_parser.h>
> >   #include <linux/sched/cputime.h>
> > +#include <linux/sched/deadline.h>
> >   #include <linux/psi.h>
> >   #include <net/sock.h>
> > @@ -6673,6 +6674,9 @@ void cgroup_exit(struct task_struct *tsk)
> >   	list_add_tail(&tsk->cg_list, &cset->dying_tasks);
> >   	cset->nr_tasks--;
> > +	if (dl_task(tsk))
> > +		dec_dl_tasks_cs(tsk);
> > +
> >   	WARN_ON_ONCE(cgroup_task_frozen(tsk));
> >   	if (unlikely(!(tsk->flags & PF_KTHREAD) &&
> >   		     test_bit(CGRP_FREEZE, &task_dfl_cgroup(tsk)->flags)))
> > diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
> > index 8d82d66d432b..57bc60112618 100644
> > --- a/kernel/cgroup/cpuset.c
> > +++ b/kernel/cgroup/cpuset.c
> > @@ -193,6 +193,12 @@ struct cpuset {
> >   	int use_parent_ecpus;
> >   	int child_ecpus_count;
> > +	/*
> > +	 * number of SCHED_DEADLINE tasks attached to this cpuset, so that we
> > +	 * know when to rebuild associated root domain bandwidth information.
> > +	 */
> > +	int nr_deadline_tasks;
> > +
> >   	/* Invalid partition error code, not lock protected */
> >   	enum prs_errcode prs_err;
> > @@ -245,6 +251,20 @@ static inline struct cpuset *parent_cs(struct cpuset *cs)
> >   	return css_cs(cs->css.parent);
> >   }
> > +void inc_dl_tasks_cs(struct task_struct *p)
> > +{
> > +	struct cpuset *cs = task_cs(p);
> > +
> > +	cs->nr_deadline_tasks++;
> > +}
> > +
> > +void dec_dl_tasks_cs(struct task_struct *p)
> > +{
> > +	struct cpuset *cs = task_cs(p);
> > +
> > +	cs->nr_deadline_tasks--;
> > +}
> > +
> >   /* bits in struct cpuset flags field */
> >   typedef enum {
> >   	CS_ONLINE,
> > @@ -2472,6 +2492,11 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
> >   		ret = security_task_setscheduler(task);
> >   		if (ret)
> >   			goto out_unlock;
> > +
> > +		if (dl_task(task)) {
> > +			cs->nr_deadline_tasks++;
> > +			cpuset_attach_old_cs->nr_deadline_tasks--;
> > +		}
> >   	}
> 
> Any one of the tasks in the cpuset can cause the test to fail and abort the
> attachment. I would suggest that you keep a deadline task transfer count in
> the loop and then update cs and cpouset_attach_old_cs only after all the
> tasks have been iterated successfully.

Right, Dietmar I think commented pointing out something along these
lines. Think though we already have this problem with current
task_can_attach -> dl_cpu_busy which reserves bandwidth for each task
in the destination cs. Will need to look into that. Do you know which
sort of operation would move multiple tasks at once?
  
Juri Lelli March 15, 2023, 5:18 p.m. UTC | #4
On 15/03/23 14:49, Qais Yousef wrote:
> On 03/15/23 12:18, Juri Lelli wrote:

...

> > +void inc_dl_tasks_cs(struct task_struct *p)
> > +{
> > +	struct cpuset *cs = task_cs(p);
> 
> nit:
> 
> I *think* task_cs() assumes rcu_read_lock() is held, right?
> 
> Would it make sense to WARN_ON(!rcu_read_lock_held()) to at least
> annotate the deps?

Think we have that check in task_css_set_check()?
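
For reference, task_css_set_check() (which task_cs() ends up going through)
looks roughly like this under CONFIG_PROVE_RCU in include/linux/cgroup.h:

#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
		rcu_read_lock_sched_held() ||				\
		lockdep_is_held(&cgroup_mutex) ||			\
		lockdep_is_held(&css_set_lock) ||			\
		((task)->flags & PF_EXITING) || (__c))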

> Or maybe task_cs() should do that..
> 
> > +
> > +	cs->nr_deadline_tasks++;
> > +}
> > +
> > +void dec_dl_tasks_cs(struct task_struct *p)
> > +{
> > +	struct cpuset *cs = task_cs(p);
> 
> nit: ditto
> 
> > +
> > +	cs->nr_deadline_tasks--;
> > +}
> > +

...

> > diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> > index 5902cbb5e751..d586a8440348 100644
> > --- a/kernel/sched/core.c
> > +++ b/kernel/sched/core.c
> > @@ -7683,6 +7683,16 @@ static int __sched_setscheduler(struct task_struct *p,
> >  		goto unlock;
> >  	}
> >  
> > +	/*
> > +	 * In case a task is setscheduled to SCHED_DEADLINE, or if a task is
> > +	 * moved to a different sched policy, we need to keep track of that on
> > +	 * its cpuset (for correct bandwidth tracking).
> > +	 */
> > +	if (dl_policy(policy) && !dl_task(p))
> > +		inc_dl_tasks_cs(p);
> > +	else if (dl_task(p) && !dl_policy(policy))
> > +		dec_dl_tasks_cs(p);
> > +
> 
> Would it be better to use switched_to_dl()/switched_from_dl() instead to
> inc/dec_dl_tasks_cs()?

Ah, makes sense. I'll play with this.

Thanks,
Juri
  
Waiman Long March 15, 2023, 6:01 p.m. UTC | #5
On 3/15/23 13:14, Juri Lelli wrote:
> On 15/03/23 11:46, Waiman Long wrote:
>> On 3/15/23 08:18, Juri Lelli wrote:
>>> Qais reported that iterating over all tasks when rebuilding root domains
>>> for finding out which ones are DEADLINE and need their bandwidth
>>> correctly restored on such root domains can be a costly operation (10+
>>> ms delays on suspend-resume).
>>>
>>> To fix the problem keep track of the number of DEADLINE tasks belonging
>>> to each cpuset and then use this information (followup patch) to only
>>> perform the above iteration if DEADLINE tasks are actually present in
>>> the cpuset for which a corresponding root domain is being rebuilt.
>>>
>>> Reported-by: Qais Yousef <qyousef@layalina.io>
>>> Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
>>> ---
>>>    include/linux/cpuset.h |  4 ++++
>>>    kernel/cgroup/cgroup.c |  4 ++++
>>>    kernel/cgroup/cpuset.c | 25 +++++++++++++++++++++++++
>>>    kernel/sched/core.c    | 10 ++++++++++
>>>    4 files changed, 43 insertions(+)
>>>
>>> diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
>>> index 355f796c5f07..0348dba5680e 100644
>>> --- a/include/linux/cpuset.h
>>> +++ b/include/linux/cpuset.h
>>> @@ -71,6 +71,8 @@ extern void cpuset_init_smp(void);
>>>    extern void cpuset_force_rebuild(void);
>>>    extern void cpuset_update_active_cpus(void);
>>>    extern void cpuset_wait_for_hotplug(void);
>>> +extern void inc_dl_tasks_cs(struct task_struct *task);
>>> +extern void dec_dl_tasks_cs(struct task_struct *task);
>>>    extern void cpuset_lock(void);
>>>    extern void cpuset_unlock(void);
>>>    extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
>>> @@ -196,6 +198,8 @@ static inline void cpuset_update_active_cpus(void)
>>>    static inline void cpuset_wait_for_hotplug(void) { }
>>> +static inline void inc_dl_tasks_cs(struct task_struct *task) { }
>>> +static inline void dec_dl_tasks_cs(struct task_struct *task) { }
>>>    static inline void cpuset_lock(void) { }
>>>    static inline void cpuset_unlock(void) { }
>>> diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
>>> index c099cf3fa02d..357925e1e4af 100644
>>> --- a/kernel/cgroup/cgroup.c
>>> +++ b/kernel/cgroup/cgroup.c
>>> @@ -57,6 +57,7 @@
>>>    #include <linux/file.h>
>>>    #include <linux/fs_parser.h>
>>>    #include <linux/sched/cputime.h>
>>> +#include <linux/sched/deadline.h>
>>>    #include <linux/psi.h>
>>>    #include <net/sock.h>
>>> @@ -6673,6 +6674,9 @@ void cgroup_exit(struct task_struct *tsk)
>>>    	list_add_tail(&tsk->cg_list, &cset->dying_tasks);
>>>    	cset->nr_tasks--;
>>> +	if (dl_task(tsk))
>>> +		dec_dl_tasks_cs(tsk);
>>> +
>>>    	WARN_ON_ONCE(cgroup_task_frozen(tsk));
>>>    	if (unlikely(!(tsk->flags & PF_KTHREAD) &&
>>>    		     test_bit(CGRP_FREEZE, &task_dfl_cgroup(tsk)->flags)))
>>> diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
>>> index 8d82d66d432b..57bc60112618 100644
>>> --- a/kernel/cgroup/cpuset.c
>>> +++ b/kernel/cgroup/cpuset.c
>>> @@ -193,6 +193,12 @@ struct cpuset {
>>>    	int use_parent_ecpus;
>>>    	int child_ecpus_count;
>>> +	/*
>>> +	 * number of SCHED_DEADLINE tasks attached to this cpuset, so that we
>>> +	 * know when to rebuild associated root domain bandwidth information.
>>> +	 */
>>> +	int nr_deadline_tasks;
>>> +
>>>    	/* Invalid partition error code, not lock protected */
>>>    	enum prs_errcode prs_err;
>>> @@ -245,6 +251,20 @@ static inline struct cpuset *parent_cs(struct cpuset *cs)
>>>    	return css_cs(cs->css.parent);
>>>    }
>>> +void inc_dl_tasks_cs(struct task_struct *p)
>>> +{
>>> +	struct cpuset *cs = task_cs(p);
>>> +
>>> +	cs->nr_deadline_tasks++;
>>> +}
>>> +
>>> +void dec_dl_tasks_cs(struct task_struct *p)
>>> +{
>>> +	struct cpuset *cs = task_cs(p);
>>> +
>>> +	cs->nr_deadline_tasks--;
>>> +}
>>> +
>>>    /* bits in struct cpuset flags field */
>>>    typedef enum {
>>>    	CS_ONLINE,
>>> @@ -2472,6 +2492,11 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
>>>    		ret = security_task_setscheduler(task);
>>>    		if (ret)
>>>    			goto out_unlock;
>>> +
>>> +		if (dl_task(task)) {
>>> +			cs->nr_deadline_tasks++;
>>> +			cpuset_attach_old_cs->nr_deadline_tasks--;
>>> +		}
>>>    	}
>> Any one of the tasks in the cpuset can cause the test to fail and abort the
>> attachment. I would suggest that you keep a deadline task transfer count in
>> the loop and then update cs and cpouset_attach_old_cs only after all the
>> tasks have been iterated successfully.
> Right, Dietmar I think commented pointing out something along these
> lines. Think though we already have this problem with current
> task_can_attach -> dl_cpu_busy which reserves bandwidth for each tasks
> in the destination cs. Will need to look into that. Do you know which
> sort of operation would move multiple tasks at once?

Actually, what I said previously may not be enough. There can be 
multiple controllers attached to a cgroup. If any of their can_attach() 
calls fails, the whole transaction is aborted and cancel_attach() will 
be called. My new suggestion is to add a new deadline task transfer 
count into the cpuset structure and store the information there 
temporarily. If cpuset_attach() is called, it means all the can_attach 
calls succeed. You can then update the dl task count accordingly and 
clear the temporary transfer count.

I guess you may have to do something similar with dl_cpu_busy().
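
In sketch form, the suggested scheme might look something like this
(nr_migrate_dl_tasks is a made-up name for the temporary transfer count;
cs is the destination cpuset, cpuset_attach_old_cs the source):

static int cpuset_can_attach(struct cgroup_taskset *tset)
{
	/* ... existing per-task checks ... */
	cgroup_taskset_for_each(task, css, tset) {
		/* ... task_can_attach() / security checks ... */
		if (dl_task(task))
			cs->nr_migrate_dl_tasks++;
	}
	/* ... */
}

static void cpuset_cancel_attach(struct cgroup_taskset *tset)
{
	/* some controller's can_attach() failed: forget the pending transfer */
	cs->nr_migrate_dl_tasks = 0;
	/* ... */
}

static void cpuset_attach(struct cgroup_taskset *tset)
{
	/* all can_attach() calls succeeded: commit the accounting */
	if (cs->nr_migrate_dl_tasks) {
		cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
		cpuset_attach_old_cs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
		cs->nr_migrate_dl_tasks = 0;
	}
	/* ... */
}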

My 2 cents.

Cheers,
Longman
  
Waiman Long March 15, 2023, 6:10 p.m. UTC | #6
On 3/15/23 14:01, Waiman Long wrote:
>
> On 3/15/23 13:14, Juri Lelli wrote:
>> On 15/03/23 11:46, Waiman Long wrote:
>>> On 3/15/23 08:18, Juri Lelli wrote:
>>>> Qais reported that iterating over all tasks when rebuilding root 
>>>> domains
>>>> for finding out which ones are DEADLINE and need their bandwidth
>>>> correctly restored on such root domains can be a costly operation (10+
>>>> ms delays on suspend-resume).
>>>>
>>>> To fix the problem keep track of the number of DEADLINE tasks 
>>>> belonging
>>>> to each cpuset and then use this information (followup patch) to only
>>>> perform the above iteration if DEADLINE tasks are actually present in
>>>> the cpuset for which a corresponding root domain is being rebuilt.
>>>>
>>>> Reported-by: Qais Yousef <qyousef@layalina.io>
>>>> Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
>>>> ---
>>>>    include/linux/cpuset.h |  4 ++++
>>>>    kernel/cgroup/cgroup.c |  4 ++++
>>>>    kernel/cgroup/cpuset.c | 25 +++++++++++++++++++++++++
>>>>    kernel/sched/core.c    | 10 ++++++++++
>>>>    4 files changed, 43 insertions(+)
>>>>
>>>> diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
>>>> index 355f796c5f07..0348dba5680e 100644
>>>> --- a/include/linux/cpuset.h
>>>> +++ b/include/linux/cpuset.h
>>>> @@ -71,6 +71,8 @@ extern void cpuset_init_smp(void);
>>>>    extern void cpuset_force_rebuild(void);
>>>>    extern void cpuset_update_active_cpus(void);
>>>>    extern void cpuset_wait_for_hotplug(void);
>>>> +extern void inc_dl_tasks_cs(struct task_struct *task);
>>>> +extern void dec_dl_tasks_cs(struct task_struct *task);
>>>>    extern void cpuset_lock(void);
>>>>    extern void cpuset_unlock(void);
>>>>    extern void cpuset_cpus_allowed(struct task_struct *p, struct 
>>>> cpumask *mask);
>>>> @@ -196,6 +198,8 @@ static inline void cpuset_update_active_cpus(void)
>>>>    static inline void cpuset_wait_for_hotplug(void) { }
>>>> +static inline void inc_dl_tasks_cs(struct task_struct *task) { }
>>>> +static inline void dec_dl_tasks_cs(struct task_struct *task) { }
>>>>    static inline void cpuset_lock(void) { }
>>>>    static inline void cpuset_unlock(void) { }
>>>> diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
>>>> index c099cf3fa02d..357925e1e4af 100644
>>>> --- a/kernel/cgroup/cgroup.c
>>>> +++ b/kernel/cgroup/cgroup.c
>>>> @@ -57,6 +57,7 @@
>>>>    #include <linux/file.h>
>>>>    #include <linux/fs_parser.h>
>>>>    #include <linux/sched/cputime.h>
>>>> +#include <linux/sched/deadline.h>
>>>>    #include <linux/psi.h>
>>>>    #include <net/sock.h>
>>>> @@ -6673,6 +6674,9 @@ void cgroup_exit(struct task_struct *tsk)
>>>>        list_add_tail(&tsk->cg_list, &cset->dying_tasks);
>>>>        cset->nr_tasks--;
>>>> +    if (dl_task(tsk))
>>>> +        dec_dl_tasks_cs(tsk);
>>>> +
>>>>        WARN_ON_ONCE(cgroup_task_frozen(tsk));
>>>>        if (unlikely(!(tsk->flags & PF_KTHREAD) &&
>>>>                 test_bit(CGRP_FREEZE, &task_dfl_cgroup(tsk)->flags)))
>>>> diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
>>>> index 8d82d66d432b..57bc60112618 100644
>>>> --- a/kernel/cgroup/cpuset.c
>>>> +++ b/kernel/cgroup/cpuset.c
>>>> @@ -193,6 +193,12 @@ struct cpuset {
>>>>        int use_parent_ecpus;
>>>>        int child_ecpus_count;
>>>> +    /*
>>>> +     * number of SCHED_DEADLINE tasks attached to this cpuset, so 
>>>> that we
>>>> +     * know when to rebuild associated root domain bandwidth 
>>>> information.
>>>> +     */
>>>> +    int nr_deadline_tasks;
>>>> +
>>>>        /* Invalid partition error code, not lock protected */
>>>>        enum prs_errcode prs_err;
>>>> @@ -245,6 +251,20 @@ static inline struct cpuset *parent_cs(struct 
>>>> cpuset *cs)
>>>>        return css_cs(cs->css.parent);
>>>>    }
>>>> +void inc_dl_tasks_cs(struct task_struct *p)
>>>> +{
>>>> +    struct cpuset *cs = task_cs(p);
>>>> +
>>>> +    cs->nr_deadline_tasks++;
>>>> +}
>>>> +
>>>> +void dec_dl_tasks_cs(struct task_struct *p)
>>>> +{
>>>> +    struct cpuset *cs = task_cs(p);
>>>> +
>>>> +    cs->nr_deadline_tasks--;
>>>> +}
>>>> +
>>>>    /* bits in struct cpuset flags field */
>>>>    typedef enum {
>>>>        CS_ONLINE,
>>>> @@ -2472,6 +2492,11 @@ static int cpuset_can_attach(struct 
>>>> cgroup_taskset *tset)
>>>>            ret = security_task_setscheduler(task);
>>>>            if (ret)
>>>>                goto out_unlock;
>>>> +
>>>> +        if (dl_task(task)) {
>>>> +            cs->nr_deadline_tasks++;
>>>> +            cpuset_attach_old_cs->nr_deadline_tasks--;
>>>> +        }
>>>>        }
>>> Any one of the tasks in the cpuset can cause the test to fail and 
>>> abort the
>>> attachment. I would suggest that you keep a deadline task transfer 
>>> count in
>>> the loop and then update cs and cpouset_attach_old_cs only after all 
>>> the
>>> tasks have been iterated successfully.
>> Right, Dietmar I think commented pointing out something along these
>> lines. Think though we already have this problem with current
>> task_can_attach -> dl_cpu_busy which reserves bandwidth for each tasks
>> in the destination cs. Will need to look into that. Do you know which
>> sort of operation would move multiple tasks at once?
>
> Actually, what I said previously may not be enough. There can be 
> multiple controllers attached to a cgroup. If any of thier 
> can_attach() calls fails, the whole transaction is aborted and 
> cancel_attach() will be called. My new suggestion is to add a new 
> deadline task transfer count into the cpuset structure and store the 
> information there temporarily. If cpuset_attach() is called, it means 
> all the can_attach calls succeed. You can then update the dl task 
> count accordingly and clear the temporary transfer count.
>
> I guess you may have to do something similar with dl_cpu_busy().
>
> My 2 cents.

Alternatively, you can do the nr_deadline_tasks update in 
cpuset_attach(). However, there is an optimization to skip the task 
iteration if the cpu and memory lists haven't changed. You will have to 
skip that optimization if there are DL tasks in the cpuset.
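
Roughly, with the fast-path condition left as a placeholder since its exact
form depends on the tree this lands on:

static void cpuset_attach(struct cgroup_taskset *tset)
{
	bool cpus_mems_unchanged = false;	/* stand-in for the existing check */

	/* skipping the iteration is only safe if no DL accounting is needed */
	if (cpus_mems_unchanged && !cs->nr_deadline_tasks)
		return;

	cgroup_taskset_for_each(task, css, tset) {
		/* ... existing per-task attach work ... */
		if (dl_task(task)) {
			cs->nr_deadline_tasks++;
			cpuset_attach_old_cs->nr_deadline_tasks--;
		}
	}
	/* ... */
}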

Cheers,
Longman
  
Qais Yousef March 15, 2023, 7:25 p.m. UTC | #7
On 03/15/23 17:18, Juri Lelli wrote:
> On 15/03/23 14:49, Qais Yousef wrote:
> > On 03/15/23 12:18, Juri Lelli wrote:
> 
> ...
> 
> > > +void inc_dl_tasks_cs(struct task_struct *p)
> > > +{
> > > +	struct cpuset *cs = task_cs(p);
> > 
> > nit:
> > 
> > I *think* task_cs() assumes rcu_read_lock() is held, right?
> > 
> > Would it make sense to WARN_ON(!rcu_read_lock_held()) to at least
> > annotate the deps?
> 
> Think we have that check in task_css_set_check()?

Yes you're right, I didn't go forward enough in the call stack.

It seems to depend on PROVE_RCU, which sounds irrelevant, but I see PROVE_RCU
is actually an alias for PROVE_LOCKING.


Cheers

--
Qais Yousef
  
Waiman Long March 15, 2023, 11:27 p.m. UTC | #8
On 3/15/23 14:01, Waiman Long wrote:
>
> On 3/15/23 13:14, Juri Lelli wrote:
>> On 15/03/23 11:46, Waiman Long wrote:
>>> On 3/15/23 08:18, Juri Lelli wrote:
>>>> Qais reported that iterating over all tasks when rebuilding root 
>>>> domains
>>>> for finding out which ones are DEADLINE and need their bandwidth
>>>> correctly restored on such root domains can be a costly operation (10+
>>>> ms delays on suspend-resume).
>>>>
>>>> To fix the problem keep track of the number of DEADLINE tasks 
>>>> belonging
>>>> to each cpuset and then use this information (followup patch) to only
>>>> perform the above iteration if DEADLINE tasks are actually present in
>>>> the cpuset for which a corresponding root domain is being rebuilt.
>>>>
>>>> Reported-by: Qais Yousef <qyousef@layalina.io>
>>>> Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
>>>> ---
>>>>    include/linux/cpuset.h |  4 ++++
>>>>    kernel/cgroup/cgroup.c |  4 ++++
>>>>    kernel/cgroup/cpuset.c | 25 +++++++++++++++++++++++++
>>>>    kernel/sched/core.c    | 10 ++++++++++
>>>>    4 files changed, 43 insertions(+)
>>>>
>>>> diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
>>>> index 355f796c5f07..0348dba5680e 100644
>>>> --- a/include/linux/cpuset.h
>>>> +++ b/include/linux/cpuset.h
>>>> @@ -71,6 +71,8 @@ extern void cpuset_init_smp(void);
>>>>    extern void cpuset_force_rebuild(void);
>>>>    extern void cpuset_update_active_cpus(void);
>>>>    extern void cpuset_wait_for_hotplug(void);
>>>> +extern void inc_dl_tasks_cs(struct task_struct *task);
>>>> +extern void dec_dl_tasks_cs(struct task_struct *task);
>>>>    extern void cpuset_lock(void);
>>>>    extern void cpuset_unlock(void);
>>>>    extern void cpuset_cpus_allowed(struct task_struct *p, struct 
>>>> cpumask *mask);
>>>> @@ -196,6 +198,8 @@ static inline void cpuset_update_active_cpus(void)
>>>>    static inline void cpuset_wait_for_hotplug(void) { }
>>>> +static inline void inc_dl_tasks_cs(struct task_struct *task) { }
>>>> +static inline void dec_dl_tasks_cs(struct task_struct *task) { }
>>>>    static inline void cpuset_lock(void) { }
>>>>    static inline void cpuset_unlock(void) { }
>>>> diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
>>>> index c099cf3fa02d..357925e1e4af 100644
>>>> --- a/kernel/cgroup/cgroup.c
>>>> +++ b/kernel/cgroup/cgroup.c
>>>> @@ -57,6 +57,7 @@
>>>>    #include <linux/file.h>
>>>>    #include <linux/fs_parser.h>
>>>>    #include <linux/sched/cputime.h>
>>>> +#include <linux/sched/deadline.h>
>>>>    #include <linux/psi.h>
>>>>    #include <net/sock.h>
>>>> @@ -6673,6 +6674,9 @@ void cgroup_exit(struct task_struct *tsk)
>>>>        list_add_tail(&tsk->cg_list, &cset->dying_tasks);
>>>>        cset->nr_tasks--;
>>>> +    if (dl_task(tsk))
>>>> +        dec_dl_tasks_cs(tsk);
>>>> +
>>>>        WARN_ON_ONCE(cgroup_task_frozen(tsk));
>>>>        if (unlikely(!(tsk->flags & PF_KTHREAD) &&
>>>>                 test_bit(CGRP_FREEZE, &task_dfl_cgroup(tsk)->flags)))
>>>> diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
>>>> index 8d82d66d432b..57bc60112618 100644
>>>> --- a/kernel/cgroup/cpuset.c
>>>> +++ b/kernel/cgroup/cpuset.c
>>>> @@ -193,6 +193,12 @@ struct cpuset {
>>>>        int use_parent_ecpus;
>>>>        int child_ecpus_count;
>>>> +    /*
>>>> +     * number of SCHED_DEADLINE tasks attached to this cpuset, so 
>>>> that we
>>>> +     * know when to rebuild associated root domain bandwidth 
>>>> information.
>>>> +     */
>>>> +    int nr_deadline_tasks;
>>>> +
>>>>        /* Invalid partition error code, not lock protected */
>>>>        enum prs_errcode prs_err;
>>>> @@ -245,6 +251,20 @@ static inline struct cpuset *parent_cs(struct 
>>>> cpuset *cs)
>>>>        return css_cs(cs->css.parent);
>>>>    }
>>>> +void inc_dl_tasks_cs(struct task_struct *p)
>>>> +{
>>>> +    struct cpuset *cs = task_cs(p);
>>>> +
>>>> +    cs->nr_deadline_tasks++;
>>>> +}
>>>> +
>>>> +void dec_dl_tasks_cs(struct task_struct *p)
>>>> +{
>>>> +    struct cpuset *cs = task_cs(p);
>>>> +
>>>> +    cs->nr_deadline_tasks--;
>>>> +}
>>>> +
>>>>    /* bits in struct cpuset flags field */
>>>>    typedef enum {
>>>>        CS_ONLINE,
>>>> @@ -2472,6 +2492,11 @@ static int cpuset_can_attach(struct 
>>>> cgroup_taskset *tset)
>>>>            ret = security_task_setscheduler(task);
>>>>            if (ret)
>>>>                goto out_unlock;
>>>> +
>>>> +        if (dl_task(task)) {
>>>> +            cs->nr_deadline_tasks++;
>>>> +            cpuset_attach_old_cs->nr_deadline_tasks--;
>>>> +        }
>>>>        }
>>> Any one of the tasks in the cpuset can cause the test to fail and 
>>> abort the
>>> attachment. I would suggest that you keep a deadline task transfer 
>>> count in
>>> the loop and then update cs and cpouset_attach_old_cs only after all 
>>> the
>>> tasks have been iterated successfully.
>> Right, Dietmar I think commented pointing out something along these
>> lines. Think though we already have this problem with current
>> task_can_attach -> dl_cpu_busy which reserves bandwidth for each tasks
>> in the destination cs. Will need to look into that. Do you know which
>> sort of operation would move multiple tasks at once?
>
> Actually, what I said previously may not be enough. There can be 
> multiple controllers attached to a cgroup. If any of thier 
> can_attach() calls fails, the whole transaction is aborted and 
> cancel_attach() will be called. My new suggestion is to add a new 
> deadline task transfer count into the cpuset structure and store the 
> information there temporarily. If cpuset_attach() is called, it means 
> all the can_attach calls succeed. You can then update the dl task 
> count accordingly and clear the temporary transfer count.
>
> I guess you may have to do something similar with dl_cpu_busy().

Another possibility is to record in the task_struct the cpu from which the 
new DL bandwidth is allocated. Then in cpuset_cancel_attach(), you can 
revert the dl_cpu_busy() change if DL tasks are in the css_set to be 
transferred. That will likely require having a DL task transfer count in 
the cpuset and, if the transfer count is non-zero, iterating all the tasks 
to look for ones with a previously recorded cpu #.

Cheers,
Longman
  
Dietmar Eggemann March 22, 2023, 1:18 p.m. UTC | #9
On 15/03/2023 18:14, Juri Lelli wrote:
> On 15/03/23 11:46, Waiman Long wrote:
>>
>> On 3/15/23 08:18, Juri Lelli wrote:

[...]

>>> @@ -2472,6 +2492,11 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
>>>   		ret = security_task_setscheduler(task);
>>>   		if (ret)
>>>   			goto out_unlock;
>>> +
>>> +		if (dl_task(task)) {
>>> +			cs->nr_deadline_tasks++;
>>> +			cpuset_attach_old_cs->nr_deadline_tasks--;
>>> +		}
>>>   	}
>>
>> Any one of the tasks in the cpuset can cause the test to fail and abort the
>> attachment. I would suggest that you keep a deadline task transfer count in
>> the loop and then update cs and cpouset_attach_old_cs only after all the
>> tasks have been iterated successfully.
> 
> Right, Dietmar I think commented pointing out something along these
> lines. Think though we already have this problem with current
> task_can_attach -> dl_cpu_busy which reserves bandwidth for each tasks
> in the destination cs. Will need to look into that. Do you know which
> sort of operation would move multiple tasks at once?

Moving the process instead of the individual tasks makes
cpuset_can_attach() have to deal with multiple tasks.

# ps2 | grep DLN
 1614  1615 140      0   - DLN thread0-0
 1614  1616 140      0   - DLN thread0-1
 1614  1617 140      0   - DLN thread0-2

# echo 1614 > /sys/fs/cgroup/cpuset/cs2/cgroup.procs
  
Dietmar Eggemann March 22, 2023, 2:05 p.m. UTC | #10
On 15/03/2023 19:01, Waiman Long wrote:
> 
> On 3/15/23 13:14, Juri Lelli wrote:
>> On 15/03/23 11:46, Waiman Long wrote:
>>> On 3/15/23 08:18, Juri Lelli wrote:

[...]

>>>> @@ -2472,6 +2492,11 @@ static int cpuset_can_attach(struct
>>>> cgroup_taskset *tset)
>>>>            ret = security_task_setscheduler(task);
>>>>            if (ret)
>>>>                goto out_unlock;
>>>> +
>>>> +        if (dl_task(task)) {
>>>> +            cs->nr_deadline_tasks++;
>>>> +            cpuset_attach_old_cs->nr_deadline_tasks--;
>>>> +        }
>>>>        }
>>> Any one of the tasks in the cpuset can cause the test to fail and
>>> abort the
>>> attachment. I would suggest that you keep a deadline task transfer
>>> count in
>>> the loop and then update cs and cpouset_attach_old_cs only after all the
>>> tasks have been iterated successfully.
>> Right, Dietmar I think commented pointing out something along these
>> lines. Think though we already have this problem with current
>> task_can_attach -> dl_cpu_busy which reserves bandwidth for each tasks
>> in the destination cs. Will need to look into that. Do you know which
>> sort of operation would move multiple tasks at once?
> 
> Actually, what I said previously may not be enough. There can be
> multiple controllers attached to a cgroup. If any of thier can_attach()
> calls fails, the whole transaction is aborted and cancel_attach() will
> be called. My new suggestion is to add a new deadline task transfer
> count into the cpuset structure and store the information there
> temporarily. If cpuset_attach() is called, it means all the can_attach
> calls succeed. You can then update the dl task count accordingly and
> clear the temporary transfer count.
> 
> I guess you may have to do something similar with dl_cpu_busy().

I gave it a shot:

https://lkml.kernel.org/r/20230322135959.1998790-1-dietmar.eggemann@arm.com
  

Patch

diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 355f796c5f07..0348dba5680e 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -71,6 +71,8 @@  extern void cpuset_init_smp(void);
 extern void cpuset_force_rebuild(void);
 extern void cpuset_update_active_cpus(void);
 extern void cpuset_wait_for_hotplug(void);
+extern void inc_dl_tasks_cs(struct task_struct *task);
+extern void dec_dl_tasks_cs(struct task_struct *task);
 extern void cpuset_lock(void);
 extern void cpuset_unlock(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
@@ -196,6 +198,8 @@  static inline void cpuset_update_active_cpus(void)
 
 static inline void cpuset_wait_for_hotplug(void) { }
 
+static inline void inc_dl_tasks_cs(struct task_struct *task) { }
+static inline void dec_dl_tasks_cs(struct task_struct *task) { }
 static inline void cpuset_lock(void) { }
 static inline void cpuset_unlock(void) { }
 
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index c099cf3fa02d..357925e1e4af 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -57,6 +57,7 @@ 
 #include <linux/file.h>
 #include <linux/fs_parser.h>
 #include <linux/sched/cputime.h>
+#include <linux/sched/deadline.h>
 #include <linux/psi.h>
 #include <net/sock.h>
 
@@ -6673,6 +6674,9 @@  void cgroup_exit(struct task_struct *tsk)
 	list_add_tail(&tsk->cg_list, &cset->dying_tasks);
 	cset->nr_tasks--;
 
+	if (dl_task(tsk))
+		dec_dl_tasks_cs(tsk);
+
 	WARN_ON_ONCE(cgroup_task_frozen(tsk));
 	if (unlikely(!(tsk->flags & PF_KTHREAD) &&
 		     test_bit(CGRP_FREEZE, &task_dfl_cgroup(tsk)->flags)))
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 8d82d66d432b..57bc60112618 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -193,6 +193,12 @@  struct cpuset {
 	int use_parent_ecpus;
 	int child_ecpus_count;
 
+	/*
+	 * number of SCHED_DEADLINE tasks attached to this cpuset, so that we
+	 * know when to rebuild associated root domain bandwidth information.
+	 */
+	int nr_deadline_tasks;
+
 	/* Invalid partition error code, not lock protected */
 	enum prs_errcode prs_err;
 
@@ -245,6 +251,20 @@  static inline struct cpuset *parent_cs(struct cpuset *cs)
 	return css_cs(cs->css.parent);
 }
 
+void inc_dl_tasks_cs(struct task_struct *p)
+{
+	struct cpuset *cs = task_cs(p);
+
+	cs->nr_deadline_tasks++;
+}
+
+void dec_dl_tasks_cs(struct task_struct *p)
+{
+	struct cpuset *cs = task_cs(p);
+
+	cs->nr_deadline_tasks--;
+}
+
 /* bits in struct cpuset flags field */
 typedef enum {
 	CS_ONLINE,
@@ -2472,6 +2492,11 @@  static int cpuset_can_attach(struct cgroup_taskset *tset)
 		ret = security_task_setscheduler(task);
 		if (ret)
 			goto out_unlock;
+
+		if (dl_task(task)) {
+			cs->nr_deadline_tasks++;
+			cpuset_attach_old_cs->nr_deadline_tasks--;
+		}
 	}
 
 	/*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5902cbb5e751..d586a8440348 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7683,6 +7683,16 @@  static int __sched_setscheduler(struct task_struct *p,
 		goto unlock;
 	}
 
+	/*
+	 * In case a task is setscheduled to SCHED_DEADLINE, or if a task is
+	 * moved to a different sched policy, we need to keep track of that on
+	 * its cpuset (for correct bandwidth tracking).
+	 */
+	if (dl_policy(policy) && !dl_task(p))
+		inc_dl_tasks_cs(p);
+	else if (dl_task(p) && !dl_policy(policy))
+		dec_dl_tasks_cs(p);
+
 	p->sched_reset_on_fork = reset_on_fork;
 	oldprio = p->prio;