[RESEND] sched/fair: Add min_ratio for cfs bandwidth_control

Message ID: 20221019031551.24312-1-zhouchuyi@bytedance.com
State: New
Series: [RESEND] sched/fair: Add min_ratio for cfs bandwidth_control

Commit Message

Chuyi Zhou Oct. 19, 2022, 3:15 a.m. UTC
  Tasks may be throttled while holding locks for a long time by the
current cfs bandwidth control mechanism once users set too small a
quota/period ratio, which can result in the whole system getting stuck[1].

In order to prevent the above situation from happening, this patch adds
sysctl_sched_cfs_bandwidth_min_ratio in /proc/sys/kernel, which indicates
the minimum quota/period percentage users can set. The default value is
zero, in which case the constraint is not applied.

Link[1]: https://lore.kernel.org/lkml/5987be34-b527-4ff5-a17d-5f6f0dc94d6d@huawei.com/T/
Signed-off-by: Chuyi Zhou <zhouchuyi@bytedance.com>
Suggested-by: Abel Wu <wuyun.abel@bytedance.com>
---
 include/linux/sched/sysctl.h |  4 ++++
 kernel/sched/core.c          | 23 +++++++++++++++++++++++
 kernel/sysctl.c              | 10 ++++++++++
 3 files changed, 37 insertions(+)
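
For readers unfamiliar with the interface, here is a minimal userspace sketch
of how the proposed knob would be exercised once the patch is applied. This is
an editor's illustration, not part of the submission: the cgroup v2 mount point
and the "demo" group are assumptions, cpu.max is the existing cgroup v2
bandwidth interface ("$QUOTA $PERIOD" in microseconds), and the EINVAL on a
rejected ratio follows from tg_set_cfs_bandwidth() in the patch below.

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");
	int ret = 0;

	if (!f)
		return -errno;
	if (fputs(val, f) == EOF || fflush(f) == EOF)
		ret = -errno;
	fclose(f);
	return ret;
}

int main(void)
{
	int ret;

	/* Require quota/period >= 10% for every cfs bandwidth setting. */
	write_str("/proc/sys/kernel/sched_cfs_bandwidth_min_ratio", "10");

	/* 5ms quota per 100ms period is a 5% ratio: expected to fail with EINVAL. */
	ret = write_str("/sys/fs/cgroup/demo/cpu.max", "5000 100000");
	printf("5%% ratio:  %s\n", ret ? strerror(-ret) : "accepted");

	/* 20ms quota per 100ms period is a 20% ratio: expected to succeed. */
	ret = write_str("/sys/fs/cgroup/demo/cpu.max", "20000 100000");
	printf("20%% ratio: %s\n", ret ? strerror(-ret) : "accepted");

	return 0;
}

Both writes need root, and the "demo" cgroup must already exist with the cpu
controller enabled.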
  

Comments

Benjamin Segall Oct. 19, 2022, 9:01 p.m. UTC | #1
Chuyi Zhou <zhouchuyi@bytedance.com> writes:

> Tasks may be throttled while holding locks for a long time by the
> current cfs bandwidth control mechanism once users set too small a
> quota/period ratio, which can result in the whole system getting stuck[1].
>
> In order to prevent the above situation from happening, this patch adds
> sysctl_sched_cfs_bandwidth_min_ratio in /proc/sys/kernel, which indicates
> the minimum quota/period percentage users can set. The default value is
> zero, in which case the constraint is not applied.


There are so many other sorts of bad inputs that can get you stuck here
that I'm not sure it's ever safe against lockups to provide direct write
access to an untrusted user. I'm not totally opposed but it seems like
an incomplete fix to a broken (non-default) configuration.


  
Tejun Heo Oct. 19, 2022, 9:21 p.m. UTC | #2
Hello,

On Wed, Oct 19, 2022 at 11:15:51AM +0800, Chuyi Zhou wrote:
> Tasks may be throttled while holding locks for a long time by the
> current cfs bandwidth control mechanism once users set too small a
> quota/period ratio, which can result in the whole system getting stuck[1].
>
> In order to prevent the above situation from happening, this patch adds
> sysctl_sched_cfs_bandwidth_min_ratio in /proc/sys/kernel, which indicates
> the minimum quota/period percentage users can set. The default value is
> zero, in which case the constraint is not applied.
>
> Link[1]: https://lore.kernel.org/lkml/5987be34-b527-4ff5-a17d-5f6f0dc94d6d@huawei.com/T/
> Signed-off-by: Chuyi Zhou <zhouchuyi@bytedance.com>
> Suggested-by: Abel Wu <wuyun.abel@bytedance.com>

This is a bit of a bandaid. I think what we really need to do is only
throttling when running in userspace. In kernel space, it should just keep
accumulating used cycles as debt which should be paid back before userspace
code can run again so that we don't throttle at random places in the kernel.

Thanks.
  
Chuyi Zhou Oct. 20, 2022, 6:35 a.m. UTC | #3
On 2022/10/20 05:21, Tejun Heo wrote:
> Hello,
> 
> On Wed, Oct 19, 2022 at 11:15:51AM +0800, Chuyi Zhou wrote:
>> Tasks may be throttled while holding locks for a long time by the
>> current cfs bandwidth control mechanism once users set too small a
>> quota/period ratio, which can result in the whole system getting stuck[1].
>>
>> In order to prevent the above situation from happening, this patch adds
>> sysctl_sched_cfs_bandwidth_min_ratio in /proc/sys/kernel, which indicates
>> the minimum quota/period percentage users can set. The default value is
>> zero, in which case the constraint is not applied.
>>
>> Link[1]: https://lore.kernel.org/lkml/5987be34-b527-4ff5-a17d-5f6f0dc94d6d@huawei.com/T/
>> Signed-off-by: Chuyi Zhou <zhouchuyi@bytedance.com>
>> Suggested-by: Abel Wu <wuyun.abel@bytedance.com>
> 
> This is a bit of a bandaid. I think what we really need to do is only
> throttling when running in userspace. In kernel space, it should just keep
> accumulating used cycles as debt which should be paid back before userspace
> code can run again so that we don't throttle at random places in the kernel.
> 
> Thanks.
> 
Got it. Thanks for your advice.
Chuyi Zhou
  
Chuyi Zhou Oct. 20, 2022, 9:14 a.m. UTC | #4
On 2022/10/20 05:01, Benjamin Segall wrote:
> Chuyi Zhou <zhouchuyi@bytedance.com> writes:
>
>> Tasks may be throttled while holding locks for a long time by the
>> current cfs bandwidth control mechanism once users set too small a
>> quota/period ratio, which can result in the whole system getting stuck[1].
>>
>> In order to prevent the above situation from happening, this patch adds
>> sysctl_sched_cfs_bandwidth_min_ratio in /proc/sys/kernel, which indicates
>> the minimum quota/period percentage users can set. The default value is
>> zero, in which case the constraint is not applied.
> 
> 
> There are so many other sorts of bad inputs that can get you stuck here
> that I'm not sure it's ever safe against lockups to provide direct write
> access to an untrusted user. I'm not totally opposed but it seems like
> an incomplete fix to a broken (non-default) configuration.
> 
> 
Thanks for your advice.
Chuyi Zhou
  
Peter Zijlstra Oct. 20, 2022, 5:08 p.m. UTC | #5
On Wed, Oct 19, 2022 at 11:21:19AM -1000, Tejun Heo wrote:
> Hello,
> 
> On Wed, Oct 19, 2022 at 11:15:51AM +0800, Chuyi Zhou wrote:
> > Tasks may be throttled while holding locks for a long time by the
> > current cfs bandwidth control mechanism once users set too small a
> > quota/period ratio, which can result in the whole system getting stuck[1].
> >
> > In order to prevent the above situation from happening, this patch adds
> > sysctl_sched_cfs_bandwidth_min_ratio in /proc/sys/kernel, which indicates
> > the minimum quota/period percentage users can set. The default value is
> > zero, in which case the constraint is not applied.
> >
> > Link[1]: https://lore.kernel.org/lkml/5987be34-b527-4ff5-a17d-5f6f0dc94d6d@huawei.com/T/
> > Signed-off-by: Chuyi Zhou <zhouchuyi@bytedance.com>
> > Suggested-by: Abel Wu <wuyun.abel@bytedance.com>
> 
> This is a bit of a bandaid. I think what we really need to do is only
> throttling when running in userspace. In kernel space, it should just keep
> accumulating used cycles as debt which should be paid back before userspace
> code can run again so that we don't throttle at random places in the kernel.

That's just moving the problem. But yeah; perhaps. Starving random
userspace is less of a problem I suppose.
  
Tejun Heo Oct. 21, 2022, 6:04 p.m. UTC | #6
Hello,

On Thu, Oct 20, 2022 at 07:08:13PM +0200, Peter Zijlstra wrote:
> > This is a bit of a bandaid. I think what we really need to do is only
> > throttling when running in userspace. In kernel space, it should just keep
> > accumulating used cycles as debt which should be paid back before userspace
> > code can run again so that we don't throttle at random places in the kernel.
> 
> That's just moving the problem. But yeah; perhaps. Starving random
> userspace is less of a problem I suppose.

Given that our primary means of guaranteeing forward progress is the fact
that the system runs out of other things to do when there are severe
priority inversions, I don't think we can safely give control of throttling
something running in the kernel to userspace.

IO control takes a similar approach with shared IOs which can have
system-wide impacts and it's been working out pretty well. While some may go
over the limit briefly, it's not that difficult to remain true to the
intended configuration over time.

The only problem is the cases where userspace can cause a large amount of
forced consumption (e.g. for IOs, creating a lot of metadata updates
without doing anything else), but even in the unlikely case that a similar
problem exists for CPU, it's pretty easy to add specific control mechanisms
around those (e.g. something along the lines of might_resched()).

So, yeah, I think this is the actual solution.

Thanks.
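
To make the idea concrete, here is an editor's toy model in plain C. It is not
kernel code and not how CFS bandwidth control behaves today; it only sketches
the scheme described above under simplified assumptions: CPU time consumed
while "in the kernel" is never throttled on the spot but accumulated as debt,
the debt is deducted from the next refill before new runtime is granted, and
throttling happens only at the point standing in for the return to userspace.
All numbers are arbitrary.

#include <stdbool.h>
#include <stdio.h>

struct toy_bw {
	long quota;	/* runtime granted per period */
	long runtime;	/* runtime remaining in the current period */
	long debt;	/* overrun accumulated while running in the kernel */
};

/* Charge @delta of CPU time and decide whether to throttle right now. */
static bool charge(struct toy_bw *b, long delta, bool in_kernel)
{
	b->runtime -= delta;
	if (b->runtime >= 0)
		return false;
	if (in_kernel) {
		/* Keep running, but remember how far over budget we went. */
		b->debt += -b->runtime;
		b->runtime = 0;
		return false;
	}
	return true;	/* in userspace: safe to throttle here */
}

/* Start a new period: refill the budget, paying accumulated debt back first. */
static void refill(struct toy_bw *b)
{
	long pay = b->debt < b->quota ? b->debt : b->quota;

	b->debt -= pay;
	b->runtime = b->quota - pay;
}

int main(void)
{
	struct toy_bw b = { .quota = 10, .runtime = 10, .debt = 0 };
	bool throttled;

	/* A long kernel section overruns the budget but is not throttled. */
	throttled = charge(&b, 25, true);
	printf("kernel overrun throttles? %d (debt=%ld)\n", throttled, b.debt);

	/* The next period starts with the overrun deducted from the refill. */
	refill(&b);
	printf("after refill: runtime=%ld debt=%ld\n", b.runtime, b.debt);

	/* Back in userspace, running out of budget throttles immediately. */
	throttled = charge(&b, 5, false);
	printf("user overrun throttles? %d\n", throttled);
	return 0;
}

In this model the limit still holds over time because the overrun is never
forgotten; only the point at which it is enforced moves to a place where
stalling the task cannot leave it stuck holding a kernel lock.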
  

Patch

diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 303ee7dd0c7e..dedb18648f0e 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -21,6 +21,10 @@  enum sched_tunable_scaling {
 	SCHED_TUNABLESCALING_END,
 };
 
+#ifdef CONFIG_CFS_BANDWIDTH
+extern unsigned int sysctl_sched_cfs_bandwidth_min_ratio;
+#endif
+
 #define NUMA_BALANCING_DISABLED		0x0
 #define NUMA_BALANCING_NORMAL		0x1
 #define NUMA_BALANCING_MEMORY_TIERING	0x2
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5800b0623ff3..8f6cfd889e37 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10504,6 +10504,12 @@  static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
 }
 
 #ifdef CONFIG_CFS_BANDWIDTH
+/*
+ * The minimum quota/period ratio users can set. The default is zero, in which
+ * case users can set quota and period without triggering this constraint.
+ */
+unsigned int sysctl_sched_cfs_bandwidth_min_ratio;
+
 static DEFINE_MUTEX(cfs_constraints_mutex);
 
 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
@@ -10513,6 +10519,20 @@  static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;
 
 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
 
+static int check_cfs_bandwidth_min_ratio(u64 period, u64 quota)
+{
+	u64 ratio;
+
+	if (!sysctl_sched_cfs_bandwidth_min_ratio)
+		return 0;
+
+	ratio = div64_u64(quota * 100, period);
+	if (ratio < sysctl_sched_cfs_bandwidth_min_ratio)
+		return -1;
+
+	return 0;
+}
+
 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
 				u64 burst)
 {
@@ -10548,6 +10568,9 @@  static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
 				     burst + quota > max_cfs_runtime))
 		return -EINVAL;
 
+	if (quota != RUNTIME_INF && check_cfs_bandwidth_min_ratio(period, quota))
+		return -EINVAL;
+
 	/*
 	 * Prevent race between setting of cfs_rq->runtime_enabled and
 	 * unthrottle_offline_cfs_rqs().
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 188c305aeb8b..7d9743e8e514 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1652,6 +1652,16 @@  static struct ctl_table kern_table[] = {
 		.extra1		= SYSCTL_ZERO,
 	},
 #endif /* CONFIG_NUMA_BALANCING */
+#ifdef CONFIG_CFS_BANDWIDTH
+	{
+		.procname	= "sched_cfs_bandwidth_min_ratio",
+		.data		= &sysctl_sched_cfs_bandwidth_min_ratio,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+	},
+#endif /* CONFIG_CFS_BANDWIDTH */
 	{
 		.procname	= "panic",
 		.data		= &panic_timeout,