sched/rt: Redefine RR_TIMESLICE to 100 msecs

Message ID: 20231018081709.2289264-1-yajun.deng@linux.dev
State: New
Series: sched/rt: Redefine RR_TIMESLICE to 100 msecs

Commit Message

Yajun Deng Oct. 18, 2023, 8:17 a.m. UTC
  RR_TIMESLICE is currently defined as the number of jiffies that
corresponds to 100 msecs, and sysctl_sched_rr_timeslice then converts
it back to 100 msecs. The two calculations are inverses of each other.

sched_rr_handler likewise calls both msecs_to_jiffies and
jiffies_to_msecs, another pair of inverse calculations.

Redefine RR_TIMESLICE as 100 msecs; then only sched_rr_timeslice needs
to convert RR_TIMESLICE to jiffies.

Signed-off-by: Yajun Deng <yajun.deng@linux.dev>
---
 include/linux/sched/rt.h |  2 +-
 init/init_task.c         |  2 +-
 kernel/sched/rt.c        | 12 +++++-------
 3 files changed, 7 insertions(+), 9 deletions(-)
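
To make the two directions concrete, here is a condensed sketch of the
conversions before and after the patch. The expressions are taken from the
diff below; CONFIG_HZ=250 is used only as an example:

    /* Before: RR_TIMESLICE is in jiffies; with HZ=250, 100 * 250 / 1000 = 25 */
    #define RR_TIMESLICE	(100 * HZ / 1000)
    static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;	/* 25 jiffies -> 100 ms */

    /* After: RR_TIMESLICE is in msecs and is converted to jiffies only where needed */
    #define RR_TIMESLICE	(100)
    int sched_rr_timeslice = (RR_TIMESLICE * HZ) / MSEC_PER_SEC;		/* 100 ms -> 25 jiffies */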
  

Comments

Yajun Deng Nov. 8, 2023, 11:02 a.m. UTC | #1
Kindly ping...

Thanks


On 2023/10/18 16:17, Yajun Deng wrote:
> RR_TIMESLICE is currently defined as the number of jiffies that
> corresponds to 100 msecs, and sysctl_sched_rr_timeslice then converts
> it back to 100 msecs. The two calculations are inverses of each other.
>
> sched_rr_handler likewise calls both msecs_to_jiffies and
> jiffies_to_msecs, another pair of inverse calculations.
>
> Redefine RR_TIMESLICE as 100 msecs; then only sched_rr_timeslice needs
> to convert RR_TIMESLICE to jiffies.
>
> Signed-off-by: Yajun Deng <yajun.deng@linux.dev>
> ---
>   include/linux/sched/rt.h |  2 +-
>   init/init_task.c         |  2 +-
>   kernel/sched/rt.c        | 12 +++++-------
>   3 files changed, 7 insertions(+), 9 deletions(-)
>
> diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
> index b2b9e6eb9683..74f8f456a804 100644
> --- a/include/linux/sched/rt.h
> +++ b/include/linux/sched/rt.h
> @@ -58,6 +58,6 @@ extern void normalize_rt_tasks(void);
>    * default timeslice is 100 msecs (used only for SCHED_RR tasks).
>    * Timeslices get refilled after they expire.
>    */
> -#define RR_TIMESLICE		(100 * HZ / 1000)
> +#define RR_TIMESLICE		(100)
>   
>   #endif /* _LINUX_SCHED_RT_H */
> diff --git a/init/init_task.c b/init/init_task.c
> index 5727d42149c3..86619a425342 100644
> --- a/init/init_task.c
> +++ b/init/init_task.c
> @@ -94,7 +94,7 @@ struct task_struct init_task
>   	},
>   	.rt		= {
>   		.run_list	= LIST_HEAD_INIT(init_task.rt.run_list),
> -		.time_slice	= RR_TIMESLICE,
> +		.time_slice	= (RR_TIMESLICE * HZ) / MSEC_PER_SEC,
>   	},
>   	.tasks		= LIST_HEAD_INIT(init_task.tasks),
>   #ifdef CONFIG_SMP
> diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
> index 6aaf0a3d6081..7c0e912094a9 100644
> --- a/kernel/sched/rt.c
> +++ b/kernel/sched/rt.c
> @@ -4,7 +4,7 @@
>    * policies)
>    */
>   
> -int sched_rr_timeslice = RR_TIMESLICE;
> +int sched_rr_timeslice = (RR_TIMESLICE * HZ) / MSEC_PER_SEC;
>   /* More than 4 hours if BW_SHIFT equals 20. */
>   static const u64 max_rt_runtime = MAX_BW;
>   
> @@ -25,7 +25,7 @@ int sysctl_sched_rt_period = 1000000;
>   int sysctl_sched_rt_runtime = 950000;
>   
>   #ifdef CONFIG_SYSCTL
> -static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;
> +static int sysctl_sched_rr_timeslice = RR_TIMESLICE;
>   static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
>   		size_t *lenp, loff_t *ppos);
>   static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
> @@ -3014,12 +3014,10 @@ static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
>   	 * Also, writing zero resets the timeslice to default:
>   	 */
>   	if (!ret && write) {
> -		sched_rr_timeslice =
> -			sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
> -			msecs_to_jiffies(sysctl_sched_rr_timeslice);
> -
>   		if (sysctl_sched_rr_timeslice <= 0)
> -			sysctl_sched_rr_timeslice = jiffies_to_msecs(RR_TIMESLICE);
> +			sysctl_sched_rr_timeslice = RR_TIMESLICE;
> +
> +		sched_rr_timeslice = msecs_to_jiffies(sysctl_sched_rr_timeslice);
>   	}
>   	mutex_unlock(&mutex);
>
  
Steven Rostedt Nov. 8, 2023, 4:55 p.m. UTC | #2
On Wed, 18 Oct 2023 16:17:09 +0800
Yajun Deng <yajun.deng@linux.dev> wrote:

> RR_TIMESLICE is currently defined as the number of jiffies that
> corresponds to 100 msecs, and sysctl_sched_rr_timeslice then converts
> it back to 100 msecs. The two calculations are inverses of each other.

Do we care? The compiler will do this at build time. What's wrong with the
current code?

> 
> sched_rr_handler likewise calls both msecs_to_jiffies and
> jiffies_to_msecs, another pair of inverse calculations.
> 
> Redefine RR_TIMESLICE as 100 msecs; then only sched_rr_timeslice needs
> to convert RR_TIMESLICE to jiffies.

Why? What's the difference if we do it in one place or another? The
calculations are done at compile time anyway. There's no performance
benefit by this change.

> 
> Signed-off-by: Yajun Deng <yajun.deng@linux.dev>
> ---
>  include/linux/sched/rt.h |  2 +-
>  init/init_task.c         |  2 +-
>  kernel/sched/rt.c        | 12 +++++-------
>  3 files changed, 7 insertions(+), 9 deletions(-)
> 
> diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
> index b2b9e6eb9683..74f8f456a804 100644
> --- a/include/linux/sched/rt.h
> +++ b/include/linux/sched/rt.h
> @@ -58,6 +58,6 @@ extern void normalize_rt_tasks(void);
>   * default timeslice is 100 msecs (used only for SCHED_RR tasks).
>   * Timeslices get refilled after they expire.
>   */
> -#define RR_TIMESLICE		(100 * HZ / 1000)
> +#define RR_TIMESLICE		(100)

If anything, if this change is accepted, the above should be renamed to
RR_TIMESLICE_MS to explicitly state that they are in milliseconds.
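
A minimal sketch of what that rename would look like (the _MS name is only
the suggestion above, not something in the posted patch):

    #define RR_TIMESLICE_MS		100	/* default SCHED_RR timeslice, in msecs */

    .time_slice	= (RR_TIMESLICE_MS * HZ) / MSEC_PER_SEC,	/* init_task value, in jiffies */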

>  
>  #endif /* _LINUX_SCHED_RT_H */
> diff --git a/init/init_task.c b/init/init_task.c
> index 5727d42149c3..86619a425342 100644
> --- a/init/init_task.c
> +++ b/init/init_task.c
> @@ -94,7 +94,7 @@ struct task_struct init_task
>  	},
>  	.rt		= {
>  		.run_list	= LIST_HEAD_INIT(init_task.rt.run_list),
> -		.time_slice	= RR_TIMESLICE,
> +		.time_slice	= (RR_TIMESLICE * HZ) / MSEC_PER_SEC,
>  	},
>  	.tasks		= LIST_HEAD_INIT(init_task.tasks),
>  #ifdef CONFIG_SMP
> diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
> index 6aaf0a3d6081..7c0e912094a9 100644
> --- a/kernel/sched/rt.c
> +++ b/kernel/sched/rt.c
> @@ -4,7 +4,7 @@
>   * policies)
>   */
>  
> -int sched_rr_timeslice = RR_TIMESLICE;
> +int sched_rr_timeslice = (RR_TIMESLICE * HZ) / MSEC_PER_SEC;
>  /* More than 4 hours if BW_SHIFT equals 20. */
>  static const u64 max_rt_runtime = MAX_BW;
>  
> @@ -25,7 +25,7 @@ int sysctl_sched_rt_period = 1000000;
>  int sysctl_sched_rt_runtime = 950000;
>  
>  #ifdef CONFIG_SYSCTL
> -static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;
> +static int sysctl_sched_rr_timeslice = RR_TIMESLICE;

So you are just moving calculations around that get done at compile time?

>  static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
>  		size_t *lenp, loff_t *ppos);
>  static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
> @@ -3014,12 +3014,10 @@ static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
>  	 * Also, writing zero resets the timeslice to default:
>  	 */
>  	if (!ret && write) {
> -		sched_rr_timeslice =
> -			sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
> -			msecs_to_jiffies(sysctl_sched_rr_timeslice);
> -
>  		if (sysctl_sched_rr_timeslice <= 0)
> -			sysctl_sched_rr_timeslice = jiffies_to_msecs(RR_TIMESLICE);
> +			sysctl_sched_rr_timeslice = RR_TIMESLICE;
> +
> +		sched_rr_timeslice = msecs_to_jiffies(sysctl_sched_rr_timeslice);

If anything, this patch may make this code easier to follow, but it's up to
Peter if he wants to take it or not.

Agnostic-by: Steven Rostedt (Google) <rostedt@goodmis.org>

-- Steve



>  	}
>  	mutex_unlock(&mutex);
>
  
Yajun Deng Nov. 9, 2023, 2:11 a.m. UTC | #3
On 2023/11/9 00:55, Steven Rostedt wrote:
> On Wed, 18 Oct 2023 16:17:09 +0800
> Yajun Deng <yajun.deng@linux.dev> wrote:
>
>> RR_TIMESLICE is currently defined as the number of jiffies that
>> corresponds to 100 msecs, and sysctl_sched_rr_timeslice then converts
>> it back to 100 msecs. The two calculations are inverses of each other.
> Do we care? The compiler will do this at build time. What's wrong with the
> current code?


Yes, the two calculations may introduce a rounding error, as in the case
referenced below.

>> sched_rr_handler likewise calls both msecs_to_jiffies and
>> jiffies_to_msecs, another pair of inverse calculations.
>>
>> Redefine RR_TIMESLICE as 100 msecs; then only sched_rr_timeslice needs
>> to convert RR_TIMESLICE to jiffies.
> Why? What's the difference if we do it in one place or another? The
> calculations are done at compile time anyway. There's no performance
> benefit by this change.
>
The current code requires two calculations; after this patch, only one
is needed.


>> Signed-off-by: Yajun Deng <yajun.deng@linux.dev>
>> ---
>>   include/linux/sched/rt.h |  2 +-
>>   init/init_task.c         |  2 +-
>>   kernel/sched/rt.c        | 12 +++++-------
>>   3 files changed, 7 insertions(+), 9 deletions(-)
>>
>> diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
>> index b2b9e6eb9683..74f8f456a804 100644
>> --- a/include/linux/sched/rt.h
>> +++ b/include/linux/sched/rt.h
>> @@ -58,6 +58,6 @@ extern void normalize_rt_tasks(void);
>>    * default timeslice is 100 msecs (used only for SCHED_RR tasks).
>>    * Timeslices get refilled after they expire.
>>    */
>> -#define RR_TIMESLICE		(100 * HZ / 1000)
>> +#define RR_TIMESLICE		(100)
> If anything, if this change is accepted, the above should be renamed to
> RR_TIMESLICE_MS to explicitly state that they are in milliseconds.

Okay.

>>   
>>   #endif /* _LINUX_SCHED_RT_H */
>> diff --git a/init/init_task.c b/init/init_task.c
>> index 5727d42149c3..86619a425342 100644
>> --- a/init/init_task.c
>> +++ b/init/init_task.c
>> @@ -94,7 +94,7 @@ struct task_struct init_task
>>   	},
>>   	.rt		= {
>>   		.run_list	= LIST_HEAD_INIT(init_task.rt.run_list),
>> -		.time_slice	= RR_TIMESLICE,
>> +		.time_slice	= (RR_TIMESLICE * HZ) / MSEC_PER_SEC,
>>   	},
>>   	.tasks		= LIST_HEAD_INIT(init_task.tasks),
>>   #ifdef CONFIG_SMP
>> diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
>> index 6aaf0a3d6081..7c0e912094a9 100644
>> --- a/kernel/sched/rt.c
>> +++ b/kernel/sched/rt.c
>> @@ -4,7 +4,7 @@
>>    * policies)
>>    */
>>   
>> -int sched_rr_timeslice = RR_TIMESLICE;
>> +int sched_rr_timeslice = (RR_TIMESLICE * HZ) / MSEC_PER_SEC;
>>   /* More than 4 hours if BW_SHIFT equals 20. */
>>   static const u64 max_rt_runtime = MAX_BW;
>>   
>> @@ -25,7 +25,7 @@ int sysctl_sched_rt_period = 1000000;
>>   int sysctl_sched_rt_runtime = 950000;
>>   
>>   #ifdef CONFIG_SYSCTL
>> -static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;
>> +static int sysctl_sched_rr_timeslice = RR_TIMESLICE;
> So you are just moving calculations around that get done at compile time?
>

It's now a plain constant, which avoids the rounding error in the
calculation, similar to the issue fixed by commit:

c7fcb99877f9 ("sched/rt: Fix sysctl_sched_rr_timeslice intial value").
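
As an illustration of that kind of error (assuming CONFIG_HZ=300, where
RR_TIMESLICE = 100 * 300 / 1000 = 30 jiffies; see the referenced commit for
the exact expressions involved), the two orderings of the same integer
expression give different results:

    (MSEC_PER_SEC / HZ) * RR_TIMESLICE	/* (1000 / 300) * 30 = 3 * 30 =  90 ms, truncated */
    (MSEC_PER_SEC * RR_TIMESLICE) / HZ	/* (1000 * 30) / 300          = 100 ms, exact     */

Keeping the sysctl default as a plain 100 sidesteps the ordering question
entirely.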

>>   static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
>>   		size_t *lenp, loff_t *ppos);
>>   static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
>> @@ -3014,12 +3014,10 @@ static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
>>   	 * Also, writing zero resets the timeslice to default:
>>   	 */
>>   	if (!ret && write) {
>> -		sched_rr_timeslice =
>> -			sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
>> -			msecs_to_jiffies(sysctl_sched_rr_timeslice);
>> -
>>   		if (sysctl_sched_rr_timeslice <= 0)
>> -			sysctl_sched_rr_timeslice = jiffies_to_msecs(RR_TIMESLICE);
>> +			sysctl_sched_rr_timeslice = RR_TIMESLICE;
>> +
>> +		sched_rr_timeslice = msecs_to_jiffies(sysctl_sched_rr_timeslice);
> If anything, this patch may make this code easier to follow, but it's up to
> Peter if he wants to take it or not.
>
> Agnostic-by: Steven Rostedt (Google) <rostedt@goodmis.org>
>
> -- Steve
>
>
>
>>   	}
>>   	mutex_unlock(&mutex);
>>
  

Patch

diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index b2b9e6eb9683..74f8f456a804 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -58,6 +58,6 @@  extern void normalize_rt_tasks(void);
  * default timeslice is 100 msecs (used only for SCHED_RR tasks).
  * Timeslices get refilled after they expire.
  */
-#define RR_TIMESLICE		(100 * HZ / 1000)
+#define RR_TIMESLICE		(100)
 
 #endif /* _LINUX_SCHED_RT_H */
diff --git a/init/init_task.c b/init/init_task.c
index 5727d42149c3..86619a425342 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -94,7 +94,7 @@  struct task_struct init_task
 	},
 	.rt		= {
 		.run_list	= LIST_HEAD_INIT(init_task.rt.run_list),
-		.time_slice	= RR_TIMESLICE,
+		.time_slice	= (RR_TIMESLICE * HZ) / MSEC_PER_SEC,
 	},
 	.tasks		= LIST_HEAD_INIT(init_task.tasks),
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 6aaf0a3d6081..7c0e912094a9 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -4,7 +4,7 @@ 
  * policies)
  */
 
-int sched_rr_timeslice = RR_TIMESLICE;
+int sched_rr_timeslice = (RR_TIMESLICE * HZ) / MSEC_PER_SEC;
 /* More than 4 hours if BW_SHIFT equals 20. */
 static const u64 max_rt_runtime = MAX_BW;
 
@@ -25,7 +25,7 @@  int sysctl_sched_rt_period = 1000000;
 int sysctl_sched_rt_runtime = 950000;
 
 #ifdef CONFIG_SYSCTL
-static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;
+static int sysctl_sched_rr_timeslice = RR_TIMESLICE;
 static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
 		size_t *lenp, loff_t *ppos);
 static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
@@ -3014,12 +3014,10 @@  static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
 	 * Also, writing zero resets the timeslice to default:
 	 */
 	if (!ret && write) {
-		sched_rr_timeslice =
-			sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
-			msecs_to_jiffies(sysctl_sched_rr_timeslice);
-
 		if (sysctl_sched_rr_timeslice <= 0)
-			sysctl_sched_rr_timeslice = jiffies_to_msecs(RR_TIMESLICE);
+			sysctl_sched_rr_timeslice = RR_TIMESLICE;
+
+		sched_rr_timeslice = msecs_to_jiffies(sysctl_sched_rr_timeslice);
 	}
 	mutex_unlock(&mutex);