[v2,2/2] sched/rt: Return NULL when rt entity isn't a task in rt_task_of()

Message ID 20231206063400.3206346-3-yajun.deng@linux.dev
State New
Series sched: Return NULL when entity isn't a task

Commit Message

Yajun Deng Dec. 6, 2023, 6:34 a.m. UTC
Before calling rt_task_of(), callers have to make sure that the rt entity
is a task, and rt_task_of() itself also warns if the rt entity isn't a
task. That means the rt entity is checked twice. Besides, returning a
task_struct for an rt entity that isn't a task is meaningless.

Make rt_task_of() return NULL when the rt entity isn't a task, and call
rt_task_of() instead of rt_entity_is_task() wherever a task_struct is
needed; a standalone sketch of the resulting calling pattern follows.
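For illustration only, a minimal user-space sketch of the calling pattern
this change moves to. The structs, the is_group flag and main() are
simplified stand-ins for the kernel types, not the real definitions:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdbool.h>

    /* Simplified stand-ins for the kernel types; not the real layouts. */
    struct sched_rt_entity {
        bool is_group;              /* stand-in for the group-scheduling case */
    };

    struct task_struct {
        int pid;
        struct sched_rt_entity rt;
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Stand-in for rt_entity_is_task(): group entities are not tasks. */
    static bool rt_entity_is_task(struct sched_rt_entity *rt_se)
    {
        return !rt_se->is_group;
    }

    /* New shape: return NULL instead of warning when the entity is a group. */
    static struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
    {
        if (!rt_entity_is_task(rt_se))
            return NULL;

        return container_of(rt_se, struct task_struct, rt);
    }

    int main(void)
    {
        struct task_struct task = { .pid = 42 };
        struct sched_rt_entity group_se = { .is_group = true };

        /* Callers now do a single NULL check instead of calling
         * rt_entity_is_task() and then rt_task_of(). */
        struct task_struct *p = rt_task_of(&task.rt);
        if (p)
            printf("task entity -> pid %d\n", p->pid);

        p = rt_task_of(&group_se);
        if (!p)
            printf("group entity -> NULL\n");

        return 0;
    }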

Signed-off-by: Yajun Deng <yajun.deng@linux.dev>
---
v2: fix build error: 'struct rt_rq' has no member named 'highest_prio'.
v1: https://lore.kernel.org/all/20231201022704.3526377-1-yajun.deng@linux.dev/
---
 kernel/sched/rt.c | 60 ++++++++++++++---------------------------------
 1 file changed, 17 insertions(+), 43 deletions(-)
  

Patch

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 40d24f64b444..061f5f005c35 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -169,9 +169,9 @@  static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
 
 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
 {
-#ifdef CONFIG_SCHED_DEBUG
-	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
-#endif
+	if (!rt_entity_is_task(rt_se))
+		return NULL;
+
 	return container_of(rt_se, struct task_struct, rt);
 }
 
@@ -1266,54 +1266,34 @@  static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_arr
 	rt_se->on_list = 0;
 }
 
-static inline struct sched_statistics *
-__schedstats_from_rt_se(struct sched_rt_entity *rt_se)
-{
-#ifdef CONFIG_RT_GROUP_SCHED
-	/* schedstats is not supported for rt group. */
-	if (!rt_entity_is_task(rt_se))
-		return NULL;
-#endif
-
-	return &rt_task_of(rt_se)->stats;
-}
-
 static inline void
 update_stats_wait_start_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 {
-	struct sched_statistics *stats;
-	struct task_struct *p = NULL;
+	struct task_struct *p;
 
 	if (!schedstat_enabled())
 		return;
 
-	if (rt_entity_is_task(rt_se))
-		p = rt_task_of(rt_se);
-
-	stats = __schedstats_from_rt_se(rt_se);
-	if (!stats)
+	p = rt_task_of(rt_se);
+	if (!p)
 		return;
 
-	__update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats);
+	__update_stats_wait_start(rq_of_rt_rq(rt_rq), p, &p->stats);
 }
 
 static inline void
 update_stats_enqueue_sleeper_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 {
-	struct sched_statistics *stats;
-	struct task_struct *p = NULL;
+	struct task_struct *p;
 
 	if (!schedstat_enabled())
 		return;
 
-	if (rt_entity_is_task(rt_se))
-		p = rt_task_of(rt_se);
-
-	stats = __schedstats_from_rt_se(rt_se);
-	if (!stats)
+	p = rt_task_of(rt_se);
+	if (!p)
 		return;
 
-	__update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, stats);
+	__update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, &p->stats);
 }
 
 static inline void
@@ -1330,34 +1310,28 @@  update_stats_enqueue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
 static inline void
 update_stats_wait_end_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 {
-	struct sched_statistics *stats;
-	struct task_struct *p = NULL;
+	struct task_struct *p;
 
 	if (!schedstat_enabled())
 		return;
 
-	if (rt_entity_is_task(rt_se))
-		p = rt_task_of(rt_se);
-
-	stats = __schedstats_from_rt_se(rt_se);
-	if (!stats)
+	p = rt_task_of(rt_se);
+	if (!p)
 		return;
 
-	__update_stats_wait_end(rq_of_rt_rq(rt_rq), p, stats);
+	__update_stats_wait_end(rq_of_rt_rq(rt_rq), p, &p->stats);
 }
 
 static inline void
 update_stats_dequeue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
 			int flags)
 {
-	struct task_struct *p = NULL;
+	struct task_struct *p;
 
 	if (!schedstat_enabled())
 		return;
 
-	if (rt_entity_is_task(rt_se))
-		p = rt_task_of(rt_se);
-
+	p = rt_task_of(rt_se);
 	if ((flags & DEQUEUE_SLEEP) && p) {
 		unsigned int state;