Thomas Gleixner <tglx@linutronix.de> writes:
> On Mon, Feb 12 2024 at 21:55, Ankur Arora wrote:
>
> The subject line reads odd...
>
>> -static inline bool test_tsk_need_resched(struct task_struct *tsk)
>> +static inline bool test_tsk_need_resched(struct task_struct *tsk, resched_t rs)
>> {
>> - return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
>> + if (IS_ENABLED(CONFIG_PREEMPT_AUTO) || rs == NR_now)
>> + return unlikely(test_tsk_thread_flag(tsk, tif_resched(rs)));
>> + else
>> + return false;
>> }
>
> Same like the others. This wants wrappers with now/lazy.
So, when working on the scheduler changes, I found the simplest
implementation was to define a function that takes into account
current preemption mode, checks for idle, tick etc and returns
the rescheduling policy, which __resched_curr() carries out.
So, it would be useful to just pass the resched_t as a parameter
instead of having now/lazy wrappers.
That said, as I mention in the other thread, the current primitives
are unnecessarily noisy because everyone needs to use them.
-static inline bool test_tsk_need_resched(struct task_struct *tsk, resched_t rs)
+static inline bool __test_tsk_need_resched(struct task_struct *tsk, resched_t rs)
{
if (IS_ENABLED(CONFIG_PREEMPT_AUTO) || rs == NR_now)
return unlikely(test_tsk_thread_flag(tsk, tif_resched(rs)));
@@ -1980,6 +1985,11 @@ static inline bool test_tsk_need_resched(struct task_struct *tsk, resched_t rs)
return false;
}
+static inline bool test_tsk_need_resched(struct task_struct *tsk)
+{
+ return __test_tsk_need_resched(tsk, NR_now);
+}
+
How about something like this (and similar elsewhere)?
>> /*
>> @@ -2104,7 +2121,8 @@ static __always_inline bool need_resched(void)
>>
>> static __always_inline bool need_resched_lazy(void)
>> {
>> - return unlikely(tif_need_resched(NR_lazy));
>> + return IS_ENABLED(CONFIG_PREEMPT_AUTO) &&
>> + unlikely(tif_need_resched(NR_lazy));
>
> Shouldn't this be folded into the patch which adds need_resched_lazy()?
I think I had messed up a rebase. Will fix.
Thanks
--
ankur
On Tue, Feb 20 2024 at 14:37, Ankur Arora wrote:
> Thomas Gleixner <tglx@linutronix.de> writes:
>> On Mon, Feb 12 2024 at 21:55, Ankur Arora wrote:
>>
>> The subject line reads odd...
>>
>>> -static inline bool test_tsk_need_resched(struct task_struct *tsk)
>>> +static inline bool test_tsk_need_resched(struct task_struct *tsk, resched_t rs)
>>> {
>>> - return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
>>> + if (IS_ENABLED(CONFIG_PREEMPT_AUTO) || rs == NR_now)
>>> + return unlikely(test_tsk_thread_flag(tsk, tif_resched(rs)));
>>> + else
>>> + return false;
>>> }
>>
>> Same like the others. This wants wrappers with now/lazy.
>
> So, when working on the scheduler changes, I found the simplest
> implementation was to define a function that takes into account
> current preemption mode, checks for idle, tick etc and returns
> the rescheduling policy, which __resched_curr() carries out.
>
> So, it would be useful to just pass the resched_t as a parameter
> instead of having now/lazy wrappers.
That's fine for specific functions which really need to handle the
rescheduling mode, but for all other random places having a nice wrapper
makes the code more readable.
Thanks,
tglx
@@ -198,7 +198,7 @@ static void pfault_interrupt(struct ext_code ext_code,
* return to userspace schedule() to block.
*/
__set_current_state(TASK_UNINTERRUPTIBLE);
- set_tsk_need_resched(tsk);
+ set_tsk_need_resched(tsk, NR_now);
set_preempt_need_resched();
}
}
@@ -1948,19 +1948,36 @@ static inline bool test_tsk_thread_flag(struct task_struct *tsk, int flag)
return test_ti_thread_flag(task_thread_info(tsk), flag);
}
-static inline void set_tsk_need_resched(struct task_struct *tsk)
+/*
+ * With !CONFIG_PREEMPT_AUTO, tif_resched(NR_lazy) reduces to
+ * tif_resched(NR_now). Add a check in the helpers below to ensure
+ * we don't touch the tif_resched(NR_now) bit unnecessarily.
+ */
+static inline void set_tsk_need_resched(struct task_struct *tsk, resched_t rs)
{
- set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
+ if (IS_ENABLED(CONFIG_PREEMPT_AUTO) || rs == NR_now)
+ set_tsk_thread_flag(tsk, tif_resched(rs));
+ else
+ /*
+ * NR_lazy is only touched under CONFIG_PREEMPT_AUTO.
+ */
+ BUG();
}
static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
- clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
+ clear_tsk_thread_flag(tsk, tif_resched(NR_now));
+
+ if (IS_ENABLED(CONFIG_PREEMPT_AUTO))
+ clear_tsk_thread_flag(tsk, tif_resched(NR_lazy));
}
-static inline bool test_tsk_need_resched(struct task_struct *tsk)
+static inline bool test_tsk_need_resched(struct task_struct *tsk, resched_t rs)
{
- return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
+ if (IS_ENABLED(CONFIG_PREEMPT_AUTO) || rs == NR_now)
+ return unlikely(test_tsk_thread_flag(tsk, tif_resched(rs)));
+ else
+ return false;
}
/*
@@ -2104,7 +2121,8 @@ static __always_inline bool need_resched(void)
static __always_inline bool need_resched_lazy(void)
{
- return unlikely(tif_need_resched(NR_lazy));
+ return IS_ENABLED(CONFIG_PREEMPT_AUTO) &&
+ unlikely(tif_need_resched(NR_lazy));
}
/*
@@ -73,7 +73,7 @@ void rcu_sched_clock_irq(int user)
if (user) {
rcu_qs();
} else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
- set_tsk_need_resched(current);
+ set_tsk_need_resched(current, NR_now);
set_preempt_need_resched();
}
}
@@ -2250,7 +2250,7 @@ void rcu_sched_clock_irq(int user)
if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
/* Idle and userspace execution already are quiescent states. */
if (!rcu_is_cpu_rrupt_from_idle() && !user) {
- set_tsk_need_resched(current);
+ set_tsk_need_resched(current, NR_now);
set_preempt_need_resched();
}
__this_cpu_write(rcu_data.rcu_urgent_qs, false);
@@ -2409,7 +2409,7 @@ static __latent_entropy void rcu_core(void)
if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) {
rcu_preempt_deferred_qs(current);
} else if (rcu_preempt_need_deferred_qs(current)) {
- set_tsk_need_resched(current);
+ set_tsk_need_resched(current, NR_now);
set_preempt_need_resched();
}
@@ -759,7 +759,7 @@ static void rcu_exp_handler(void *unused)
rcu_report_exp_rdp(rdp);
} else {
WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
- set_tsk_need_resched(t);
+ set_tsk_need_resched(t, NR_now);
set_preempt_need_resched();
}
return;
@@ -860,7 +860,7 @@ static void rcu_exp_need_qs(void)
__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
/* Store .exp before .rcu_urgent_qs. */
smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
- set_tsk_need_resched(current);
+ set_tsk_need_resched(current, NR_now);
set_preempt_need_resched();
}
@@ -658,7 +658,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
// Also if no expediting and no possible deboosting,
// slow is OK. Plus nohz_full CPUs eventually get
// tick enabled.
- set_tsk_need_resched(current);
+ set_tsk_need_resched(current, NR_now);
set_preempt_need_resched();
if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
expboost && !rdp->defer_qs_iw_pending && cpu_online(rdp->cpu)) {
@@ -725,7 +725,7 @@ static void rcu_flavor_sched_clock_irq(int user)
(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
/* No QS, force context switch if deferred. */
if (rcu_preempt_need_deferred_qs(t)) {
- set_tsk_need_resched(t);
+ set_tsk_need_resched(t, NR_now);
set_preempt_need_resched();
}
} else if (rcu_preempt_need_deferred_qs(t)) {
@@ -712,7 +712,7 @@ static void print_cpu_stall(unsigned long gps)
* progress and it could be we're stuck in kernel space without context
* switches for an entirely unreasonable amount of time.
*/
- set_tsk_need_resched(current);
+ set_tsk_need_resched(current, NR_now);
set_preempt_need_resched();
}
@@ -933,7 +933,7 @@ static bool set_nr_if_polling(struct task_struct *p)
#else
static inline bool set_nr_and_not_polling(struct task_struct *p)
{
- set_tsk_need_resched(p);
+ set_tsk_need_resched(p, NR_now);
return true;
}
@@ -1045,13 +1045,13 @@ void resched_curr(struct rq *rq)
lockdep_assert_rq_held(rq);
- if (test_tsk_need_resched(curr))
+ if (test_tsk_need_resched(curr, NR_now))
return;
cpu = cpu_of(rq);
if (cpu == smp_processor_id()) {
- set_tsk_need_resched(curr);
+ set_tsk_need_resched(curr, NR_now);
set_preempt_need_resched();
return;
}
@@ -2247,7 +2247,8 @@ void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
* A queue event has occurred, and we're going to schedule. In
* this case, we can save a useless back to back clock update.
*/
- if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
+ if (task_on_rq_queued(rq->curr) &&
+ test_tsk_need_resched(rq->curr, NR_now))
rq_clock_skip_update(rq);
}
@@ -2035,7 +2035,7 @@ static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p,
* let us try to decide what's the best thing to do...
*/
if ((p->dl.deadline == rq->curr->dl.deadline) &&
- !test_tsk_need_resched(rq->curr))
+ !test_tsk_need_resched(rq->curr, NR_now))
check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}
@@ -2564,7 +2564,7 @@ static void pull_dl_task(struct rq *this_rq)
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
if (!task_on_cpu(rq, p) &&
- !test_tsk_need_resched(rq->curr) &&
+ !test_tsk_need_resched(rq->curr, NR_now) &&
p->nr_cpus_allowed > 1 &&
dl_task(rq->curr) &&
(rq->curr->nr_cpus_allowed < 2 ||
@@ -8298,7 +8298,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
* prevents us from potentially nominating it as a false LAST_BUDDY
* below.
*/
- if (test_tsk_need_resched(curr))
+ if (test_tsk_need_resched(curr, NR_now))
return;
/* Idle tasks are by definition preempted by non-idle tasks. */
@@ -359,7 +359,7 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
struct idle_timer *it = container_of(timer, struct idle_timer, timer);
WRITE_ONCE(it->done, 1);
- set_tsk_need_resched(current);
+ set_tsk_need_resched(current, NR_now);
return HRTIMER_NORESTART;
}
@@ -1680,7 +1680,7 @@ static void wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags)
* to move current somewhere else, making room for our non-migratable
* task.
*/
- if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
+ if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr, NR_now))
check_preempt_equal_prio(rq, p);
#endif
}
@@ -2415,7 +2415,7 @@ static void pull_rt_task(struct rq *this_rq)
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
bool need_to_push = !task_on_cpu(rq, p) &&
- !test_tsk_need_resched(rq->curr) &&
+ !test_tsk_need_resched(rq->curr, NR_now) &&
p->nr_cpus_allowed > 1 &&
(dl_task(rq->curr) || rt_task(rq->curr)) &&
(rq->curr->nr_cpus_allowed < 2 ||