There are broadly three sets of uses of cond_resched():

1. Calls to cond_resched() out of the goodness of our heart,
   otherwise known as avoiding lockup splats.

2. Open coded variants of cond_resched_lock() which call
   cond_resched() (see the sketch below).

3. Retry or error handling loops, where cond_resched() is used as a
   quick alternative to spinning in a tight loop.

When running under a full preemption model, cond_resched() reduces
to a NOP (not even a barrier), so removing it obviously cannot matter.

But considering only the voluntary preemption models (say, for code
that has mostly been tested under those): for set-1 and set-2, the
scheduler can now preempt kernel tasks running beyond their time
quanta anywhere they are preemptible() [1], which removes any need
for these explicitly placed scheduling points.

The cond_resched() calls in set-3 are a little more difficult.
To start with, given its NOP character under full preemption, it
never actually saved us from a tight loop.
With voluntary preemption, it's not a NOP, but it might as well be --
for most workloads the scheduler does not have an interminable supply
of runnable tasks on the runqueue.

So, cond_resched() is useful for avoiding softlockup splats, but not
terribly good for error handling. Ideally, these should be replaced
with some kind of timed or event wait.
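
For instance, a retry loop could back off with a timed wait instead
of spinning; a minimal sketch, where try_operation() is a
hypothetical stand-in for the failing call:

    int ret;

    for (;;) {
        ret = try_operation();    /* hypothetical call that can return -EAGAIN */
        if (ret != -EAGAIN)
            break;
        /* Sleep for a tick rather than busy-looping via cond_resched(). */
        schedule_timeout_uninterruptible(1);
    }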

For now we use cond_resched_stall(), which tries to schedule if
possible, and executes a cpu_relax() if not.
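
Roughly, those semantics might look like the sketch below
(illustrative only; not the actual implementation from this series):

    static inline void cond_resched_stall(void)
    {
        if (need_resched() && preemptible())
            schedule();     /* give up the CPU if we are allowed to */
        else
            cpu_relax();    /* otherwise at least relax the CPU pipeline */
    }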

All of the cond_resched() calls here are from set-1, except for the
retry loops in task_function_call() and the mutex testing logic.

Replace those with cond_resched_stall(). The others can be removed.

[1] https://lore.kernel.org/lkml/20231107215742.363031-1-ankur.a.arora@oracle.com/

Cc: Tejun Heo <tj@kernel.org>
Cc: Zefan Li <lizefan.x@bytedance.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Peter Oberparleiter <oberpar@linux.ibm.com>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: Will Deacon <will@kernel.org>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
 include/linux/sched/cond_resched.h |  1 -
 kernel/auditsc.c                   |  2 --
 kernel/cgroup/rstat.c              |  3 +--
 kernel/dma/debug.c                 |  2 --
 kernel/events/core.c               |  2 +-
 kernel/gcov/base.c                 |  1 -
 kernel/kallsyms.c                  |  4 +---
 kernel/kexec_core.c                |  6 ------
 kernel/locking/test-ww_mutex.c     |  4 ++--
 kernel/module/main.c               |  1 -
 kernel/ptrace.c                    |  2 --
 kernel/sched/core.c                |  1 -
 kernel/sched/fair.c                |  4 ----
 13 files changed, 5 insertions(+), 28 deletions(-)
delete mode 100644 include/linux/sched/cond_resched.h
On Tue, Nov 07, 2023 at 03:08:02PM -0800, Ankur Arora wrote:
Sounds like the sort of test which should be put into linux-next to get
test coverage right away, to see what really blows up.
Luis
diff --git a/include/linux/sched/cond_resched.h b/include/linux/sched/cond_resched.h
deleted file mode 100644
--- a/include/linux/sched/cond_resched.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <linux/sched.h>
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -2460,8 +2460,6 @@ void __audit_inode_child(struct inode *parent,
}
}
- cond_resched();
-
/* is there a matching child entry? */
list_for_each_entry(n, &context->names_list, list) {
/* can only match entries that have a name */
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -210,8 +210,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
/* play nice and yield if necessary */
if (need_resched() || spin_needbreak(&cgroup_rstat_lock)) {
spin_unlock_irq(&cgroup_rstat_lock);
- if (!cond_resched())
- cpu_relax();
+ cond_resched_stall();
spin_lock_irq(&cgroup_rstat_lock);
}
}
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -543,8 +543,6 @@ void debug_dma_dump_mappings(struct device *dev)
}
}
spin_unlock_irqrestore(&bucket->lock, flags);
-
- cond_resched();
}
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -125,7 +125,7 @@ task_function_call(struct task_struct *p, remote_function_f func, void *info)
if (ret != -EAGAIN)
break;
- cond_resched();
+ cond_resched_stall();
}
return ret;
diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
--- a/kernel/gcov/base.c
+++ b/kernel/gcov/base.c
@@ -43,7 +43,6 @@ void gcov_enable_events(void)
/* Perform event callback for previously registered entries. */
while ((info = gcov_info_next(info))) {
gcov_event(GCOV_ADD, info);
- cond_resched();
}
mutex_unlock(&gcov_lock);
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -19,7 +19,7 @@
#include <linux/kdb.h>
#include <linux/err.h>
#include <linux/proc_fs.h>
-#include <linux/sched.h> /* for cond_resched */
+#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/filter.h>
@@ -295,7 +295,6 @@ int kallsyms_on_each_symbol(int (*fn)(void *, const char *, unsigned long),
ret = fn(data, namebuf, kallsyms_sym_address(i));
if (ret != 0)
return ret;
- cond_resched();
}
return 0;
}
@@ -312,7 +311,6 @@ int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long),
for (i = start; !ret && i <= end; i++) {
ret = fn(data, kallsyms_sym_address(get_symbol_seq(i)));
- cond_resched();
}
return ret;
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -452,8 +452,6 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
while (hole_end <= crashk_res.end) {
unsigned long i;
- cond_resched();
-
if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
break;
/* See if I overlap any of the segments */
@@ -832,8 +830,6 @@ static int kimage_load_normal_segment(struct kimage *image,
else
buf += mchunk;
mbytes -= mchunk;
-
- cond_resched();
}
out:
return result;
@@ -900,8 +896,6 @@ static int kimage_load_crash_segment(struct kimage *image,
else
buf += mchunk;
mbytes -= mchunk;
-
- cond_resched();
}
out:
return result;
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -46,7 +46,7 @@ static void test_mutex_work(struct work_struct *work)
if (mtx->flags & TEST_MTX_TRY) {
while (!ww_mutex_trylock(&mtx->mutex, NULL))
- cond_resched();
+ cond_resched_stall();
} else {
ww_mutex_lock(&mtx->mutex, NULL);
}
@@ -84,7 +84,7 @@ static int __test_mutex(unsigned int flags)
ret = -EINVAL;
break;
}
- cond_resched();
+ cond_resched_stall();
} while (time_before(jiffies, timeout));
} else {
ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
diff --git a/kernel/module/main.c b/kernel/module/main.c
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -1908,7 +1908,6 @@ static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned l
if (copy_from_user(dst, usrc, n) != 0)
return -EFAULT;
- cond_resched();
dst += n;
usrc += n;
len -= n;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -798,8 +798,6 @@ static int ptrace_peek_siginfo(struct task_struct *child,
if (signal_pending(current))
break;
-
- cond_resched();
}
if (i > 0)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -25,7 +25,6 @@
#include <linux/refcount_api.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
-#include <linux/sched/cond_resched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -33,7 +33,6 @@
#include <linux/refcount_api.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
-#include <linux/sched/cond_resched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/isolation.h>
#include <linux/sched/nohz.h>
@@ -51,8 +50,6 @@
#include <asm/switch_to.h>
-#include <linux/sched/cond_resched.h>
-
#include "sched.h"
#include "stats.h"
#include "autogroup.h"
@@ -3374,7 +3371,6 @@ static void task_numa_work(struct callback_head *work)
if (pages <= 0 || virtpages <= 0)
goto out;
- cond_resched();
} while (end != vma->vm_end);
} for_each_vma(vmi, vma);