[1/2] sched/isolation: Exclude dynamically isolated CPUs from housekeeping masks
Commit Message
The housekeeping CPU masks, set up by the "isolcpus" and "nohz_full"
boot command line options, are used at boot time to exclude selected
CPUs from running some kernel background processes to minimize
disturbance to latency-sensitive userspace applications. Some of the
housekeeping CPU masks are also checked at run time to avoid using
the isolated CPUs.

The cpuset subsystem is now able to dynamically create a set of
isolated CPUs to be used in isolated cpuset partitions. The long term
goal is to make the degree of isolation as close as possible to what
can be done statically using those boot command line options.

This patch is a step in that direction by making the housekeeping CPU
mask APIs exclude the dynamically isolated CPUs when they are called
at run time. The housekeeping CPU masks will fall back to the bootup
default when all the dynamically isolated CPUs are released.

A new housekeeping_exclude_isolcpus() function is added, to be called
by the cpuset subsystem with the list of isolated CPUs to be excluded.

Signed-off-by: Waiman Long <longman@redhat.com>
---
include/linux/sched/isolation.h | 8 +++
kernel/sched/isolation.c | 101 +++++++++++++++++++++++++++++++-
2 files changed, 108 insertions(+), 1 deletion(-)
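
As a usage note for reviewers (not part of the diff below): a minimal
sketch of how a caller such as the cpuset partition code might drive the
new API, assuming it holds cpus_read_lock() as required by the lockdep
assertion and provides its own serialization against concurrent calls
(e.g. cpuset_mutex). The helper names and the choice of HK types are
illustrative only, not part of this patch.

#include <linux/bits.h>
#include <linux/cpu.h>		/* cpus_read_lock()/cpus_read_unlock() */
#include <linux/cpumask.h>
#include <linux/sched/isolation.h>

/*
 * Hypothetical caller: exclude a set of isolated CPUs from the
 * scheduler-domain and managed-interrupt housekeeping masks.
 */
static int isolate_housekeeping_cpus(const struct cpumask *isolcpus)
{
	unsigned long flags = BIT(HK_TYPE_DOMAIN) | BIT(HK_TYPE_MANAGED_IRQ);
	int err;

	cpus_read_lock();
	err = housekeeping_exclude_isolcpus(isolcpus, flags);
	cpus_read_unlock();
	return err;
}

/*
 * Hypothetical caller: passing an empty mask reverts the housekeeping
 * masks to the bootup default.
 */
static void unisolate_housekeeping_cpus(void)
{
	cpus_read_lock();
	WARN_ON_ONCE(housekeeping_exclude_isolcpus(cpu_none_mask, 0));
	cpus_read_unlock();
}
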
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -27,6 +27,8 @@ extern bool housekeeping_enabled(enum hk_type type);
extern void housekeeping_affine(struct task_struct *t, enum hk_type type);
extern bool housekeeping_test_cpu(int cpu, enum hk_type type);
extern void __init housekeeping_init(void);
+extern int housekeeping_exclude_isolcpus(const struct cpumask *isolcpus,
+ unsigned long flags);
#else
@@ -54,6 +56,12 @@ static inline bool housekeeping_test_cpu(int cpu, enum hk_type type)
}
static inline void housekeeping_init(void) { }
+
+static inline int housekeeping_exclude_isolcpus(const struct cpumask *isolcpus,
+ unsigned long flags)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_CPU_ISOLATION */
static inline bool housekeeping_cpu(int cpu, enum hk_type type)
--- a/kernel/sched/isolation.c
+++ b/kernel/sched/isolation.c
@@ -28,7 +28,8 @@ struct housekeeping {
unsigned long flags;
};
-static struct housekeeping housekeeping;
+static struct housekeeping housekeeping __read_mostly;
+static struct housekeeping housekeeping_boot __read_mostly;
bool housekeeping_enabled(enum hk_type type)
{
@@ -239,3 +240,101 @@ static int __init housekeeping_isolcpus_setup(char *str)
return housekeeping_setup(str, flags);
}
__setup("isolcpus=", housekeeping_isolcpus_setup);
+
+/*
+ * Save the bootup housekeeping cpumasks to housekeeping_boot
+ */
+static int housekeeping_copy2_boot(void)
+{
+ enum hk_type type;
+
+ housekeeping_boot.flags = housekeeping.flags;
+ for_each_set_bit(type, &housekeeping.flags, HK_TYPE_MAX) {
+ if (!alloc_cpumask_var(&housekeeping_boot.cpumasks[type],
+ GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_copy(housekeeping_boot.cpumasks[type],
+ housekeeping.cpumasks[type]);
+ }
+ return 0;
+}
+
+/*
+ * Exclude the given dynamically isolated CPUs from the housekeeping CPU masks.
+ * External synchronization is required to ensure that this function is never
+ * called concurrently.
+ *
+ * Return: 0 if successful, an error code if not
+ */
+int housekeeping_exclude_isolcpus(const struct cpumask *isolcpus, unsigned long flags)
+{
+ static unsigned long alloc_flags;
+ static cpumask_var_t tmp_mask;
+ static bool excluded; /* true if some CPUs have been excluded */
+ static bool inited; /* true if this function has been called before */
+
+ bool mask_empty = !isolcpus || cpumask_empty(isolcpus);
+ enum hk_type type;
+
+ lockdep_assert_cpus_held();
+
+ if (mask_empty && (!inited || !excluded))
+ return 0;
+
+ if (unlikely(!inited)) {
+ if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
+ return -ENOMEM;
+ if (housekeeping.flags) {
+ int err = housekeeping_copy2_boot();
+
+ if (err)
+ return err;
+ }
+ alloc_flags = housekeeping.flags;
+ inited = true;
+ }
+
+ if (mask_empty) {
+ excluded = false;
+
+ /*
+ * Reset housekeeping to bootup default
+ */
+ for_each_set_bit(type, &housekeeping_boot.flags, HK_TYPE_MAX)
+ cpumask_copy(housekeeping.cpumasks[type],
+ housekeeping_boot.cpumasks[type]);
+
+ WRITE_ONCE(housekeeping.flags, housekeeping_boot.flags);
+ if (!housekeeping_boot.flags &&
+ static_branch_unlikely(&housekeeping_overridden))
+ static_key_disable_cpuslocked(&housekeeping_overridden.key);
+ return 0;
+ }
+
+ /*
+ * Setting up the new housekeeping cpumasks
+ */
+ for_each_set_bit(type, &flags, HK_TYPE_MAX) {
+ const struct cpumask *src_mask;
+
+ if (!(BIT(type) & alloc_flags)) {
+ if (!alloc_cpumask_var(&housekeeping.cpumasks[type], GFP_KERNEL))
+ return -ENOMEM;
+ alloc_flags |= BIT(type);
+ }
+ src_mask = (BIT(type) & housekeeping_boot.flags)
+ ? housekeeping_boot.cpumasks[type] : cpu_possible_mask;
+ /*
+ * Make sure there is at least one online housekeeping CPU
+ */
+ cpumask_andnot(tmp_mask, src_mask, isolcpus);
+ if (!cpumask_intersects(tmp_mask, cpu_online_mask))
+ return -EINVAL; /* Invalid isolated CPUs */
+ cpumask_copy(housekeeping.cpumasks[type], tmp_mask);
+ }
+ WRITE_ONCE(housekeeping.flags, housekeeping_boot.flags | flags);
+ excluded = true;
+ if (!static_branch_unlikely(&housekeeping_overridden))
+ static_key_enable_cpuslocked(&housekeeping_overridden.key);
+ return 0;
+}
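
For illustration only, the intended run-time effect on the existing
housekeeping query helpers could be exercised with a sketch like the one
below (same headers as the earlier sketch). It assumes the chosen CPU was
not already isolated on the boot command line, uses HK_TYPE_MANAGED_IRQ
purely as an example, and the helper name is hypothetical.

/*
 * Illustrative sketch, not part of this patch: after excluding a CPU,
 * housekeeping_test_cpu() should no longer report it as a housekeeping
 * CPU for the given type; clearing the exclusion restores the bootup
 * behaviour. The caller must serialize against other users of the API.
 */
static void verify_exclusion(int cpu)
{
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return;
	cpumask_set_cpu(cpu, mask);

	cpus_read_lock();
	if (!housekeeping_exclude_isolcpus(mask, BIT(HK_TYPE_MANAGED_IRQ)))
		WARN_ON(housekeeping_test_cpu(cpu, HK_TYPE_MANAGED_IRQ));

	/* Release the exclusion: fall back to the bootup default. */
	housekeeping_exclude_isolcpus(cpu_none_mask, 0);
	WARN_ON(!housekeeping_test_cpu(cpu, HK_TYPE_MANAGED_IRQ));
	cpus_read_unlock();

	free_cpumask_var(mask);
}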