[2/2] cgroup/cpuset: Exclude isolated CPUs from housekeeping CPU masks

Message ID 20240229021414.508972-3-longman@redhat.com
State New
Series isolation: Exclude dynamically isolated CPUs from housekeeping masks

Commit Message

Waiman Long Feb. 29, 2024, 2:14 a.m. UTC
  Call the newly introduced housekeeping_exlude_isolcpus() function to
exclude isolated CPUs from the selected housekeeping CPU masks. This
is in addition to the exclusion of isolated CPUs from the workqueue
unbound CPU mask.

Right now only HK_TYPE_TIMER and HK_TYPE_RCU CPU masks are updated,
but more may be added in the future when appropriate.

Signed-off-by: Waiman Long <longman@redhat.com>
---
 kernel/cgroup/cpuset.c | 30 +++++++++++++++++++++++-------
 1 file changed, 23 insertions(+), 7 deletions(-)
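
For readers without patch 1/2 in front of them, here is a consolidated sketch of the renamed helper as it reads after this patch. The lines elided between the relevant hunks below (the cpus-held assertion and the early-return check) and the exact housekeeping_exlude_isolcpus() prototype are filled in from the rest of the series and the surrounding cpuset.c code, so treat them as assumptions rather than verbatim source:

	/*
	 * Housekeeping mask types to be updated on CPU isolation changes.
	 * Covering another mask later only means OR-ing in its BIT(HK_TYPE_*).
	 */
	#define HOUSEKEEPING_FLAGS	(BIT(HK_TYPE_TIMER) | BIT(HK_TYPE_RCU))

	static void update_isolation_cpumasks(bool isolcpus_updated)
	{
		int ret;

		lockdep_assert_cpus_held();	/* assumed from the pre-patch helper */

		if (!isolcpus_updated)
			return;

		/* Exclude isolated CPUs from the unbound workqueue cpumask ... */
		ret = workqueue_unbound_exclude_cpumask(isolated_cpus);
		if (WARN_ON_ONCE(ret < 0))
			return;

		/*
		 * ... and from the selected housekeeping masks.  -EOPNOTSUPP
		 * is tolerated, presumably meaning housekeeping is not active.
		 */
		ret = housekeeping_exlude_isolcpus(isolated_cpus, HOUSEKEEPING_FLAGS);
		WARN_ON_ONCE((ret < 0) && (ret != -EOPNOTSUPP));
	}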
  

Comments

Tejun Heo Feb. 29, 2024, 8:41 p.m. UTC | #1
On Wed, Feb 28, 2024 at 09:14:14PM -0500, Waiman Long wrote:
> Call the newly introduced housekeeping_exlude_isolcpus() function to
> exclude isolated CPUs from the selected housekeeping CPU masks. This
> is in addition to the exclusion of isolated CPUs from the workqueue
> unbound CPU mask.
> 
> Right now only HK_TYPE_TIMER and HK_TYPE_RCU CPU masks are updated,
> but more may be added in the future when appropriate.
> 
> Signed-off-by: Waiman Long <longman@redhat.com>

This looks fine to me from cgroup POV. Please feel free to route the patch
any way you see fit.

  Acked-by: Tejun Heo <tj@kernel.org>

Thanks.
  

Patch

diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index ba36c073304a..d2541af71c22 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -214,6 +214,11 @@  static cpumask_var_t	isolated_cpus;
 /* List of remote partition root children */
 static struct list_head remote_children;
 
+/*
+ * The set of housekeeping flags to be updated for CPU isolation
+ */
+#define	HOUSEKEEPING_FLAGS	(BIT(HK_TYPE_TIMER) | BIT(HK_TYPE_RCU))
+
 /*
  * Partition root states:
  *
@@ -1505,7 +1510,15 @@  static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
 	return isolcpus_updated;
 }
 
-static void update_unbound_workqueue_cpumask(bool isolcpus_updated)
+/**
+ * update_isolation_cpumasks - Update external isolation CPU masks
+ * @isolcpus_updated - @true if isolation CPU masks update needed
+ *
+ * The following external CPU masks will be updated if necessary:
+ * - workqueue unbound cpumask
+ * - housekeeping cpumasks
+ */
+static void update_isolation_cpumasks(bool isolcpus_updated)
 {
 	int ret;
 
@@ -1515,7 +1528,10 @@  static void update_unbound_workqueue_cpumask(bool isolcpus_updated)
 		return;
 
 	ret = workqueue_unbound_exclude_cpumask(isolated_cpus);
-	WARN_ON_ONCE(ret < 0);
+	if (WARN_ON_ONCE(ret < 0))
+		return;
+	ret = housekeeping_exlude_isolcpus(isolated_cpus, HOUSEKEEPING_FLAGS);
+	WARN_ON_ONCE((ret < 0) && (ret != -EOPNOTSUPP));
 }
 
 /**
@@ -1609,7 +1625,7 @@  static int remote_partition_enable(struct cpuset *cs, int new_prs,
 		parent->child_ecpus_count--;
 	}
 	spin_unlock_irq(&callback_lock);
-	update_unbound_workqueue_cpumask(isolcpus_updated);
+	update_isolation_cpumasks(isolcpus_updated);
 
 	/*
 	 * Proprogate changes in top_cpuset's effective_cpus down the hierarchy.
@@ -1645,7 +1661,7 @@  static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
 		cs->prs_err = PERR_INVCPUS;
 	reset_partition_data(cs);
 	spin_unlock_irq(&callback_lock);
-	update_unbound_workqueue_cpumask(isolcpus_updated);
+	update_isolation_cpumasks(isolcpus_updated);
 
 	/*
 	 * Proprogate changes in top_cpuset's effective_cpus down the hierarchy.
@@ -1697,7 +1713,7 @@  static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask,
 	if (deleting)
 		isolcpus_updated += partition_xcpus_del(prs, NULL, tmp->delmask);
 	spin_unlock_irq(&callback_lock);
-	update_unbound_workqueue_cpumask(isolcpus_updated);
+	update_isolation_cpumasks(isolcpus_updated);
 
 	/*
 	 * Proprogate changes in top_cpuset's effective_cpus down the hierarchy.
@@ -2067,7 +2083,7 @@  static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
 		WARN_ON_ONCE(parent->nr_subparts < 0);
 	}
 	spin_unlock_irq(&callback_lock);
-	update_unbound_workqueue_cpumask(isolcpus_updated);
+	update_isolation_cpumasks(isolcpus_updated);
 
 	if ((old_prs != new_prs) && (cmd == partcmd_update))
 		update_partition_exclusive(cs, new_prs);
@@ -3131,7 +3147,7 @@  static int update_prstate(struct cpuset *cs, int new_prs)
 	else if (new_xcpus_state)
 		partition_xcpus_newstate(old_prs, new_prs, cs->effective_xcpus);
 	spin_unlock_irq(&callback_lock);
-	update_unbound_workqueue_cpumask(new_xcpus_state);
+	update_isolation_cpumasks(new_xcpus_state);
 
 	/* Force update if switching back to member */
 	update_cpumasks_hier(cs, &tmpmask, !new_prs ? HIER_CHECKALL : 0);
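
One pattern worth noting from the hunks above: every caller updates the isolated_cpus bookkeeping under callback_lock but only invokes update_isolation_cpumasks() after spin_unlock_irq(), presumably because the workqueue and housekeeping updates may block and so cannot run with the spinlock held and interrupts disabled. A condensed sketch of that call-site shape, based on the remote_cpus_update() hunk (the lock-acquire line is assumed from the surrounding cpuset.c code and is not part of this diff):

	spin_lock_irq(&callback_lock);	/* assumed: taken earlier in remote_cpus_update() */
	if (deleting)
		isolcpus_updated += partition_xcpus_del(prs, NULL, tmp->delmask);
	spin_unlock_irq(&callback_lock);

	/* External masks are refreshed only after the spinlock is dropped. */
	update_isolation_cpumasks(isolcpus_updated);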