[RFC,3/6] softirq: Introduce softirq disabled mask

Message ID 20230801132441.559222-4-frederic@kernel.org
State New
Headers
Series softirq: Start pushing down the big softirq lock |

Commit Message

Frederic Weisbecker Aug. 1, 2023, 1:24 p.m. UTC
  (DISCLAIMER: contains -RT bits)

Once softirq vectors are able to re-enable softirqs when deemed
safe — for example when a timer callback is tagged as soft-interruptible
by other softirq vectors — care must be taken to ensure a given vector
is not re-entrant. That is, a vector can be interrupted by other
vectors but not by itself.

In order to prepare for this, introduce a softirq disabled mask so that
vectors can disable themselves before re-enabling softirqs.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
---
 arch/Kconfig                |  3 +++
 include/linux/bottom_half.h |  2 ++
 include/linux/interrupt.h   | 15 ++++++++++++---
 kernel/softirq.c            | 16 +++++++++++++++-
 4 files changed, 32 insertions(+), 4 deletions(-)
  

Patch

diff --git a/arch/Kconfig b/arch/Kconfig
index 205fd23e0cad..d23968860ddf 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1358,6 +1358,9 @@  config RELR
 config ARCH_HAS_MEM_ENCRYPT
 	bool
 
+config ARCH_HAS_SOFTIRQ_DISABLED_MASK
+       bool
+
 config ARCH_HAS_CC_PLATFORM
 	bool
 
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 2243c7de4917..d5b37b580c79 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -42,6 +42,8 @@  extern void local_bh_exit(void);
 
 #ifdef CONFIG_PREEMPT_RT
 extern bool local_bh_blocked(void);
+extern void local_bh_vec_enable(int vec);
+extern void local_bh_vec_disable(int vec);
 #else
 static inline bool local_bh_blocked(void) { return false; }
 #endif
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 2099fe3980bc..7819d16d8d6f 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -523,8 +523,16 @@  DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
 #define local_softirq_pending_ref irq_stat.__softirq_pending
 #endif
 
-#define local_softirq_pending()	(__this_cpu_read(local_softirq_pending_ref))
-#define reset_softirq_pending()	(__this_cpu_write(local_softirq_pending_ref, 0))
+#if defined(CONFIG_PREEMPT_RT) && defined(CONFIG_ARCH_HAS_SOFTIRQ_DISABLED_MASK)
+#define local_softirq_disabled()	(__this_cpu_read(local_softirq_disabled_ref))
+#else
+#define local_softirq_disabled()	(0)
+#endif
+
+#define local_softirq_pending()	(__this_cpu_read(local_softirq_pending_ref) & \
+				 ~local_softirq_disabled())
+#define reset_softirq_pending() (__this_cpu_and(local_softirq_pending_ref, \
+				local_softirq_disabled()))
 #define or_softirq_pending(x)	(__this_cpu_or(local_softirq_pending_ref, (x)))
 
 #endif /* local_softirq_pending */
@@ -614,7 +622,8 @@  extern void raise_hrtimer_softirq(void);
 
 static inline unsigned int local_pending_timers(void)
 {
-        return __this_cpu_read(pending_timer_softirq);
+        return __this_cpu_read(pending_timer_softirq) &
+		~local_softirq_disabled();
 }
 
 #else
diff --git a/kernel/softirq.c b/kernel/softirq.c
index ba998d572ef4..a394f78de627 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -297,6 +297,18 @@  void do_softirq_post_smp_call_flush(unsigned int was_pending)
 		invoke_softirq();
 }
 
+#ifdef CONFIG_ARCH_HAS_SOFTIRQ_DISABLED_MASK
+void local_bh_vec_enable(int vec)
+{
+	__this_cpu_and(local_softirq_disabled_ref, ~vec);
+}
+
+void local_bh_vec_disable(int vec)
+{
+	__this_cpu_or(local_softirq_disabled_ref, vec);
+}
+#endif
+
 #else /* CONFIG_PREEMPT_RT */
 
 /*
@@ -1009,11 +1021,13 @@  static int timersd_should_run(unsigned int cpu)
 static void run_timersd(unsigned int cpu)
 {
 	unsigned int timer_si;
+	unsigned long timersd_vecs = (1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ);
 
 	ksoftirqd_run_begin();
 
 	timer_si = local_pending_timers();
-	__this_cpu_write(pending_timer_softirq, 0);
+	__this_cpu_and(pending_timer_softirq,
+		       local_softirq_disabled() & timersd_vecs);
 	or_softirq_pending(timer_si);
 
 	__do_softirq();