@@ -187,12 +187,18 @@ EXPORT_SYMBOL(jiffies_64);
#define WHEEL_SIZE (LVL_SIZE * LVL_DEPTH)
#ifdef CONFIG_NO_HZ_COMMON
-# define NR_BASES 2
-# define BASE_STD 0
-# define BASE_DEF 1
+/*
+ * If multiple bases need to be locked, use the base ordering for lock
+ * nesting, i.e. lowest number first.
+ */
+# define NR_BASES 3
+# define BASE_LOCAL 0
+# define BASE_GLOBAL 1
+# define BASE_DEF 2
#else
# define NR_BASES 1
-# define BASE_STD 0
+# define BASE_LOCAL 0
+# define BASE_GLOBAL 0
# define BASE_DEF 0
#endif
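
The nesting rule in the new comment is the one followed later in this patch: when both bases of a CPU have to be held, the lower-numbered base is locked first and the higher one is taken nested, released in reverse order. A minimal sketch of the pattern, using only identifiers introduced here (it mirrors the get_next_timer_interrupt() hunk further down, nothing new is assumed):

	struct timer_base *base_local, *base_global;

	base_local  = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
	base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]);

	/* Lowest base index first ... */
	raw_spin_lock(&base_local->lock);
	raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);

	/* ... operate on both bases ... */

	/* ... and release in reverse order. */
	raw_spin_unlock(&base_global->lock);
	raw_spin_unlock(&base_local->lock);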
@@ -899,7 +905,10 @@ static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
{
- struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
+ int index = tflags & TIMER_PINNED ? BASE_LOCAL : BASE_GLOBAL;
+ struct timer_base *base;
+
+ base = per_cpu_ptr(&timer_bases[index], cpu);
/*
* If the timer is deferrable and NO_HZ_COMMON is set then we need
@@ -912,7 +921,10 @@ static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
{
- struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+ int index = tflags & TIMER_PINNED ? BASE_LOCAL : BASE_GLOBAL;
+ struct timer_base *base;
+
+ base = this_cpu_ptr(&timer_bases[index]);
/*
* If the timer is deferrable and NO_HZ_COMMON is set then we need
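
With the split, the base a timer is queued on now follows from its TIMER_PINNED flag: pinned timers stay on this CPU's BASE_LOCAL, unpinned ones go to BASE_GLOBAL, and deferrable timers keep using BASE_DEF as before. A usage sketch; the timer names, callback and arming function are made up for illustration, the calls themselves are the existing timer API and the base selection follows from the helpers above:

	static struct timer_list pinned_timer;	/* queued on BASE_LOCAL  */
	static struct timer_list global_timer;	/* queued on BASE_GLOBAL */

	static void example_cb(struct timer_list *t)
	{
		/* timer callback */
	}

	static void example_arm(void)
	{
		timer_setup(&pinned_timer, example_cb, TIMER_PINNED);
		timer_setup(&global_timer, example_cb, 0);

		pinned_timer.expires = jiffies + HZ;
		global_timer.expires = jiffies + HZ;

		add_timer(&pinned_timer);
		add_timer(&global_timer);
	}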
@@ -1960,13 +1972,31 @@ static unsigned long next_timer_interrupt(struct timer_base *base)
}
static inline unsigned long __get_next_timer_interrupt(unsigned long basej,
- struct timer_base *base)
+ struct timer_base *base_local,
+ struct timer_base *base_global)
{
- unsigned long nextevt;
+ unsigned long nextevt, nextevt_local, nextevt_global;
+ bool local_first;
- nextevt = next_timer_interrupt(base);
+ nextevt_local = next_timer_interrupt(base_local);
+ nextevt_global = next_timer_interrupt(base_global);
- if (base->timers_pending) {
+ /*
+ * Check whether the local event is expiring before or at the same
+ * time as the global event.
+ *
+ * Note that nextevt_global and nextevt_local might be based on
+ * different base->clk values, so comparing them when one of the
+ * bases is empty is not guaranteed to yield a correct local_first.
+ */
+ if (base_local->timers_pending && base_global->timers_pending)
+ local_first = time_before_eq(nextevt_local, nextevt_global);
+ else
+ local_first = base_local->timers_pending;
+
+ nextevt = local_first ? nextevt_local : nextevt_global;
+
+ if (base_local->timers_pending || base_global->timers_pending) {
/* If we missed a tick already, force 0 delta */
if (time_before(nextevt, basej))
nextevt = basej;
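
The local_first selection above is compact, so a standalone restatement may help. pick_local_first() is a made-up helper name for illustration; the logic and struct members are exactly those used in __get_next_timer_interrupt():

	/*
	 * local pending | global pending | local_first
	 * --------------+----------------+-----------------------------------
	 *     yes       |      yes       | time_before_eq(local, global)
	 *     yes       |      no        | true  (empty global base's clk not
	 *               |                |        trusted)
	 *     no        |      yes       | false (empty local base's clk not
	 *               |                |        trusted)
	 *     no        |      no        | false (nextevt only matters for the
	 *               |                |        "nothing pending" path)
	 */
	static bool pick_local_first(struct timer_base *base_local,
				     struct timer_base *base_global,
				     unsigned long nextevt_local,
				     unsigned long nextevt_global)
	{
		if (base_local->timers_pending && base_global->timers_pending)
			return time_before_eq(nextevt_local, nextevt_global);

		return base_local->timers_pending;
	}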
@@ -1985,7 +2015,7 @@ static inline unsigned long __get_next_timer_interrupt(unsigned long basej,
*/
u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
{
- struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+ struct timer_base *base_local, *base_global;
u64 expires = KTIME_MAX;
unsigned long nextevt;
@@ -1996,9 +2026,16 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
if (cpu_is_offline(smp_processor_id()))
return expires;
- raw_spin_lock(&base->lock);
- nextevt = __get_next_timer_interrupt(basej, base);
- raw_spin_unlock(&base->lock);
+ base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
+ base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]);
+
+ raw_spin_lock(&base_local->lock);
+ raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
+
+ nextevt = __get_next_timer_interrupt(basej, base_local, base_global);
+
+ raw_spin_unlock(&base_global->lock);
+ raw_spin_unlock(&base_local->lock);
expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
@@ -2018,7 +2055,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
*/
u64 timer_set_idle(unsigned long basej, u64 basem, bool *idle)
{
- struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+ struct timer_base *base_local, *base_global;
unsigned long nextevt;
/*
@@ -2030,25 +2067,35 @@ u64 timer_set_idle(unsigned long basej, u64 basem, bool *idle)
return KTIME_MAX;
}
- raw_spin_lock(&base->lock);
- nextevt = __get_next_timer_interrupt(basej, base);
+ base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
+ base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]);
+
+ raw_spin_lock(&base_local->lock);
+ raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
+
+ nextevt = __get_next_timer_interrupt(basej, base_local, base_global);
/*
* We have a fresh next event. Check whether we can forward the
* base.
*/
- __forward_timer_base(base, basej);
+ __forward_timer_base(base_local, basej);
+ __forward_timer_base(base_global, basej);
/*
- * Base is idle if the next event is more than a tick away. Also
+ * Bases are idle if the next event is more than a tick away. Also
* the tick is stopped so any added timer must forward the base clk
* itself to keep granularity small. This idle logic is only
- * maintained for the BASE_STD base, deferrable timers may still
- * see large granularity skew (by design).
+ * maintained for the BASE_LOCAL and BASE_GLOBAL bases; deferrable
+ * timers may still see large granularity skew (by design).
*/
- base->is_idle = *idle = time_after(nextevt, basej + 1);
+ *idle = time_after(nextevt, basej + 1);
+
+ /* Both bases need to be marked idle in sync */
+ base_local->is_idle = base_global->is_idle = *idle;
- raw_spin_unlock(&base->lock);
+ raw_spin_unlock(&base_global->lock);
+ raw_spin_unlock(&base_local->lock);
return basem + (u64)(nextevt - basej) * TICK_NSEC;
}
@@ -2060,15 +2107,14 @@ u64 timer_set_idle(unsigned long basej, u64 basem, bool *idle)
*/
void timer_clear_idle(void)
{
- struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
-
/*
* We do this unlocked. The worst outcome is a remote enqueue sending
* a pointless IPI, but taking the lock would just make the window for
* sending the IPI a few instructions smaller for the cost of taking
* the lock in the exit from idle path.
*/
- base->is_idle = false;
+ __this_cpu_write(timer_bases[BASE_LOCAL].is_idle, false);
+ __this_cpu_write(timer_bases[BASE_GLOBAL].is_idle, false);
}
#endif
@@ -2114,11 +2160,13 @@ static inline void __run_timers(struct timer_base *base)
*/
static __latent_entropy void run_timer_softirq(struct softirq_action *h)
{
- struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+ struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
__run_timers(base);
- if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
+ if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) {
+ __run_timers(this_cpu_ptr(&timer_bases[BASE_GLOBAL]));
__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
+ }
}
/*
@@ -2126,7 +2174,7 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
*/
static void run_local_timers(void)
{
- struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+ struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
hrtimer_run_queues();