get_next_timer_interrupt() does more than simply getting the next timer
interrupt. The timer bases are forwarded and also marked as idle when
possible and the next timer interrupt information is required for this.
To avoid confusion, rename the function to a more descriptive name. No
functional change.
Signed-off-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
---
kernel/time/tick-internal.h | 4 ++--
kernel/time/tick-sched.c | 16 ++++++++--------
kernel/time/timer.c | 10 +++++++---
3 files changed, 17 insertions(+), 13 deletions(-)
@@ -168,8 +168,8 @@ static inline void timers_update_nohz(void) { }
DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
-extern void get_next_timer_interrupt(unsigned long basej, u64 basem,
- struct timer_events *tevt);
+extern void forward_and_idle_timer_bases(unsigned long basej, u64 basem,
+ struct timer_events *tevt);
void timer_clear_idle(void);
#define CLOCK_SET_WALL \
@@ -802,11 +802,11 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
* Keep the periodic tick, when RCU, architecture or irq_work
* requests it.
* Aside of that check whether the local timer softirq is
- * pending. If so its a bad idea to call get_next_timer_interrupt()
- * because there is an already expired timer, so it will request
- * immediate expiry, which rearms the hardware timer with a
- * minimal delta which brings us back to this place
- * immediately. Lather, rinse and repeat...
+ * pending. If so its a bad idea to call
+ * forward_and_idle_timer_bases() because there is an already
+ * expired timer, so it will request immediate expiry, which
+ * rearms the hardware timer with a minimal delta which brings us
+ * back to this place immediately. Lather, rinse and repeat...
*/
if (rcu_needs_cpu() || arch_needs_cpu() ||
irq_work_needs_cpu() || local_timer_softirq_pending()) {
@@ -823,7 +823,7 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
* disabled this also looks at the next expiring
* hrtimer.
*/
- get_next_timer_interrupt(basejiff, basemono, &tevt);
+ forward_and_idle_timer_bases(basejiff, basemono, &tevt);
tevt.local = min_t(u64, tevt.local, tevt.global);
ts->next_timer = tevt.local;
}
@@ -838,7 +838,7 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
if (delta <= (u64)TICK_NSEC) {
/*
* Tell the timer code that the base is not idle, i.e. undo
- * the effect of get_next_timer_interrupt():
+ * the effect of forward_and_idle_timer_bases():
*/
timer_clear_idle();
/*
@@ -1141,7 +1141,7 @@ void tick_nohz_idle_retain_tick(void)
{
tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched));
/*
- * Undo the effect of get_next_timer_interrupt() called from
+ * Undo the effect of forward_and_idle_timer_bases() called from
* tick_nohz_next_event().
*/
timer_clear_idle();
@@ -1705,7 +1705,7 @@ static unsigned long next_timer_interrupt(struct timer_base *base)
}
/**
- * get_next_timer_interrupt
+ * forward_and_idle_timer_bases
* @basej: base time jiffies
* @basem: base time clock monotonic
* @tevt: Pointer to the storage for the expiry values
@@ -1713,9 +1713,13 @@ static unsigned long next_timer_interrupt(struct timer_base *base)
* Stores the next pending local and global timer expiry values in the
* struct pointed to by @tevt. If a queue is empty the corresponding field
* is set to KTIME_MAX.
+ *
+ * If required, base->clk is forwarded and the base is also marked as
+ * idle. Idle handling of the timer bases may only be done by the CPU
+ * itself.
*/
-void get_next_timer_interrupt(unsigned long basej, u64 basem,
- struct timer_events *tevt)
+void forward_and_idle_timer_bases(unsigned long basej, u64 basem,
+ struct timer_events *tevt)
{
unsigned long nextevt, nextevt_local, nextevt_global;
struct timer_base *base_local, *base_global;