[tip: sched/core] seqlock/latch: Provide raw_read_seqcount_latch_retry()

The following commit has been merged into the sched/core branch of tip:

Commit-ID:     d16317de9b412aa7bd3598c607112298e36b4352
Gitweb:        https://git.kernel.org/tip/d16317de9b412aa7bd3598c607112298e36b4352
Author:        Peter Zijlstra <peterz@infradead.org>
AuthorDate:    Fri, 19 May 2023 12:20:59 +02:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Mon, 05 Jun 2023 21:11:03 +02:00

seqlock/latch: Provide raw_read_seqcount_latch_retry()

The read side of seqcount_latch consists of:

  do {
    seq = raw_read_seqcount_latch(&latch->seq);
    ...
  } while (read_seqcount_latch_retry(&latch->seq, seq));

which is asymmetric in the raw_ department, and sure enough,
read_seqcount_latch_retry() includes (explicit) instrumentation where
raw_read_seqcount_latch() does not.

This inconsistency becomes a problem when trying to use it from
noinstr code. As such, fix it by renaming and re-implementing
raw_read_seqcount_latch_retry() without the instrumentation.

Specifically the instrumentation in question is kcsan_atomic_next(0)
in do___read_seqcount_retry(). Losing this annotation is not a
problem because raw_read_seqcount_latch() does not pass through
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX).
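
For a concrete picture of the now-symmetric read side, here is a minimal
sketch of a latched 64-bit value, modeled on latched_seq_read_nolock() and
latched_seq_write() in the kernel/printk/printk.c hunk below. The
latched_u64 type and function names are illustrative only, not part of
this patch; writers must be serialized by the caller, e.g. under a lock:

  #include <linux/seqlock.h>

  struct latched_u64 {
    seqcount_latch_t latch;
    u64 val[2];
  };

  /* Writer side: flip the latch before updating each copy. */
  static void latched_u64_write(struct latched_u64 *ls, u64 val)
  {
    raw_write_seqcount_latch(&ls->latch);
    ls->val[0] = val;
    raw_write_seqcount_latch(&ls->latch);
    ls->val[1] = val;
  }

  /* Reader side: both ends are raw_, i.e. uninstrumented. */
  static u64 latched_u64_read(struct latched_u64 *ls)
  {
    unsigned int seq;
    u64 val;

    do {
      seq = raw_read_seqcount_latch(&ls->latch);
      val = ls->val[seq & 0x1];
      /* The retry below provides the needed smp_rmb(). */
    } while (raw_read_seqcount_latch_retry(&ls->latch, seq));

    return val;
  }
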
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Tested-by: Michael Kelley <mikelley@microsoft.com> # Hyper-V
Link: https://lore.kernel.org/r/20230519102715.233598176@infradead.org
---
 include/linux/rbtree_latch.h |  2 +-
 include/linux/seqlock.h      | 15 ++++++++-------
 kernel/printk/printk.c       |  2 +-
 kernel/time/sched_clock.c    |  2 +-
 kernel/time/timekeeping.c    |  4 ++--
 5 files changed, 13 insertions(+), 12 deletions(-)

--- a/include/linux/rbtree_latch.h
+++ b/include/linux/rbtree_latch.h
@@ -206,7 +206,7 @@ latch_tree_find(void *key, struct latch_tree_root *root,
 	do {
 		seq = raw_read_seqcount_latch(&root->seq);
 		node = __lt_find(key, root, seq & 1, ops->comp);
-	} while (read_seqcount_latch_retry(&root->seq, seq));
+	} while (raw_read_seqcount_latch_retry(&root->seq, seq));
 
 	return node;
 }
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -671,9 +671,9 @@ typedef struct {
  *
  * Return: sequence counter raw value. Use the lowest bit as an index for
  * picking which data copy to read. The full counter must then be checked
- * with read_seqcount_latch_retry().
+ * with raw_read_seqcount_latch_retry().
  */
-static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
+static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
 {
 	/*
 	 * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
@@ -683,16 +683,17 @@ static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
 }
 
 /**
- * read_seqcount_latch_retry() - end a seqcount_latch_t read section
+ * raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section
  * @s: Pointer to seqcount_latch_t
  * @start: count, from raw_read_seqcount_latch()
  *
  * Return: true if a read section retry is required, else false
  */
-static inline int
-read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
+static __always_inline int
+raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
 {
-	return read_seqcount_retry(&s->seqcount, start);
+	smp_rmb();
+	return unlikely(READ_ONCE(s->seqcount.sequence) != start);
 }
 
 /**
@@ -752,7 +753,7 @@ read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
  *			entry = data_query(latch->data[idx], ...);
  *
  *			// This includes needed smp_rmb()
- *		} while (read_seqcount_latch_retry(&latch->seq, seq));
+ *		} while (raw_read_seqcount_latch_retry(&latch->seq, seq));
  *
  *		return entry;
  *	}
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -528,7 +528,7 @@ static u64 latched_seq_read_nolock(struct latched_seq *ls)
 		seq = raw_read_seqcount_latch(&ls->latch);
 		idx = seq & 0x1;
 		val = ls->val[idx];
-	} while (read_seqcount_latch_retry(&ls->latch, seq));
+	} while (raw_read_seqcount_latch_retry(&ls->latch, seq));
 
 	return val;
 }
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -77,7 +77,7 @@ notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
 
 notrace int sched_clock_read_retry(unsigned int seq)
 {
-	return read_seqcount_latch_retry(&cd.seq, seq);
+	return raw_read_seqcount_latch_retry(&cd.seq, seq);
 }
 
 unsigned long long notrace sched_clock(void)
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -450,7 +450,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 		tkr = tkf->base + (seq & 0x01);
 		now = ktime_to_ns(tkr->base);
 		now += fast_tk_get_delta_ns(tkr);
-	} while (read_seqcount_latch_retry(&tkf->seq, seq));
+	} while (raw_read_seqcount_latch_retry(&tkf->seq, seq));
 
 	return now;
 }
@@ -566,7 +566,7 @@ static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
 		basem = ktime_to_ns(tkr->base);
 		baser = ktime_to_ns(tkr->base_real);
 		delta = fast_tk_get_delta_ns(tkr);
-	} while (read_seqcount_latch_retry(&tkf->seq, seq));
+	} while (raw_read_seqcount_latch_retry(&tkf->seq, seq));
 
 	if (mono)
 		*mono = basem + delta;