[tip: locking/core] locking/seqlock: Change __seqprop() to return the function pointer
Commit Message
The following commit has been merged into the locking/core branch of tip:
Commit-ID: e6115c6f7a0ce3388cc60b69a284facf78b5dbfd
Gitweb: https://git.kernel.org/tip/e6115c6f7a0ce3388cc60b69a284facf78b5dbfd
Author: Oleg Nesterov <oleg@redhat.com>
AuthorDate: Thu, 12 Oct 2023 16:32:27 +02:00
Committer: Ingo Molnar <mingo@kernel.org>
CommitterDate: Thu, 12 Oct 2023 20:18:21 +02:00
locking/seqlock: Change __seqprop() to return the function pointer
This simplifies the macro and makes it easy to add new seqprops
with 2 or more arguments.
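For illustration, a minimal user-space sketch of the pattern (the types
and names below are made up; only the _Generic() shape mirrors the
patch): _Generic() now selects the helper function itself, and the
outer macro decides how many arguments to pass to it.

  struct a { int seq; };
  struct b { struct a base; };

  static struct a *prop_ptr_a(struct a *s) { return s; }
  static struct a *prop_ptr_b(struct b *s) { return &s->base; }

  /* _Generic() picks the function, not a pre-built call ... */
  #define prop_fn(s)   _Generic(*(s),                \
                                struct a: prop_ptr_a, \
                                struct b: prop_ptr_b)

  /* ... so the wrapper decides how many arguments to append. */
  #define prop_ptr(s)  prop_fn(s)(s)

  /* usage: the same macro serves both types */
  static int read_a(struct a *x) { return prop_ptr(x)->seq; }
  static int read_b(struct b *x) { return prop_ptr(x)->seq; }

With the old form, every _Generic() branch had to embed the full call
with its "(void *)" cast, so each branch was fixed to exactly one
casted argument.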
Plus, this way we do not lose the type info: the (void *) type cast is
no longer needed.
And dropping that cast reveals a problem: a lot of seqcount_t helpers
pass a "const seqcount_t *s" argument to __seqprop_ptr(seqcount_t *s),
but (before this patch) the "(void *)(s)" cast masked the mismatch.
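To see what the cast was hiding, consider a minimal sketch (made-up
names, plain user-space C): routing a const pointer through "(void *)"
silently discards the qualifier, while the direct call gets a compiler
diagnostic.

  static int *get_ptr(int *p) { return p; }

  void reader(const int *p)
  {
          get_ptr((void *)p);   /* accepted silently, const is lost    */
          get_ptr(p);           /* warning: discards 'const' qualifier */
  }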
So this patch changes __seqprop_ptr() and __seqprop_##lockname##_ptr()
to accept a "const LOCKNAME *s" argument. This is not nice either: they
need to drop the constness on return, because these helpers are used by
both readers and writers, but at least it is clear what is going on.
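The resulting helper shape looks roughly like this sketch (made-up
names): a single accessor takes a const pointer so that const readers
and non-const writers can share it, and explicitly casts the constness
away on return.

  struct ctr { unsigned seq; };

  static unsigned *ctr_ptr(const struct ctr *c)
  {
          return (void *)&c->seq;                /* drop const */
  }

  static unsigned read_ctr(const struct ctr *c) { return *ctr_ptr(c); }
  static void     bump_ctr(struct ctr *c)       { (*ctr_ptr(c))++; }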
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Waiman Long <longman@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@kernel.org>
Link: https://lore.kernel.org/r/20231012143227.GA16143@redhat.com
---
include/linux/seqlock.h | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
@@ -200,9 +200,9 @@ typedef struct seqcount_##lockname { \
} seqcount_##lockname##_t; \
\
static __always_inline seqcount_t * \
-__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
+__seqprop_##lockname##_ptr(const seqcount_##lockname##_t *s) \
{ \
- return &s->seqcount; \
+ return (void *)&s->seqcount; /* drop const */ \
} \
\
static __always_inline unsigned \
@@ -247,9 +247,9 @@ __seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \
* __seqprop() for seqcount_t
*/
-static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
+static inline seqcount_t *__seqprop_ptr(const seqcount_t *s)
{
- return s;
+ return (void *)s; /* drop const */
}
static inline unsigned __seqprop_sequence(const seqcount_t *s)
@@ -292,19 +292,19 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define __seqprop_case(s, lockname, prop) \
- seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s))
+ seqcount_##lockname##_t: __seqprop_##lockname##_##prop
#define __seqprop(s, prop) _Generic(*(s), \
- seqcount_t: __seqprop_##prop((void *)(s)), \
+ seqcount_t: __seqprop_##prop, \
__seqprop_case((s), raw_spinlock, prop), \
__seqprop_case((s), spinlock, prop), \
__seqprop_case((s), rwlock, prop), \
__seqprop_case((s), mutex, prop))
-#define seqprop_ptr(s) __seqprop(s, ptr)
-#define seqprop_sequence(s) __seqprop(s, sequence)
-#define seqprop_preemptible(s) __seqprop(s, preemptible)
-#define seqprop_assert(s) __seqprop(s, assert)
+#define seqprop_ptr(s) __seqprop(s, ptr)(s)
+#define seqprop_sequence(s) __seqprop(s, sequence)(s)
+#define seqprop_preemptible(s) __seqprop(s, preemptible)(s)
+#define seqprop_assert(s) __seqprop(s, assert)(s)
/**
* __read_seqcount_begin() - begin a seqcount_t read section w/o barrier