[v3,4/4] riscv/barrier: Resolve checkpatch.pl error
Commit Message
The original form causes checkpatch.pl to issue an error.
The error message is as follows:
ERROR: space required after that ',' (ctx:VxV)
+#define __atomic_acquire_fence() RISCV_FENCE(r,rw)
                                               ^
Correct all uses of RISCV_FENCE and RISCV_FENCE_ASM, including the
pre-existing ones.
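For reference, the error can be reproduced by running checkpatch in
file mode over one of the touched headers, e.g.:

  ./scripts/checkpatch.pl -f arch/riscv/include/asm/barrier.h

where -f/--file makes checkpatch scan a source file rather than a
patch.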
Signed-off-by: Eric Chan <ericchancf@google.com>
---
arch/riscv/include/asm/atomic.h | 4 ++--
arch/riscv/include/asm/barrier.h | 18 +++++++++---------
arch/riscv/include/asm/fence.h | 6 +++---
arch/riscv/include/asm/io.h | 8 ++++----
arch/riscv/include/asm/mmio.h | 4 ++--
arch/riscv/include/asm/mmiowb.h | 2 +-
6 files changed, 21 insertions(+), 21 deletions(-)
Comments
Eric Chan wrote:
> The original form causes checkpatch.pl to issue an error.
> The error message is as follows:
> ERROR: space required after that ',' (ctx:VxV)
> +#define __atomic_acquire_fence() RISCV_FENCE(r,rw)
>                                                ^
> Correct all uses of RISCV_FENCE and RISCV_FENCE_ASM, including the
> pre-existing ones.
Many of the changes in this patch fix lines that were added in the
previous patches of this series. I'd prefer to add the new code
correctly there and only fix the remaining, pre-existing instances in
this patch.
/Emil
>
> Signed-off-by: Eric Chan <ericchancf@google.com>
> ---
> arch/riscv/include/asm/atomic.h | 4 ++--
> arch/riscv/include/asm/barrier.h | 18 +++++++++---------
> arch/riscv/include/asm/fence.h | 6 +++---
> arch/riscv/include/asm/io.h | 8 ++++----
> arch/riscv/include/asm/mmio.h | 4 ++--
> arch/riscv/include/asm/mmiowb.h | 2 +-
> 6 files changed, 21 insertions(+), 21 deletions(-)
>
> diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
> index 1b2ae3259f1d..19050d13b6c1 100644
> --- a/arch/riscv/include/asm/atomic.h
> +++ b/arch/riscv/include/asm/atomic.h
> @@ -18,8 +18,8 @@
>
> #include <asm/cmpxchg.h>
>
> -#define __atomic_acquire_fence() RISCV_FENCE(r,rw)
> -#define __atomic_release_fence() RISCV_FENCE(rw,r)
> +#define __atomic_acquire_fence() RISCV_FENCE(r, rw)
> +#define __atomic_release_fence() RISCV_FENCE(rw, r)
>
> static __always_inline int arch_atomic_read(const atomic_t *v)
> {
> diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
> index 4f4743d7440d..880b56d8480d 100644
> --- a/arch/riscv/include/asm/barrier.h
> +++ b/arch/riscv/include/asm/barrier.h
> @@ -19,19 +19,19 @@
>
>
> /* These barriers need to enforce ordering on both devices or memory. */
> -#define __mb() RISCV_FENCE(iorw,iorw)
> -#define __rmb() RISCV_FENCE(ir,ir)
> -#define __wmb() RISCV_FENCE(ow,ow)
> +#define __mb() RISCV_FENCE(iorw, iorw)
> +#define __rmb() RISCV_FENCE(ir, ir)
> +#define __wmb() RISCV_FENCE(ow, ow)
>
> /* These barriers do not need to enforce ordering on devices, just memory. */
> -#define __smp_mb() RISCV_FENCE(rw,rw)
> -#define __smp_rmb() RISCV_FENCE(r,r)
> -#define __smp_wmb() RISCV_FENCE(w,w)
> +#define __smp_mb() RISCV_FENCE(rw, rw)
> +#define __smp_rmb() RISCV_FENCE(r, r)
> +#define __smp_wmb() RISCV_FENCE(w, w)
>
> #define __smp_store_release(p, v) \
> do { \
> compiletime_assert_atomic_type(*p); \
> - RISCV_FENCE(rw,w); \
> + RISCV_FENCE(rw, w); \
> WRITE_ONCE(*p, v); \
> } while (0)
>
> @@ -39,7 +39,7 @@ do { \
> ({ \
> typeof(*p) ___p1 = READ_ONCE(*p); \
> compiletime_assert_atomic_type(*p); \
> - RISCV_FENCE(r,rw); \
> + RISCV_FENCE(r, rw); \
> ___p1; \
> })
>
> @@ -68,7 +68,7 @@ do { \
> * instances the scheduler pairs this with an mb(), so nothing is necessary on
> * the new hart.
> */
> -#define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw)
> +#define smp_mb__after_spinlock() RISCV_FENCE(iorw, iorw)
>
> #include <asm-generic/barrier.h>
>
> diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h
> index ca094d72ec20..5b46f96a3ec8 100644
> --- a/arch/riscv/include/asm/fence.h
> +++ b/arch/riscv/include/asm/fence.h
> @@ -6,9 +6,9 @@
> ({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })
>
> #ifdef CONFIG_SMP
> -#define RISCV_ACQUIRE_BARRIER RISCV_FENCE_ASM(r,rw)
> -#define RISCV_RELEASE_BARRIER RISCV_FENCE_ASM(rw,r)
> -#define RISCV_FULL_BARRIER RISCV_FENCE_ASM(rw,rw)
> +#define RISCV_ACQUIRE_BARRIER RISCV_FENCE_ASM(r, rw)
> +#define RISCV_RELEASE_BARRIER RISCV_FENCE_ASM(rw, r)
> +#define RISCV_FULL_BARRIER RISCV_FENCE_ASM(rw, rw)
> #else
> #define RISCV_ACQUIRE_BARRIER
> #define RISCV_RELEASE_BARRIER
> diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
> index afb5ead7552e..1c5c641075d2 100644
> --- a/arch/riscv/include/asm/io.h
> +++ b/arch/riscv/include/asm/io.h
> @@ -47,10 +47,10 @@
> * sufficient to ensure this works sanely on controllers that support I/O
> * writes.
> */
> -#define __io_pbr() RISCV_FENCE(io,i)
> -#define __io_par(v) RISCV_FENCE(i,ior)
> -#define __io_pbw() RISCV_FENCE(iow,o)
> -#define __io_paw() RISCV_FENCE(o,io)
> +#define __io_pbr() RISCV_FENCE(io, i)
> +#define __io_par(v) RISCV_FENCE(i, ior)
> +#define __io_pbw() RISCV_FENCE(iow, o)
> +#define __io_paw() RISCV_FENCE(o, io)
>
> /*
> * Accesses from a single hart to a single I/O address must be ordered. This
> diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h
> index a708968d4a0f..06cadfd7a237 100644
> --- a/arch/riscv/include/asm/mmio.h
> +++ b/arch/riscv/include/asm/mmio.h
> @@ -132,8 +132,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
> * doesn't define any ordering between the memory space and the I/O space.
> */
> #define __io_br() do {} while (0)
> -#define __io_ar(v) RISCV_FENCE(i,ir)
> -#define __io_bw() RISCV_FENCE(w,o)
> +#define __io_ar(v) RISCV_FENCE(i, ir)
> +#define __io_bw() RISCV_FENCE(w, o)
> #define __io_aw() mmiowb_set_pending()
>
> #define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
> diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h
> index 3bcae97d4803..52ce4a399d9b 100644
> --- a/arch/riscv/include/asm/mmiowb.h
> +++ b/arch/riscv/include/asm/mmiowb.h
> @@ -7,7 +7,7 @@
> * "o,w" is sufficient to ensure that all writes to the device have completed
> * before the write to the spinlock is allowed to commit.
> */
> -#define mmiowb() RISCV_FENCE(o,w)
> +#define mmiowb() RISCV_FENCE(o, w)
>
> #include <linux/smp.h>
> #include <asm-generic/mmiowb.h>
> --
> 2.43.0.687.g38aa6559b0-goog
>
>
Hi Emil,
Thank you for reviewing my patch! I appreciate the feedback.
I've posted the updated v4 at [0].
Please let me know if you have any further questions or suggestions.
[0] https://lore.kernel.org/lkml/20240213200923.2547570-1-ericchancf@google.com/
Sincerely,
Eric Chan
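For context on why the change is purely cosmetic: RISCV_FENCE() only
stringizes its two arguments into a single RISC-V fence instruction,
so adding the space after the comma changes no generated code. A
minimal sketch of the expansion, following the RISCV_FENCE definition
quoted from fence.h above (the RISCV_FENCE_ASM body shown here follows
the one introduced earlier in this series and is illustrative):

  /* Stringize the predecessor/successor access sets into one
   * "fence p, s" instruction; the "memory" clobber keeps the
   * compiler from moving memory accesses across it. */
  #define RISCV_FENCE_ASM(p, s)	"\tfence " #p ", " #s "\n"
  #define RISCV_FENCE(p, s) \
  	({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })

  /*
   * RISCV_FENCE(r, rw) therefore expands to
   *	__asm__ __volatile__ ("\tfence r, rw\n" : : : "memory");
   * i.e. prior reads are ordered before later reads and writes,
   * which is the acquire ordering used by __atomic_acquire_fence().
   */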