[memory-model] Add smp_mb__after_srcu_read_unlock()
This commit adds support for smp_mb__after_srcu_read_unlock(), which,
when combined with a prior srcu_read_unlock(), implies a full memory
barrier. No ordering is guaranteed for accesses between the two, and
placing accesses between them is bad practice in any case.
Tests may be found at https://github.com/paulmckrcu/litmus in files
matching manual/kernel/C-srcu-mb-*.litmus.
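For illustration, here is a minimal sketch in the style of those tests;
the test name and expected result below are assumptions rather than
copies from that repository. P0()'s store to x precedes the
srcu_read_unlock(), so the subsequent smp_mb__after_srcu_read_unlock()
orders that store before the store to y:

C C-srcu-mb-sketch

(*
 * Result: Never (assumed)
 *
 * Hypothetical sketch: P0()'s store to x precedes srcu_read_unlock(),
 * so the subsequent smp_mb__after_srcu_read_unlock() orders that store
 * before the store to y.
 *)

{}

P0(int *x, int *y, struct srcu_struct *s)
{
	int r0;

	r0 = srcu_read_lock(s);
	WRITE_ONCE(*x, 1);
	srcu_read_unlock(s, r0);
	smp_mb__after_srcu_read_unlock();
	WRITE_ONCE(*y, 1);
}

P1(int *x, int *y)
{
	int r0;
	int r1;

	r0 = READ_ONCE(*y);
	smp_mb();
	r1 = READ_ONCE(*x);
}

exists (1:r0=1 /\ 1:r1=0)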
If we really do figure out a way to weaken srcu_read_unlock() to release
semantics, this functionality might play a greater role.
It can be argued that smp_mb__after_srcu_read_unlock() should instead
be smp_mb__before_srcu_read_unlock() to make it clearer that the full
memory barrier precedes the end of any ongoing grace period. There are
not that many uses of smp_mb__after_srcu_read_unlock(), so...
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
--- a/tools/memory-model/linux-kernel.bell
+++ b/tools/memory-model/linux-kernel.bell
@@ -31,7 +31,8 @@ enum Barriers = 'wmb (*smp_wmb*) ||
'before-atomic (*smp_mb__before_atomic*) ||
'after-atomic (*smp_mb__after_atomic*) ||
'after-spinlock (*smp_mb__after_spinlock*) ||
- 'after-unlock-lock (*smp_mb__after_unlock_lock*)
+ 'after-unlock-lock (*smp_mb__after_unlock_lock*) ||
+ 'after-srcu-read-unlock (*smp_mb__after_srcu_read_unlock*)
instructions F[Barriers]
(* SRCU *)
--- a/tools/memory-model/linux-kernel.cat
+++ b/tools/memory-model/linux-kernel.cat
@@ -49,7 +49,8 @@ let mb = ([M] ; fencerel(Mb) ; [M]) |
* also affected by the fence.
*)
([M] ; po-unlock-lock-po ;
- [After-unlock-lock] ; po ; [M])
+ [After-unlock-lock] ; po ; [M]) |
+ ([M] ; po? ; [Srcu-unlock] ; fencerel(After-srcu-read-unlock) ; [M])
let gp = po ; [Sync-rcu | Sync-srcu] ; po?
let strong-fence = mb | gp
--- a/tools/memory-model/linux-kernel.def
+++ b/tools/memory-model/linux-kernel.def
@@ -24,6 +24,7 @@ smp_mb__before_atomic() { __fence{before-atomic}; }
smp_mb__after_atomic() { __fence{after-atomic}; }
smp_mb__after_spinlock() { __fence{after-spinlock}; }
smp_mb__after_unlock_lock() { __fence{after-unlock-lock}; }
+smp_mb__after_srcu_read_unlock() { __fence{after-srcu-read-unlock}; }
barrier() { __fence{barrier}; }
// Exchange
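To illustrate why no ordering is guaranteed for accesses between the two
calls, note that the po? ; [Srcu-unlock] sequence above links the fence
only to accesses at or before the Srcu-unlock event. A hypothetical
sketch (again, name and result are assumptions, not from the commit):

C C-srcu-mb-between-sketch

(*
 * Result: Sometimes (assumed)
 *
 * Hypothetical sketch: the store to x sits between srcu_read_unlock()
 * and smp_mb__after_srcu_read_unlock(), so the new mb rule does not
 * order it against the store to y.
 *)

{}

P0(int *x, int *y, struct srcu_struct *s)
{
	int r0;

	r0 = srcu_read_lock(s);
	srcu_read_unlock(s, r0);
	WRITE_ONCE(*x, 1);
	smp_mb__after_srcu_read_unlock();
	WRITE_ONCE(*y, 1);
}

P1(int *x, int *y)
{
	int r0;
	int r1;

	r0 = READ_ONCE(*y);
	smp_mb();
	r1 = READ_ONCE(*x);
}

exists (1:r0=1 /\ 1:r1=0)

Moving the store to x back before the srcu_read_unlock() restores the
Never result of the first sketch.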