[v5,6/6] locking/rwsem: Update handoff lock events tracking
Commit Message
With the new direct rwsem lock handoff, the corresponding handoff lock
events are updated to also track the number of secondary lock handoffs
in rwsem_down_read_slowpath() to see how prevalent those handoff
events are. The number of primary lock handoffs in the unlock paths is
(rwsem_handoff_read + rwsem_handoff_write - rwsem_handoff_rslow).
After running a 96-thread rwsem microbenchmark with an equal number
of readers and writers on a 2-socket 96-thread system for 40s, the
following handoff stats were obtained:
rwsem_handoff_read=189
rwsem_handoff_rslow=1
rwsem_handoff_write=6678
rwsem_handoff_wspin=6681
The number of primary handoffs was thus 6866 (189 + 6678 - 1), whereas
there was only one secondary handoff for this test run.
Signed-off-by: Waiman Long <longman@redhat.com>
---
kernel/locking/lock_events_list.h | 6 ++++--
kernel/locking/rwsem.c | 9 +++++----
2 files changed, 9 insertions(+), 6 deletions(-)
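For reference, the primary handoff count quoted above can be derived from
the new event counters at run time. Below is a minimal user-space sketch,
not part of the patch itself; it assumes CONFIG_LOCK_EVENT_COUNTS=y and
that the counters are exported under /sys/kernel/debug/lock_event_counts/
(that path is an assumption, adjust it to wherever debugfs is mounted):

/*
 * Minimal sketch: derive the primary handoff count from the new rwsem
 * handoff event counters.  The /sys/kernel/debug/lock_event_counts/
 * path is an assumption about the debugfs layout.
 */
#include <stdio.h>

static unsigned long read_event(const char *name)
{
	char path[256];
	unsigned long val = 0;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/lock_event_counts/%s", name);
	f = fopen(path, "r");
	if (!f)
		return 0;
	if (fscanf(f, "%lu", &val) != 1)
		val = 0;
	fclose(f);
	return val;
}

int main(void)
{
	unsigned long rd    = read_event("rwsem_handoff_read");
	unsigned long wr    = read_event("rwsem_handoff_write");
	unsigned long rslow = read_event("rwsem_handoff_rslow");

	/* Primary handoffs are those done in the unlock paths. */
	printf("primary handoffs:   %lu\n", rd + wr - rslow);
	printf("secondary handoffs: %lu\n", rslow);
	return 0;
}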
diff --git a/kernel/locking/lock_events_list.h b/kernel/locking/lock_events_list.h
--- a/kernel/locking/lock_events_list.h
+++ b/kernel/locking/lock_events_list.h
@@ -63,7 +63,9 @@ LOCK_EVENT(rwsem_rlock) /* # of read locks acquired */
LOCK_EVENT(rwsem_rlock_steal) /* # of read locks by lock stealing */
LOCK_EVENT(rwsem_rlock_fast) /* # of fast read locks acquired */
LOCK_EVENT(rwsem_rlock_fail) /* # of failed read lock acquisitions */
-LOCK_EVENT(rwsem_rlock_handoff) /* # of read lock handoffs */
LOCK_EVENT(rwsem_wlock) /* # of write locks acquired */
LOCK_EVENT(rwsem_wlock_fail) /* # of failed write lock acquisitions */
-LOCK_EVENT(rwsem_wlock_handoff) /* # of write lock handoffs */
+LOCK_EVENT(rwsem_handoff_read) /* # of read lock handoffs */
+LOCK_EVENT(rwsem_handoff_write) /* # of write lock handoffs */
+LOCK_EVENT(rwsem_handoff_rslow) /* # of handoffs in read slowpath */
+LOCK_EVENT(rwsem_handoff_wspin) /* # of handoff spins in write slowpath */
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -469,10 +469,8 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
* force the issue.
*/
if (time_after(jiffies, waiter->timeout)) {
- if (!(oldcount & RWSEM_FLAG_HANDOFF)) {
+ if (!(oldcount & RWSEM_FLAG_HANDOFF))
adjustment -= RWSEM_FLAG_HANDOFF;
- lockevent_inc(rwsem_rlock_handoff);
- }
WRITE_ONCE(waiter->handoff_state, HANDOFF_REQUESTED);
}
@@ -677,7 +675,6 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
*/
if (new & RWSEM_FLAG_HANDOFF) {
WRITE_ONCE(first->handoff_state, HANDOFF_REQUESTED);
- lockevent_inc(rwsem_wlock_handoff);
return false;
}
@@ -1011,10 +1008,12 @@ static void rwsem_handoff(struct rw_semaphore *sem, long adj,
wake_type = RWSEM_WAKE_ANY;
adj += RWSEM_WRITER_LOCKED;
atomic_long_set(&sem->owner, (long)waiter->task);
+ lockevent_inc(rwsem_handoff_write);
} else {
wake_type = RWSEM_WAKE_READ_OWNED;
adj += RWSEM_READER_BIAS;
__rwsem_set_reader_owned(sem, waiter->task);
+ lockevent_inc(rwsem_handoff_read);
}
atomic_long_add(adj, &sem->count);
rwsem_mark_wake(sem, wake_type, wake_q);
@@ -1123,6 +1122,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat
if (rwsem_first_waiter(sem)->type == RWSEM_WAITING_FOR_READ)
adjustment = 0;
rwsem_handoff(sem, adjustment, &wake_q);
+ lockevent_inc(rwsem_handoff_rslow);
if (!adjustment) {
raw_spin_unlock_irq(&sem->wait_lock);
@@ -1253,6 +1253,7 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
if (handoff == HANDOFF_REQUESTED) {
rwsem_spin_on_owner(sem);
handoff = READ_ONCE(waiter.handoff_state);
+ lockevent_inc(rwsem_handoff_wspin);
}
if (handoff == HANDOFF_GRANTED)
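
As background on why the new events only need extra LOCK_EVENT() lines in
lock_events_list.h, that header is consumed as an x-macro list to generate
both the counter indices used by lockevent_inc() and the event names shown
in debugfs. The following is an illustrative, self-contained sketch of that
pattern (simplified, not the kernel's exact definitions; the real counters
are per-CPU):

/*
 * Illustrative x-macro sketch: one list of LOCK_EVENT() entries is
 * expanded twice, once into an enum of counter indices and once into a
 * table of names, so adding an event only needs a new LOCK_EVENT() line.
 */
#include <stdio.h>

#define LOCK_EVENT_LIST			\
	LOCK_EVENT(rwsem_handoff_read)	\
	LOCK_EVENT(rwsem_handoff_write)	\
	LOCK_EVENT(rwsem_handoff_rslow)	\
	LOCK_EVENT(rwsem_handoff_wspin)

/* First expansion: enum of counter indices. */
#define LOCK_EVENT(name)	LOCKEVENT_ ## name,
enum lock_events {
	LOCK_EVENT_LIST
	lockevent_num		/* total number of events */
};
#undef LOCK_EVENT

/* Second expansion: table of event names for reporting. */
#define LOCK_EVENT(name)	#name,
static const char * const lockevent_names[lockevent_num] = {
	LOCK_EVENT_LIST
};
#undef LOCK_EVENT

static unsigned long lockevents[lockevent_num];

/* Analogue of lockevent_inc(): bump one counter by event name. */
#define lockevent_inc(ev)	lockevents[LOCKEVENT_ ## ev]++

int main(void)
{
	int i;

	lockevent_inc(rwsem_handoff_write);
	lockevent_inc(rwsem_handoff_wspin);

	for (i = 0; i < lockevent_num; i++)
		printf("%s=%lu\n", lockevent_names[i], lockevents[i]);
	return 0;
}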