[v5,3/6] locking/rwsem: Disable preemption at all down_write*() and up_write() code paths

Message ID 20221103182936.217120-4-longman@redhat.com
State New
Series lockinig/rwsem: Fix rwsem bugs & enable true lock handoff

Commit Message

Waiman Long Nov. 3, 2022, 6:29 p.m. UTC
  The previous patch disabled preemption at all the down_read()
and up_read() code paths. For symmetry, this patch extends commit
48dfb5d2560d ("locking/rwsem: Disable preemption while trying for rwsem
lock") to disable preemption at all the down_write() and up_write()
code paths, including downgrade_write().

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Waiman Long <longman@redhat.com>
---
 kernel/locking/rwsem.c | 35 ++++++++++++++++++-----------------
 1 file changed, 18 insertions(+), 17 deletions(-)
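
In effect, every writer path now brackets both the atomic count update and any
waiter wakeup with preempt_disable()/preempt_enable(), matching what the
previous patch did for the reader paths. For orientation, here is a simplified
view of __up_write() with the hunks below applied (a sketch with declarations
and debug checks abbreviated, not the literal patched file):

	static inline void __up_write(struct rw_semaphore *sem)
	{
		long tmp;

		preempt_disable();
		rwsem_clear_owner(sem);
		/* Release the write lock ... */
		tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
		/* ... and wake any waiters while still non-preemptible. */
		if (unlikely(tmp & RWSEM_FLAG_WAITERS))
			rwsem_wake(sem);
		preempt_enable();
	}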
  

Comments

kernel test robot Nov. 3, 2022, 8:04 p.m. UTC | #1
Hi Waiman,

I love your patch! Perhaps something to improve:

[auto build test WARNING on tip/locking/core]
[also build test WARNING on tip/master linus/master v6.1-rc3 next-20221103]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Waiman-Long/lockinig-rwsem-Fix-rwsem-bugs-enable-true-lock-handoff/20221104-023520
patch link:    https://lore.kernel.org/r/20221103182936.217120-4-longman%40redhat.com
patch subject: [PATCH v5 3/6] locking/rwsem: Disable preemption at all down_write*() and up_write() code paths
config: m68k-allyesconfig
compiler: m68k-linux-gcc (GCC) 12.1.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/b83e7cdb918b02166fbe3954940a7bd6f04eef14
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Waiman-Long/lockinig-rwsem-Fix-rwsem-bugs-enable-true-lock-handoff/20221104-023520
        git checkout b83e7cdb918b02166fbe3954940a7bd6f04eef14
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=m68k SHELL=/bin/bash kernel/locking/

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

   kernel/locking/rwsem.c: In function '__down_write_common':
>> kernel/locking/rwsem.c:1302:13: warning: variable 'ret' set but not used [-Wunused-but-set-variable]
    1302 |         int ret = 0;
         |             ^~~


vim +/ret +1302 kernel/locking/rwsem.c

  1296	
  1297	/*
  1298	 * lock for writing
  1299	 */
  1300	static inline int __down_write_common(struct rw_semaphore *sem, int state)
  1301	{
> 1302		int ret = 0;
  1303	
  1304		preempt_disable();
  1305		if (unlikely(!rwsem_write_trylock(sem))) {
  1306			if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
  1307				ret = -EINTR;
  1308		}
  1309		preempt_enable();
  1310	
  1311		return 0;
  1312	}
  1313
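
The warning points at a real functional slip: the new ret variable is set to
-EINTR in the killable slowpath case, but the function still ends in "return 0",
so down_write_killable() would never report the interruption. The presumable
correction (a sketch of what a respin would look like, not code taken from this
thread) is simply to return ret:

	static inline int __down_write_common(struct rw_semaphore *sem, int state)
	{
		int ret = 0;

		preempt_disable();
		if (unlikely(!rwsem_write_trylock(sem))) {
			if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
				ret = -EINTR;
		}
		preempt_enable();

		return ret;	/* the posted patch returns 0 here, leaving ret unused */
	}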
  
Waiman Long Nov. 4, 2022, midnight UTC | #2
On 11/3/22 16:04, kernel test robot wrote:
> Hi Waiman,
>
> I love your patch! Perhaps something to improve:
>
> All warnings (new ones prefixed by >>):
>
>     kernel/locking/rwsem.c: In function '__down_write_common':
>>> kernel/locking/rwsem.c:1302:13: warning: variable 'ret' set but not used [-Wunused-but-set-variable]
>      1302 |         int ret = 0;
>           |             ^~~

Oh well, I will fix that.

Cheers,
Longman


Patch

diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index ebaff8a87e1d..2953fa4dd790 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -256,16 +256,13 @@  static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
 static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
 {
 	long tmp = RWSEM_UNLOCKED_VALUE;
-	bool ret = false;
 
-	preempt_disable();
 	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
 		rwsem_set_owner(sem);
-		ret = true;
+		return true;
 	}
 
-	preempt_enable();
-	return ret;
+	return false;
 }
 
 /*
@@ -716,7 +713,6 @@  static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 		return false;
 	}
 
-	preempt_disable();
 	/*
 	 * Disable preemption is equal to the RCU read-side crital section,
 	 * thus the task_strcut structure won't go away.
@@ -728,7 +724,6 @@  static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 	if ((flags & RWSEM_NONSPINNABLE) ||
 	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
 		ret = false;
-	preempt_enable();
 
 	lockevent_cond_inc(rwsem_opt_fail, !ret);
 	return ret;
@@ -828,8 +823,6 @@  static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 	int loop = 0;
 	u64 rspin_threshold = 0;
 
-	preempt_disable();
-
 	/* sem->wait_lock should not be held when doing optimistic spinning */
 	if (!osq_lock(&sem->osq))
 		goto done;
@@ -937,7 +930,6 @@  static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 	}
 	osq_unlock(&sem->osq);
 done:
-	preempt_enable();
 	lockevent_cond_inc(rwsem_opt_fail, !taken);
 	return taken;
 }
@@ -1178,15 +1170,12 @@  rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 		if (waiter.handoff_set) {
 			enum owner_state owner_state;
 
-			preempt_disable();
 			owner_state = rwsem_spin_on_owner(sem);
-			preempt_enable();
-
 			if (owner_state == OWNER_NULL)
 				goto trylock_again;
 		}
 
-		schedule();
+		schedule_preempt_disabled();
 		lockevent_inc(rwsem_sleep_writer);
 		set_current_state(state);
 trylock_again:
@@ -1310,10 +1299,14 @@  static inline int __down_read_trylock(struct rw_semaphore *sem)
  */
 static inline int __down_write_common(struct rw_semaphore *sem, int state)
 {
+	int ret = 0;
+
+	preempt_disable();
 	if (unlikely(!rwsem_write_trylock(sem))) {
 		if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
-			return -EINTR;
+			ret = -EINTR;
 	}
+	preempt_enable();
 
 	return 0;
 }
@@ -1330,8 +1323,14 @@  static inline int __down_write_killable(struct rw_semaphore *sem)
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
+	int ret;
+
+	preempt_disable();
 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
-	return rwsem_write_trylock(sem);
+	ret = rwsem_write_trylock(sem);
+	preempt_enable();
+
+	return ret;
 }
 
 /*
@@ -1374,9 +1373,9 @@  static inline void __up_write(struct rw_semaphore *sem)
 	preempt_disable();
 	rwsem_clear_owner(sem);
 	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
-	preempt_enable();
 	if (unlikely(tmp & RWSEM_FLAG_WAITERS))
 		rwsem_wake(sem);
+	preempt_enable();
 }
 
 /*
@@ -1394,11 +1393,13 @@  static inline void __downgrade_write(struct rw_semaphore *sem)
 	 * write side. As such, rely on RELEASE semantics.
 	 */
 	DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
+	preempt_disable();
 	tmp = atomic_long_fetch_add_release(
 		-RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
 	rwsem_set_reader_owned(sem);
 	if (tmp & RWSEM_FLAG_WAITERS)
 		rwsem_downgrade_wake(sem);
+	preempt_enable();
 }
 
 #else /* !CONFIG_PREEMPT_RT */