[v2,3/8] io_uring: refactor __io_cq_unlock_post_flush()

Message ID 662ee5d898168ac206be06038525e97b64072a46.1680782017.git.asml.silence@gmail.com
State New
Headers
Series optimise rescheduling due to deferred tw |

Commit Message

Pavel Begunkov April 6, 2023, 1:20 p.m. UTC
  Instead of smp_mb() + __io_cqring_wake() in __io_cq_unlock_post_flush()
use equivalent io_cqring_wake(). With that we can clean it up further
and remove __io_cqring_wake().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 io_uring/io_uring.c |  6 ++----
 io_uring/io_uring.h | 11 ++---------
 2 files changed, 4 insertions(+), 13 deletions(-)
  

Comments

Pavel Begunkov April 6, 2023, 2:23 p.m. UTC | #1
On 4/6/23 14:20, Pavel Begunkov wrote:
> Instead of smp_mb() + __io_cqring_wake() in __io_cq_unlock_post_flush()
> use equivalent io_cqring_wake(). With that we can clean it up further
> and remove __io_cqring_wake().

I didn't notice patches 3 and 7 have the same subj. This one
should've better been called refactor io_cqring_wake().


> Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
> ---
>   io_uring/io_uring.c |  6 ++----
>   io_uring/io_uring.h | 11 ++---------
>   2 files changed, 4 insertions(+), 13 deletions(-)
> 
> diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
> index fb7215b543cd..d4ac62de2113 100644
> --- a/io_uring/io_uring.c
> +++ b/io_uring/io_uring.c
> @@ -640,10 +640,8 @@ static inline void __io_cq_unlock_post_flush(struct io_ring_ctx *ctx)
>   	 * it will re-check the wakeup conditions once we return we can safely
>   	 * skip waking it up.
>   	 */
> -	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN)) {
> -		smp_mb();
> -		__io_cqring_wake(ctx);
> -	}
> +	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
> +		io_cqring_wake(ctx);
>   }
>
  

Patch

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index fb7215b543cd..d4ac62de2113 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -640,10 +640,8 @@  static inline void __io_cq_unlock_post_flush(struct io_ring_ctx *ctx)
 	 * it will re-check the wakeup conditions once we return we can safely
 	 * skip waking it up.
 	 */
-	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN)) {
-		smp_mb();
-		__io_cqring_wake(ctx);
-	}
+	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
+		io_cqring_wake(ctx);
 }
 
 void io_cq_unlock_post(struct io_ring_ctx *ctx)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 193b2db39fe8..24d8196bbca3 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -228,8 +228,7 @@  static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
 				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
 }
 
-/* requires smb_mb() prior, see wq_has_sleeper() */
-static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
+static inline void io_cqring_wake(struct io_ring_ctx *ctx)
 {
 	/*
 	 * Trigger waitqueue handler on all waiters on our waitqueue. This
@@ -241,17 +240,11 @@  static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
 	 * waitqueue handlers, we know we have a dependency between eventfd or
 	 * epoll and should terminate multishot poll at that point.
 	 */
-	if (waitqueue_active(&ctx->cq_wait))
+	if (wq_has_sleeper(&ctx->cq_wait))
 		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
 				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
 }
 
-static inline void io_cqring_wake(struct io_ring_ctx *ctx)
-{
-	smp_mb();
-	__io_cqring_wake(ctx);
-}
-
 static inline bool io_sqring_full(struct io_ring_ctx *ctx)
 {
 	struct io_rings *r = ctx->rings;