[1/4] blk-mq: use percpu csd to remote complete instead of per-rq csd

Message ID 20230627120854.971475-2-chengming.zhou@linux.dev
State New
Series blk-mq: optimize the size of struct request

Commit Message

Chengming Zhou June 27, 2023, 12:08 p.m. UTC
  From: Chengming Zhou <zhouchengming@bytedance.com>

If a request needs to be completed remotely, we insert it into the percpu
llist, and call smp_call_function_single_async() if the llist was empty
before the insert.

We don't need a per-rq csd; a percpu csd is enough, and the size of
struct request is decreased by 24 bytes.

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
---
 block/blk-mq.c         | 12 ++++++++----
 include/linux/blk-mq.h |  5 +----
 2 files changed, 9 insertions(+), 8 deletions(-)
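
A single percpu csd per CPU suffices here because the csd callback carries no
per-request state: it only raises BLOCK_SOFTIRQ, and the softirq handler drains
the whole percpu llist. A rough sketch of that consumer side, reconstructed for
context (not part of this diff; see block/blk-mq.c for the actual code):

  /* csd callback: no per-request data is needed, so one percpu csd works */
  static void __blk_mq_complete_request_remote(void *data)
  {
  	__raise_softirq_irqoff(BLOCK_SOFTIRQ);
  }

  /* softirq handler: drain everything queued on this CPU's llist */
  static void blk_done_softirq(struct softirq_action *h)
  {
  	struct llist_head *list = this_cpu_ptr(&blk_cpu_done);
  	struct request *rq, *next;

  	llist_for_each_entry_safe(rq, next, llist_del_all(list), ipi_list)
  		rq->q->mq_ops->complete(rq);
  }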
  

Comments

Ming Lei June 28, 2023, 2:20 a.m. UTC | #1
On Tue, Jun 27, 2023 at 08:08:51PM +0800, chengming.zhou@linux.dev wrote:
> From: Chengming Zhou <zhouchengming@bytedance.com>
> 
> If request need to be completed remotely, we insert it into percpu llist,
> and smp_call_function_single_async() if llist is empty previously.
> 
> We don't need to use per-rq csd, percpu csd is enough. And the size of
> struct request is decreased by 24 bytes.
> 
> Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
> ---
>  block/blk-mq.c         | 12 ++++++++----
>  include/linux/blk-mq.h |  5 +----
>  2 files changed, 9 insertions(+), 8 deletions(-)
> 
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index decb6ab2d508..a36822479b94 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -43,6 +43,7 @@
>  #include "blk-ioprio.h"
>  
>  static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
> +static DEFINE_PER_CPU(struct __call_single_data, blk_cpu_csd);

It might be better to use call_single_data, given:

/* Use __aligned() to avoid to use 2 cache lines for 1 csd */
  typedef struct __call_single_data call_single_data_t
  	__aligned(sizeof(struct __call_single_data));

>  
>  static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
>  static void blk_mq_request_bypass_insert(struct request *rq,
> @@ -1156,13 +1157,13 @@ static void blk_mq_complete_send_ipi(struct request *rq)
>  {
>  	struct llist_head *list;
>  	unsigned int cpu;
> +	struct __call_single_data *csd;
>  
>  	cpu = rq->mq_ctx->cpu;
>  	list = &per_cpu(blk_cpu_done, cpu);
> -	if (llist_add(&rq->ipi_list, list)) {
> -		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
> -		smp_call_function_single_async(cpu, &rq->csd);
> -	}
> +	csd = &per_cpu(blk_cpu_csd, cpu);
> +	if (llist_add(&rq->ipi_list, list))
> +		smp_call_function_single_async(cpu, csd);
>  }

This way is cleaner, and looks correct, given that the block softirq is
guaranteed to be scheduled to consume the list whenever a new request is added
to this percpu list, whether smp_call_function_single_async() returns -EBUSY
or 0.

thanks
Ming
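
Concretely, the suggestion amounts to something like the following sketch of
the declaration and the completion helper with the aligned typedef applied
(derived from the diff quoted above; a sketch, not a tested patch):

  /* the cacheline-aligned typedef avoids one csd spanning two cache lines */
  static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);

  static void blk_mq_complete_send_ipi(struct request *rq)
  {
  	struct llist_head *list;
  	call_single_data_t *csd;
  	unsigned int cpu;

  	cpu = rq->mq_ctx->cpu;
  	list = &per_cpu(blk_cpu_done, cpu);
  	csd = &per_cpu(blk_cpu_csd, cpu);
  	/* only the request added to an empty list triggers the IPI */
  	if (llist_add(&rq->ipi_list, list))
  		smp_call_function_single_async(cpu, csd);
  }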
  
Chengming Zhou June 28, 2023, 3:28 a.m. UTC | #2
On 2023/6/28 10:20, Ming Lei wrote:
> On Tue, Jun 27, 2023 at 08:08:51PM +0800, chengming.zhou@linux.dev wrote:
>> From: Chengming Zhou <zhouchengming@bytedance.com>
>>
>> If request need to be completed remotely, we insert it into percpu llist,
>> and smp_call_function_single_async() if llist is empty previously.
>>
>> We don't need to use per-rq csd, percpu csd is enough. And the size of
>> struct request is decreased by 24 bytes.
>>
>> Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
>> ---
>>  block/blk-mq.c         | 12 ++++++++----
>>  include/linux/blk-mq.h |  5 +----
>>  2 files changed, 9 insertions(+), 8 deletions(-)
>>
>> diff --git a/block/blk-mq.c b/block/blk-mq.c
>> index decb6ab2d508..a36822479b94 100644
>> --- a/block/blk-mq.c
>> +++ b/block/blk-mq.c
>> @@ -43,6 +43,7 @@
>>  #include "blk-ioprio.h"
>>  
>>  static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
>> +static DEFINE_PER_CPU(struct __call_single_data, blk_cpu_csd);
> 
> It might be better to use call_single_data, given:
> 
> /* Use __aligned() to avoid to use 2 cache lines for 1 csd */
>   typedef struct __call_single_data call_single_data_t
>   	__aligned(sizeof(struct __call_single_data));
> 

Good, I will change to use this.

>>  
>>  static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
>>  static void blk_mq_request_bypass_insert(struct request *rq,
>> @@ -1156,13 +1157,13 @@ static void blk_mq_complete_send_ipi(struct request *rq)
>>  {
>>  	struct llist_head *list;
>>  	unsigned int cpu;
>> +	struct __call_single_data *csd;
>>  
>>  	cpu = rq->mq_ctx->cpu;
>>  	list = &per_cpu(blk_cpu_done, cpu);
>> -	if (llist_add(&rq->ipi_list, list)) {
>> -		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
>> -		smp_call_function_single_async(cpu, &rq->csd);
>> -	}
>> +	csd = &per_cpu(blk_cpu_csd, cpu);
>> +	if (llist_add(&rq->ipi_list, list))
>> +		smp_call_function_single_async(cpu, csd);
>>  }
> 
> This way is cleaner, and looks correct, given block softirq is guaranteed to be
> scheduled to consume the list if one new request is added to this percpu list,
> either smp_call_function_single_async() returns -EBUSY or 0.
> 

If this llist_add() sees that the llist is empty, the consumer function in the
softirq on the remote CPU must have already consumed the llist, so
smp_call_function_single_async() won't return -EBUSY?

Thanks.
  
Ming Lei June 28, 2023, 4:50 a.m. UTC | #3
On Wed, Jun 28, 2023 at 11:28:20AM +0800, Chengming Zhou wrote:
> On 2023/6/28 10:20, Ming Lei wrote:
> > On Tue, Jun 27, 2023 at 08:08:51PM +0800, chengming.zhou@linux.dev wrote:
> >> From: Chengming Zhou <zhouchengming@bytedance.com>
> >>
> >> If request need to be completed remotely, we insert it into percpu llist,
> >> and smp_call_function_single_async() if llist is empty previously.
> >>
> >> We don't need to use per-rq csd, percpu csd is enough. And the size of
> >> struct request is decreased by 24 bytes.
> >>
> >> Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
> >> ---
> >>  block/blk-mq.c         | 12 ++++++++----
> >>  include/linux/blk-mq.h |  5 +----
> >>  2 files changed, 9 insertions(+), 8 deletions(-)
> >>
> >> diff --git a/block/blk-mq.c b/block/blk-mq.c
> >> index decb6ab2d508..a36822479b94 100644
> >> --- a/block/blk-mq.c
> >> +++ b/block/blk-mq.c
> >> @@ -43,6 +43,7 @@
> >>  #include "blk-ioprio.h"
> >>  
> >>  static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
> >> +static DEFINE_PER_CPU(struct __call_single_data, blk_cpu_csd);
> > 
> > It might be better to use call_single_data, given:
> > 
> > /* Use __aligned() to avoid to use 2 cache lines for 1 csd */
> >   typedef struct __call_single_data call_single_data_t
> >   	__aligned(sizeof(struct __call_single_data));
> > 
> 
> Good, I will change to use this.
> 
> >>  
> >>  static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
> >>  static void blk_mq_request_bypass_insert(struct request *rq,
> >> @@ -1156,13 +1157,13 @@ static void blk_mq_complete_send_ipi(struct request *rq)
> >>  {
> >>  	struct llist_head *list;
> >>  	unsigned int cpu;
> >> +	struct __call_single_data *csd;
> >>  
> >>  	cpu = rq->mq_ctx->cpu;
> >>  	list = &per_cpu(blk_cpu_done, cpu);
> >> -	if (llist_add(&rq->ipi_list, list)) {
> >> -		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
> >> -		smp_call_function_single_async(cpu, &rq->csd);
> >> -	}
> >> +	csd = &per_cpu(blk_cpu_csd, cpu);
> >> +	if (llist_add(&rq->ipi_list, list))
> >> +		smp_call_function_single_async(cpu, csd);
> >>  }
> > 
> > This way is cleaner, and looks correct, given block softirq is guaranteed to be
> > scheduled to consume the list if one new request is added to this percpu list,
> > either smp_call_function_single_async() returns -EBUSY or 0.
> > 
> 
> If this llist_add() see the llist is empty, the consumer function in the softirq
> on the remote CPU must have consumed the llist, so smp_call_function_single_async()
> won't return -EBUSY ?

The block softirq can be scheduled from other code paths, such as
blk_mq_raise_softirq() for a single queue's remote completion, where no percpu
csd needs to be scheduled, so two smp_call_function_single_async() calls could
happen, and the 2nd one may return -EBUSY.

Not to mention that csd_unlock() could be called after the callback returns;
see __flush_smp_call_function_queue().

But that is fine: if there is a pending block softirq, the llist is guaranteed
to be consumed, because the csd callback just raises the block softirq and the
requests on the llist are consumed in the softirq handler.

Thanks,
Ming
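
(For reference, the other path mentioned above feeds the same percpu llist but
raises the softirq directly, without the csd, which is how the list can be
drained while a previous csd is still in flight. Roughly, reconstructed from
block/blk-mq.c of this period, so details may differ:)

  static void blk_mq_raise_softirq(struct request *rq)
  {
  	struct llist_head *list;

  	preempt_disable();
  	list = this_cpu_ptr(&blk_cpu_done);
  	/* same llist as the IPI path, but consumed via a local softirq */
  	if (llist_add(&rq->ipi_list, list))
  		raise_softirq(BLOCK_SOFTIRQ);
  	preempt_enable();
  }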
  
Chengming Zhou June 28, 2023, 6:43 a.m. UTC | #4
On 2023/6/28 12:50, Ming Lei wrote:
> On Wed, Jun 28, 2023 at 11:28:20AM +0800, Chengming Zhou wrote:
>> On 2023/6/28 10:20, Ming Lei wrote:
>>> On Tue, Jun 27, 2023 at 08:08:51PM +0800, chengming.zhou@linux.dev wrote:
>>>> From: Chengming Zhou <zhouchengming@bytedance.com>
>>>>
>>>> If request need to be completed remotely, we insert it into percpu llist,
>>>> and smp_call_function_single_async() if llist is empty previously.
>>>>
>>>> We don't need to use per-rq csd, percpu csd is enough. And the size of
>>>> struct request is decreased by 24 bytes.
>>>>
>>>> Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
>>>> ---
>>>>  block/blk-mq.c         | 12 ++++++++----
>>>>  include/linux/blk-mq.h |  5 +----
>>>>  2 files changed, 9 insertions(+), 8 deletions(-)
>>>>
>>>> diff --git a/block/blk-mq.c b/block/blk-mq.c
>>>> index decb6ab2d508..a36822479b94 100644
>>>> --- a/block/blk-mq.c
>>>> +++ b/block/blk-mq.c
>>>> @@ -43,6 +43,7 @@
>>>>  #include "blk-ioprio.h"
>>>>  
>>>>  static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
>>>> +static DEFINE_PER_CPU(struct __call_single_data, blk_cpu_csd);
>>>
>>> It might be better to use call_single_data, given:
>>>
>>> /* Use __aligned() to avoid to use 2 cache lines for 1 csd */
>>>   typedef struct __call_single_data call_single_data_t
>>>   	__aligned(sizeof(struct __call_single_data));
>>>
>>
>> Good, I will change to use this.
>>
>>>>  
>>>>  static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
>>>>  static void blk_mq_request_bypass_insert(struct request *rq,
>>>> @@ -1156,13 +1157,13 @@ static void blk_mq_complete_send_ipi(struct request *rq)
>>>>  {
>>>>  	struct llist_head *list;
>>>>  	unsigned int cpu;
>>>> +	struct __call_single_data *csd;
>>>>  
>>>>  	cpu = rq->mq_ctx->cpu;
>>>>  	list = &per_cpu(blk_cpu_done, cpu);
>>>> -	if (llist_add(&rq->ipi_list, list)) {
>>>> -		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
>>>> -		smp_call_function_single_async(cpu, &rq->csd);
>>>> -	}
>>>> +	csd = &per_cpu(blk_cpu_csd, cpu);
>>>> +	if (llist_add(&rq->ipi_list, list))
>>>> +		smp_call_function_single_async(cpu, csd);
>>>>  }
>>>
>>> This way is cleaner, and looks correct, given block softirq is guaranteed to be
>>> scheduled to consume the list if one new request is added to this percpu list,
>>> either smp_call_function_single_async() returns -EBUSY or 0.
>>>
>>
>> If this llist_add() see the llist is empty, the consumer function in the softirq
>> on the remote CPU must have consumed the llist, so smp_call_function_single_async()
>> won't return -EBUSY ?
> 
> block softirq can be scheduled from other code path, such as blk_mq_raise_softirq()
> for single queue's remote completion, where no percpu csd schedule is needed, so
> two smp_call_function_single_async() could be called, and the 2nd one
> may return -EBUSY.

Thanks for your very clear explanation! I understand what you mean.

Yes, the 2nd smp_call_function_single_async() will return -EBUSY, but that's
ok since the 1st one will do the right thing.

> 
> Not mention csd_unlock() could be called after the callback returns, see
> __flush_smp_call_function_queue().

Ok, CSD_TYPE_SYNC calls csd_unlock() after csd_do_func() returns, while our
CSD_TYPE_ASYNC calls csd_unlock() before csd_do_func().

> 
> But that is fine, if there is pending block softirq, the llist is
> guaranteed to be consumed because the csd callback just raises block
> softirq, and request/llist is consumed in softirq handler.
> 

Agree, it's fine even if the 2nd call returns -EBUSY while the 1st one is
raising the block softirq; our llist will still be consumed in the softirq
handler.

Thanks!
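
(To make the ordering discussed above concrete: a much-simplified model of how
the generic IPI code handles a queued csd. The helper name below is
hypothetical and csd_unlock()/CSD_TYPE() are internals of kernel/smp.c; this is
an approximation for illustration, not the real
__flush_smp_call_function_queue():)

  /* hypothetical helper; the real logic lives in kernel/smp.c */
  static void flush_one_csd(call_single_data_t *csd)
  {
  	smp_call_func_t func = csd->func;
  	void *info = csd->info;

  	if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
  		func(info);
  		csd_unlock(csd);	/* sync: unlock after the callback */
  	} else {
  		csd_unlock(csd);	/* async: unlock first, so the csd can */
  		func(info);		/* be re-armed from inside the callback */
  	}
  }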
  

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index decb6ab2d508..a36822479b94 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -43,6 +43,7 @@ 
 #include "blk-ioprio.h"
 
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
+static DEFINE_PER_CPU(struct __call_single_data, blk_cpu_csd);
 
 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
 static void blk_mq_request_bypass_insert(struct request *rq,
@@ -1156,13 +1157,13 @@  static void blk_mq_complete_send_ipi(struct request *rq)
 {
 	struct llist_head *list;
 	unsigned int cpu;
+	struct __call_single_data *csd;
 
 	cpu = rq->mq_ctx->cpu;
 	list = &per_cpu(blk_cpu_done, cpu);
-	if (llist_add(&rq->ipi_list, list)) {
-		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
-		smp_call_function_single_async(cpu, &rq->csd);
-	}
+	csd = &per_cpu(blk_cpu_csd, cpu);
+	if (llist_add(&rq->ipi_list, list))
+		smp_call_function_single_async(cpu, csd);
 }
 
 static void blk_mq_raise_softirq(struct request *rq)
@@ -4796,6 +4797,9 @@  static int __init blk_mq_init(void)
 
 	for_each_possible_cpu(i)
 		init_llist_head(&per_cpu(blk_cpu_done, i));
+	for_each_possible_cpu(i)
+		INIT_CSD(&per_cpu(blk_cpu_csd, i),
+			 __blk_mq_complete_request_remote, NULL);
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
 
 	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index f401067ac03a..070551197c0e 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -182,10 +182,7 @@  struct request {
 		rq_end_io_fn		*saved_end_io;
 	} flush;
 
-	union {
-		struct __call_single_data csd;
-		u64 fifo_time;
-	};
+	u64 fifo_time;
 
 	/*
 	 * completion callback.