[v2,08/10] maple_tree: Rework mas_wr_slot_store() to be cleaner and more efficient.
Commit Message
The code of mas_wr_slot_store() is messy; make it clearer and more
concise, and add comments. In addition, record whether either of the two
affected slots holds an empty entry, so that mas_update_gap() is only
called when a gap is actually created or removed instead of every time.
Signed-off-by: Peng Zhang <zhangpeng.00@bytedance.com>
---
lib/maple_tree.c | 59 ++++++++++++++++++++----------------------------
1 file changed, 24 insertions(+), 35 deletions(-)
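To picture the optimization, here is a minimal user-space sketch (not maple
tree code: slot_store(), update_gap() and the bare void * slot array are
stand-ins invented for illustration). The point it models is the skip
condition in the patch: gaps only need recomputing when the new entry is
NULL or one of the two overwritten slots was NULL.

#include <stdbool.h>
#include <stdio.h>

static int gap_updates;	/* how often the expensive recomputation ran */

static void update_gap(void)
{
	gap_updates++;	/* stand-in for mas_update_gap() */
}

/* Overwrite the two adjacent slots at off and off + 1 with one entry. */
static void slot_store(void *slots[], int off, void *entry)
{
	/* True if either original slot held an empty (NULL) entry. */
	bool gap = !slots[off] || !slots[off + 1];

	slots[off] = entry;
	slots[off + 1] = entry;

	/* Skip the recomputation unless a gap was created or removed. */
	if (!entry || gap)
		update_gap();
}

int main(void)
{
	int a = 1, b = 2, c = 3;
	void *slots[4] = { &a, &b, &c, NULL };

	slot_store(slots, 0, &c);	/* non-NULL over non-NULL: skipped */
	slot_store(slots, 2, &a);	/* one old slot was NULL: updates  */
	slot_store(slots, 0, NULL);	/* storing an empty entry: updates */

	printf("gap recomputations: %d of 3 stores\n", gap_updates);
	return 0;
}

In this toy run only two of the three stores trigger update_gap(); the real
patch applies the same condition before calling mas_update_gap().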
Comments
* Peng Zhang <zhangpeng.00@bytedance.com> [230517 04:59]:
> The code of mas_wr_slot_store() is messy, make it clearer and concise,
> and add comments. In addition, get whether the two gaps are empty to
> avoid calling mas_update_gap() all the time.
>
> Signed-off-by: Peng Zhang <zhangpeng.00@bytedance.com>
> ---
> lib/maple_tree.c | 59 ++++++++++++++++++++----------------------------
> 1 file changed, 24 insertions(+), 35 deletions(-)
>
> diff --git a/lib/maple_tree.c b/lib/maple_tree.c
> index bbe4c6f2858c..25a8b7d5d598 100644
> --- a/lib/maple_tree.c
> +++ b/lib/maple_tree.c
> @@ -4200,49 +4200,38 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
> static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
> {
> struct ma_state *mas = wr_mas->mas;
> - unsigned long lmax; /* Logical max. */
> unsigned char offset = mas->offset;
> + unsigned char offset_end = wr_mas->offset_end;
> + unsigned long lmax = wr_mas->end_piv; /* Logical max. */
> + bool gap = false;
>
> - if ((wr_mas->r_max > mas->last) && ((wr_mas->r_min != mas->index) ||
> - (offset != wr_mas->node_end)))
> + if (offset_end - offset != 1)
> return false;
>
> - if (offset == wr_mas->node_end - 1)
> - lmax = mas->max;
> - else
> - lmax = wr_mas->pivots[offset + 1];
> -
> - /* going to overwrite too many slots. */
> - if (lmax < mas->last)
> - return false;
> -
> - if (wr_mas->r_min == mas->index) {
> - /* overwriting two or more ranges with one. */
> - if (lmax == mas->last)
> - return false;
> -
> - /* Overwriting all of offset and a portion of offset + 1. */
> + if (mas->index == wr_mas->r_min && mas->last < lmax) {
> + /* Overwriting the range and over a part of the next range. */
> + gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset);
> + gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset + 1);
> rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry);
> wr_mas->pivots[offset] = mas->last;
> - goto done;
> - }
> -
> - /* Doesn't end on the next range end. */
> - if (lmax != mas->last)
> + } else if (mas->index > wr_mas->r_min && mas->last == lmax) {
> + /* Overwriting a part of the range and over the next range */
> + gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset);
> + gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset + 1);
> + rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
> + wr_mas->pivots[offset] = mas->index - 1;
> + mas->offset++; /* Keep mas accurate. */
> + } else {
> return false;
Again here, I think you have already verified this will be a slot store
by offset_end - offset == 1 and new_end == wr_mas->node_end.
The checking of the slots for the gap could be moved outside the
statements. WDYT?
> + }
>
> - /* Overwriting a portion of offset and all of offset + 1 */
> - if ((offset + 1 < mt_pivots[wr_mas->type]) &&
> - (wr_mas->entry || wr_mas->pivots[offset + 1]))
> - wr_mas->pivots[offset + 1] = mas->last;
> -
> - rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
> - wr_mas->pivots[offset] = mas->index - 1;
> - mas->offset++; /* Keep mas accurate. */
> -
> -done:
> trace_ma_write(__func__, mas, 0, wr_mas->entry);
> - mas_update_gap(mas);
> + /*
> + * Only update gap when the new entry is empty or there is an empty
> + * entry in the original two ranges.
> + */
> + if (!wr_mas->entry || gap)
> + mas_update_gap(mas);
> return true;
> }
>
> @@ -4396,7 +4385,7 @@ static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
> if (new_end == wr_mas->node_end + 1 && mas_wr_append(wr_mas))
> return;
>
> - if ((wr_mas->offset_end - mas->offset <= 1) && mas_wr_slot_store(wr_mas))
> + if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
> return;
> else if (mas_wr_node_store(wr_mas))
> return;
> --
> 2.20.1
>
On 2023/5/18 06:48, Liam R. Howlett wrote:
> * Peng Zhang <zhangpeng.00@bytedance.com> [230517 04:59]:
>> The code of mas_wr_slot_store() is messy, make it clearer and concise,
>> and add comments. In addition, get whether the two gaps are empty to
>> avoid calling mas_update_gap() all the time.
>>
>> Signed-off-by: Peng Zhang <zhangpeng.00@bytedance.com>
>> ---
>> lib/maple_tree.c | 59 ++++++++++++++++++++----------------------------
>> 1 file changed, 24 insertions(+), 35 deletions(-)
>>
>> diff --git a/lib/maple_tree.c b/lib/maple_tree.c
>> index bbe4c6f2858c..25a8b7d5d598 100644
>> --- a/lib/maple_tree.c
>> +++ b/lib/maple_tree.c
>> @@ -4200,49 +4200,38 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
>> static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
>> {
>> struct ma_state *mas = wr_mas->mas;
>> - unsigned long lmax; /* Logical max. */
>> unsigned char offset = mas->offset;
>> + unsigned char offset_end = wr_mas->offset_end;
>> + unsigned long lmax = wr_mas->end_piv; /* Logical max. */
>> + bool gap = false;
>>
>> - if ((wr_mas->r_max > mas->last) && ((wr_mas->r_min != mas->index) ||
>> - (offset != wr_mas->node_end)))
>> + if (offset_end - offset != 1)
>> return false;
>>
>> - if (offset == wr_mas->node_end - 1)
>> - lmax = mas->max;
>> - else
>> - lmax = wr_mas->pivots[offset + 1];
>> -
>> - /* going to overwrite too many slots. */
>> - if (lmax < mas->last)
>> - return false;
>> -
>> - if (wr_mas->r_min == mas->index) {
>> - /* overwriting two or more ranges with one. */
>> - if (lmax == mas->last)
>> - return false;
>> -
>> - /* Overwriting all of offset and a portion of offset + 1. */
>> + if (mas->index == wr_mas->r_min && mas->last < lmax) {
>> + /* Overwriting the range and over a part of the next range. */
>> + gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset);
>> + gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset + 1);
>> rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry);
>> wr_mas->pivots[offset] = mas->last;
>> - goto done;
>> - }
>> -
>> - /* Doesn't end on the next range end. */
>> - if (lmax != mas->last)
>> + } else if (mas->index > wr_mas->r_min && mas->last == lmax) {
>> + /* Overwriting a part of the range and over the next range */
>> + gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset);
>> + gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset + 1);
>> + rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
>> + wr_mas->pivots[offset] = mas->index - 1;
>> + mas->offset++; /* Keep mas accurate. */
>> + } else {
>> return false;
>
> Again here, I think you have already verified this will be a slot store
> by offset_end - offset == 1 and new_end == wr_mas->node_end.
>
> The checking of the slots for the gap could be moved outside the
> statements. WDYT?
Yes, I'll address this in v3. Thanks.
>
>> + }
>>
>> - /* Overwriting a portion of offset and all of offset + 1 */
>> - if ((offset + 1 < mt_pivots[wr_mas->type]) &&
>> - (wr_mas->entry || wr_mas->pivots[offset + 1]))
>> - wr_mas->pivots[offset + 1] = mas->last;
>> -
>> - rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
>> - wr_mas->pivots[offset] = mas->index - 1;
>> - mas->offset++; /* Keep mas accurate. */
>> -
>> -done:
>> trace_ma_write(__func__, mas, 0, wr_mas->entry);
>> - mas_update_gap(mas);
>> + /*
>> + * Only update gap when the new entry is empty or there is an empty
>> + * entry in the original two ranges.
>> + */
>> + if (!wr_mas->entry || gap)
>> + mas_update_gap(mas);
>> return true;
>> }
>>
>> @@ -4396,7 +4385,7 @@ static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
>> if (new_end == wr_mas->node_end + 1 && mas_wr_append(wr_mas))
>> return;
>>
>> - if ((wr_mas->offset_end - mas->offset <= 1) && mas_wr_slot_store(wr_mas))
>> + if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
>> return;
>> else if (mas_wr_node_store(wr_mas))
>> return;
>> --
>> 2.20.1
>>
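A rough sketch of how Liam's suggestion could be folded in, with the checks
of both slots for empty entries hoisted out of the branches. This is only an
illustration built from the hunk above, not the actual v3 patch; the helpers
(mt_slot_locked(), rcu_assign_pointer(), trace_ma_write(), mas_update_gap())
are used exactly as they appear in the diff:

static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;
	unsigned char offset = mas->offset;
	unsigned long lmax = wr_mas->end_piv;	/* Logical max. */
	bool gap = false;

	if (wr_mas->offset_end - offset != 1)
		return false;

	/* Hoisted: check both overwritten slots for empty entries once. */
	gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset);
	gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset + 1);

	if (mas->index == wr_mas->r_min && mas->last < lmax) {
		/* Overwriting the range and a part of the next range. */
		rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry);
		wr_mas->pivots[offset] = mas->last;
	} else if (mas->index > wr_mas->r_min && mas->last == lmax) {
		/* Overwriting a part of the range and the next range. */
		rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
		wr_mas->pivots[offset] = mas->index - 1;
		mas->offset++;	/* Keep mas accurate. */
	} else {
		return false;
	}

	trace_ma_write(__func__, mas, 0, wr_mas->entry);
	/* Only update gaps when an empty entry is written or removed. */
	if (!wr_mas->entry || gap)
		mas_update_gap(mas);
	return true;
}

The sketch conservatively keeps the range checks and the else branch;
whether they can be dropped as well, given the new_end == wr_mas->node_end
check in mas_wr_modify(), is left to the v3 discussion.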