maple_tree: simplify and clean up mas_wr_node_store()

Simplify and clean up mas_wr_node_store(), remove unnecessary code.

Link: https://lkml.kernel.org/r/20230524031247.65949-10-zhangpeng.00@bytedance.com
Signed-off-by: Peng Zhang <zhangpeng.00@bytedance.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Peng Zhang 2023-05-24 11:12:46 +08:00 committed by Andrew Morton
parent e6d1ffd611
commit 7a03ae3920
1 changed file with 26 additions and 61 deletions

View File

@@ -4074,52 +4074,27 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
* *
* Return: True if stored, false otherwise * Return: True if stored, false otherwise
*/ */
static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas) static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
unsigned char new_end)
{ {
struct ma_state *mas = wr_mas->mas; struct ma_state *mas = wr_mas->mas;
void __rcu **dst_slots; void __rcu **dst_slots;
unsigned long *dst_pivots; unsigned long *dst_pivots;
unsigned char dst_offset; unsigned char dst_offset, offset_end = wr_mas->offset_end;
unsigned char new_end = wr_mas->node_end;
unsigned char offset;
unsigned char node_slots = mt_slots[wr_mas->type];
struct maple_node reuse, *newnode; struct maple_node reuse, *newnode;
unsigned char copy_size, max_piv = mt_pivots[wr_mas->type]; unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
bool in_rcu = mt_in_rcu(mas->tree); bool in_rcu = mt_in_rcu(mas->tree);
offset = mas->offset; /* Check if there is enough data. The room is enough. */
if (mas->last == wr_mas->r_max) {
/* runs right to the end of the node */
if (mas->last == mas->max)
new_end = offset;
/* don't copy this offset */
wr_mas->offset_end++;
} else if (mas->last < wr_mas->r_max) {
/* new range ends in this range */
if (unlikely(wr_mas->r_max == ULONG_MAX))
mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
new_end++;
} else {
if (wr_mas->end_piv == mas->last)
wr_mas->offset_end++;
new_end -= wr_mas->offset_end - offset - 1;
}
/* new range starts within a range */
if (wr_mas->r_min < mas->index)
new_end++;
/* Not enough room */
if (new_end >= node_slots)
return false;
/* Not enough data. */
if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) && if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
!(mas->mas_flags & MA_STATE_BULK)) !(mas->mas_flags & MA_STATE_BULK))
return false; return false;
if (mas->last == wr_mas->end_piv)
offset_end++; /* don't copy this offset */
else if (unlikely(wr_mas->r_max == ULONG_MAX))
mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
/* set up node. */ /* set up node. */
if (in_rcu) { if (in_rcu) {
mas_node_count(mas, 1); mas_node_count(mas, 1);
@@ -4136,47 +4111,36 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
dst_pivots = ma_pivots(newnode, wr_mas->type); dst_pivots = ma_pivots(newnode, wr_mas->type);
dst_slots = ma_slots(newnode, wr_mas->type); dst_slots = ma_slots(newnode, wr_mas->type);
/* Copy from start to insert point */ /* Copy from start to insert point */
memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * (offset + 1)); memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset);
memcpy(dst_slots, wr_mas->slots, sizeof(void *) * (offset + 1)); memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset);
dst_offset = offset;
/* Handle insert of new range starting after old range */ /* Handle insert of new range starting after old range */
if (wr_mas->r_min < mas->index) { if (wr_mas->r_min < mas->index) {
mas->offset++; rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content);
rcu_assign_pointer(dst_slots[dst_offset], wr_mas->content); dst_pivots[mas->offset++] = mas->index - 1;
dst_pivots[dst_offset++] = mas->index - 1;
} }
/* Store the new entry and range end. */ /* Store the new entry and range end. */
if (dst_offset < max_piv) if (mas->offset < node_pivots)
dst_pivots[dst_offset] = mas->last; dst_pivots[mas->offset] = mas->last;
mas->offset = dst_offset; rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry);
rcu_assign_pointer(dst_slots[dst_offset], wr_mas->entry);
/* /*
* this range wrote to the end of the node or it overwrote the rest of * this range wrote to the end of the node or it overwrote the rest of
* the data * the data
*/ */
if (wr_mas->offset_end > wr_mas->node_end || mas->last >= mas->max) { if (offset_end > wr_mas->node_end)
new_end = dst_offset;
goto done; goto done;
}
dst_offset++; dst_offset = mas->offset + 1;
/* Copy to the end of node if necessary. */ /* Copy to the end of node if necessary. */
copy_size = wr_mas->node_end - wr_mas->offset_end + 1; copy_size = wr_mas->node_end - offset_end + 1;
memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end, memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end,
sizeof(void *) * copy_size); sizeof(void *) * copy_size);
if (dst_offset < max_piv) { memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end,
if (copy_size > max_piv - dst_offset) sizeof(unsigned long) * (copy_size - 1));
copy_size = max_piv - dst_offset;
memcpy(dst_pivots + dst_offset, if (new_end < node_pivots)
wr_mas->pivots + wr_mas->offset_end,
sizeof(unsigned long) * copy_size);
}
if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1))
dst_pivots[new_end] = mas->max; dst_pivots[new_end] = mas->max;
done: done:
@@ -4378,7 +4342,8 @@ static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas)) if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
return; return;
else if (mas_wr_node_store(wr_mas))
if (mas_wr_node_store(wr_mas, new_end))
return; return;
if (mas_is_err(mas)) if (mas_is_err(mas))