net/mlx5: DR, Some refactoring of miss address handling

In preparation for MATCH RANGE STE support, create a function
to set the miss address of an STE.

Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Reviewed-by: Erez Shitrit <erezsh@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
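
The diff below collapses a sequence that was repeated at three call sites: look up
the ICM address of the matcher's end-anchor table chunk, then program it as the
STE's miss address. Condensed from the hunks that follow (the two shapes side by
side, not a standalone build unit):

	/* Before: every call site resolved the anchor address itself */
	icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
	mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, icm_addr);

	/* After: one helper owns the lookup */
	dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);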
@@ -35,16 +35,25 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_domain *dmn,
 	return 0;
 }
 
+static void dr_rule_set_last_ste_miss_addr(struct mlx5dr_matcher *matcher,
+					   struct mlx5dr_matcher_rx_tx *nic_matcher,
+					   u8 *hw_ste)
+{
+	struct mlx5dr_ste_ctx *ste_ctx = matcher->tbl->dmn->ste_ctx;
+	u64 icm_addr;
+
+	icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
+	mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, icm_addr);
+}
+
 static struct mlx5dr_ste *
 dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
 			      struct mlx5dr_matcher_rx_tx *nic_matcher,
 			      u8 *hw_ste)
 {
 	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
 	struct mlx5dr_ste_htbl *new_htbl;
 	struct mlx5dr_ste *ste;
-	u64 icm_addr;
 
 	/* Create new table for miss entry */
 	new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
@@ -58,8 +67,7 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
 
 	/* One and only entry, never grows */
 	ste = new_htbl->chunk->ste_arr;
-	icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
-	mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, icm_addr);
+	dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);
 	mlx5dr_htbl_get(new_htbl);
 
 	return ste;
@@ -241,7 +249,6 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
 	bool use_update_list = false;
 	u8 hw_ste[DR_STE_SIZE] = {};
 	struct mlx5dr_ste *new_ste;
-	u64 icm_addr;
 	int new_idx;
 	u8 sb_idx;
 
@@ -250,9 +257,8 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
 	mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);
 
 	/* Copy STE control and tag */
-	icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
 	memcpy(hw_ste, mlx5dr_ste_get_hw_ste(cur_ste), DR_STE_SIZE_REDUCED);
-	mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, icm_addr);
+	dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);
 
 	new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
 	new_ste = &new_htbl->chunk->ste_arr[new_idx];
@@ -773,7 +779,6 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
 {
 	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
 	struct mlx5dr_ste_send_info *ste_info;
-	u64 icm_addr;
 
 	/* Take ref on table, only on first time this ste is used */
 	mlx5dr_htbl_get(cur_htbl);
@@ -781,8 +786,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
 
 	/* new entry -> new branch */
 	list_add_tail(&ste->miss_list_node, miss_list);
-	icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
-	mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, icm_addr);
+	dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);
 	ste->ste_chain_location = ste_location;
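
With this change, dr_rule_create_collision_htbl(), dr_rule_rehash_copy_ste() and
dr_rule_handle_empty_entry() all set the last-STE miss address through the single
helper, which derives ste_ctx from matcher->tbl->dmn itself, so the per-function
ste_ctx and icm_addr locals become dead and are dropped. That leaves one place to
adjust the miss address when MATCH RANGE STE support is added.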