Merge branch 'mlxsw-ERP-sharing-multiple-masks'

Ido Schimmel says:

====================
mlxsw: spectrum: acl: Introduce ERP sharing by multiple masks

Jiri says:

The Spectrum-2 hardware has a limited number of ERPs per region. In
order to accommodate more masks than the number of ERPs, the hardware
supports inserting rules with delta bits. That way, rules with masks
that differ in up to 8 consecutive bits can share the same ERP.

Patches 1 and 2 fix a couple of issues that would appear in existing
selftests after adding delta support.

Patch 3 introduces a generic object aggregation library. For now the
aggregation is static, but it will be extended in the future to
recalculate aggregations in order to reach a more optimal result.

Patch 4 simply converts the existing ERP code to use the objagg library
instead of a rhashtable.

Patches 5-9 contain mostly small changes that prepare the ground for the
last patch.

Patch 10 fills in the delta callbacks of the objagg library and utilizes
the delta bits for rule insertion.

The last patch adds a selftest to test the mlxsw Spectrum-2 delta flows.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2018-11-15 14:43:44 -08:00
commit 4e86889b52
19 changed files with 2187 additions and 196 deletions
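
To make the delta-bit idea described in the cover letter concrete, below is a minimal, self-contained userspace sketch (not part of this commit; the 32-bit toy key and all demo_* names are purely illustrative). It shows how a rule whose mask is a superset of a shared root mask can be reduced to a small delta: a bit offset, an up-to-8-bit mask, and the value the rule's key carries at those positions. The real driver logic operates on the flex-key blocks in spectrum_acl_erp.c further down in this diff.

#include <stdint.h>
#include <stdio.h>

struct demo_delta {
	uint16_t start;	/* bit offset of the delta within the key */
	uint8_t mask;	/* up to 8 delta bits */
	uint8_t value;	/* the rule's key bits at those positions */
};

/* Compute which bits the rule mask adds on top of the shared root mask. */
static int demo_delta_fill(uint32_t root_mask, uint32_t rule_mask,
			   uint32_t rule_key, struct demo_delta *delta)
{
	uint32_t diff = root_mask ^ rule_mask;
	uint16_t start = 0;

	/* The root mask must be a proper subset of the rule mask. */
	if (!diff || (diff & root_mask))
		return -1;
	/* Shift until the lowest differing bit is at position 0. */
	while (!(diff & 1)) {
		diff >>= 1;
		rule_key >>= 1;
		start++;
	}
	/* All differing bits must fit into an 8-bit window. */
	if (diff & ~0xffU)
		return -1;
	delta->start = start;
	delta->mask = diff;
	delta->value = rule_key & diff;
	return 0;
}

int main(void)
{
	/* Hypothetical masks: the rule adds bits 4..9 on top of the root. */
	uint32_t root_mask = 0xffff0000;
	uint32_t rule_mask = 0xffff03f0;
	uint32_t rule_key = 0x12340250;
	struct demo_delta d;

	if (!demo_delta_fill(root_mask, rule_mask, rule_key, &d))
		printf("start=%u mask=0x%02x value=0x%02x\n",
		       (unsigned int)d.start, (unsigned int)d.mask,
		       (unsigned int)d.value);
	/* Prints: start=4 mask=0x3f value=0x25 -- both masks share one ERP. */
	return 0;
}

In the hardware, such a pair of rules shares the root's ERP; the per-rule delta start, mask and value are programmed through the PTCE-3 register fields extended later in this series.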


@ -10679,6 +10679,14 @@ L: linux-nfc@lists.01.org (moderated for non-subscribers)
S: Supported
F: drivers/nfc/nxp-nci
OBJAGG
M: Jiri Pirko <jiri@mellanox.com>
L: netdev@vger.kernel.org
S: Supported
F: lib/objagg.c
F: lib/test_objagg.c
F: include/linux/objagg.h
OBJTOOL
M: Josh Poimboeuf <jpoimboe@redhat.com>
M: Peter Zijlstra <peterz@infradead.org>


@ -80,6 +80,7 @@ config MLXSW_SPECTRUM
depends on IPV6_GRE || IPV6_GRE=n
select GENERIC_ALLOCATOR
select PARMAN
select OBJAGG
select MLXFW
default m
---help---


@ -426,15 +426,17 @@ mlxsw_sp_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
char *key, char *mask, int block_start, int block_end)
char *key, char *mask)
{
unsigned int blocks_count =
mlxsw_afk_key_info_blocks_count_get(key_info);
char block_mask[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
char block_key[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
const struct mlxsw_afk_element_inst *elinst;
enum mlxsw_afk_element element;
int block_index, i;
for (i = block_start; i <= block_end; i++) {
for (i = 0; i < blocks_count; i++) {
memset(block_key, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
memset(block_mask, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
@ -451,10 +453,18 @@ void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
values->storage.mask);
}
if (key)
mlxsw_afk->ops->encode_block(block_key, i, key);
if (mask)
mlxsw_afk->ops->encode_block(block_mask, i, mask);
mlxsw_afk->ops->encode_block(key, i, block_key);
mlxsw_afk->ops->encode_block(mask, i, block_mask);
}
}
EXPORT_SYMBOL(mlxsw_afk_encode);
void mlxsw_afk_clear(struct mlxsw_afk *mlxsw_afk, char *key,
int block_start, int block_end)
{
int i;
for (i = block_start; i <= block_end; i++)
mlxsw_afk->ops->clear_block(key, i);
}
EXPORT_SYMBOL(mlxsw_afk_clear);
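
The net effect of this change is that callers no longer encode partial block ranges; they encode the whole key once and then explicitly clear the blocks they do not need. Schematically, as a hedged fragment (the variable names are borrowed from the callers shown further down; the block range constants are placeholders):

	/* Encode all key/mask blocks in one pass. */
	mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask);

	/* Zero out the blocks this particular lookup does not use. */
	mlxsw_afk_clear(afk, key, block_clear_start, block_clear_end);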


@ -188,7 +188,8 @@ struct mlxsw_afk;
struct mlxsw_afk_ops {
const struct mlxsw_afk_block *blocks;
unsigned int blocks_count;
void (*encode_block)(char *block, int block_index, char *output);
void (*encode_block)(char *output, int block_index, char *block);
void (*clear_block)(char *output, int block_index);
};
struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
@ -228,6 +229,8 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
char *key, char *mask, int block_start, int block_end);
char *key, char *mask);
void mlxsw_afk_clear(struct mlxsw_afk *mlxsw_afk, char *key,
int block_start, int block_end);
#endif


@ -2834,8 +2834,9 @@ static inline void mlxsw_reg_ptce3_pack(char *payload, bool valid,
u32 priority,
const char *tcam_region_info,
const char *key, u8 erp_id,
bool large_exists, u32 lkey_id,
u32 action_pointer)
u16 delta_start, u8 delta_mask,
u8 delta_value, bool large_exists,
u32 lkey_id, u32 action_pointer)
{
MLXSW_REG_ZERO(ptce3, payload);
mlxsw_reg_ptce3_v_set(payload, valid);
@ -2844,6 +2845,9 @@ static inline void mlxsw_reg_ptce3_pack(char *payload, bool valid,
mlxsw_reg_ptce3_tcam_region_info_memcpy_to(payload, tcam_region_info);
mlxsw_reg_ptce3_flex2_key_blocks_memcpy_to(payload, key);
mlxsw_reg_ptce3_erp_id_set(payload, erp_id);
mlxsw_reg_ptce3_delta_start_set(payload, delta_start);
mlxsw_reg_ptce3_delta_mask_set(payload, delta_mask);
mlxsw_reg_ptce3_delta_value_set(payload, delta_value);
mlxsw_reg_ptce3_large_exists_set(payload, large_exists);
mlxsw_reg_ptce3_large_entry_key_id_set(payload, lkey_id);
mlxsw_reg_ptce3_action_pointer_set(payload, action_pointer);


@ -34,15 +34,15 @@ mlxsw_sp2_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregio
{
struct mlxsw_sp_acl_atcam_region *aregion;
struct mlxsw_sp_acl_atcam_entry *aentry;
struct mlxsw_sp_acl_erp *erp;
struct mlxsw_sp_acl_erp_mask *erp_mask;
aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
erp = mlxsw_sp_acl_erp_get(aregion, mask, true);
if (IS_ERR(erp))
return PTR_ERR(erp);
aentry->erp = erp;
erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, true);
if (IS_ERR(erp_mask))
return PTR_ERR(erp_mask);
aentry->erp_mask = erp_mask;
return 0;
}
@ -57,7 +57,7 @@ mlxsw_sp2_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregio
aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
mlxsw_sp_acl_erp_put(aregion, aentry->erp);
mlxsw_sp_acl_erp_mask_put(aregion, aentry->erp_mask);
}
static const struct mlxsw_sp_acl_ctcam_region_ops


@ -14,8 +14,8 @@
#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"
#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START 6
#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END 11
#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_START 0
#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_END 5
struct mlxsw_sp_acl_atcam_lkey_id_ht_key {
char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* MSB blocks */
@ -34,7 +34,7 @@ struct mlxsw_sp_acl_atcam_region_ops {
void (*fini)(struct mlxsw_sp_acl_atcam_region *aregion);
struct mlxsw_sp_acl_atcam_lkey_id *
(*lkey_id_get)(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_rule_info *rulei, u8 erp_id);
char *enc_key, u8 erp_id);
void (*lkey_id_put)(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id);
};
@ -64,7 +64,7 @@ static const struct rhashtable_params mlxsw_sp_acl_atcam_entries_ht_params = {
static bool
mlxsw_sp_acl_atcam_is_centry(const struct mlxsw_sp_acl_atcam_entry *aentry)
{
return mlxsw_sp_acl_erp_is_ctcam_erp(aentry->erp);
return mlxsw_sp_acl_erp_mask_is_ctcam(aentry->erp_mask);
}
static int
@ -90,8 +90,7 @@ mlxsw_sp_acl_atcam_region_generic_fini(struct mlxsw_sp_acl_atcam_region *aregion
static struct mlxsw_sp_acl_atcam_lkey_id *
mlxsw_sp_acl_atcam_generic_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_rule_info *rulei,
u8 erp_id)
char *enc_key, u8 erp_id)
{
struct mlxsw_sp_acl_atcam_region_generic *region_generic;
@ -220,8 +219,7 @@ mlxsw_sp_acl_atcam_lkey_id_destroy(struct mlxsw_sp_acl_atcam_region *aregion,
static struct mlxsw_sp_acl_atcam_lkey_id *
mlxsw_sp_acl_atcam_12kb_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_rule_info *rulei,
u8 erp_id)
char *enc_key, u8 erp_id)
{
struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
@ -230,9 +228,10 @@ mlxsw_sp_acl_atcam_12kb_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
mlxsw_afk_encode(afk, region->key_info, &rulei->values, ht_key.enc_key,
NULL, MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START,
MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END);
memcpy(ht_key.enc_key, enc_key, sizeof(ht_key.enc_key));
mlxsw_afk_clear(afk, ht_key.enc_key,
MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_START,
MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_END);
ht_key.erp_id = erp_id;
lkey_id = rhashtable_lookup_fast(&region_12kb->lkey_ht, &ht_key,
mlxsw_sp_acl_atcam_lkey_id_ht_params);
@ -379,7 +378,7 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei)
{
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp);
u8 erp_id = mlxsw_sp_acl_erp_mask_erp_id(aentry->erp_mask);
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
char ptce3_pl[MLXSW_REG_PTCE3_LEN];
u32 kvdl_index, priority;
@ -389,7 +388,8 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
lkey_id = aregion->ops->lkey_id_get(aregion, rulei, erp_id);
lkey_id = aregion->ops->lkey_id_get(aregion, aentry->ht_key.enc_key,
erp_id);
if (IS_ERR(lkey_id))
return PTR_ERR(lkey_id);
aentry->lkey_id = lkey_id;
@ -398,6 +398,9 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
priority, region->tcam_region_info,
aentry->ht_key.enc_key, erp_id,
aentry->delta_info.start,
aentry->delta_info.mask,
aentry->delta_info.value,
refcount_read(&lkey_id->refcnt) != 1, lkey_id->id,
kvdl_index);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
@ -418,12 +421,17 @@ mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id = aentry->lkey_id;
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp);
u8 erp_id = mlxsw_sp_acl_erp_mask_erp_id(aentry->erp_mask);
char *enc_key = aentry->ht_key.enc_key;
char ptce3_pl[MLXSW_REG_PTCE3_LEN];
mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0,
region->tcam_region_info, aentry->ht_key.enc_key,
erp_id, refcount_read(&lkey_id->refcnt) != 1,
region->tcam_region_info,
enc_key, erp_id,
aentry->delta_info.start,
aentry->delta_info.mask,
aentry->delta_info.value,
refcount_read(&lkey_id->refcnt) != 1,
lkey_id->id, 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
aregion->ops->lkey_id_put(aregion, lkey_id);
@ -438,19 +446,30 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 };
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
struct mlxsw_sp_acl_erp *erp;
unsigned int blocks_count;
const struct mlxsw_sp_acl_erp_delta *delta;
struct mlxsw_sp_acl_erp_mask *erp_mask;
int err;
blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
mlxsw_afk_encode(afk, region->key_info, &rulei->values,
aentry->ht_key.enc_key, mask, 0, blocks_count - 1);
aentry->full_enc_key, mask);
erp = mlxsw_sp_acl_erp_get(aregion, mask, false);
if (IS_ERR(erp))
return PTR_ERR(erp);
aentry->erp = erp;
aentry->ht_key.erp_id = mlxsw_sp_acl_erp_id(erp);
erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, false);
if (IS_ERR(erp_mask))
return PTR_ERR(erp_mask);
aentry->erp_mask = erp_mask;
aentry->ht_key.erp_id = mlxsw_sp_acl_erp_mask_erp_id(erp_mask);
memcpy(aentry->ht_key.enc_key, aentry->full_enc_key,
sizeof(aentry->ht_key.enc_key));
/* Compute all needed delta information and clear the delta bits
* from the encoded key.
*/
delta = mlxsw_sp_acl_erp_delta(aentry->erp_mask);
aentry->delta_info.start = mlxsw_sp_acl_erp_delta_start(delta);
aentry->delta_info.mask = mlxsw_sp_acl_erp_delta_mask(delta);
aentry->delta_info.value =
mlxsw_sp_acl_erp_delta_value(delta, aentry->full_enc_key);
mlxsw_sp_acl_erp_delta_clear(delta, aentry->ht_key.enc_key);
/* We can't insert identical rules into the A-TCAM, so fail and
* let the rule spill into C-TCAM
@ -472,7 +491,7 @@ err_rule_insert:
rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
mlxsw_sp_acl_atcam_entries_ht_params);
err_rhashtable_insert:
mlxsw_sp_acl_erp_put(aregion, erp);
mlxsw_sp_acl_erp_mask_put(aregion, erp_mask);
return err;
}
@ -484,7 +503,7 @@ __mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_atcam_region_entry_remove(mlxsw_sp, aregion, aentry);
rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
mlxsw_sp_acl_atcam_entries_ht_params);
mlxsw_sp_acl_erp_put(aregion, aentry->erp);
mlxsw_sp_acl_erp_mask_put(aregion, aentry->erp_mask);
}
int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,


@ -46,7 +46,6 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region = cregion->region;
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
char ptce2_pl[MLXSW_REG_PTCE2_LEN];
unsigned int blocks_count;
char *act_set;
u32 priority;
char *mask;
@ -63,9 +62,7 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
centry->parman_item.index, priority);
key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask, 0,
blocks_count - 1);
mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask);
err = cregion->ops->entry_insert(cregion, centry, mask);
if (err)


@ -7,7 +7,7 @@
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/objagg.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
@ -29,6 +29,8 @@ struct mlxsw_sp_acl_erp_core {
struct mlxsw_sp_acl_erp_key {
char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN];
#define __MASK_LEN 0x38
#define __MASK_IDX(i) (__MASK_LEN - (i) - 1)
bool ctcam;
};
@ -36,10 +38,8 @@ struct mlxsw_sp_acl_erp {
struct mlxsw_sp_acl_erp_key key;
u8 id;
u8 index;
refcount_t refcnt;
DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
struct list_head list;
struct rhash_head ht_node;
struct mlxsw_sp_acl_erp_table *erp_table;
};
@ -53,7 +53,6 @@ struct mlxsw_sp_acl_erp_table {
DECLARE_BITMAP(erp_id_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
DECLARE_BITMAP(erp_index_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
struct list_head atcam_erps_list;
struct rhashtable erp_ht;
struct mlxsw_sp_acl_erp_core *erp_core;
struct mlxsw_sp_acl_atcam_region *aregion;
const struct mlxsw_sp_acl_erp_table_ops *ops;
@ -61,12 +60,8 @@ struct mlxsw_sp_acl_erp_table {
unsigned int num_atcam_erps;
unsigned int num_max_atcam_erps;
unsigned int num_ctcam_erps;
};
static const struct rhashtable_params mlxsw_sp_acl_erp_ht_params = {
.key_len = sizeof(struct mlxsw_sp_acl_erp_key),
.key_offset = offsetof(struct mlxsw_sp_acl_erp, key),
.head_offset = offsetof(struct mlxsw_sp_acl_erp, ht_node),
unsigned int num_deltas;
struct objagg *objagg;
};
struct mlxsw_sp_acl_erp_table_ops {
@ -119,16 +114,6 @@ static const struct mlxsw_sp_acl_erp_table_ops erp_no_mask_ops = {
.erp_destroy = mlxsw_sp_acl_erp_no_mask_destroy,
};
bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp)
{
return erp->key.ctcam;
}
u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp)
{
return erp->id;
}
static unsigned int
mlxsw_sp_acl_erp_table_entry_size(const struct mlxsw_sp_acl_erp_table *erp_table)
{
@ -194,12 +179,15 @@ mlxsw_sp_acl_erp_master_mask_update(struct mlxsw_sp_acl_erp_table *erp_table)
static int
mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table,
const struct mlxsw_sp_acl_erp *erp)
struct mlxsw_sp_acl_erp_key *key)
{
DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
unsigned long bit;
int err;
for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
bitmap_from_arr32(mask_bitmap, (u32 *) key->mask,
MLXSW_SP_ACL_TCAM_MASK_LEN);
for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_set(bit,
&erp_table->master_mask);
@ -210,7 +198,7 @@ mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table,
return 0;
err_master_mask_update:
for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
&erp_table->master_mask);
return err;
@ -218,12 +206,15 @@ err_master_mask_update:
static int
mlxsw_sp_acl_erp_master_mask_clear(struct mlxsw_sp_acl_erp_table *erp_table,
const struct mlxsw_sp_acl_erp *erp)
struct mlxsw_sp_acl_erp_key *key)
{
DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
unsigned long bit;
int err;
for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
bitmap_from_arr32(mask_bitmap, (u32 *) key->mask,
MLXSW_SP_ACL_TCAM_MASK_LEN);
for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
&erp_table->master_mask);
@ -234,7 +225,7 @@ mlxsw_sp_acl_erp_master_mask_clear(struct mlxsw_sp_acl_erp_table *erp_table,
return 0;
err_master_mask_update:
for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_set(bit,
&erp_table->master_mask);
return err;
@ -256,26 +247,16 @@ mlxsw_sp_acl_erp_generic_create(struct mlxsw_sp_acl_erp_table *erp_table,
goto err_erp_id_get;
memcpy(&erp->key, key, sizeof(*key));
bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
MLXSW_SP_ACL_TCAM_MASK_LEN);
list_add(&erp->list, &erp_table->atcam_erps_list);
refcount_set(&erp->refcnt, 1);
erp_table->num_atcam_erps++;
erp->erp_table = erp_table;
err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp);
err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &erp->key);
if (err)
goto err_master_mask_set;
err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node,
mlxsw_sp_acl_erp_ht_params);
if (err)
goto err_rhashtable_insert;
return erp;
err_rhashtable_insert:
mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
err_master_mask_set:
erp_table->num_atcam_erps--;
list_del(&erp->list);
@ -290,9 +271,7 @@ mlxsw_sp_acl_erp_generic_destroy(struct mlxsw_sp_acl_erp *erp)
{
struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
mlxsw_sp_acl_erp_ht_params);
mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key);
erp_table->num_atcam_erps--;
list_del(&erp->list);
mlxsw_sp_acl_erp_id_put(erp_table, erp->id);
@ -647,9 +626,56 @@ mlxsw_sp_acl_erp_region_ctcam_disable(struct mlxsw_sp_acl_erp_table *erp_table)
mlxsw_sp_acl_erp_table_enable(erp_table, false);
}
static void
mlxsw_sp_acl_erp_ctcam_table_ops_set(struct mlxsw_sp_acl_erp_table *erp_table)
static int
__mlxsw_sp_acl_erp_table_other_inc(struct mlxsw_sp_acl_erp_table *erp_table,
unsigned int *inc_num)
{
int err;
/* If there are C-TCAM eRP or deltas in use we need to transition
* the region to use eRP table, if it is not already done
*/
if (erp_table->ops != &erp_two_masks_ops &&
erp_table->ops != &erp_multiple_masks_ops) {
err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
if (err)
return err;
}
/* When C-TCAM or deltas are used, the eRP table must be used */
if (erp_table->ops != &erp_multiple_masks_ops)
erp_table->ops = &erp_multiple_masks_ops;
(*inc_num)++;
return 0;
}
static int mlxsw_sp_acl_erp_ctcam_inc(struct mlxsw_sp_acl_erp_table *erp_table)
{
return __mlxsw_sp_acl_erp_table_other_inc(erp_table,
&erp_table->num_ctcam_erps);
}
static int mlxsw_sp_acl_erp_delta_inc(struct mlxsw_sp_acl_erp_table *erp_table)
{
return __mlxsw_sp_acl_erp_table_other_inc(erp_table,
&erp_table->num_deltas);
}
static void
__mlxsw_sp_acl_erp_table_other_dec(struct mlxsw_sp_acl_erp_table *erp_table,
unsigned int *dec_num)
{
(*dec_num)--;
/* If there are no C-TCAM eRP or deltas in use, the state we
* transition to depends on the number of A-TCAM eRPs currently
* in use.
*/
if (erp_table->num_ctcam_erps > 0 || erp_table->num_deltas > 0)
return;
switch (erp_table->num_atcam_erps) {
case 2:
/* Keep using the eRP table, but correctly set the
@ -683,9 +709,21 @@ mlxsw_sp_acl_erp_ctcam_table_ops_set(struct mlxsw_sp_acl_erp_table *erp_table)
}
}
static void mlxsw_sp_acl_erp_ctcam_dec(struct mlxsw_sp_acl_erp_table *erp_table)
{
__mlxsw_sp_acl_erp_table_other_dec(erp_table,
&erp_table->num_ctcam_erps);
}
static void mlxsw_sp_acl_erp_delta_dec(struct mlxsw_sp_acl_erp_table *erp_table)
{
__mlxsw_sp_acl_erp_table_other_dec(erp_table,
&erp_table->num_deltas);
}
static struct mlxsw_sp_acl_erp *
__mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp_key *key)
mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp_key *key)
{
struct mlxsw_sp_acl_erp *erp;
int err;
@ -697,89 +735,41 @@ __mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
memcpy(&erp->key, key, sizeof(*key));
bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
MLXSW_SP_ACL_TCAM_MASK_LEN);
refcount_set(&erp->refcnt, 1);
erp_table->num_ctcam_erps++;
err = mlxsw_sp_acl_erp_ctcam_inc(erp_table);
if (err)
goto err_erp_ctcam_inc;
erp->erp_table = erp_table;
err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp);
err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &erp->key);
if (err)
goto err_master_mask_set;
err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node,
mlxsw_sp_acl_erp_ht_params);
if (err)
goto err_rhashtable_insert;
err = mlxsw_sp_acl_erp_region_ctcam_enable(erp_table);
if (err)
goto err_erp_region_ctcam_enable;
/* When C-TCAM is used, the eRP table must be used */
erp_table->ops = &erp_multiple_masks_ops;
return erp;
err_erp_region_ctcam_enable:
rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
mlxsw_sp_acl_erp_ht_params);
err_rhashtable_insert:
mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key);
err_master_mask_set:
erp_table->num_ctcam_erps--;
mlxsw_sp_acl_erp_ctcam_dec(erp_table);
err_erp_ctcam_inc:
kfree(erp);
return ERR_PTR(err);
}
static struct mlxsw_sp_acl_erp *
mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
struct mlxsw_sp_acl_erp_key *key)
{
struct mlxsw_sp_acl_erp *erp;
int err;
/* There is a special situation where we need to spill rules
* into the C-TCAM, yet the region is still using a master
* mask and thus not performing a lookup in the C-TCAM. This
* can happen when two rules that only differ in priority - and
* thus sharing the same key - are programmed. In this case
* we transition the region to use an eRP table
*/
err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
if (err)
return ERR_PTR(err);
erp = __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
if (IS_ERR(erp)) {
err = PTR_ERR(erp);
goto err_erp_create;
}
return erp;
err_erp_create:
mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
return ERR_PTR(err);
}
static void
mlxsw_sp_acl_erp_ctcam_mask_destroy(struct mlxsw_sp_acl_erp *erp)
{
struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
mlxsw_sp_acl_erp_region_ctcam_disable(erp_table);
rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
mlxsw_sp_acl_erp_ht_params);
mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
erp_table->num_ctcam_erps--;
mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key);
mlxsw_sp_acl_erp_ctcam_dec(erp_table);
kfree(erp);
/* Once the last C-TCAM eRP was destroyed, the state we
* transition to depends on the number of A-TCAM eRPs currently
* in use
*/
if (erp_table->num_ctcam_erps > 0)
return;
mlxsw_sp_acl_erp_ctcam_table_ops_set(erp_table);
}
static struct mlxsw_sp_acl_erp *
@ -790,7 +780,7 @@ mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
int err;
if (key->ctcam)
return __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
return mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
/* Expand the eRP table for the new eRP, if needed */
err = mlxsw_sp_acl_erp_table_expand(erp_table);
@ -838,7 +828,8 @@ mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
mlxsw_sp_acl_erp_generic_destroy(erp);
if (erp_table->num_atcam_erps == 2 && erp_table->num_ctcam_erps == 0)
if (erp_table->num_atcam_erps == 2 && erp_table->num_ctcam_erps == 0 &&
erp_table->num_deltas == 0)
erp_table->ops = &erp_two_masks_ops;
}
@ -940,13 +931,12 @@ mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
WARN_ON(1);
}
struct mlxsw_sp_acl_erp *
mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
const char *mask, bool ctcam)
struct mlxsw_sp_acl_erp_mask *
mlxsw_sp_acl_erp_mask_get(struct mlxsw_sp_acl_atcam_region *aregion,
const char *mask, bool ctcam)
{
struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
struct mlxsw_sp_acl_erp_key key;
struct mlxsw_sp_acl_erp *erp;
struct objagg_obj *objagg_obj;
/* eRPs are allocated from a shared resource, but currently all
* allocations are done under RTNL.
@ -955,29 +945,238 @@ mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
memcpy(key.mask, mask, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
key.ctcam = ctcam;
erp = rhashtable_lookup_fast(&erp_table->erp_ht, &key,
mlxsw_sp_acl_erp_ht_params);
if (erp) {
refcount_inc(&erp->refcnt);
return erp;
}
return erp_table->ops->erp_create(erp_table, &key);
objagg_obj = objagg_obj_get(aregion->erp_table->objagg, &key);
if (IS_ERR(objagg_obj))
return ERR_CAST(objagg_obj);
return (struct mlxsw_sp_acl_erp_mask *) objagg_obj;
}
void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_erp *erp)
void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_erp_mask *erp_mask)
{
struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
ASSERT_RTNL();
if (!refcount_dec_and_test(&erp->refcnt))
return;
erp_table->ops->erp_destroy(erp_table, erp);
objagg_obj_put(aregion->erp_table->objagg, objagg_obj);
}
bool
mlxsw_sp_acl_erp_mask_is_ctcam(const struct mlxsw_sp_acl_erp_mask *erp_mask)
{
struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
const struct mlxsw_sp_acl_erp_key *key = objagg_obj_raw(objagg_obj);
return key->ctcam;
}
u8 mlxsw_sp_acl_erp_mask_erp_id(const struct mlxsw_sp_acl_erp_mask *erp_mask)
{
struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj);
return erp->id;
}
struct mlxsw_sp_acl_erp_delta {
struct mlxsw_sp_acl_erp_key key;
u16 start;
u8 mask;
};
u16 mlxsw_sp_acl_erp_delta_start(const struct mlxsw_sp_acl_erp_delta *delta)
{
return delta->start;
}
u8 mlxsw_sp_acl_erp_delta_mask(const struct mlxsw_sp_acl_erp_delta *delta)
{
return delta->mask;
}
u8 mlxsw_sp_acl_erp_delta_value(const struct mlxsw_sp_acl_erp_delta *delta,
const char *enc_key)
{
u16 start = delta->start;
u8 mask = delta->mask;
u16 tmp;
if (!mask)
return 0;
tmp = (unsigned char) enc_key[__MASK_IDX(start / 8)];
if (start / 8 + 1 < __MASK_LEN)
tmp |= (unsigned char) enc_key[__MASK_IDX(start / 8 + 1)] << 8;
tmp >>= start % 8;
tmp &= mask;
return tmp;
}
void mlxsw_sp_acl_erp_delta_clear(const struct mlxsw_sp_acl_erp_delta *delta,
const char *enc_key)
{
u16 start = delta->start;
u8 mask = delta->mask;
unsigned char *byte;
u16 tmp;
tmp = mask;
tmp <<= start % 8;
tmp = ~tmp;
byte = (unsigned char *) &enc_key[__MASK_IDX(start / 8)];
*byte &= tmp & 0xff;
if (start / 8 + 1 < __MASK_LEN) {
byte = (unsigned char *) &enc_key[__MASK_IDX(start / 8 + 1)];
*byte &= (tmp >> 8) & 0xff;
}
}
static const struct mlxsw_sp_acl_erp_delta
mlxsw_sp_acl_erp_delta_default = {};
const struct mlxsw_sp_acl_erp_delta *
mlxsw_sp_acl_erp_delta(const struct mlxsw_sp_acl_erp_mask *erp_mask)
{
struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
const struct mlxsw_sp_acl_erp_delta *delta;
delta = objagg_obj_delta_priv(objagg_obj);
if (!delta)
delta = &mlxsw_sp_acl_erp_delta_default;
return delta;
}
static int
mlxsw_sp_acl_erp_delta_fill(const struct mlxsw_sp_acl_erp_key *parent_key,
const struct mlxsw_sp_acl_erp_key *key,
u16 *delta_start, u8 *delta_mask)
{
int offset = 0;
int si = -1;
u16 pmask;
u16 mask;
int i;
/* The difference between 2 masks can be up to 8 consecutive bits. */
for (i = 0; i < __MASK_LEN; i++) {
if (parent_key->mask[__MASK_IDX(i)] == key->mask[__MASK_IDX(i)])
continue;
if (si == -1)
si = i;
else if (si != i - 1)
return -EINVAL;
}
if (si == -1) {
/* The masks are the same, this cannot happen.
* That means the caller is broken.
*/
WARN_ON(1);
*delta_start = 0;
*delta_mask = 0;
return 0;
}
pmask = (unsigned char) parent_key->mask[__MASK_IDX(si)];
mask = (unsigned char) key->mask[__MASK_IDX(si)];
if (si + 1 < __MASK_LEN) {
pmask |= (unsigned char) parent_key->mask[__MASK_IDX(si + 1)] << 8;
mask |= (unsigned char) key->mask[__MASK_IDX(si + 1)] << 8;
}
if ((pmask ^ mask) & pmask)
return -EINVAL;
mask &= ~pmask;
while (!(mask & (1 << offset)))
offset++;
while (!(mask & 1))
mask >>= 1;
if (mask & 0xff00)
return -EINVAL;
*delta_start = si * 8 + offset;
*delta_mask = mask;
return 0;
}
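
As a concrete illustration of the helper above (hypothetical byte values, not taken from the commit): if the parent mask and the key mask are identical except at byte index si, where the parent has 0x00 and the key has 0xf0, the remaining difference after removing the parent bits is 0xf0, its lowest set bit is bit 4, and the function reports delta_start = si * 8 + 4 with delta_mask = 0x0f.
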
static void *mlxsw_sp_acl_erp_delta_create(void *priv, void *parent_obj,
void *obj)
{
struct mlxsw_sp_acl_erp_key *parent_key = parent_obj;
struct mlxsw_sp_acl_atcam_region *aregion = priv;
struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
struct mlxsw_sp_acl_erp_key *key = obj;
struct mlxsw_sp_acl_erp_delta *delta;
u16 delta_start;
u8 delta_mask;
int err;
if (parent_key->ctcam || key->ctcam)
return ERR_PTR(-EINVAL);
err = mlxsw_sp_acl_erp_delta_fill(parent_key, key,
&delta_start, &delta_mask);
if (err)
return ERR_PTR(-EINVAL);
delta = kzalloc(sizeof(*delta), GFP_KERNEL);
if (!delta)
return ERR_PTR(-ENOMEM);
delta->start = delta_start;
delta->mask = delta_mask;
err = mlxsw_sp_acl_erp_delta_inc(erp_table);
if (err)
goto err_erp_delta_inc;
memcpy(&delta->key, key, sizeof(*key));
err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &delta->key);
if (err)
goto err_master_mask_set;
return delta;
err_master_mask_set:
mlxsw_sp_acl_erp_delta_dec(erp_table);
err_erp_delta_inc:
kfree(delta);
return ERR_PTR(err);
}
static void mlxsw_sp_acl_erp_delta_destroy(void *priv, void *delta_priv)
{
struct mlxsw_sp_acl_erp_delta *delta = delta_priv;
struct mlxsw_sp_acl_atcam_region *aregion = priv;
struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
mlxsw_sp_acl_erp_master_mask_clear(erp_table, &delta->key);
mlxsw_sp_acl_erp_delta_dec(erp_table);
kfree(delta);
}
static void *mlxsw_sp_acl_erp_root_create(void *priv, void *obj)
{
struct mlxsw_sp_acl_atcam_region *aregion = priv;
struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
struct mlxsw_sp_acl_erp_key *key = obj;
return erp_table->ops->erp_create(erp_table, key);
}
static void mlxsw_sp_acl_erp_root_destroy(void *priv, void *root_priv)
{
struct mlxsw_sp_acl_atcam_region *aregion = priv;
struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
erp_table->ops->erp_destroy(erp_table, root_priv);
}
static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = {
.obj_size = sizeof(struct mlxsw_sp_acl_erp_key),
.delta_create = mlxsw_sp_acl_erp_delta_create,
.delta_destroy = mlxsw_sp_acl_erp_delta_destroy,
.root_create = mlxsw_sp_acl_erp_root_create,
.root_destroy = mlxsw_sp_acl_erp_root_destroy,
};
static struct mlxsw_sp_acl_erp_table *
mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
{
@ -988,9 +1187,12 @@ mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
if (!erp_table)
return ERR_PTR(-ENOMEM);
err = rhashtable_init(&erp_table->erp_ht, &mlxsw_sp_acl_erp_ht_params);
if (err)
goto err_rhashtable_init;
erp_table->objagg = objagg_create(&mlxsw_sp_acl_erp_objagg_ops,
aregion);
if (IS_ERR(erp_table->objagg)) {
err = PTR_ERR(erp_table->objagg);
goto err_objagg_create;
}
erp_table->erp_core = aregion->atcam->erp_core;
erp_table->ops = &erp_no_mask_ops;
@ -999,7 +1201,7 @@ mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
return erp_table;
err_rhashtable_init:
err_objagg_create:
kfree(erp_table);
return ERR_PTR(err);
}
@ -1008,7 +1210,7 @@ static void
mlxsw_sp_acl_erp_table_destroy(struct mlxsw_sp_acl_erp_table *erp_table)
{
WARN_ON(!list_empty(&erp_table->atcam_erps_list));
rhashtable_destroy(&erp_table->erp_ht);
objagg_destroy(erp_table->objagg);
kfree(erp_table);
}


@ -98,8 +98,8 @@ static const struct mlxsw_afk_block mlxsw_sp1_afk_blocks[] = {
#define MLXSW_SP1_AFK_KEY_BLOCK_SIZE 16
static void mlxsw_sp1_afk_encode_block(char *block, int block_index,
char *output)
static void mlxsw_sp1_afk_encode_block(char *output, int block_index,
char *block)
{
unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE;
char *output_indexed = output + offset;
@ -107,10 +107,19 @@ static void mlxsw_sp1_afk_encode_block(char *block, int block_index,
memcpy(output_indexed, block, MLXSW_SP1_AFK_KEY_BLOCK_SIZE);
}
static void mlxsw_sp1_afk_clear_block(char *output, int block_index)
{
unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE;
char *output_indexed = output + offset;
memset(output_indexed, 0, MLXSW_SP1_AFK_KEY_BLOCK_SIZE);
}
const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = {
.blocks = mlxsw_sp1_afk_blocks,
.blocks_count = ARRAY_SIZE(mlxsw_sp1_afk_blocks),
.encode_block = mlxsw_sp1_afk_encode_block,
.clear_block = mlxsw_sp1_afk_clear_block,
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = {
@ -263,10 +272,9 @@ static const struct mlxsw_sp2_afk_block_layout mlxsw_sp2_afk_blocks_layout[] = {
MLXSW_SP2_AFK_BLOCK_LAYOUT(block11, 0x00, 12),
};
static void mlxsw_sp2_afk_encode_block(char *block, int block_index,
char *output)
static void __mlxsw_sp2_afk_block_value_set(char *output, int block_index,
u64 block_value)
{
u64 block_value = mlxsw_sp2_afk_block_value_get(block);
const struct mlxsw_sp2_afk_block_layout *block_layout;
if (WARN_ON(block_index < 0 ||
@ -278,8 +286,22 @@ static void mlxsw_sp2_afk_encode_block(char *block, int block_index,
&block_layout->item, 0, block_value);
}
static void mlxsw_sp2_afk_encode_block(char *output, int block_index,
char *block)
{
u64 block_value = mlxsw_sp2_afk_block_value_get(block);
__mlxsw_sp2_afk_block_value_set(output, block_index, block_value);
}
static void mlxsw_sp2_afk_clear_block(char *output, int block_index)
{
__mlxsw_sp2_afk_block_value_set(output, block_index, 0);
}
const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = {
.blocks = mlxsw_sp2_afk_blocks,
.blocks_count = ARRAY_SIZE(mlxsw_sp2_afk_blocks),
.encode_block = mlxsw_sp2_afk_encode_block,
.clear_block = mlxsw_sp2_afk_clear_block,
};


@ -154,7 +154,9 @@ struct mlxsw_sp_acl_atcam_region {
};
struct mlxsw_sp_acl_atcam_entry_ht_key {
char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */
char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key,
* minus delta bits.
*/
u8 erp_id;
};
@ -165,9 +167,15 @@ struct mlxsw_sp_acl_atcam_chunk {
struct mlxsw_sp_acl_atcam_entry {
struct rhash_head ht_node;
struct mlxsw_sp_acl_atcam_entry_ht_key ht_key;
char full_enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */
struct {
u16 start;
u8 mask;
u8 value;
} delta_info;
struct mlxsw_sp_acl_ctcam_entry centry;
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
struct mlxsw_sp_acl_erp *erp;
struct mlxsw_sp_acl_erp_mask *erp_mask;
};
static inline struct mlxsw_sp_acl_atcam_region *
@ -209,15 +217,27 @@ int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam);
struct mlxsw_sp_acl_erp;
struct mlxsw_sp_acl_erp_delta;
bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp);
u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp);
struct mlxsw_sp_acl_erp *
mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
const char *mask, bool ctcam);
void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_erp *erp);
u16 mlxsw_sp_acl_erp_delta_start(const struct mlxsw_sp_acl_erp_delta *delta);
u8 mlxsw_sp_acl_erp_delta_mask(const struct mlxsw_sp_acl_erp_delta *delta);
u8 mlxsw_sp_acl_erp_delta_value(const struct mlxsw_sp_acl_erp_delta *delta,
const char *enc_key);
void mlxsw_sp_acl_erp_delta_clear(const struct mlxsw_sp_acl_erp_delta *delta,
const char *enc_key);
struct mlxsw_sp_acl_erp_mask;
bool
mlxsw_sp_acl_erp_mask_is_ctcam(const struct mlxsw_sp_acl_erp_mask *erp_mask);
u8 mlxsw_sp_acl_erp_mask_erp_id(const struct mlxsw_sp_acl_erp_mask *erp_mask);
const struct mlxsw_sp_acl_erp_delta *
mlxsw_sp_acl_erp_delta(const struct mlxsw_sp_acl_erp_mask *erp_mask);
struct mlxsw_sp_acl_erp_mask *
mlxsw_sp_acl_erp_mask_get(struct mlxsw_sp_acl_atcam_region *aregion,
const char *mask, bool ctcam);
void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_erp_mask *erp_mask);
int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion);
void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion);
int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp,

include/linux/objagg.h

@ -0,0 +1,46 @@
/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
#ifndef _OBJAGG_H
#define _OBJAGG_H
struct objagg_ops {
size_t obj_size;
void * (*delta_create)(void *priv, void *parent_obj, void *obj);
void (*delta_destroy)(void *priv, void *delta_priv);
void * (*root_create)(void *priv, void *obj);
void (*root_destroy)(void *priv, void *root_priv);
};
struct objagg;
struct objagg_obj;
const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj);
const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj);
const void *objagg_obj_raw(const struct objagg_obj *objagg_obj);
struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj);
void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj);
struct objagg *objagg_create(const struct objagg_ops *ops, void *priv);
void objagg_destroy(struct objagg *objagg);
struct objagg_obj_stats {
unsigned int user_count;
unsigned int delta_user_count; /* includes delta object users */
};
struct objagg_obj_stats_info {
struct objagg_obj_stats stats;
struct objagg_obj *objagg_obj; /* associated object */
bool is_root;
};
struct objagg_stats {
unsigned int stats_info_count;
struct objagg_obj_stats_info stats_info[];
};
const struct objagg_stats *objagg_stats_get(struct objagg *objagg);
void objagg_stats_put(const struct objagg_stats *objagg_stats);
#endif
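
To show how the pieces of this header fit together, here is a hedged usage sketch written as a tiny kernel module (an assumption: built against a kernel providing CONFIG_OBJAGG; all demo_* names are made up for illustration). It mirrors the integer-number example mentioned in the objagg_create() kernel-doc: keys close to an existing root are aggregated as deltas, others become new roots.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/objagg.h>

struct demo_key {
	unsigned int id;
};

struct demo_delta {
	unsigned int id_diff;
};

static void *demo_delta_create(void *priv, void *parent_obj, void *obj)
{
	struct demo_key *parent_key = parent_obj;
	struct demo_key *key = obj;
	struct demo_delta *delta;
	int diff = key->id - parent_key->id;

	/* Only aggregate keys that are "close enough" to the root. */
	if (diff < 0 || diff > 5)
		return ERR_PTR(-EINVAL);
	delta = kzalloc(sizeof(*delta), GFP_KERNEL);
	if (!delta)
		return ERR_PTR(-ENOMEM);
	delta->id_diff = diff;
	return delta;
}

static void demo_delta_destroy(void *priv, void *delta_priv)
{
	kfree(delta_priv);
}

static void *demo_root_create(void *priv, void *obj)
{
	/* The root private would typically hold a shared resource;
	 * here it is just a copy of the key.
	 */
	void *root = kmemdup(obj, sizeof(struct demo_key), GFP_KERNEL);

	return root ?: ERR_PTR(-ENOMEM);
}

static void demo_root_destroy(void *priv, void *root_priv)
{
	kfree(root_priv);
}

static const struct objagg_ops demo_objagg_ops = {
	.obj_size	= sizeof(struct demo_key),
	.delta_create	= demo_delta_create,
	.delta_destroy	= demo_delta_destroy,
	.root_create	= demo_root_create,
	.root_destroy	= demo_root_destroy,
};

static int __init demo_objagg_init(void)
{
	struct objagg_obj *first, *second;
	struct demo_key key = { .id = 8 };
	struct objagg *objagg;

	objagg = objagg_create(&demo_objagg_ops, NULL);
	if (IS_ERR(objagg))
		return PTR_ERR(objagg);

	first = objagg_obj_get(objagg, &key);	/* becomes a root */
	key.id = 10;
	second = objagg_obj_get(objagg, &key);	/* aggregated, delta id_diff == 2 */

	if (!IS_ERR(second)) {
		pr_info("second object is %s\n",
			objagg_obj_delta_priv(second) ? "a delta" : "a root");
		objagg_obj_put(objagg, second);
	}
	if (!IS_ERR(first))
		objagg_obj_put(objagg, first);
	objagg_destroy(objagg);
	return 0;
}
module_init(demo_objagg_init);

static void __exit demo_objagg_exit(void)
{
}
module_exit(demo_objagg_exit);

MODULE_LICENSE("Dual BSD/GPL");

Note that the library leaves all locking to the caller, so a real user would serialize these calls (the mlxsw driver below relies on RTNL).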


@ -0,0 +1,228 @@
/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM objagg
#if !defined(__TRACE_OBJAGG_H) || defined(TRACE_HEADER_MULTI_READ)
#define __TRACE_OBJAGG_H
#include <linux/tracepoint.h>
struct objagg;
struct objagg_obj;
TRACE_EVENT(objagg_create,
TP_PROTO(const struct objagg *objagg),
TP_ARGS(objagg),
TP_STRUCT__entry(
__field(const void *, objagg)
),
TP_fast_assign(
__entry->objagg = objagg;
),
TP_printk("objagg %p", __entry->objagg)
);
TRACE_EVENT(objagg_destroy,
TP_PROTO(const struct objagg *objagg),
TP_ARGS(objagg),
TP_STRUCT__entry(
__field(const void *, objagg)
),
TP_fast_assign(
__entry->objagg = objagg;
),
TP_printk("objagg %p", __entry->objagg)
);
TRACE_EVENT(objagg_obj_create,
TP_PROTO(const struct objagg *objagg,
const struct objagg_obj *obj),
TP_ARGS(objagg, obj),
TP_STRUCT__entry(
__field(const void *, objagg)
__field(const void *, obj)
),
TP_fast_assign(
__entry->objagg = objagg;
__entry->obj = obj;
),
TP_printk("objagg %p, obj %p", __entry->objagg, __entry->obj)
);
TRACE_EVENT(objagg_obj_destroy,
TP_PROTO(const struct objagg *objagg,
const struct objagg_obj *obj),
TP_ARGS(objagg, obj),
TP_STRUCT__entry(
__field(const void *, objagg)
__field(const void *, obj)
),
TP_fast_assign(
__entry->objagg = objagg;
__entry->obj = obj;
),
TP_printk("objagg %p, obj %p", __entry->objagg, __entry->obj)
);
TRACE_EVENT(objagg_obj_get,
TP_PROTO(const struct objagg *objagg,
const struct objagg_obj *obj,
unsigned int refcount),
TP_ARGS(objagg, obj, refcount),
TP_STRUCT__entry(
__field(const void *, objagg)
__field(const void *, obj)
__field(unsigned int, refcount)
),
TP_fast_assign(
__entry->objagg = objagg;
__entry->obj = obj;
__entry->refcount = refcount;
),
TP_printk("objagg %p, obj %p, refcount %u",
__entry->objagg, __entry->obj, __entry->refcount)
);
TRACE_EVENT(objagg_obj_put,
TP_PROTO(const struct objagg *objagg,
const struct objagg_obj *obj,
unsigned int refcount),
TP_ARGS(objagg, obj, refcount),
TP_STRUCT__entry(
__field(const void *, objagg)
__field(const void *, obj)
__field(unsigned int, refcount)
),
TP_fast_assign(
__entry->objagg = objagg;
__entry->obj = obj;
__entry->refcount = refcount;
),
TP_printk("objagg %p, obj %p, refcount %u",
__entry->objagg, __entry->obj, __entry->refcount)
);
TRACE_EVENT(objagg_obj_parent_assign,
TP_PROTO(const struct objagg *objagg,
const struct objagg_obj *obj,
const struct objagg_obj *parent,
unsigned int parent_refcount),
TP_ARGS(objagg, obj, parent, parent_refcount),
TP_STRUCT__entry(
__field(const void *, objagg)
__field(const void *, obj)
__field(const void *, parent)
__field(unsigned int, parent_refcount)
),
TP_fast_assign(
__entry->objagg = objagg;
__entry->obj = obj;
__entry->parent = parent;
__entry->parent_refcount = parent_refcount;
),
TP_printk("objagg %p, obj %p, parent %p, parent_refcount %u",
__entry->objagg, __entry->obj,
__entry->parent, __entry->parent_refcount)
);
TRACE_EVENT(objagg_obj_parent_unassign,
TP_PROTO(const struct objagg *objagg,
const struct objagg_obj *obj,
const struct objagg_obj *parent,
unsigned int parent_refcount),
TP_ARGS(objagg, obj, parent, parent_refcount),
TP_STRUCT__entry(
__field(const void *, objagg)
__field(const void *, obj)
__field(const void *, parent)
__field(unsigned int, parent_refcount)
),
TP_fast_assign(
__entry->objagg = objagg;
__entry->obj = obj;
__entry->parent = parent;
__entry->parent_refcount = parent_refcount;
),
TP_printk("objagg %p, obj %p, parent %p, parent_refcount %u",
__entry->objagg, __entry->obj,
__entry->parent, __entry->parent_refcount)
);
TRACE_EVENT(objagg_obj_root_create,
TP_PROTO(const struct objagg *objagg,
const struct objagg_obj *obj),
TP_ARGS(objagg, obj),
TP_STRUCT__entry(
__field(const void *, objagg)
__field(const void *, obj)
),
TP_fast_assign(
__entry->objagg = objagg;
__entry->obj = obj;
),
TP_printk("objagg %p, obj %p",
__entry->objagg, __entry->obj)
);
TRACE_EVENT(objagg_obj_root_destroy,
TP_PROTO(const struct objagg *objagg,
const struct objagg_obj *obj),
TP_ARGS(objagg, obj),
TP_STRUCT__entry(
__field(const void *, objagg)
__field(const void *, obj)
),
TP_fast_assign(
__entry->objagg = objagg;
__entry->obj = obj;
),
TP_printk("objagg %p, obj %p",
__entry->objagg, __entry->obj)
);
#endif /* __TRACE_OBJAGG_H */
/* This part must be outside protection */
#include <trace/define_trace.h>


@ -624,3 +624,6 @@ config GENERIC_LIB_CMPDI2
config GENERIC_LIB_UCMPDI2
bool
config OBJAGG
tristate "objagg" if COMPILE_TEST


@ -1976,6 +1976,16 @@ config TEST_MEMCAT_P
If unsure, say N.
config TEST_OBJAGG
tristate "Perform selftest on object aggreration manager"
default n
depends on OBJAGG
help
Enable this option to test object aggregation manager on boot
(or module load).
If unsure, say N.
endif # RUNTIME_TESTING_MENU
config MEMTEST


@ -75,6 +75,7 @@ obj-$(CONFIG_TEST_PARMAN) += test_parman.o
obj-$(CONFIG_TEST_KMOD) += test_kmod.o
obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@ -274,3 +275,4 @@ obj-$(CONFIG_GENERIC_LIB_LSHRDI3) += lshrdi3.o
obj-$(CONFIG_GENERIC_LIB_MULDI3) += muldi3.o
obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o
obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o
obj-$(CONFIG_OBJAGG) += objagg.o

lib/objagg.c

@ -0,0 +1,501 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rhashtable.h>
#include <linux/list.h>
#include <linux/sort.h>
#include <linux/objagg.h>
#define CREATE_TRACE_POINTS
#include <trace/events/objagg.h>
struct objagg {
const struct objagg_ops *ops;
void *priv;
struct rhashtable obj_ht;
struct rhashtable_params ht_params;
struct list_head obj_list;
unsigned int obj_count;
};
struct objagg_obj {
struct rhash_head ht_node; /* member of objagg->obj_ht */
struct list_head list; /* member of objagg->obj_list */
struct objagg_obj *parent; /* if the object is nested, this
* holds pointer to parent, otherwise NULL
*/
union {
void *delta_priv; /* user delta private */
void *root_priv; /* user root private */
};
unsigned int refcount; /* counts number of users of this object
* including nested objects
*/
struct objagg_obj_stats stats;
unsigned long obj[0];
};
static unsigned int objagg_obj_ref_inc(struct objagg_obj *objagg_obj)
{
return ++objagg_obj->refcount;
}
static unsigned int objagg_obj_ref_dec(struct objagg_obj *objagg_obj)
{
return --objagg_obj->refcount;
}
static void objagg_obj_stats_inc(struct objagg_obj *objagg_obj)
{
objagg_obj->stats.user_count++;
objagg_obj->stats.delta_user_count++;
if (objagg_obj->parent)
objagg_obj->parent->stats.delta_user_count++;
}
static void objagg_obj_stats_dec(struct objagg_obj *objagg_obj)
{
objagg_obj->stats.user_count--;
objagg_obj->stats.delta_user_count--;
if (objagg_obj->parent)
objagg_obj->parent->stats.delta_user_count--;
}
static bool objagg_obj_is_root(const struct objagg_obj *objagg_obj)
{
/* Nesting is not supported, so we can use ->parent
* to figure out if the object is root.
*/
return !objagg_obj->parent;
}
/**
* objagg_obj_root_priv - obtains root private for an object
* @objagg_obj: objagg object instance
*
* Note: all locking must be provided by the caller.
*
* Either the object is root itself when the private is returned
* directly, or the parent is root and its private is returned
* instead.
*
* Returns a user private root pointer.
*/
const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj)
{
if (objagg_obj_is_root(objagg_obj))
return objagg_obj->root_priv;
WARN_ON(!objagg_obj_is_root(objagg_obj->parent));
return objagg_obj->parent->root_priv;
}
EXPORT_SYMBOL(objagg_obj_root_priv);
/**
* objagg_obj_delta_priv - obtains delta private for an object
* @objagg_obj: objagg object instance
*
* Note: all locking must be provided by the caller.
*
* Returns user private delta pointer or NULL in case the passed
* object is root.
*/
const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj)
{
if (objagg_obj_is_root(objagg_obj))
return NULL;
return objagg_obj->delta_priv;
}
EXPORT_SYMBOL(objagg_obj_delta_priv);
/**
* objagg_obj_raw - obtains object user private pointer
* @objagg_obj: objagg object instance
*
* Note: all locking must be provided by the caller.
*
* Returns user private pointer as was passed to objagg_obj_get() by "obj" arg.
*/
const void *objagg_obj_raw(const struct objagg_obj *objagg_obj)
{
return objagg_obj->obj;
}
EXPORT_SYMBOL(objagg_obj_raw);
static struct objagg_obj *objagg_obj_lookup(struct objagg *objagg, void *obj)
{
return rhashtable_lookup_fast(&objagg->obj_ht, obj, objagg->ht_params);
}
static int objagg_obj_parent_assign(struct objagg *objagg,
struct objagg_obj *objagg_obj,
struct objagg_obj *parent)
{
void *delta_priv;
delta_priv = objagg->ops->delta_create(objagg->priv, parent->obj,
objagg_obj->obj);
if (IS_ERR(delta_priv))
return PTR_ERR(delta_priv);
/* User returned a delta private, that means that
* our object can be aggregated into the parent.
*/
objagg_obj->parent = parent;
objagg_obj->delta_priv = delta_priv;
objagg_obj_ref_inc(objagg_obj->parent);
trace_objagg_obj_parent_assign(objagg, objagg_obj,
parent,
parent->refcount);
return 0;
}
static int objagg_obj_parent_lookup_assign(struct objagg *objagg,
struct objagg_obj *objagg_obj)
{
struct objagg_obj *objagg_obj_cur;
int err;
list_for_each_entry(objagg_obj_cur, &objagg->obj_list, list) {
/* Nesting is not supported. In case the object
* is not root, it cannot be assigned as parent.
*/
if (!objagg_obj_is_root(objagg_obj_cur))
continue;
err = objagg_obj_parent_assign(objagg, objagg_obj,
objagg_obj_cur);
if (!err)
return 0;
}
return -ENOENT;
}
static void __objagg_obj_put(struct objagg *objagg,
struct objagg_obj *objagg_obj);
static void objagg_obj_parent_unassign(struct objagg *objagg,
struct objagg_obj *objagg_obj)
{
trace_objagg_obj_parent_unassign(objagg, objagg_obj,
objagg_obj->parent,
objagg_obj->parent->refcount);
objagg->ops->delta_destroy(objagg->priv, objagg_obj->delta_priv);
__objagg_obj_put(objagg, objagg_obj->parent);
}
static int objagg_obj_root_create(struct objagg *objagg,
struct objagg_obj *objagg_obj)
{
objagg_obj->root_priv = objagg->ops->root_create(objagg->priv,
objagg_obj->obj);
if (IS_ERR(objagg_obj->root_priv))
return PTR_ERR(objagg_obj->root_priv);
trace_objagg_obj_root_create(objagg, objagg_obj);
return 0;
}
static void objagg_obj_root_destroy(struct objagg *objagg,
struct objagg_obj *objagg_obj)
{
trace_objagg_obj_root_destroy(objagg, objagg_obj);
objagg->ops->root_destroy(objagg->priv, objagg_obj->root_priv);
}
static int objagg_obj_init(struct objagg *objagg,
struct objagg_obj *objagg_obj)
{
int err;
/* Try to find if the object can be aggregated under an existing one. */
err = objagg_obj_parent_lookup_assign(objagg, objagg_obj);
if (!err)
return 0;
/* If aggregation is not possible, make the object a root. */
return objagg_obj_root_create(objagg, objagg_obj);
}
static void objagg_obj_fini(struct objagg *objagg,
struct objagg_obj *objagg_obj)
{
if (!objagg_obj_is_root(objagg_obj))
objagg_obj_parent_unassign(objagg, objagg_obj);
else
objagg_obj_root_destroy(objagg, objagg_obj);
}
static struct objagg_obj *objagg_obj_create(struct objagg *objagg, void *obj)
{
struct objagg_obj *objagg_obj;
int err;
objagg_obj = kzalloc(sizeof(*objagg_obj) + objagg->ops->obj_size,
GFP_KERNEL);
if (!objagg_obj)
return ERR_PTR(-ENOMEM);
objagg_obj_ref_inc(objagg_obj);
memcpy(objagg_obj->obj, obj, objagg->ops->obj_size);
err = objagg_obj_init(objagg, objagg_obj);
if (err)
goto err_obj_init;
err = rhashtable_insert_fast(&objagg->obj_ht, &objagg_obj->ht_node,
objagg->ht_params);
if (err)
goto err_ht_insert;
list_add(&objagg_obj->list, &objagg->obj_list);
objagg->obj_count++;
trace_objagg_obj_create(objagg, objagg_obj);
return objagg_obj;
err_ht_insert:
objagg_obj_fini(objagg, objagg_obj);
err_obj_init:
kfree(objagg_obj);
return ERR_PTR(err);
}
static struct objagg_obj *__objagg_obj_get(struct objagg *objagg, void *obj)
{
struct objagg_obj *objagg_obj;
/* First, try to find the object exactly as user passed it,
* perhaps it is already in use.
*/
objagg_obj = objagg_obj_lookup(objagg, obj);
if (objagg_obj) {
objagg_obj_ref_inc(objagg_obj);
return objagg_obj;
}
return objagg_obj_create(objagg, obj);
}
/**
* objagg_obj_get - gets an object within objagg instance
* @objagg: objagg instance
* @obj: user-specific private object pointer
*
* Note: all locking must be provided by the caller.
*
* Size of the "obj" memory is specified in "objagg->ops".
*
* There are 3 main options this function wraps:
* 1) The object according to "obj" already exists. In that case
* the reference counter is incremented and the object is returned.
* 2) The object does not exist, but it can be aggregated within
* another object. In that case, user ops->delta_create() is called
* to obtain delta data and a new object is created with returned
* user-delta private pointer.
* 3) The object does not exist and cannot be aggregated into
* any of the existing objects. In that case, user ops->root_create()
* is called to create the root and a new object is created with
* returned user-root private pointer.
*
* Returns a pointer to objagg object instance in case of success,
* otherwise it returns pointer error using ERR_PTR macro.
*/
struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj)
{
struct objagg_obj *objagg_obj;
objagg_obj = __objagg_obj_get(objagg, obj);
if (IS_ERR(objagg_obj))
return objagg_obj;
objagg_obj_stats_inc(objagg_obj);
trace_objagg_obj_get(objagg, objagg_obj, objagg_obj->refcount);
return objagg_obj;
}
EXPORT_SYMBOL(objagg_obj_get);
static void objagg_obj_destroy(struct objagg *objagg,
struct objagg_obj *objagg_obj)
{
trace_objagg_obj_destroy(objagg, objagg_obj);
--objagg->obj_count;
list_del(&objagg_obj->list);
rhashtable_remove_fast(&objagg->obj_ht, &objagg_obj->ht_node,
objagg->ht_params);
objagg_obj_fini(objagg, objagg_obj);
kfree(objagg_obj);
}
static void __objagg_obj_put(struct objagg *objagg,
struct objagg_obj *objagg_obj)
{
if (!objagg_obj_ref_dec(objagg_obj))
objagg_obj_destroy(objagg, objagg_obj);
}
/**
* objagg_obj_put - puts an object within objagg instance
* @objagg: objagg instance
* @objagg_obj: objagg object instance
*
* Note: all locking must be provided by the caller.
*
* Symmetric to objagg_obj_get().
*/
void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj)
{
trace_objagg_obj_put(objagg, objagg_obj, objagg_obj->refcount);
objagg_obj_stats_dec(objagg_obj);
__objagg_obj_put(objagg, objagg_obj);
}
EXPORT_SYMBOL(objagg_obj_put);
/**
* objagg_create - creates a new objagg instance
* @ops: user-specific callbacks
* @priv: pointer to a private data passed to the ops
*
* Note: all locking must be provided by the caller.
*
* The purpose of the library is to provide an infrastructure to
* aggregate user-specified objects. The library does not care about the
* type of the object. The user fills in ops which take care of the
* specific user object manipulation.
*
* As a very stupid example, consider integer numbers. For example
* number 8 as a root object. That can aggregate number 9 with delta 1,
* number 10 with delta 2, etc. This example is implemented as
* a part of a testing module in test_objagg.c file.
*
* Each objagg instance contains multiple trees. Each tree node is
* represented by "an object". In the current implementation there can be
* only root and leaf nodes. Leaf nodes are called deltas.
* But in general, this can be easily extended for intermediate nodes.
* In that extension, a delta would be associated with all non-root
* nodes.
*
* Returns a pointer to newly created objagg instance in case of success,
* otherwise it returns pointer error using ERR_PTR macro.
*/
struct objagg *objagg_create(const struct objagg_ops *ops, void *priv)
{
struct objagg *objagg;
int err;
if (WARN_ON(!ops || !ops->root_create || !ops->root_destroy ||
!ops->delta_create || !ops->delta_destroy))
return ERR_PTR(-EINVAL);
objagg = kzalloc(sizeof(*objagg), GFP_KERNEL);
if (!objagg)
return ERR_PTR(-ENOMEM);
objagg->ops = ops;
objagg->priv = priv;
INIT_LIST_HEAD(&objagg->obj_list);
objagg->ht_params.key_len = ops->obj_size;
objagg->ht_params.key_offset = offsetof(struct objagg_obj, obj);
objagg->ht_params.head_offset = offsetof(struct objagg_obj, ht_node);
err = rhashtable_init(&objagg->obj_ht, &objagg->ht_params);
if (err)
goto err_rhashtable_init;
trace_objagg_create(objagg);
return objagg;
err_rhashtable_init:
kfree(objagg);
return ERR_PTR(err);
}
EXPORT_SYMBOL(objagg_create);
/**
* objagg_destroy - destroys an objagg instance
* @objagg: objagg instance
*
* Note: all locking must be provided by the caller.
*/
void objagg_destroy(struct objagg *objagg)
{
trace_objagg_destroy(objagg);
WARN_ON(!list_empty(&objagg->obj_list));
rhashtable_destroy(&objagg->obj_ht);
kfree(objagg);
}
EXPORT_SYMBOL(objagg_destroy);
static int objagg_stats_info_sort_cmp_func(const void *a, const void *b)
{
const struct objagg_obj_stats_info *stats_info1 = a;
const struct objagg_obj_stats_info *stats_info2 = b;
if (stats_info1->is_root != stats_info2->is_root)
return stats_info2->is_root - stats_info1->is_root;
if (stats_info1->stats.delta_user_count !=
stats_info2->stats.delta_user_count)
return stats_info2->stats.delta_user_count -
stats_info1->stats.delta_user_count;
return stats_info2->stats.user_count - stats_info1->stats.user_count;
}
/**
* objagg_stats_get - obtains stats of the objagg instance
* @objagg: objagg instance
*
* Note: all locking must be provided by the caller.
*
* The returned structure contains statistics of all objects
* currently in use, ordered by the following rules:
* 1) Root objects are always on lower indexes than the rest.
* 2) Objects with higher delta user count are always on lower
* indexes.
* 3) In case more objects have the same delta user count,
* the objects are ordered by user count.
*
* Returns a pointer to stats instance in case of success,
* otherwise it returns pointer error using ERR_PTR macro.
*/
const struct objagg_stats *objagg_stats_get(struct objagg *objagg)
{
struct objagg_stats *objagg_stats;
struct objagg_obj *objagg_obj;
size_t alloc_size;
int i;
alloc_size = sizeof(*objagg_stats) +
sizeof(objagg_stats->stats_info[0]) * objagg->obj_count;
objagg_stats = kzalloc(alloc_size, GFP_KERNEL);
if (!objagg_stats)
return ERR_PTR(-ENOMEM);
i = 0;
list_for_each_entry(objagg_obj, &objagg->obj_list, list) {
memcpy(&objagg_stats->stats_info[i].stats, &objagg_obj->stats,
sizeof(objagg_stats->stats_info[0].stats));
objagg_stats->stats_info[i].objagg_obj = objagg_obj;
objagg_stats->stats_info[i].is_root =
objagg_obj_is_root(objagg_obj);
i++;
}
objagg_stats->stats_info_count = i;
sort(objagg_stats->stats_info, objagg_stats->stats_info_count,
sizeof(struct objagg_obj_stats_info),
objagg_stats_info_sort_cmp_func, NULL);
return objagg_stats;
}
EXPORT_SYMBOL(objagg_stats_get);
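/* A minimal sketch of consuming the stats (illustrative only; the
* print format is hypothetical):
*
*	stats = objagg_stats_get(objagg);
*	if (IS_ERR(stats))
*		return PTR_ERR(stats);
*	for (i = 0; i < stats->stats_info_count; i++)
*		pr_info("%s: %u users, %u delta users\n",
*			stats->stats_info[i].is_root ? "root" : "delta",
*			stats->stats_info[i].stats.user_count,
*			stats->stats_info[i].stats.delta_user_count);
*	objagg_stats_put(stats);
*/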
/**
* objagg_stats_put - puts stats of the objagg instance
* @objagg_stats: objagg instance stats
*
* Note: all locking must be provided by the caller.
*/
void objagg_stats_put(const struct objagg_stats *objagg_stats)
{
kfree(objagg_stats);
}
EXPORT_SYMBOL(objagg_stats_put);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Object aggregation manager");

835
lib/test_objagg.c Normal file
View File

@ -0,0 +1,835 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/objagg.h>
struct tokey {
unsigned int id;
};
#define NUM_KEYS 32
static int key_id_index(unsigned int key_id)
{
if (key_id >= NUM_KEYS) {
WARN_ON(1);
return 0;
}
return key_id;
}
#define BUF_LEN 128
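/* Bookkeeping for the test: root_count and delta_count are maintained by
* the callbacks below, next_root_buf holds the random payload copied into
* the next root created, and objagg_objs[]/key_refs[] remember which
* object was obtained for each key id and how many times.
*/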
struct world {
unsigned int root_count;
unsigned int delta_count;
char next_root_buf[BUF_LEN];
struct objagg_obj *objagg_objs[NUM_KEYS];
unsigned int key_refs[NUM_KEYS];
};
struct root {
struct tokey key;
char buf[BUF_LEN];
};
struct delta {
unsigned int key_id_diff;
};
static struct objagg_obj *world_obj_get(struct world *world,
struct objagg *objagg,
unsigned int key_id)
{
struct objagg_obj *objagg_obj;
struct tokey key;
int err;
key.id = key_id;
objagg_obj = objagg_obj_get(objagg, &key);
if (IS_ERR(objagg_obj)) {
pr_err("Key %u: Failed to get object.\n", key_id);
return objagg_obj;
}
if (!world->key_refs[key_id_index(key_id)]) {
world->objagg_objs[key_id_index(key_id)] = objagg_obj;
} else if (world->objagg_objs[key_id_index(key_id)] != objagg_obj) {
pr_err("Key %u: God another object for the same key.\n",
key_id);
err = -EINVAL;
goto err_key_id_check;
}
world->key_refs[key_id_index(key_id)]++;
return objagg_obj;
err_key_id_check:
objagg_obj_put(objagg, objagg_obj);
return ERR_PTR(err);
}
static void world_obj_put(struct world *world, struct objagg *objagg,
unsigned int key_id)
{
struct objagg_obj *objagg_obj;
if (!world->key_refs[key_id_index(key_id)])
return;
objagg_obj = world->objagg_objs[key_id_index(key_id)];
objagg_obj_put(objagg, objagg_obj);
world->key_refs[key_id_index(key_id)]--;
}
#define MAX_KEY_ID_DIFF 5
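/* A key may only be aggregated under a root whose id is not larger and
* differs by at most MAX_KEY_ID_DIFF; returning an error here makes the
* objagg core fall back to creating a new root for the key.
*/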
static void *delta_create(void *priv, void *parent_obj, void *obj)
{
struct tokey *parent_key = parent_obj;
struct world *world = priv;
struct tokey *key = obj;
int diff = key->id - parent_key->id;
struct delta *delta;
if (diff < 0 || diff > MAX_KEY_ID_DIFF)
return ERR_PTR(-EINVAL);
delta = kzalloc(sizeof(*delta), GFP_KERNEL);
if (!delta)
return ERR_PTR(-ENOMEM);
delta->key_id_diff = diff;
world->delta_count++;
return delta;
}
static void delta_destroy(void *priv, void *delta_priv)
{
struct delta *delta = delta_priv;
struct world *world = priv;
world->delta_count--;
kfree(delta);
}
static void *root_create(void *priv, void *obj)
{
struct world *world = priv;
struct tokey *key = obj;
struct root *root;
root = kzalloc(sizeof(*root), GFP_KERNEL);
if (!root)
return ERR_PTR(-ENOMEM);
memcpy(&root->key, key, sizeof(root->key));
memcpy(root->buf, world->next_root_buf, sizeof(root->buf));
world->root_count++;
return root;
}
static void root_destroy(void *priv, void *root_priv)
{
struct root *root = root_priv;
struct world *world = priv;
world->root_count--;
kfree(root);
}
static int test_nodelta_obj_get(struct world *world, struct objagg *objagg,
unsigned int key_id, bool should_create_root)
{
unsigned int orig_root_count = world->root_count;
struct objagg_obj *objagg_obj;
const struct root *root;
int err;
if (should_create_root)
prandom_bytes(world->next_root_buf,
sizeof(world->next_root_buf));
objagg_obj = world_obj_get(world, objagg, key_id);
if (IS_ERR(objagg_obj)) {
pr_err("Key %u: Failed to get object.\n", key_id);
return PTR_ERR(objagg_obj);
}
if (should_create_root) {
if (world->root_count != orig_root_count + 1) {
pr_err("Key %u: Root was not created\n", key_id);
err = -EINVAL;
goto err_check_root_count;
}
} else {
if (world->root_count != orig_root_count) {
pr_err("Key %u: Root was incorrectly created\n",
key_id);
err = -EINVAL;
goto err_check_root_count;
}
}
root = objagg_obj_root_priv(objagg_obj);
if (root->key.id != key_id) {
pr_err("Key %u: Root has unexpected key id\n", key_id);
err = -EINVAL;
goto err_check_key_id;
}
if (should_create_root &&
memcmp(world->next_root_buf, root->buf, sizeof(root->buf))) {
pr_err("Key %u: Buffer does not match the expected content\n",
key_id);
err = -EINVAL;
goto err_check_buf;
}
return 0;
err_check_buf:
err_check_key_id:
err_check_root_count:
objagg_obj_put(objagg, objagg_obj);
return err;
}
static int test_nodelta_obj_put(struct world *world, struct objagg *objagg,
unsigned int key_id, bool should_destroy_root)
{
unsigned int orig_root_count = world->root_count;
world_obj_put(world, objagg, key_id);
if (should_destroy_root) {
if (world->root_count != orig_root_count - 1) {
pr_err("Key %u: Root was not destroyed\n", key_id);
return -EINVAL;
}
} else {
if (world->root_count != orig_root_count) {
pr_err("Key %u: Root was incorrectly destroyed\n",
key_id);
return -EINVAL;
}
}
return 0;
}
static int check_stats_zero(struct objagg *objagg)
{
const struct objagg_stats *stats;
int err = 0;
stats = objagg_stats_get(objagg);
if (IS_ERR(stats))
return PTR_ERR(stats);
if (stats->stats_info_count != 0) {
pr_err("Stats: Object count is not zero while it should be\n");
err = -EINVAL;
}
objagg_stats_put(stats);
return err;
}
static int check_stats_nodelta(struct objagg *objagg)
{
const struct objagg_stats *stats;
int i;
int err;
stats = objagg_stats_get(objagg);
if (IS_ERR(stats))
return PTR_ERR(stats);
if (stats->stats_info_count != NUM_KEYS) {
pr_err("Stats: Unexpected object count (%u expected, %u returned)\n",
NUM_KEYS, stats->stats_info_count);
err = -EINVAL;
goto stats_put;
}
for (i = 0; i < stats->stats_info_count; i++) {
if (stats->stats_info[i].stats.user_count != 2) {
pr_err("Stats: incorrect user count\n");
err = -EINVAL;
goto stats_put;
}
if (stats->stats_info[i].stats.delta_user_count != 2) {
pr_err("Stats: incorrect delta user count\n");
err = -EINVAL;
goto stats_put;
}
}
err = 0;
stats_put:
objagg_stats_put(stats);
return err;
}
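/* The "nodelta" ops never aggregate: delta_create always fails, so every
* key ends up with its own root and the test exercises only the plain
* get/put and stats paths.
*/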
static void *delta_create_dummy(void *priv, void *parent_obj, void *obj)
{
return ERR_PTR(-EOPNOTSUPP);
}
static void delta_destroy_dummy(void *priv, void *delta_priv)
{
}
static const struct objagg_ops nodelta_ops = {
.obj_size = sizeof(struct tokey),
.delta_create = delta_create_dummy,
.delta_destroy = delta_destroy_dummy,
.root_create = root_create,
.root_destroy = root_destroy,
};
static int test_nodelta(void)
{
struct world world = {};
struct objagg *objagg;
int i;
int err;
objagg = objagg_create(&nodelta_ops, &world);
if (IS_ERR(objagg))
return PTR_ERR(objagg);
err = check_stats_zero(objagg);
if (err)
goto err_stats_first_zero;
/* First round of gets, the root objects should be created */
for (i = 0; i < NUM_KEYS; i++) {
err = test_nodelta_obj_get(&world, objagg, i, true);
if (err)
goto err_obj_first_get;
}
/* Do the second round of gets, all roots are already created,
* make sure that no new root is created
*/
for (i = 0; i < NUM_KEYS; i++) {
err = test_nodelta_obj_get(&world, objagg, i, false);
if (err)
goto err_obj_second_get;
}
err = check_stats_nodelta(objagg);
if (err)
goto err_stats_nodelta;
for (i = NUM_KEYS - 1; i >= 0; i--) {
err = test_nodelta_obj_put(&world, objagg, i, false);
if (err)
goto err_obj_first_put;
}
for (i = NUM_KEYS - 1; i >= 0; i--) {
err = test_nodelta_obj_put(&world, objagg, i, true);
if (err)
goto err_obj_second_put;
}
err = check_stats_zero(objagg);
if (err)
goto err_stats_second_zero;
objagg_destroy(objagg);
return 0;
err_stats_nodelta:
err_obj_first_put:
err_obj_second_get:
for (i--; i >= 0; i--)
world_obj_put(&world, objagg, i);
i = NUM_KEYS;
err_obj_first_get:
err_obj_second_put:
for (i--; i >= 0; i--)
world_obj_put(&world, objagg, i);
err_stats_first_zero:
err_stats_second_zero:
objagg_destroy(objagg);
return err;
}
static const struct objagg_ops delta_ops = {
.obj_size = sizeof(struct tokey),
.delta_create = delta_create,
.delta_destroy = delta_destroy,
.root_create = root_create,
.root_destroy = root_destroy,
};
enum action {
ACTION_GET,
ACTION_PUT,
};
enum expect_delta {
EXPECT_DELTA_SAME,
EXPECT_DELTA_INC,
EXPECT_DELTA_DEC,
};
enum expect_root {
EXPECT_ROOT_SAME,
EXPECT_ROOT_INC,
EXPECT_ROOT_DEC,
};
struct expect_stats_info {
struct objagg_obj_stats stats;
bool is_root;
unsigned int key_id;
};
struct expect_stats {
unsigned int info_count;
struct expect_stats_info info[NUM_KEYS];
};
struct action_item {
unsigned int key_id;
enum action action;
enum expect_delta expect_delta;
enum expect_root expect_root;
struct expect_stats expect_stats;
};
#define EXPECT_STATS(count, ...) \
{ \
.info_count = count, \
.info = { __VA_ARGS__ } \
}
#define ROOT(key_id, user_count, delta_user_count) \
{{user_count, delta_user_count}, true, key_id}
#define DELTA(key_id, user_count) \
{{user_count, user_count}, false, key_id}
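/* Every action item below performs a single get or put for a key and
* encodes the expected effect on the delta and root counts together with
* the expected stats. ROOT(key_id, user_count, delta_user_count)
* describes a root object, DELTA(key_id, user_count) a delta object.
* The trailing comments show the aggregation state after the action:
* "r:" lists root keys (repeated once per user), "d:" lists deltas as
* "key^root".
*/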
static const struct action_item action_items[] = {
{
1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
EXPECT_STATS(1, ROOT(1, 1, 1)),
}, /* r: 1 d: */
{
7, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
EXPECT_STATS(2, ROOT(1, 1, 1), ROOT(7, 1, 1)),
}, /* r: 1, 7 d: */
{
3, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
EXPECT_STATS(3, ROOT(1, 1, 2), ROOT(7, 1, 1),
DELTA(3, 1)),
}, /* r: 1, 7 d: 3^1 */
{
5, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
EXPECT_STATS(4, ROOT(1, 1, 3), ROOT(7, 1, 1),
DELTA(3, 1), DELTA(5, 1)),
}, /* r: 1, 7 d: 3^1, 5^1 */
{
3, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(4, ROOT(1, 1, 4), ROOT(7, 1, 1),
DELTA(3, 2), DELTA(5, 1)),
}, /* r: 1, 7 d: 3^1, 3^1, 5^1 */
{
1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(4, ROOT(1, 2, 5), ROOT(7, 1, 1),
DELTA(3, 2), DELTA(5, 1)),
}, /* r: 1, 1, 7 d: 3^1, 3^1, 5^1 */
{
30, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
EXPECT_STATS(5, ROOT(1, 2, 5), ROOT(7, 1, 1), ROOT(30, 1, 1),
DELTA(3, 2), DELTA(5, 1)),
}, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1 */
{
8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 2), ROOT(30, 1, 1),
DELTA(3, 2), DELTA(5, 1), DELTA(8, 1)),
}, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1, 8^7 */
{
8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 3), ROOT(30, 1, 1),
DELTA(3, 2), DELTA(8, 2), DELTA(5, 1)),
}, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1, 8^7, 8^7 */
{
3, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(6, ROOT(1, 2, 4), ROOT(7, 1, 3), ROOT(30, 1, 1),
DELTA(8, 2), DELTA(3, 1), DELTA(5, 1)),
}, /* r: 1, 1, 7, 30 d: 3^1, 5^1, 8^7, 8^7 */
{
3, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(1, 2, 3), ROOT(7, 1, 3), ROOT(30, 1, 1),
DELTA(8, 2), DELTA(5, 1)),
}, /* r: 1, 1, 7, 30 d: 5^1, 8^7, 8^7 */
{
1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(1, 1, 2), ROOT(30, 1, 1),
DELTA(8, 2), DELTA(5, 1)),
}, /* r: 1, 7, 30 d: 5^1, 8^7, 8^7 */
{
1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(1, 0, 1),
DELTA(8, 2), DELTA(5, 1)),
}, /* r: 7, 30 d: 5^1, 8^7, 8^7 */
{
5, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC,
EXPECT_STATS(3, ROOT(7, 1, 3), ROOT(30, 1, 1),
DELTA(8, 2)),
}, /* r: 7, 30 d: 8^7, 8^7 */
{
5, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
EXPECT_STATS(4, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(5, 1, 1),
DELTA(8, 2)),
}, /* r: 7, 30, 5 d: 8^7, 8^7 */
{
6, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1),
DELTA(8, 2), DELTA(6, 1)),
}, /* r: 7, 30, 5 d: 8^7, 8^7, 6^5 */
{
8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(7, 1, 4), ROOT(5, 1, 2), ROOT(30, 1, 1),
DELTA(8, 3), DELTA(6, 1)),
}, /* r: 7, 30, 5 d: 8^7, 8^7, 8^7, 6^5 */
{
8, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1),
DELTA(8, 2), DELTA(6, 1)),
}, /* r: 7, 30, 5 d: 8^7, 8^7, 6^5 */
{
8, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(7, 1, 2), ROOT(5, 1, 2), ROOT(30, 1, 1),
DELTA(8, 1), DELTA(6, 1)),
}, /* r: 7, 30, 5 d: 8^7, 6^5 */
{
8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
EXPECT_STATS(4, ROOT(5, 1, 2), ROOT(7, 1, 1), ROOT(30, 1, 1),
DELTA(6, 1)),
}, /* r: 7, 30, 5 d: 6^5 */
{
8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(5, 1, 3), ROOT(7, 1, 1), ROOT(30, 1, 1),
DELTA(6, 1), DELTA(8, 1)),
}, /* r: 7, 30, 5 d: 6^5, 8^5 */
{
7, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC,
EXPECT_STATS(4, ROOT(5, 1, 3), ROOT(30, 1, 1),
DELTA(6, 1), DELTA(8, 1)),
}, /* r: 30, 5 d: 6^5, 8^5 */
{
30, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC,
EXPECT_STATS(3, ROOT(5, 1, 3),
DELTA(6, 1), DELTA(8, 1)),
}, /* r: 5 d: 6^5, 8^5 */
{
5, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(3, ROOT(5, 0, 2),
DELTA(6, 1), DELTA(8, 1)),
}, /* r: d: 6^5, 8^5 */
{
6, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
EXPECT_STATS(2, ROOT(5, 0, 1),
DELTA(8, 1)),
}, /* r: d: 6^5 */
{
8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC,
EXPECT_STATS(0, ),
}, /* r: d: */
};
static int check_expect(struct world *world,
const struct action_item *action_item,
unsigned int orig_delta_count,
unsigned int orig_root_count)
{
unsigned int key_id = action_item->key_id;
switch (action_item->expect_delta) {
case EXPECT_DELTA_SAME:
if (orig_delta_count != world->delta_count) {
pr_err("Key %u: Delta count changed while expected to remain the same.\n",
key_id);
return -EINVAL;
}
break;
case EXPECT_DELTA_INC:
if (WARN_ON(action_item->action == ACTION_PUT))
return -EINVAL;
if (orig_delta_count + 1 != world->delta_count) {
pr_err("Key %u: Delta count was not incremented.\n",
key_id);
return -EINVAL;
}
break;
case EXPECT_DELTA_DEC:
if (WARN_ON(action_item->action == ACTION_GET))
return -EINVAL;
if (orig_delta_count - 1 != world->delta_count) {
pr_err("Key %u: Delta count was not decremented.\n",
key_id);
return -EINVAL;
}
break;
}
switch (action_item->expect_root) {
case EXPECT_ROOT_SAME:
if (orig_root_count != world->root_count) {
pr_err("Key %u: Root count changed while expected to remain the same.\n",
key_id);
return -EINVAL;
}
break;
case EXPECT_ROOT_INC:
if (WARN_ON(action_item->action == ACTION_PUT))
return -EINVAL;
if (orig_root_count + 1 != world->root_count) {
pr_err("Key %u: Root count was not incremented.\n",
key_id);
return -EINVAL;
}
break;
case EXPECT_ROOT_DEC:
if (WARN_ON(action_item->action == ACTION_GET))
return -EINVAL;
if (orig_root_count - 1 != world->root_count) {
pr_err("Key %u: Root count was not decremented.\n",
key_id);
return -EINVAL;
}
}
return 0;
}
static unsigned int obj_to_key_id(struct objagg_obj *objagg_obj)
{
const struct tokey *root_key;
const struct delta *delta;
unsigned int key_id;
root_key = objagg_obj_root_priv(objagg_obj);
key_id = root_key->id;
delta = objagg_obj_delta_priv(objagg_obj);
if (delta)
key_id += delta->key_id_diff;
return key_id;
}
static int
check_expect_stats_nums(const struct objagg_obj_stats_info *stats_info,
const struct expect_stats_info *expect_stats_info,
const char **errmsg)
{
if (stats_info->is_root != expect_stats_info->is_root) {
if (errmsg)
*errmsg = "Incorrect root/delta indication";
return -EINVAL;
}
if (stats_info->stats.user_count !=
expect_stats_info->stats.user_count) {
if (errmsg)
*errmsg = "Incorrect user count";
return -EINVAL;
}
if (stats_info->stats.delta_user_count !=
expect_stats_info->stats.delta_user_count) {
if (errmsg)
*errmsg = "Incorrect delta user count";
return -EINVAL;
}
return 0;
}
static int
check_expect_stats_key_id(const struct objagg_obj_stats_info *stats_info,
const struct expect_stats_info *expect_stats_info,
const char **errmsg)
{
if (obj_to_key_id(stats_info->objagg_obj) !=
expect_stats_info->key_id) {
if (errmsg)
*errmsg = "incorrect key id";
return -EINVAL;
}
return 0;
}
static int check_expect_stats_neigh(const struct objagg_stats *stats,
const struct expect_stats *expect_stats,
int pos)
{
int i;
int err;
for (i = pos - 1; i >= 0; i--) {
err = check_expect_stats_nums(&stats->stats_info[i],
&expect_stats->info[pos], NULL);
if (err)
break;
err = check_expect_stats_key_id(&stats->stats_info[i],
&expect_stats->info[pos], NULL);
if (!err)
return 0;
}
for (i = pos + 1; i < stats->stats_info_count; i++) {
err = check_expect_stats_nums(&stats->stats_info[i],
&expect_stats->info[pos], NULL);
if (err)
break;
err = check_expect_stats_key_id(&stats->stats_info[i],
&expect_stats->info[pos], NULL);
if (!err)
return 0;
}
return -EINVAL;
}
static int __check_expect_stats(const struct objagg_stats *stats,
const struct expect_stats *expect_stats,
const char **errmsg)
{
int i;
int err;
if (stats->stats_info_count != expect_stats->info_count) {
*errmsg = "Unexpected object count";
return -EINVAL;
}
for (i = 0; i < stats->stats_info_count; i++) {
err = check_expect_stats_nums(&stats->stats_info[i],
&expect_stats->info[i], errmsg);
if (err)
return err;
err = check_expect_stats_key_id(&stats->stats_info[i],
&expect_stats->info[i], errmsg);
if (err) {
/* It is possible that one of the neighboring stats entries
* with the same numbers has the correct key id, so check them.
*/
err = check_expect_stats_neigh(stats, expect_stats, i);
if (err)
return err;
}
}
return 0;
}
static int check_expect_stats(struct objagg *objagg,
const struct expect_stats *expect_stats,
const char **errmsg)
{
const struct objagg_stats *stats;
int err;
stats = objagg_stats_get(objagg);
if (IS_ERR(stats))
return PTR_ERR(stats);
err = __check_expect_stats(stats, expect_stats, errmsg);
objagg_stats_put(stats);
return err;
}
static int test_delta_action_item(struct world *world,
struct objagg *objagg,
const struct action_item *action_item,
bool inverse)
{
unsigned int orig_delta_count = world->delta_count;
unsigned int orig_root_count = world->root_count;
unsigned int key_id = action_item->key_id;
enum action action = action_item->action;
struct objagg_obj *objagg_obj;
const char *errmsg;
int err;
if (inverse)
action = action == ACTION_GET ? ACTION_PUT : ACTION_GET;
switch (action) {
case ACTION_GET:
objagg_obj = world_obj_get(world, objagg, key_id);
if (IS_ERR(objagg_obj))
return PTR_ERR(objagg_obj);
break;
case ACTION_PUT:
world_obj_put(world, objagg, key_id);
break;
}
if (inverse)
return 0;
err = check_expect(world, action_item,
orig_delta_count, orig_root_count);
if (err)
goto errout;
err = check_expect_stats(objagg, &action_item->expect_stats, &errmsg);
if (err) {
pr_err("Key %u: Stats: %s\n", action_item->key_id, errmsg);
goto errout;
}
return 0;
errout:
/* This can only happen when the action is not inverted.
* So in case of an error, clean up by doing the inverse action.
*/
test_delta_action_item(world, objagg, action_item, true);
return err;
}
static int test_delta(void)
{
struct world world = {};
struct objagg *objagg;
int i;
int err;
objagg = objagg_create(&delta_ops, &world);
if (IS_ERR(objagg))
return PTR_ERR(objagg);
for (i = 0; i < ARRAY_SIZE(action_items); i++) {
err = test_delta_action_item(&world, objagg,
&action_items[i], false);
if (err)
goto err_do_action_item;
}
objagg_destroy(objagg);
return 0;
err_do_action_item:
for (i--; i >= 0; i--)
test_delta_action_item(&world, objagg, &action_items[i], true);
objagg_destroy(objagg);
return err;
}
static int __init test_objagg_init(void)
{
int err;
err = test_nodelta();
if (err)
return err;
return test_delta();
}
static void __exit test_objagg_exit(void)
{
}
module_init(test_objagg_init);
module_exit(test_objagg_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Test module for objagg");

View File

@ -8,7 +8,7 @@
lib_dir=$(dirname $0)/../../../../net/forwarding
ALL_TESTS="single_mask_test identical_filters_test two_masks_test \
multiple_masks_test ctcam_edge_cases_test"
multiple_masks_test ctcam_edge_cases_test delta_simple_test"
NUM_NETIFS=2
source $lib_dir/tc_common.sh
source $lib_dir/lib.sh
@ -142,7 +142,7 @@ two_masks_test()
tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
$tcflags dst_ip 192.0.2.2 action drop
tc filter add dev $h2 ingress protocol ip pref 3 handle 103 flower \
$tcflags dst_ip 192.0.0.0/16 action drop
$tcflags dst_ip 192.0.0.0/8 action drop
$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
-t ip -q
@ -235,7 +235,7 @@ ctcam_two_atcam_masks_test()
$tcflags dst_ip 192.0.2.2 action drop
# Filter goes into A-TCAM
tc filter add dev $h2 ingress protocol ip pref 3 handle 103 flower \
$tcflags dst_ip 192.0.2.0/24 action drop
$tcflags dst_ip 192.0.0.0/16 action drop
$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
-t ip -q
@ -324,6 +324,86 @@ ctcam_edge_cases_test()
ctcam_no_atcam_masks_test
}
tp_record()
{
local tracepoint=$1
local cmd=$2
perf record -q -e $tracepoint $cmd
return $?
}
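# Check how many times the given tracepoint fired in the perf.data
# produced by the preceding tp_record call and compare it against the
# expected count.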
tp_check_hits()
{
local tracepoint=$1
local count=$2
perf_output=`perf script -F trace:event,trace`
hits=`echo "$perf_output" | grep "$tracepoint:" | wc -l`
if [[ "$count" -ne "$hits" ]]; then
return 1
fi
return 0
}
delta_simple_test()
{
# The first filter will create an eRP, the second filter will fit into
# the first eRP with a delta. Then remove the first rule and check that
# the eRP stays (referenced by the second filter).
RET=0
if [[ "$tcflags" != "skip_sw" ]]; then
return 0;
fi
tp_record "objagg:*" "tc filter add dev $h2 ingress protocol ip \
pref 1 handle 101 flower $tcflags dst_ip 192.0.0.0/24 \
action drop"
tp_check_hits "objagg:objagg_obj_root_create" 1
check_err $? "eRP was not created"
tp_record "objagg:*" "tc filter add dev $h2 ingress protocol ip \
pref 2 handle 102 flower $tcflags dst_ip 192.0.2.2 \
action drop"
tp_check_hits "objagg:objagg_obj_root_create" 0
check_err $? "eRP was incorrectly created"
tp_check_hits "objagg:objagg_obj_parent_assign" 1
check_err $? "delta was not created"
$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
-t ip -q
tc_check_packets "dev $h2 ingress" 101 1
check_fail $? "Matched a wrong filter"
tc_check_packets "dev $h2 ingress" 102 1
check_err $? "Did not match on correct filter"
tp_record "objagg:*" "tc filter del dev $h2 ingress protocol ip \
pref 1 handle 101 flower"
tp_check_hits "objagg:objagg_obj_root_destroy" 0
check_err $? "eRP was incorrectly destroyed"
tp_check_hits "objagg:objagg_obj_parent_unassign" 0
check_err $? "delta was incorrectly destroyed"
$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
-t ip -q
tc_check_packets "dev $h2 ingress" 102 2
check_err $? "Did not match on correct filter after the first was removed"
tp_record "objagg:*" "tc filter del dev $h2 ingress protocol ip \
pref 2 handle 102 flower"
tp_check_hits "objagg:objagg_obj_parent_unassign" 1
check_err $? "delta was not destroyed"
tp_check_hits "objagg:objagg_obj_root_destroy" 1
check_err $? "eRP was not destroyed"
log_test "delta simple test ($tcflags)"
}
setup_prepare()
{
h1=${NETIFS[p1]}