mlxsw: Add support for more than 256 ports in SBSR register

Add a 'port_page' field to the SBSR register in order to be able to
query the occupancy of more than 256 ports. The field determines the
range of the ports specified in the 'ingress_port_mask' and
'egress_port_mask' bit masks: from '256 * port_page' to
'256 * port_page + 255'.

For each local port, the appropriate port page is used. A query is never
performed for a port range that spans multiple port pages.

Signed-off-by: Amit Cohen <amcohen@nvidia.com>
Reviewed-by: Petr Machata <petrm@nvidia.com>
Signed-off-by: Ido Schimmel <idosch@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
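
As a concrete illustration of the mapping described above, a minimal
standalone sketch of the page arithmetic (only the 256-ports-per-page
constant is taken from the patch; the program itself is illustrative,
not driver code):

#include <stdio.h>

/* Mirrors MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE from the patch below. */
#define NUM_PORTS_IN_PAGE 256

int main(void)
{
	unsigned int local_port = 300;	/* example local port */

	/* Page covering this port, and its bit index within that page's
	 * ingress/egress port masks: mask[x] refers to port
	 * 256 * port_page + x.
	 */
	unsigned int port_page = local_port / NUM_PORTS_IN_PAGE;
	unsigned int mask_bit = local_port % NUM_PORTS_IN_PAGE;

	printf("port %u -> page %u, mask bit %u (page covers %u-%u)\n",
	       local_port, port_page, mask_bit,
	       port_page * NUM_PORTS_IN_PAGE,
	       port_page * NUM_PORTS_IN_PAGE + NUM_PORTS_IN_PAGE - 1);
	return 0;
}

For example, local port 300 lands in port page 1 as bit 44 of that
page's masks, which cover ports 256-511.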
 2 files changed, 31 insertions(+), 5 deletions(-)

--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -12255,6 +12255,16 @@ MLXSW_REG_DEFINE(sbsr, MLXSW_REG_SBSR_ID, MLXSW_REG_SBSR_LEN);
  */
 MLXSW_ITEM32(reg, sbsr, clr, 0x00, 31, 1);
 
+#define MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE 256
+
+/* reg_sbsr_port_page
+ * Determines the range of the ports specified in the 'ingress_port_mask'
+ * and 'egress_port_mask' bit masks.
+ * {ingress,egress}_port_mask[x] is (256 * port_page) + x
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbsr, port_page, 0x04, 0, 4);
+
 /* reg_sbsr_ingress_port_mask
  * Bit vector for all ingress network ports.
  * Indicates which of the ports (for which the relevant bit is set)
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -1582,13 +1582,12 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
 			     unsigned int sb_index)
 {
 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+	u16 local_port, local_port_1, last_local_port;
 	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
+	u8 masked_count, current_page = 0;
 	unsigned long cb_priv = 0;
 	LIST_HEAD(bulk_list);
 	char *sbsr_pl;
-	u8 masked_count;
-	u16 local_port_1;
-	u16 local_port;
 	int i;
 	int err;
 	int err2;
@@ -1602,6 +1601,10 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
 	local_port_1 = local_port;
 	masked_count = 0;
 	mlxsw_reg_sbsr_pack(sbsr_pl, false);
+	mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
+	last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
+			  MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
+
 	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
 		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
 	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
@@ -1609,6 +1612,10 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
 	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
 		if (!mlxsw_sp->ports[local_port])
 			continue;
+		if (local_port > last_local_port) {
+			current_page++;
+			goto do_query;
+		}
 		if (local_port != MLXSW_PORT_CPU_PORT) {
 			/* Ingress quotas are not supported for the CPU port */
 			mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
@@ -1651,10 +1658,11 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
 			     unsigned int sb_index)
 {
 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+	u16 local_port, last_local_port;
 	LIST_HEAD(bulk_list);
-	char *sbsr_pl;
 	unsigned int masked_count;
-	u16 local_port;
+	u8 current_page = 0;
+	char *sbsr_pl;
 	int i;
 	int err;
 	int err2;
@@ -1667,6 +1675,10 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
 next_batch:
 	masked_count = 0;
 	mlxsw_reg_sbsr_pack(sbsr_pl, true);
+	mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
+	last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
+			  MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
+
 	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
 		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
 	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
@@ -1674,6 +1686,10 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
 	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
 		if (!mlxsw_sp->ports[local_port])
 			continue;
+		if (local_port > last_local_port) {
+			current_page++;
+			goto do_query;
+		}
 		if (local_port != MLXSW_PORT_CPU_PORT) {
 			/* Ingress quotas are not supported for the CPU port */
 			mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
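
The hunks above end mid-function, so as a reading aid, here is a
simplified, self-contained sketch of the paging flow both functions
implement (port_exists() and query_page() are hypothetical stand-ins
for the port lookup and the SBSR pack/bulk-query sequence; mask
setting, the masked-count batching, and error handling are elided):
ports are walked in ascending order, and once the next existing port
falls outside the 256-port window of 'current_page', the accumulated
query is flushed and a new page begins.

#include <stdbool.h>
#include <stdio.h>

#define NUM_PORTS_IN_PAGE 256	/* mirrors MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE */
#define MAX_PORTS 512		/* example: two pages' worth of ports */

/* Hypothetical stand-in for the mlxsw_sp->ports[local_port] check. */
static bool port_exists(unsigned int port)
{
	return port % 3 != 0;	/* arbitrary example population */
}

/* Hypothetical stand-in for packing SBSR and issuing the bulk query. */
static void query_page(unsigned int page)
{
	printf("flush SBSR query for port page %u\n", page);
}

int main(void)
{
	unsigned int current_page = 0;
	unsigned int last_local_port;
	unsigned int local_port = 1;

next_batch:
	/* Ports beyond [256 * page, 256 * page + 255] must wait for the
	 * next batch, exactly as in the hunks above.
	 */
	last_local_port = current_page * NUM_PORTS_IN_PAGE +
			  NUM_PORTS_IN_PAGE - 1;
	for (; local_port < MAX_PORTS; local_port++) {
		if (!port_exists(local_port))
			continue;
		if (local_port > last_local_port) {
			current_page++;
			goto do_query;
		}
		/* ...set the ingress/egress mask bit for local_port... */
	}
do_query:
	/* Flush the page the masks were built for (not current_page,
	 * which may already have been advanced).
	 */
	query_page(last_local_port / NUM_PORTS_IN_PAGE);
	if (local_port < MAX_PORTS)
		goto next_batch;
	return 0;
}

Because the walk is strictly ascending and the flush happens before the
page counter's new window is computed, a single query never spans two
port pages, matching the commit message's guarantee.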