bnxt_en: Fill HW RSS table from the RSS logical indirection table.

Now that we have the logical table, we can fill the HW RSS table
using the logical table's entries, converting them to the
HW-specific format.  Re-initialize the logical table to the standard
distribution if the number of RX rings changes during ring
reservation.
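
To illustrate the conversion (not part of this patch): on P5 chips,
each logical table entry is an RX ring index, and each entry expands
to a pair of HW ring IDs, the RX ring's firmware ring ID followed by
the ID of its completion ring.  A minimal standalone sketch of that
expansion; struct rx_ring, fw_ring_id and cp_ring_id here are
illustrative stand-ins, not the driver's real types:

	#include <stddef.h>
	#include <stdint.h>

	/* Stand-in for the per-ring IDs that the driver reads out of
	 * struct bnxt_rx_ring_info.
	 */
	struct rx_ring {
		uint16_t fw_ring_id;	/* HW ID of the RX ring */
		uint16_t cp_ring_id;	/* HW ID of its completion ring */
	};

	/* Expand a logical indirection table (RX ring indices) into
	 * the P5 HW layout: one (RX ring ID, completion ring ID) pair
	 * per entry.  The real driver additionally converts each ID
	 * with cpu_to_le16() before handing the table to the firmware.
	 */
	static void fill_hw_tbl_p5(uint16_t *hw_tbl,
				   const uint16_t *logical_tbl,
				   size_t tbl_size,
				   const struct rx_ring *rings)
	{
		size_t i;

		for (i = 0; i < tbl_size; i++) {
			const struct rx_ring *rxr = &rings[logical_tbl[i]];

			*hw_tbl++ = rxr->fw_ring_id;
			*hw_tbl++ = rxr->cp_ring_id;
		}
	}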

v4: Use bnxt_get_rxfh_indir_size() to get the RSS table size.

v2: Use ALIGN() to round up the RSS table size.
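
For reference, ALIGN() rounds a value up to the next multiple of a
power-of-two boundary, which keeps the P5 table size a whole
multiple of the 64 ring-pair entries each RSS context holds (see the
j < 64 loop removed below).  A small standalone sketch of the same
arithmetic; ALIGN_UP and the example sizes are illustrative:

	#include <stdio.h>

	/* Same arithmetic as the kernel's ALIGN(): round x up to the
	 * next multiple of a, where a is a power of two.
	 */
	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		printf("%u\n", ALIGN_UP(7u, 64u));	/* 64 */
		printf("%u\n", ALIGN_UP(64u, 64u));	/* 64 */
		printf("%u\n", ALIGN_UP(65u, 64u));	/* 128 */
		return 0;
	}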

Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

@@ -4878,9 +4878,51 @@ int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
 	return 1;
 }
 
+static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
+	u16 i, j;
+
+	/* Fill the RSS indirection table with ring group ids */
+	for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
+		if (!no_rss)
+			j = bp->rss_indir_tbl[i];
+		vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
+	}
+}
+
+static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
+				      struct bnxt_vnic_info *vnic)
+{
+	__le16 *ring_tbl = vnic->rss_table;
+	struct bnxt_rx_ring_info *rxr;
+	u16 tbl_size, i;
+
+	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+
+	for (i = 0; i < tbl_size; i++) {
+		u16 ring_id, j;
+
+		j = bp->rss_indir_tbl[i];
+		rxr = &bp->rx_ring[j];
+
+		ring_id = rxr->rx_ring_struct.fw_ring_id;
+		*ring_tbl++ = cpu_to_le16(ring_id);
+		ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+		*ring_tbl++ = cpu_to_le16(ring_id);
+	}
+}
+
+static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	if (bp->flags & BNXT_FLAG_CHIP_P5)
+		__bnxt_fill_hw_rss_tbl_p5(bp, vnic);
+	else
+		__bnxt_fill_hw_rss_tbl(bp, vnic);
+}
+
 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
 {
-	u32 i, j, max_rings;
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
 	struct hwrm_vnic_rss_cfg_input req = {0};
 
@@ -4890,24 +4932,9 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
 	if (set_rss) {
+		bnxt_fill_hw_rss_tbl(bp, vnic);
 		req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
 		req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
-		if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
-			if (BNXT_CHIP_TYPE_NITRO_A0(bp))
-				max_rings = bp->rx_nr_rings - 1;
-			else
-				max_rings = bp->rx_nr_rings;
-		} else {
-			max_rings = 1;
-		}
-
-		/* Fill the RSS indirection table with ring group ids */
-		for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
-			if (j == max_rings)
-				j = 0;
-			vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
-		}
-
 		req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
 		req.hash_key_tbl_addr =
 			cpu_to_le64(vnic->rss_hash_key_dma_addr);
@@ -4919,9 +4946,9 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
 {
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
-	u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
-	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
 	struct hwrm_vnic_rss_cfg_input req = {0};
+	dma_addr_t ring_tbl_map;
+	u32 i, nr_ctxs;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
 	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
@@ -4929,31 +4956,18 @@ static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
 		hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 		return 0;
 	}
+	bnxt_fill_hw_rss_tbl(bp, vnic);
 	req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
 	req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
-	req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
 	req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
+	ring_tbl_map = vnic->rss_table_dma_addr;
 	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
-	for (i = 0, k = 0; i < nr_ctxs; i++) {
-		__le16 *ring_tbl = vnic->rss_table;
+	for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
 		int rc;
 
+		req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
 		req.ring_table_pair_index = i;
 		req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
-		for (j = 0; j < 64; j++) {
-			u16 ring_id;
-
-			ring_id = rxr->rx_ring_struct.fw_ring_id;
-			*ring_tbl++ = cpu_to_le16(ring_id);
-			ring_id = bnxt_cp_ring_for_rx(bp, rxr);
-			*ring_tbl++ = cpu_to_le16(ring_id);
-			rxr++;
-			k++;
-			if (k == max_rings) {
-				k = 0;
-				rxr = &bp->rx_ring[0];
-			}
-		}
 		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 		if (rc)
 			return rc;
@@ -8248,6 +8262,8 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
 		rc = bnxt_init_int_mode(bp);
 		bnxt_ulp_irq_restart(bp, rc);
 	}
+	bnxt_set_dflt_rss_indir_tbl(bp);
+
 	if (rc) {
 		netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
 		return rc;