Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
ice: Support 5 layer Tx scheduler topology

Mateusz Polchlopek says:

For performance reasons there is a need to support a selectable Tx
scheduler topology. Currently the firmware supports the default 9-layer
topology as well as a 5-layer topology. This patch series enables
switching from the default to the 5-layer topology, if the user decides
to opt in.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  ice: Document tx_scheduling_layers parameter
  ice: Add tx_scheduling_layers devlink param
  ice: Enable switching default Tx scheduler topology
  ice: Adjust the VSI/Aggregator layers
  ice: Support 5 layer topology
  devlink: extend devlink_param *set pointer
====================

Link: https://lore.kernel.org/r/20240422203913.225151-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2024-04-24 20:05:31 -07:00
commit 21d9f921f8
35 changed files with 664 additions and 85 deletions


@ -21,6 +21,53 @@ Parameters
* - ``enable_iwarp``
- runtime
- mutually exclusive with ``enable_roce``
* - ``tx_scheduling_layers``
- permanent
- The ice hardware uses hierarchical scheduling for Tx with a fixed
number of layers in the scheduling tree. Each of them is a decision
point. The root node represents a port, while all the leaves represent
the queues. This way of configuring the Tx scheduler allows features
like DCB or devlink-rate (documented below) to configure how much
bandwidth is given to any given queue or group of queues, enabling
fine-grained control, because scheduling parameters can be configured
at any layer of the tree.
The default 9-layer tree topology was deemed best for most workloads,
as it gives an optimal ratio of performance to configurability. However,
in some specific cases, this 9-layer topology might not be desired.
One example would be sending traffic to a number of queues that is not
a multiple of 8. Because the maximum radix is limited to 8 in the
9-layer topology, the 9th queue has a different parent than the rest,
and it is given more bandwidth credits. This causes a problem when the
system is sending traffic to 9 queues:
| tx_queue_0_packets: 24163396
| tx_queue_1_packets: 24164623
| tx_queue_2_packets: 24163188
| tx_queue_3_packets: 24163701
| tx_queue_4_packets: 24163683
| tx_queue_5_packets: 24164668
| tx_queue_6_packets: 23327200
| tx_queue_7_packets: 24163853
| tx_queue_8_packets: 91101417 < Too much traffic is sent from the 9th queue
To address this need, you can switch to a 5-layer topology, which
changes the maximum topology radix to 512. With this enhancement,
performance is equal across queues, as they can all be assigned to the
same parent in the tree. The obvious drawback of this solution is a
lower configuration depth of the tree.
Use the ``tx_scheduling_layers`` parameter with the devlink command
to change the transmit scheduler topology. To use the 5-layer topology,
use a value of 5. For example:
$ devlink dev param set pci/0000:16:00.0 name tx_scheduling_layers
value 5 cmode permanent
Use a value of 9 to set it back to the default.
You must power-cycle the PCI slot for the selected topology to take effect.
To verify that the value has been set:
$ devlink dev param show pci/0000:16:00.0 name tx_scheduling_layers
Info versions
=============


@ -4,7 +4,8 @@
#include "otx2_cpt_devlink.h"
static int otx2_cpt_dl_egrp_create(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
@ -13,7 +14,8 @@ static int otx2_cpt_dl_egrp_create(struct devlink *dl, u32 id,
}
static int otx2_cpt_dl_egrp_delete(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
@ -45,7 +47,8 @@ static int otx2_cpt_dl_t106_mode_get(struct devlink *dl, u32 id,
}
static int otx2_cpt_dl_t106_mode_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;


@ -256,7 +256,8 @@ int pdsc_dl_flash_update(struct devlink *dl,
int pdsc_dl_enable_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx);
int pdsc_dl_enable_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx);
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack);
int pdsc_dl_enable_validate(struct devlink *dl, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack);


@ -37,7 +37,8 @@ int pdsc_dl_enable_get(struct devlink *dl, u32 id,
}
int pdsc_dl_enable_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct pdsc *pdsc = devlink_priv(dl);
struct pdsc_viftype *vt_entry;


@ -1096,7 +1096,8 @@ static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
}
static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
struct hwrm_nvm_set_variable_input *req;
@ -1145,7 +1146,8 @@ static int bnxt_remote_dev_reset_get(struct devlink *dl, u32 id,
}
static int bnxt_remote_dev_reset_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
int rc;


@ -523,6 +523,156 @@ ice_devlink_reload_empr_finish(struct ice_pf *pf,
return 0;
}
/**
* ice_get_tx_topo_user_sel - Read user's choice from flash
* @pf: pointer to pf structure
* @layers: value read from flash will be saved here
*
* Reads the user's preference for the Tx Scheduler Topology Tree from the PFA TLV.
*
* Return: zero when read was successful, negative values otherwise.
*/
static int ice_get_tx_topo_user_sel(struct ice_pf *pf, uint8_t *layers)
{
struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {};
struct ice_hw *hw = &pf->hw;
int err;
err = ice_acquire_nvm(hw, ICE_RES_READ);
if (err)
return err;
err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0,
sizeof(usr_sel), &usr_sel, true, true, NULL);
if (err)
goto exit_release_res;
if (usr_sel.data & ICE_AQC_NVM_TX_TOPO_USER_SEL)
*layers = ICE_SCHED_5_LAYERS;
else
*layers = ICE_SCHED_9_LAYERS;
exit_release_res:
ice_release_nvm(hw);
return err;
}
/**
* ice_update_tx_topo_user_sel - Save user's preference in flash
* @pf: pointer to pf structure
* @layers: value to be saved in flash
*
* Variable "layers" defines user's preference about number of layers in Tx
* Scheduler Topology Tree. This choice should be stored in PFA TLV field
* and be picked up by driver, next time during init.
*
* Return: zero when save was successful, negative values otherwise.
*/
static int ice_update_tx_topo_user_sel(struct ice_pf *pf, int layers)
{
struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {};
struct ice_hw *hw = &pf->hw;
int err;
err = ice_acquire_nvm(hw, ICE_RES_WRITE);
if (err)
return err;
err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0,
sizeof(usr_sel), &usr_sel, true, true, NULL);
if (err)
goto exit_release_res;
if (layers == ICE_SCHED_5_LAYERS)
usr_sel.data |= ICE_AQC_NVM_TX_TOPO_USER_SEL;
else
usr_sel.data &= ~ICE_AQC_NVM_TX_TOPO_USER_SEL;
err = ice_write_one_nvm_block(pf, ICE_AQC_NVM_TX_TOPO_MOD_ID, 2,
sizeof(usr_sel.data), &usr_sel.data,
true, NULL, NULL);
exit_release_res:
ice_release_nvm(hw);
return err;
}
/**
* ice_devlink_tx_sched_layers_get - Get tx_scheduling_layers parameter
* @devlink: pointer to the devlink instance
* @id: the parameter ID to get
* @ctx: context to store the parameter value
*
* Return: zero on success and negative value on failure.
*/
static int ice_devlink_tx_sched_layers_get(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct ice_pf *pf = devlink_priv(devlink);
int err;
err = ice_get_tx_topo_user_sel(pf, &ctx->val.vu8);
if (err)
return err;
return 0;
}
/**
* ice_devlink_tx_sched_layers_set - Set tx_scheduling_layers parameter
* @devlink: pointer to the devlink instance
* @id: the parameter ID to set
* @ctx: context to get the parameter value
* @extack: netlink extended ACK structure
*
* Return: zero on success and negative value on failure.
*/
static int ice_devlink_tx_sched_layers_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
int err;
err = ice_update_tx_topo_user_sel(pf, ctx->val.vu8);
if (err)
return err;
NL_SET_ERR_MSG_MOD(extack,
"Tx scheduling layers have been changed on this device. You must do the PCI slot powercycle for the change to take effect.");
return 0;
}
/**
* ice_devlink_tx_sched_layers_validate - Validate passed tx_scheduling_layers
* parameter value
* @devlink: unused pointer to devlink instance
* @id: the parameter ID to validate
* @val: value to validate
* @extack: netlink extended ACK structure
*
* Supported values are:
* - 5 - five layers Tx Scheduler Topology Tree
* - 9 - nine layers Tx Scheduler Topology Tree
*
* Return: zero when passed parameter value is supported. Negative value on
* error.
*/
static int ice_devlink_tx_sched_layers_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
{
if (val.vu8 != ICE_SCHED_5_LAYERS && val.vu8 != ICE_SCHED_9_LAYERS) {
NL_SET_ERR_MSG_MOD(extack,
"Wrong number of tx scheduler layers provided.");
return -EINVAL;
}
return 0;
}
/**
* ice_tear_down_devlink_rate_tree - removes devlink-rate exported tree
* @pf: pf struct
@ -1144,9 +1294,9 @@ ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
return 0;
}
static int
ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
static int ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
bool roce_ena = ctx->val.vbool;
@ -1195,9 +1345,9 @@ ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
return 0;
}
static int
ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
static int ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
bool iw_ena = ctx->val.vbool;
@ -1235,6 +1385,11 @@ ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
return 0;
}
enum ice_param_id {
ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
};
static const struct devlink_param ice_devlink_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
ice_devlink_enable_roce_get,
@ -1244,7 +1399,13 @@ static const struct devlink_param ice_devlink_params[] = {
ice_devlink_enable_iw_get,
ice_devlink_enable_iw_set,
ice_devlink_enable_iw_validate),
DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
"tx_scheduling_layers",
DEVLINK_PARAM_TYPE_U8,
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
ice_devlink_tx_sched_layers_get,
ice_devlink_tx_sched_layers_set,
ice_devlink_tx_sched_layers_validate),
};
static void ice_devlink_free(void *devlink_ptr)
@ -1304,9 +1465,16 @@ void ice_devlink_unregister(struct ice_pf *pf)
int ice_devlink_register_params(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
struct ice_hw *hw = &pf->hw;
size_t params_size;
params_size = ARRAY_SIZE(ice_devlink_params);
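/* tx_scheduling_layers is the last entry in ice_devlink_params, so
 * trimming the count by one drops exactly that parameter when the FW
 * does not advertise the capability.
 */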
if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
params_size--;
return devl_params_register(devlink, ice_devlink_params,
ARRAY_SIZE(ice_devlink_params));
params_size);
}
void ice_devlink_unregister_params(struct ice_pf *pf)


@ -121,6 +121,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
#define ICE_AQC_CAPS_NVM_MGMT 0x0080
#define ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085
#define ICE_AQC_CAPS_FW_LAG_SUPPORT 0x0092
#define ICE_AQC_BIT_ROCEV2_LAG 0x01
#define ICE_AQC_BIT_SRIOV_LAG 0x02
@ -810,6 +811,23 @@ struct ice_aqc_get_topo {
__le32 addr_low;
};
/* Get/Set Tx Topology (indirect 0x0418/0x0417) */
struct ice_aqc_get_set_tx_topo {
u8 set_flags;
#define ICE_AQC_TX_TOPO_FLAGS_CORRER BIT(0)
#define ICE_AQC_TX_TOPO_FLAGS_SRC_RAM BIT(1)
#define ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW BIT(4)
#define ICE_AQC_TX_TOPO_FLAGS_ISSUED BIT(5)
u8 get_flags;
#define ICE_AQC_TX_TOPO_GET_RAM 2
__le16 reserved1;
__le32 reserved2;
__le32 addr_high;
__le32 addr_low;
};
/* Update TSE (indirect 0x0403)
* Get TSE (indirect 0x0404)
* Add TSE (indirect 0x0401)
@ -1666,6 +1684,15 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_START_POINT 0
#define ICE_AQC_NVM_TX_TOPO_MOD_ID 0x14B
struct ice_aqc_nvm_tx_topo_user_sel {
__le16 length;
u8 data;
#define ICE_AQC_NVM_TX_TOPO_USER_SEL BIT(4)
u8 reserved;
};
/* NVM Checksum Command (direct, 0x0706) */
struct ice_aqc_nvm_checksum {
u8 flags;
@ -2538,6 +2565,7 @@ struct ice_aq_desc {
struct ice_aqc_get_link_topo get_link_topo;
struct ice_aqc_i2c read_write_i2c;
struct ice_aqc_read_i2c_resp read_i2c_resp;
struct ice_aqc_get_set_tx_topo get_set_tx_topo;
} params;
};
@ -2644,6 +2672,10 @@ enum ice_adminq_opc {
ice_aqc_opc_query_sched_res = 0x0412,
ice_aqc_opc_remove_rl_profiles = 0x0415,
/* tx topology commands */
ice_aqc_opc_set_tx_topo = 0x0417,
ice_aqc_opc_get_tx_topo = 0x0418,
/* PHY commands */
ice_aqc_opc_get_phy_caps = 0x0600,
ice_aqc_opc_set_phy_cfg = 0x0601,


@ -1617,6 +1617,8 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
case ice_aqc_opc_set_port_params:
case ice_aqc_opc_get_vlan_mode_parameters:
case ice_aqc_opc_set_vlan_mode_parameters:
case ice_aqc_opc_set_tx_topo:
case ice_aqc_opc_get_tx_topo:
case ice_aqc_opc_add_recipe:
case ice_aqc_opc_recipe_to_profile:
case ice_aqc_opc_get_recipe:
@ -2173,6 +2175,9 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n",
prefix, caps->sriov_lag);
break;
case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
caps->tx_sched_topo_comp_mode_en = (number == 1);
break;
default:
/* Not one of the recognized common capabilities */
found = false;


@ -4,6 +4,7 @@
#include "ice_common.h"
#include "ice.h"
#include "ice_ddp.h"
#include "ice_sched.h"
/* For supporting double VLAN mode, it is necessary to enable or disable certain
* boost tcam entries. The metadata labels names that match the following
@ -2272,3 +2273,211 @@ enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf,
return state;
}
/**
* ice_get_set_tx_topo - get or set Tx topology
* @hw: pointer to the HW struct
* @buf: pointer to Tx topology buffer
* @buf_size: buffer size
* @cd: pointer to command details structure or NULL
* @flags: pointer to descriptor flags
* @set: 0-get, 1-set topology
*
* The function will get or set Tx topology
*
* Return: zero when set was successful, negative values otherwise.
*/
static int
ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
struct ice_sq_cd *cd, u8 *flags, bool set)
{
struct ice_aqc_get_set_tx_topo *cmd;
struct ice_aq_desc desc;
int status;
cmd = &desc.params.get_set_tx_topo;
if (set) {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo);
cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED;
/* requested to update a new topology, not a default topology */
if (buf)
cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
if (ice_is_e825c(hw))
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
} else {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
}
if (!ice_is_e825c(hw))
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (status)
return status;
/* read the return flag values (first byte) for get operation */
if (!set && flags)
*flags = desc.params.get_set_tx_topo.set_flags;
return 0;
}
/**
* ice_cfg_tx_topo - Initialize new Tx topology if available
* @hw: pointer to the HW struct
* @buf: pointer to Tx topology buffer
* @len: buffer size
*
* The function will apply the new Tx topology from the package buffer
* if available.
*
* Return: zero when update was successful, negative values otherwise.
*/
int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
{
u8 *current_topo, *new_topo = NULL;
struct ice_run_time_cfg_seg *seg;
struct ice_buf_hdr *section;
struct ice_pkg_hdr *pkg_hdr;
enum ice_ddp_state state;
u16 offset, size = 0;
u32 reg = 0;
int status;
u8 flags;
if (!buf || !len)
return -EINVAL;
/* Does FW support new Tx topology mode ? */
if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) {
ice_debug(hw, ICE_DBG_INIT, "FW doesn't support compatibility mode\n");
return -EOPNOTSUPP;
}
current_topo = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!current_topo)
return -ENOMEM;
/* Get the current Tx topology */
status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL,
&flags, false);
kfree(current_topo);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n");
return status;
}
/* Is default topology already applied ? */
if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) {
ice_debug(hw, ICE_DBG_INIT, "Default topology already applied\n");
return -EEXIST;
}
/* Is new topology already applied ? */
if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) {
ice_debug(hw, ICE_DBG_INIT, "New topology already applied\n");
return -EEXIST;
}
/* Setting topology already issued? */
if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) {
ice_debug(hw, ICE_DBG_INIT, "Update Tx topology was done by another PF\n");
/* Add a small delay before exiting */
msleep(2000);
return -EEXIST;
}
/* Change the topology from new to default (5 to 9) */
if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) {
ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9 layers\n");
goto update_topo;
}
pkg_hdr = (struct ice_pkg_hdr *)buf;
state = ice_verify_pkg(pkg_hdr, len);
if (state) {
ice_debug(hw, ICE_DBG_INIT, "Failed to verify pkg (err: %d)\n",
state);
return -EIO;
}
/* Find runtime configuration segment */
seg = (struct ice_run_time_cfg_seg *)
ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr);
if (!seg) {
ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n");
return -EIO;
}
if (le32_to_cpu(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) {
ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment count(%d) is wrong\n",
seg->buf_table.buf_count);
return -EIO;
}
section = ice_pkg_val_buf(seg->buf_table.buf_array);
if (!section || le32_to_cpu(section->section_entry[0].type) !=
ICE_SID_TX_5_LAYER_TOPO) {
ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type is wrong\n");
return -EIO;
}
size = le16_to_cpu(section->section_entry[0].size);
offset = le16_to_cpu(section->section_entry[0].offset);
if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) {
ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size is wrong\n");
return -EIO;
}
/* Make sure the section fits in the buffer */
if (offset + size > ICE_PKG_BUF_SIZE) {
ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer > 4K\n");
return -EIO;
}
/* Get the new topology buffer */
new_topo = ((u8 *)section) + offset;
update_topo:
/* Acquire the global lock to make sure the set-topology command is
 * issued by only one PF at a time.
 */
status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
ICE_GLOBAL_CFG_LOCK_TIMEOUT);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
return status;
}
/* Check if reset was triggered already. */
reg = rd32(hw, GLGEN_RSTAT);
if (reg & GLGEN_RSTAT_DEVSTATE_M) {
/* Reset is in progress, re-init the HW again */
ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. Layer topology might be applied already\n");
ice_check_reset(hw);
return 0;
}
/* Set new topology */
status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed setting Tx topology\n");
return status;
}
/* New topology is updated, delay 1 second before issuing the CORER */
msleep(1000);
ice_reset(hw, ICE_RESET_CORER);
/* CORER will clear the global lock, so no explicit call
* required for release.
*/
return 0;
}
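For orientation, here is a hedged sketch (not part of this commit; the function name is illustrative) of how a caller is expected to consume ice_cfg_tx_topo()'s return values. The real caller, ice_init_tx_topology(), appears in the ice_main.c hunk further below.

/* Hedged sketch: consuming ice_cfg_tx_topo() return values. */
static int example_apply_tx_topo(struct ice_hw *hw, u8 *pkg, u32 len)
{
	int err = ice_cfg_tx_topo(hw, pkg, len);

	if (!err) {
		/* Topology changed and a CORER was issued, so the HW
		 * must be torn down and re-initialized.
		 */
		ice_deinit_hw(hw);
		return ice_init_hw(hw);
	}
	if (err == -EIO) {
		/* The DDP package lacks the 5-layer topology section. */
		return 0;
	}
	/* -EEXIST (requested topology already applied) and other errors
	 * are treated as non-fatal; continue with the topology that is
	 * currently programmed.
	 */
	return 0;
}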


@ -454,4 +454,6 @@ u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
u32 sect_type);
int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len);
#endif


@ -286,10 +286,9 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
*
* Returns: zero on success, or a negative error code on failure.
*/
static int
ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
u16 block_size, u8 *block, bool last_cmd,
u8 *reset_level, struct netlink_ext_ack *extack)
int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
u16 block_size, u8 *block, bool last_cmd,
u8 *reset_level, struct netlink_ext_ack *extack)
{
u16 completion_module, completion_retval;
struct device *dev = ice_pf_to_dev(pf);


@ -9,5 +9,8 @@ int ice_devlink_flash_update(struct devlink *devlink,
struct netlink_ext_ack *extack);
int ice_get_pending_updates(struct ice_pf *pf, u8 *pending,
struct netlink_ext_ack *extack);
int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
u16 block_size, u8 *block, bool last_cmd,
u8 *reset_level, struct netlink_ext_ack *extack);
#endif


@ -4453,11 +4453,13 @@ static char *ice_get_opt_fw_name(struct ice_pf *pf)
/**
* ice_request_fw - Device initialization routine
* @pf: pointer to the PF instance
* @firmware: double pointer to firmware struct
*
* Return: zero when successful, negative values otherwise.
*/
static void ice_request_fw(struct ice_pf *pf)
static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware)
{
char *opt_fw_filename = ice_get_opt_fw_name(pf);
const struct firmware *firmware = NULL;
struct device *dev = ice_pf_to_dev(pf);
int err = 0;
@ -4466,29 +4468,95 @@ static void ice_request_fw(struct ice_pf *pf)
* and warning messages for other errors.
*/
if (opt_fw_filename) {
err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
if (err) {
kfree(opt_fw_filename);
goto dflt_pkg_load;
}
/* request for firmware was successful. Download to device */
ice_load_pkg(firmware, pf);
err = firmware_request_nowarn(firmware, opt_fw_filename, dev);
kfree(opt_fw_filename);
release_firmware(firmware);
return;
if (!err)
return err;
}
dflt_pkg_load:
err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
if (err) {
err = request_firmware(firmware, ICE_DDP_PKG_FILE, dev);
if (err)
dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
return;
return err;
}
/**
* ice_init_tx_topology - performs Tx topology initialization
* @hw: pointer to the hardware structure
* @firmware: pointer to firmware structure
*
* Return: zero when init was successful, negative values otherwise.
*/
static int
ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
{
u8 num_tx_sched_layers = hw->num_tx_sched_layers;
struct ice_pf *pf = hw->back;
struct device *dev;
u8 *buf_copy;
int err;
dev = ice_pf_to_dev(pf);
/* ice_cfg_tx_topo buf argument is not a constant,
* so we have to make a copy
*/
buf_copy = kmemdup(firmware->data, firmware->size, GFP_KERNEL);
if (!buf_copy)
return -ENOMEM;
err = ice_cfg_tx_topo(hw, buf_copy, firmware->size);
kfree(buf_copy);
if (!err) {
if (hw->num_tx_sched_layers > num_tx_sched_layers)
dev_info(dev, "Tx scheduling layers switching feature disabled\n");
else
dev_info(dev, "Tx scheduling layers switching feature enabled\n");
/* if there was a change in topology ice_cfg_tx_topo triggered
* a CORER and we need to re-init hw
*/
ice_deinit_hw(hw);
err = ice_init_hw(hw);
return err;
} else if (err == -EIO) {
dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n");
}
/* Other errors are not fatal; continue with the currently programmed topology */
return 0;
}
/**
* ice_init_ddp_config - DDP related configuration
* @hw: pointer to the hardware structure
* @pf: pointer to pf structure
*
* This function loads the DDP file from disk, then initializes the Tx
* topology. At the end, the DDP package is loaded on the card.
*
* Return: zero when init was successful, negative values otherwise.
*/
static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
const struct firmware *firmware = NULL;
int err;
err = ice_request_fw(pf, &firmware);
if (err) {
dev_err(dev, "Fail during requesting FW: %d\n", err);
return err;
}
err = ice_init_tx_topology(hw, firmware);
if (err) {
dev_err(dev, "Fail during initialization of Tx topology: %d\n",
err);
release_firmware(firmware);
return err;
}
/* Download firmware to device */
ice_load_pkg(firmware, pf);
release_firmware(firmware);
return 0;
}
/**
@ -4661,9 +4729,11 @@ int ice_init_dev(struct ice_pf *pf)
ice_init_feature_support(pf);
ice_request_fw(pf);
err = ice_init_ddp_config(hw, pf);
if (err)
return err;
/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
/* if ice_init_ddp_config fails, ICE_FLAG_ADV_FEATURES bit won't be
* set in pf->state, which will cause ice_is_safe_mode to return
* true
*/


@ -18,10 +18,9 @@
*
* Read the NVM using the admin queue commands (0x0701)
*/
static int
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
void *data, bool last_command, bool read_shadow_ram,
struct ice_sq_cd *cd)
int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command,
bool read_shadow_ram, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
struct ice_aqc_nvm *cmd;


@ -14,6 +14,9 @@ struct ice_orom_civd_info {
int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_nvm(struct ice_hw *hw);
int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command,
bool read_shadow_ram, struct ice_sq_cd *cd);
int
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram);


@ -1128,12 +1128,11 @@ u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
* 5 or less sw_entry_point_layer
*/
/* calculate the VSI layer based on number of layers. */
if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
if (layer > hw->sw_entry_point_layer)
return layer;
}
if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
return hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
else if (hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS)
/* qgroup and VSI layers are same */
return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
return hw->sw_entry_point_layer;
}
@ -1150,13 +1149,10 @@ u8 ice_sched_get_agg_layer(struct ice_hw *hw)
* 7 or less sw_entry_point_layer
*/
/* calculate the aggregator layer based on number of layers. */
if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) {
u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
if (layer > hw->sw_entry_point_layer)
return layer;
}
return hw->sw_entry_point_layer;
if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
return hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
else
return hw->sw_entry_point_layer;
}
/**
@ -1510,10 +1506,11 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
{
struct ice_sched_node *vsi_node, *qgrp_node;
struct ice_vsi_ctx *vsi_ctx;
u8 qgrp_layer, vsi_layer;
u16 max_children;
u8 qgrp_layer;
qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
vsi_layer = ice_sched_get_vsi_layer(pi->hw);
max_children = pi->hw->max_children[qgrp_layer];
vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
@ -1524,6 +1521,12 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
if (!vsi_node)
return NULL;
/* If the queue group and VSI layer are same then queues
* are all attached directly to VSI
*/
if (qgrp_layer == vsi_layer)
return vsi_node;
/* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
while (qgrp_node) {
@ -3199,7 +3202,7 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
u8 profile_type;
int status;
if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
if (!pi || layer_num >= pi->hw->num_tx_sched_layers)
return NULL;
switch (rl_type) {
case ICE_MIN_BW:
@ -3215,8 +3218,6 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
return NULL;
}
if (!pi)
return NULL;
hw = pi->hw;
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
list_entry)
@ -3446,7 +3447,7 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
struct ice_aqc_rl_profile_info *rl_prof_elem;
int status = 0;
if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
if (layer_num >= pi->hw->num_tx_sched_layers)
return -EINVAL;
/* Check the existing list for RL profile */
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],


@ -6,6 +6,17 @@
#include "ice_common.h"
/**
* DOC: ice_sched.h
*
* This header file stores everything that is needed for the broadly
* understood scheduler. It contains defines related to layers, structures
* related to aggregators, function declarations, and more.
*/
#define ICE_SCHED_5_LAYERS 5
#define ICE_SCHED_9_LAYERS 9
#define SCHED_NODE_NAME_MAX_LEN 32
#define ICE_QGRP_LAYER_OFFSET 2


@ -296,6 +296,7 @@ struct ice_hw_common_caps {
bool pcie_reset_avoidance;
/* Post update reset restriction */
bool reset_restrict_support;
bool tx_sched_topo_comp_mode_en;
};
/* IEEE 1588 TIME_SYNC specific info */


@ -1202,7 +1202,8 @@ static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id,
}
static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@ -1256,7 +1257,8 @@ static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
}
static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@ -1310,7 +1312,8 @@ static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32
}
static int rvu_af_dl_npc_mcam_high_zone_percent_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@ -1367,7 +1370,8 @@ static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id,
}
static int rvu_af_dl_nix_maxlf_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;


@ -32,7 +32,8 @@ static int otx2_dl_mcam_count_validate(struct devlink *devlink, u32 id,
}
static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct otx2_devlink *otx2_dl = devlink_priv(devlink);
struct otx2_nic *pfvf = otx2_dl->pfvf;


@ -185,7 +185,8 @@ static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id,
}
static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
mlx4_internal_err_reset = ctx->val.vbool;
return 0;
@ -202,7 +203,8 @@ static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id,
}
static int mlx4_devlink_crdump_snapshot_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct mlx4_priv *priv = devlink_priv(devlink);
struct mlx4_dev *dev = &priv->dev;


@ -1805,7 +1805,8 @@ err:
}
static int mlx5_devlink_esw_multiport_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);


@ -2413,7 +2413,8 @@ err:
}
static int esw_port_metadata_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw = dev->priv.eswitch;


@ -3332,7 +3332,8 @@ static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id,
}
static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
enum mlx5_flow_steering_mode mode;


@ -52,7 +52,8 @@ static void mlx5_set_fw_rst_ack(struct mlx5_core_dev *dev)
}
static int mlx5_fw_reset_enable_remote_dev_reset_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_fw_reset *fw_reset;


@ -1465,7 +1465,8 @@ mlxsw_sp_acl_tcam_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
static int
mlxsw_sp_acl_tcam_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
struct mlxsw_sp_acl_tcam_vregion *vregion;


@ -132,7 +132,8 @@ exit_close_nsp:
static int
nfp_devlink_param_u8_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
const struct nfp_devlink_param_u8_arg *arg;
struct nfp_pf *pf = devlink_priv(devlink);


@ -132,7 +132,8 @@ static int qed_dl_param_get(struct devlink *dl, u32 id,
}
static int qed_dl_param_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct qed_devlink *qed_dl = devlink_priv(dl);
struct qed_dev *cdev;


@ -3056,7 +3056,8 @@ static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port)
}
static int am65_cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
struct am65_cpsw_common *cpsw = dl_priv->common;


@ -1625,7 +1625,8 @@ static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
}
static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct cpsw_devlink *dl_priv = devlink_priv(dl);
struct cpsw_common *cpsw = dl_priv->cpsw;
@ -1762,7 +1763,8 @@ static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id,
}
static int cpsw_dl_ale_ctrl_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct cpsw_devlink *dl_priv = devlink_priv(dl);
struct cpsw_common *cpsw = dl_priv->cpsw;


@ -33,7 +33,8 @@ static int ipc_devlink_get_param(struct devlink *dl, u32 id,
/* Set the param values for the specific param ID's */
static int ipc_devlink_set_param(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct iosm_devlink *ipc_devlink = devlink_priv(dl);


@ -483,7 +483,8 @@ struct devlink_param {
int (*get)(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx);
int (*set)(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx);
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack);
int (*validate)(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack);
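To illustrate the core API change from the "devlink: extend devlink_param *set pointer" patch, here is a minimal hedged sketch of a .set callback written against the extended signature. The "foo" driver, its types, and its helper names are hypothetical; only the callback signature and the extack usage reflect the new API.

#include <linux/netlink.h>
#include <net/devlink.h>

/* Hypothetical driver state; names are illustrative only. */
struct foo_priv {
	bool feature_enabled;
	bool busy;
};

static int foo_feature_enable_set(struct devlink *devlink, u32 id,
				  struct devlink_param_gset_ctx *ctx,
				  struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = devlink_priv(devlink);

	if (priv->busy) {
		/* The extack message reaches the userspace caller of
		 * "devlink dev param set" instead of a bare errno.
		 */
		NL_SET_ERR_MSG_MOD(extack, "Device busy, retry later");
		return -EBUSY;
	}

	priv->feature_enabled = ctx->val.vbool;
	return 0;
}

This is the same pattern the ice tx_scheduling_layers set handler above uses for its power-cycle notice, and what the mechanical signature updates in the other drivers accommodate.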


@ -1258,7 +1258,8 @@ struct dsa_switch_ops {
int dsa_devlink_param_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx);
int dsa_devlink_param_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx);
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack);
int dsa_devlink_params_register(struct dsa_switch *ds,
const struct devlink_param *params,
size_t params_count);


@ -158,11 +158,12 @@ static int devlink_param_get(struct devlink *devlink,
static int devlink_param_set(struct devlink *devlink,
const struct devlink_param *param,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
if (!param->set)
return -EOPNOTSUPP;
return param->set(devlink, param->id, ctx);
return param->set(devlink, param->id, ctx, extack);
}
static int
@ -571,7 +572,7 @@ static int __devlink_nl_cmd_param_set_doit(struct devlink *devlink,
return -EOPNOTSUPP;
ctx.val = value;
ctx.cmode = cmode;
err = devlink_param_set(devlink, param, &ctx);
err = devlink_param_set(devlink, param, &ctx, info->extack);
if (err)
return err;
}


@ -194,7 +194,8 @@ int dsa_devlink_param_get(struct devlink *dl, u32 id,
EXPORT_SYMBOL_GPL(dsa_devlink_param_get);
int dsa_devlink_param_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);