net: mana: Configure hwc timeout from hardware

At present the HWC timeout is a fixed, hard-coded value. This patch
configures the timeout from the hardware instead: the driver advertises
the new capability bit GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG and,
when the hardware also supports it, queries the hardware's timeout
value and stores it in hwc_timeout.

Signed-off-by: Souradeep Chakrabarti <schakrabarti@linux.microsoft.com>
Reviewed-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
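
In outline, the negotiation this patch adds works as follows (a condensed sketch using only names from the diff below; error handling trimmed):

	/* Default set when the HW channel is created: */
	hwc->hwc_timeout = HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS;	/* 30000 ms */

	/* Later, in mana_gd_verify_vf_version(), hardware may override it: */
	if (resp.pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG)
		err = mana_gd_query_hwc_timeout(pdev, &hwc->hwc_timeout);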
drivers/net/ethernet/microsoft/mana/gdma_main.c

@@ -106,6 +106,25 @@ static int mana_gd_query_max_resources(struct pci_dev *pdev)
 	return 0;
 }
 
+static int mana_gd_query_hwc_timeout(struct pci_dev *pdev, u32 *timeout_val)
+{
+	struct gdma_context *gc = pci_get_drvdata(pdev);
+	struct gdma_query_hwc_timeout_resp resp = {};
+	struct gdma_query_hwc_timeout_req req = {};
+	int err;
+
+	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_HWC_TIMEOUT,
+			     sizeof(req), sizeof(resp));
+	req.timeout_ms = *timeout_val;
+	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+	if (err || resp.hdr.status)
+		return err ? err : -EPROTO;
+
+	*timeout_val = resp.timeout_ms;
+
+	return 0;
+}
+
 static int mana_gd_detect_devices(struct pci_dev *pdev)
 {
 	struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -882,8 +901,10 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev)
 	struct gdma_context *gc = pci_get_drvdata(pdev);
 	struct gdma_verify_ver_resp resp = {};
 	struct gdma_verify_ver_req req = {};
+	struct hw_channel_context *hwc;
 	int err;
 
+	hwc = gc->hwc.driver_data;
 	mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
 			     sizeof(req), sizeof(resp));
@@ -910,7 +931,14 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev)
 			err, resp.hdr.status);
 		return err ? err : -EPROTO;
 	}
+
+	if (resp.pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG) {
+		err = mana_gd_query_hwc_timeout(pdev, &hwc->hwc_timeout);
+		if (err) {
+			dev_err(gc->dev, "Failed to set the hwc timeout %d\n", err);
+			return err;
+		}
+		dev_dbg(gc->dev, "set the hwc timeout to %u\n", hwc->hwc_timeout);
+	}
 	return 0;
 }
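
If the hardware does not advertise GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG, hwc_timeout simply keeps the HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS default (30000 ms) set in mana_hwc_create_channel(), matching the previous fixed 30-second wait.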

drivers/net/ethernet/microsoft/mana/hw_channel.c

@@ -174,7 +174,25 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
 		complete(&hwc->hwc_init_eqe_comp);
 		break;
 
+	case GDMA_EQE_HWC_SOC_RECONFIG_DATA:
+		type_data.as_uint32 = event->details[0];
+		type = type_data.type;
+		val = type_data.value;
+
+		switch (type) {
+		case HWC_DATA_CFG_HWC_TIMEOUT:
+			hwc->hwc_timeout = val;
+			break;
+
+		default:
+			dev_warn(hwc->dev, "Received unknown reconfig type %u\n", type);
+			break;
+		}
+
+		break;
+
 	default:
 		dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type);
 		/* Ignore unknown events, which should never happen. */
 		break;
 	}
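
For reference, the decode above uses the existing hwc_init_type_data union from hw_channel.h, which splits each 32-bit details word into a 24-bit value and an 8-bit type (layout as in the mainline header, reproduced here):

	union hwc_init_type_data {
		u32 as_uint32;

		struct {
			u32 value	: 24;
			u32 type	: 8;
		};
	}; /* HW DATA */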
@@ -696,6 +714,7 @@ int mana_hwc_create_channel(struct gdma_context *gc)
 	gd->driver_data = hwc;
 	hwc->gdma_dev = gd;
 	hwc->dev = gc->dev;
+	hwc->hwc_timeout = HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS;
 
 	/* HWC's instance number is always 0. */
 	gd->dev_id.as_uint32 = 0;
@@ -770,6 +789,8 @@ void mana_hwc_destroy_channel(struct gdma_context *gc)
 	hwc->gdma_dev->doorbell = INVALID_DOORBELL;
 	hwc->gdma_dev->pdid = INVALID_PDID;
 
+	hwc->hwc_timeout = 0;
+
 	kfree(hwc);
 	gc->hwc.driver_data = NULL;
 	gc->hwc.gdma_context = NULL;
@@ -825,7 +846,8 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
 		goto out;
 	}
 
-	if (!wait_for_completion_timeout(&ctx->comp_event, 30 * HZ)) {
+	if (!wait_for_completion_timeout(&ctx->comp_event,
+					 (msecs_to_jiffies(hwc->hwc_timeout) * HZ))) {
 		dev_err(hwc->dev, "HWC: Request timed out!\n");
 		err = -ETIMEDOUT;
 		goto out;
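
Note that msecs_to_jiffies() already returns jiffies, so the extra * HZ factor above stretches the wait by another factor of HZ (30000 ms becomes 30000 seconds at HZ=1000); a follow-up fix dropped the multiplication:

	if (!wait_for_completion_timeout(&ctx->comp_event,
					 msecs_to_jiffies(hwc->hwc_timeout))) {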

include/net/mana/gdma.h

@@ -33,6 +33,7 @@ enum gdma_request_type {
 	GDMA_DESTROY_PD			= 30,
 	GDMA_CREATE_MR			= 31,
 	GDMA_DESTROY_MR			= 32,
+	GDMA_QUERY_HWC_TIMEOUT		= 84, /* 0x54 */
 };
 
 #define GDMA_RESOURCE_DOORBELL_PAGE	27
@@ -57,6 +58,8 @@ enum gdma_eqe_type {
 	GDMA_EQE_HWC_INIT_EQ_ID_DB	= 129,
 	GDMA_EQE_HWC_INIT_DATA		= 130,
 	GDMA_EQE_HWC_INIT_DONE		= 131,
+	GDMA_EQE_HWC_SOC_RECONFIG	= 132,
+	GDMA_EQE_HWC_SOC_RECONFIG_DATA	= 133,
 };
 
 enum {
@@ -531,10 +534,12 @@ enum {
  * so the driver is able to reliably support features like busy_poll.
  */
 #define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
+#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
 
 #define GDMA_DRV_CAP_FLAGS1 \
 	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
-	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX)
+	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
+	 GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG)
 
 #define GDMA_DRV_CAP_FLAGS2 0
@@ -664,6 +669,19 @@ struct gdma_disable_queue_req {
 	u32 alloc_res_id_on_creation;
 }; /* HW DATA */
 
+/* GDMA_QUERY_HWC_TIMEOUT */
+struct gdma_query_hwc_timeout_req {
+	struct gdma_req_hdr hdr;
+	u32 timeout_ms;
+	u32 reserved;
+};
+
+struct gdma_query_hwc_timeout_resp {
+	struct gdma_resp_hdr hdr;
+	u32 timeout_ms;
+	u32 reserved;
+};
+
 enum atb_page_size {
 	ATB_PAGE_SIZE_4K,
 	ATB_PAGE_SIZE_8K,
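
The same timeout_ms field is used in both directions: the request carries the driver's current value (the 30000 ms default on first use) and the response returns the hardware's chosen value, which mana_gd_query_hwc_timeout() writes back through its timeout_val argument. The reserved field keeps the structures naturally aligned, per the HW DATA convention.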

include/net/mana/hw_channel.h

@@ -23,6 +23,10 @@
 #define HWC_INIT_DATA_PF_DEST_RQ_ID	10
 #define HWC_INIT_DATA_PF_DEST_CQ_ID	11
 
+#define HWC_DATA_CFG_HWC_TIMEOUT 1
+
+#define HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS 30000
+
 /* Structures labeled with "HW DATA" are exchanged with the hardware. All of
  * them are naturally aligned and hence don't need __packed.
  */
@@ -182,6 +186,7 @@ struct hw_channel_context {
 	u32 pf_dest_vrq_id;
 	u32 pf_dest_vrcq_id;
+	u32 hwc_timeout;
 
 	struct hwc_caller_ctx *caller_ctx;
 };
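
To see the reconfig path end to end: with the 24/8 value/type split shown earlier, encoding and decoding a hypothetical new 45-second timeout in a GDMA_EQE_HWC_SOC_RECONFIG_DATA details word would look like this (illustrative sketch, not driver code):

	u32 details0;
	union hwc_init_type_data data;

	/* encode: type in the top 8 bits, value (here, ms) in the low 24;
	 * little-endian bitfield layout assumed, as on the driver's targets
	 */
	details0 = (HWC_DATA_CFG_HWC_TIMEOUT << 24) | 45000;

	/* decode, as mana_hwc_init_event_handler() does */
	data.as_uint32 = details0;
	if (data.type == HWC_DATA_CFG_HWC_TIMEOUT)
		hwc->hwc_timeout = data.value;	/* now 45000 ms */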