RDMA v5.16 third rc pull request

Last fixes before holidays:
 
 - Work around a HW bug in HNS HIP08
 
 - Recent memory leak regression in qib
 
 - Incorrect use of kfree() for vmalloc memory in hns
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEfB7FMLh+8QxL+6i3OG33FX4gmxoFAmHBK3MACgkQOG33FX4g
 mxobyRAAmeN+27yYxE7NHjNPdFQ8wDGwJW5+L6gaydzsG19Ql69ZdA2VRQqO+kyI
 8RH69IPKkRbpgruZhCguNSCyYan+mre9qL7kcRJXuIbt1gKZVoD/6J+70t4TOw3W
 +R154GUGN8dzilmYp1Rr8rGNKZDLRc+yc6SSSDbGu3RmCJm7PZgLjrj0pfqHO37g
 juA3lOvGHgVEJtmFM3SILfkwqKFkNRhB7pVb/3HE18Vpwn0dXFh42DyoA/9T8DwX
 Ufx1rm7/hxjYIxv0TXmNU6MqhobP3SwF8wCBTlS0IBPHWSBVoYAKon2hCdFw2wD2
 wadGCEMuD+MXZu+/ZvyDz0EN9AWamCktKr74ZD5fp8Un7XY4r2vpjBb0uxuMrfyJ
 H2TZSFxDp31W1yU2UgDmpPOLrQhC5iduDGGZ7olw41CAqC95zUidAv2tJrRra1Hv
 UlnAIKM7VZ+tT8VcDPcdmUkGjvgJlmFhWD0VM6RJ+RU2smTW1WcCoPrNQV9ngSMy
 AO8wmVg8KwKQtkd8roOkdNe1QxvnUuAt7WWTWW3sc2IqELoYGfm5uJXoQQICP3Q/
 nCVXjjDmUY9zfgjRqcqLGwZrQJTCHQp7JSELmQCaEODJVYsafW0FxZlHhyLr48t6
 Q30YdioNbO9+DG8Ggza3os5jeCqxEbh+kugg43KMQyjGtQ05oJg=
 =rRP8
 -----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Last fixes before holidays. Nothing very exciting:

   - Work around a HW bug in HNS HIP08

   - Recent memory leak regression in qib

   - Incorrect use of kfree() for vmalloc memory in hns"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/hns: Replace kfree() with kvfree()
  IB/qib: Fix memory leak in qib_user_sdma_queue_pkts()
  RDMA/hns: Fix RNR retransmission issue for HIP08
This commit is contained in:
Linus Torvalds 2021-12-20 17:26:42 -08:00
commit 6e0567b730
4 changed files with 67 additions and 9 deletions

View File

@ -1594,11 +1594,17 @@ static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc;
struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
u32 clock_cycles_of_1us;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
false);
hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, 0x3e8);
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
clock_cycles_of_1us = HNS_ROCE_1NS_CFG;
else
clock_cycles_of_1us = HNS_ROCE_1US_CFG;
hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, clock_cycles_of_1us);
hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);
return hns_roce_cmq_send(hr_dev, &desc, 1);
@ -4802,6 +4808,30 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
return ret;
}
/*
 * Validate (and, for HIP08, remap) the local ACK timeout requested via
 * IB_QP_TIMEOUT before it is written into the QP context.
 *
 * On HIP08 hardware only values 0..20 are acceptable, and the value that
 * actually gets programmed is offset by 10 (HW bug workaround). On newer
 * revisions the full 0..31 range is allowed unchanged.
 *
 * Returns true when *timeout is usable (possibly rewritten in place),
 * false after logging a warning when the caller should skip programming it.
 */
static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
{
#define QP_ACK_TIMEOUT_MAX_HIP08 20
#define QP_ACK_TIMEOUT_OFFSET 10
#define QP_ACK_TIMEOUT_MAX 31
	u8 revision = hr_dev->pci_dev->revision;

	if (revision == PCI_REVISION_ID_HIP08) {
		if (*timeout > QP_ACK_TIMEOUT_MAX_HIP08) {
			ibdev_warn(&hr_dev->ib_dev,
				   "Local ACK timeout shall be 0 to 20.\n");
			return false;
		}
		/* HIP08 interprets the field shifted by 10; adjust in place. */
		*timeout += QP_ACK_TIMEOUT_OFFSET;
		return true;
	}

	if (revision > PCI_REVISION_ID_HIP08 && *timeout > QP_ACK_TIMEOUT_MAX) {
		ibdev_warn(&hr_dev->ib_dev,
			   "Local ACK timeout shall be 0 to 31.\n");
		return false;
	}

	return true;
}
static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
@ -4811,6 +4841,7 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
int ret = 0;
u8 timeout;
if (attr_mask & IB_QP_AV) {
ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
@ -4820,12 +4851,10 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
}
if (attr_mask & IB_QP_TIMEOUT) {
if (attr->timeout < 31) {
hr_reg_write(context, QPC_AT, attr->timeout);
timeout = attr->timeout;
if (check_qp_timeout_cfg_range(hr_dev, &timeout)) {
hr_reg_write(context, QPC_AT, timeout);
hr_reg_clear(qpc_mask, QPC_AT);
} else {
ibdev_warn(&hr_dev->ib_dev,
"Local ACK timeout shall be 0 to 30.\n");
}
}
@ -4882,7 +4911,9 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
if (attr_mask & IB_QP_MIN_RNR_TIMER) {
hr_reg_write(context, QPC_MIN_RNR_TIME, attr->min_rnr_timer);
hr_reg_write(context, QPC_MIN_RNR_TIME,
hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ?
HNS_ROCE_RNR_TIMER_10NS : attr->min_rnr_timer);
hr_reg_clear(qpc_mask, QPC_MIN_RNR_TIME);
}
@ -5499,6 +5530,16 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
hr_reg_write(cq_context, CQC_CQ_MAX_CNT, cq_count);
hr_reg_clear(cqc_mask, CQC_CQ_MAX_CNT);
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
if (cq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
dev_info(hr_dev->dev,
"cq_period(%u) reached the upper limit, adjusted to 65.\n",
cq_period);
cq_period = HNS_ROCE_MAX_CQ_PERIOD;
}
cq_period *= HNS_ROCE_CLOCK_ADJUST;
}
hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period);
hr_reg_clear(cqc_mask, CQC_CQ_PERIOD);
@ -5894,6 +5935,15 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX);
hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt);
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
if (eq->eq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
dev_info(hr_dev->dev, "eq_period(%u) reached the upper limit, adjusted to 65.\n",
eq->eq_period);
eq->eq_period = HNS_ROCE_MAX_EQ_PERIOD;
}
eq->eq_period *= HNS_ROCE_CLOCK_ADJUST;
}
hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period);
hr_reg_write(eqc, EQC_EQE_REPORT_TIMER, HNS_ROCE_EQ_INIT_REPORT_TIMER);
hr_reg_write(eqc, EQC_EQE_BA_L, bt_ba >> 3);

View File

@ -1444,6 +1444,14 @@ struct hns_roce_dip {
struct list_head node; /* all dips are on a list */
};
/* only for RNR timeout issue of HIP08 */
#define HNS_ROCE_CLOCK_ADJUST 1000
#define HNS_ROCE_MAX_CQ_PERIOD 65
#define HNS_ROCE_MAX_EQ_PERIOD 65
#define HNS_ROCE_RNR_TIMER_10NS 1
#define HNS_ROCE_1US_CFG 999
#define HNS_ROCE_1NS_CFG 0
#define HNS_ROCE_AEQ_DEFAULT_BURST_NUM 0x0
#define HNS_ROCE_AEQ_DEFAULT_INTERVAL 0x0
#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x0

View File

@ -259,7 +259,7 @@ static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
static void free_srq_wrid(struct hns_roce_srq *srq)
{
kfree(srq->wrid);
kvfree(srq->wrid);
srq->wrid = NULL;
}

View File

@ -941,7 +941,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
&addrlimit) ||
addrlimit > type_max(typeof(pkt->addrlimit))) {
ret = -EINVAL;
goto free_pbc;
goto free_pkt;
}
pkt->addrlimit = addrlimit;