From edebc8407b8891ec0ea9ca4089f3d3343a5e50dd Mon Sep 17 00:00:00 2001
From: Bob Pearson
Date: Fri, 16 Oct 2020 16:13:44 -0500
Subject: [PATCH 1/5] RDMA/rxe: Fix small problem in network_type patch

The patch referenced below has a typo that results in using the wrong L2
header size for outbound traffic (IPv4 and IPv6 are swapped). It also
breaks kernel-side RC traffic, because kernel ULPs use AVs that carry
RDMA_NETWORK_XXX enum values rather than RXE_NETWORK_TYPE_XXX values.
Fix this by transcoding between these enum types.
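As a sketch, the transcoding could equally be factored into a helper
like the one below; the helper name is hypothetical, and the body simply
mirrors the switch this patch adds to rxe_av_fill_ip_info():

  static enum rxe_network_type to_rxe_network_type(int ibtype)
  {
  	switch (ibtype) {
  	case RDMA_NETWORK_IPV4:
  		return RXE_NETWORK_TYPE_IPV4;
  	case RDMA_NETWORK_IPV6:
  		return RXE_NETWORK_TYPE_IPV6;
  	default:
  		/* not reached - rejected earlier by rxe_av_chk_attr() */
  		return 0;
  	}
  }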
Fixes: e0d696d201dd ("RDMA/rxe: Move the definitions for rxe_av.network_type to uAPI")
Link: https://lore.kernel.org/r/20201016211343.22906-1-rpearson@hpe.com
Signed-off-by: Bob Pearson
Signed-off-by: Jason Gunthorpe
---
 drivers/infiniband/sw/rxe/rxe_av.c  | 35 +++++++++++++++++++++++++----
 drivers/infiniband/sw/rxe/rxe_net.c |  2 +-
 2 files changed, 32 insertions(+), 5 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
index 38021e2c8688..df0d173d6acb 100644
--- a/drivers/infiniband/sw/rxe/rxe_av.c
+++ b/drivers/infiniband/sw/rxe/rxe_av.c
@@ -16,15 +16,24 @@ void rxe_init_av(struct rdma_ah_attr *attr, struct rxe_av *av)
 int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr)
 {
+	const struct ib_global_route *grh = rdma_ah_read_grh(attr);
 	struct rxe_port *port;
+	int type;
 
 	port = &rxe->port;
 
 	if (rdma_ah_get_ah_flags(attr) & IB_AH_GRH) {
-		u8 sgid_index = rdma_ah_read_grh(attr)->sgid_index;
+		if (grh->sgid_index > port->attr.gid_tbl_len) {
+			pr_warn("invalid sgid index = %d\n",
+				grh->sgid_index);
+			return -EINVAL;
+		}
 
-		if (sgid_index > port->attr.gid_tbl_len) {
-			pr_warn("invalid sgid index = %d\n", sgid_index);
+		type = rdma_gid_attr_network_type(grh->sgid_attr);
+		if (type < RDMA_NETWORK_IPV4 ||
+		    type > RDMA_NETWORK_IPV6) {
+			pr_warn("invalid network type for rdma_rxe = %d\n",
+				type);
 			return -EINVAL;
 		}
 	}
@@ -65,11 +74,29 @@ void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr)
 void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr)
 {
 	const struct ib_gid_attr *sgid_attr = attr->grh.sgid_attr;
+	int ibtype;
+	int type;
 
 	rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
 	rdma_gid2ip((struct sockaddr *)&av->dgid_addr,
 		    &rdma_ah_read_grh(attr)->dgid);
-	av->network_type = rdma_gid_attr_network_type(sgid_attr);
+
+	ibtype = rdma_gid_attr_network_type(sgid_attr);
+
+	switch (ibtype) {
+	case RDMA_NETWORK_IPV4:
+		type = RXE_NETWORK_TYPE_IPV4;
+		break;
+	case RDMA_NETWORK_IPV6:
+		type = RXE_NETWORK_TYPE_IPV6;
+		break;
+	default:
+		/* not reached - checked in rxe_av_chk_attr */
+		type = 0;
+		break;
+	}
+
+	av->network_type = type;
 }
 
 struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt)
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 575e1a4ec821..34bef7d8e6b4 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -442,7 +442,7 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
 	if (IS_ERR(attr))
 		return NULL;
 
-	if (av->network_type == RXE_NETWORK_TYPE_IPV6)
+	if (av->network_type == RXE_NETWORK_TYPE_IPV4)
 		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
 			sizeof(struct iphdr);
 	else

From fbdd0049d98d44914fc57d4b91f867f4996c787b Mon Sep 17 00:00:00 2001
From: Parav Pandit
Date: Mon, 26 Oct 2020 15:43:59 +0200
Subject: [PATCH 2/5] RDMA/mlx5: Fix devlink deadlock on net namespace deletion

When an mlx5 core devlink instance is reloaded in a different net
namespace, its associated IB device is deleted and recreated.

Example sequence:
$ ip netns add foo
$ devlink dev reload pci/0000:00:08.0 netns foo
$ ip netns del foo

The mlx5 IB device needs to attach and detach its netdevice through the
netdev notifier chain during the load and unload sequence. Below is the
call graph of the unload flow:

cleanup_net()
  down_read(&pernet_ops_rwsem); <- first sem acquired
    ops_pre_exit_list()
      pre_exit()
        devlink_pernet_pre_exit()
          devlink_reload()
            mlx5_devlink_reload_down()
              mlx5_unload_one()
              [...]
                mlx5_ib_remove()
                  mlx5_ib_unbind_slave_port()
                    mlx5_remove_netdev_notifier()
                      unregister_netdevice_notifier()
                        down_write(&pernet_ops_rwsem); <- recursive lock

Hence, when a net namespace is deleted, an mlx5 reload results in a
deadlock. While the deadlock is in place, the devlink mutex is also
held. This not only deadlocks the mlx5 device under reload, but also
every process that attempts to access an unrelated devlink device.

Fix this by having the mlx5 IB driver register a per-net netdev notifier
instead of a global one; the per-net variant operates on a single net
namespace without taking pernet_ops_rwsem.
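For context, here is a simplified contrast of the two unregister paths
(abridged from net/core/dev.c as of this series; shown for illustration
only):

  unregister_netdevice_notifier(nb)
    down_write(&pernet_ops_rwsem);   /* also taken by cleanup_net() */
    for_each_net(net)
      call_netdevice_unregister_net_notifiers(nb, net);

  unregister_netdevice_notifier_net(net, nb)
    rtnl_lock();                     /* pernet_ops_rwsem never taken */
    __unregister_netdevice_notifier_net(net, nb);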
Fixes: 4383cfcc65e7 ("net/mlx5: Add devlink reload")
Link: https://lore.kernel.org/r/20201026134359.23150-1-parav@nvidia.com
Signed-off-by: Parav Pandit
Signed-off-by: Leon Romanovsky
Signed-off-by: Jason Gunthorpe
---
 drivers/infiniband/hw/mlx5/main.c                  |  6 ++++--
 .../net/ethernet/mellanox/mlx5/core/lib/mlx5.h     |  5 -----
 include/linux/mlx5/driver.h                        | 18 ++++++++++++++++++
 3 files changed, 22 insertions(+), 7 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 89e04ca62ae0..246e3cbe0b2c 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3305,7 +3305,8 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
 	int err;
 
 	dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
-	err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
+	err = register_netdevice_notifier_net(mlx5_core_net(dev->mdev),
+					      &dev->port[port_num].roce.nb);
 	if (err) {
 		dev->port[port_num].roce.nb.notifier_call = NULL;
 		return err;
@@ -3317,7 +3318,8 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
 static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
 {
 	if (dev->port[port_num].roce.nb.notifier_call) {
-		unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
+		unregister_netdevice_notifier_net(mlx5_core_net(dev->mdev),
+						  &dev->port[port_num].roce.nb);
 		dev->port[port_num].roce.nb.notifier_call = NULL;
 	}
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index d046db7bb047..3a9fa629503f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -90,9 +90,4 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
 			       u32 key_type, u32 *p_key_id);
 void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
 
-static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
-{
-	return devlink_net(priv_to_devlink(dev));
-}
-
 #endif
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index add85094f9a5..0f23e1ed5e71 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1213,4 +1213,22 @@ static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
 	return val.vbool;
 }
 
+/**
+ * mlx5_core_net - Provide net namespace of the mlx5_core_dev
+ * @dev: mlx5 core device
+ *
+ * mlx5_core_net() returns the net namespace of the mlx5 core device.
+ * It may be called only in the following limited contexts:
+ * (a) when a devlink instance for mlx5_core is registered and
+ *     the devlink reload operation is disabled;
+ * or
+ * (b) from the devlink reload_down() and reload_up() callbacks,
+ *     where the devlink instance's net namespace is guaranteed
+ *     to be stable.
+ */
+static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+{
+	return devlink_net(priv_to_devlink(dev));
+}
+
 #endif /* MLX5_DRIVER_H */

From 7d66a71488d7c14506ab81d6455c095992efca04 Mon Sep 17 00:00:00 2001
From: Gal Pressman
Date: Mon, 26 Oct 2020 10:26:21 +0200
Subject: [PATCH 3/5] RDMA/uverbs: Fix false error in query gid IOCTL

Some drivers (such as EFA) have a GID table but are not IB/RoCE devices,
so the rdma_ib_or_roce() check rejects them even though they can serve
the query. Remove the unnecessary check.

This fixes rdma-core failures for EFA when it uses the new ioctl
interface for querying the GID table.
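To illustrate the user-visible effect, a minimal rdma-core snippet
(hypothetical helper; assumes rdma-core >= v32, where ibv_query_gid_ex()
uses the new ioctl; error handling abridged). Before this fix, the call
below returned EOPNOTSUPP on EFA even for a valid GID index:

  #include <stdio.h>
  #include <infiniband/verbs.h>

  static int dump_first_gid(struct ibv_context *ctx)
  {
  	struct ibv_gid_entry entry;
  	int ret;

  	/* port 1, gid_index 0, no flags */
  	ret = ibv_query_gid_ex(ctx, 1, 0, &entry, 0);
  	if (ret)
  		return ret;	/* failed with EOPNOTSUPP before this fix */

  	printf("port %u index %u type %u\n",
  	       entry.port_num, entry.gid_index, entry.gid_type);
  	return 0;
  }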
Fixes: 9f85cbe50aa0 ("RDMA/uverbs: Expose the new GID query API to user space")
Link: https://lore.kernel.org/r/20201026082621.32463-1-galpress@amazon.com
Signed-off-by: Gal Pressman
Signed-off-by: Jason Gunthorpe
---
 drivers/infiniband/core/uverbs_std_types_device.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
index f367d523a46b..302f898c5833 100644
--- a/drivers/infiniband/core/uverbs_std_types_device.c
+++ b/drivers/infiniband/core/uverbs_std_types_device.c
@@ -401,9 +401,6 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_ENTRY)(
 	if (!rdma_is_port_valid(ib_dev, port_num))
 		return -EINVAL;
 
-	if (!rdma_ib_or_roce(ib_dev, port_num))
-		return -EOPNOTSUPP;
-
 	gid_attr = rdma_get_gid_attr(ib_dev, port_num, gid_index);
 	if (IS_ERR(gid_attr))
 		return PTR_ERR(gid_attr);

From 071ba4cc559de47160761b9500b72e8fa09d923d Mon Sep 17 00:00:00 2001
From: Jason Gunthorpe
Date: Mon, 26 Oct 2020 11:25:49 -0300
Subject: [PATCH 4/5] RDMA: Add rdma_connect_locked()

There are two flows for handling RDMA_CM_EVENT_ROUTE_RESOLVED: either
the handler triggers a completion and another thread does
rdma_connect(), or the handler calls rdma_connect() directly. In all
cases rdma_connect() needs to hold the handler_mutex, but when handlers
are invoked this mutex is already held by the core code. This causes
ULPs using the second method to deadlock.

Provide rdma_connect_locked() and have all such ULPs call it from their
handlers.
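For illustration, the second flow now looks like this (hypothetical ULP
event handler; error handling abridged):

  static int my_ulp_cma_handler(struct rdma_cm_id *id,
  				struct rdma_cm_event *event)
  {
  	struct rdma_conn_param param = { .retry_count = 7 };

  	switch (event->event) {
  	case RDMA_CM_EVENT_ROUTE_RESOLVED:
  		/*
  		 * handler_mutex is already held by the CM core here,
  		 * so plain rdma_connect() would deadlock.
  		 */
  		return rdma_connect_locked(id, &param);
  	default:
  		return 0;
  	}
  }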
Link: https://lore.kernel.org/r/0-v2-53c22d5c1405+33-rdma_connect_locking_jgg@nvidia.com
Reported-and-tested-by: Guoqing Jiang
Fixes: 2a7cec538169 ("RDMA/cma: Fix locking for the RDMA_CM_CONNECT state")
Acked-by: Santosh Shilimkar
Acked-by: Jack Wang
Reviewed-by: Christoph Hellwig
Reviewed-by: Max Gurtovoy
Reviewed-by: Sagi Grimberg
Signed-off-by: Jason Gunthorpe
---
 drivers/infiniband/core/cma.c            | 48 +++++++++++++++++++-----
 drivers/infiniband/ulp/iser/iser_verbs.c |  2 +-
 drivers/infiniband/ulp/rtrs/rtrs-clt.c   |  4 +-
 drivers/nvme/host/rdma.c                 |  4 +-
 include/rdma/rdma_cm.h                   | 14 +------
 net/rds/ib_cm.c                          |  5 ++-
 6 files changed, 48 insertions(+), 29 deletions(-)

diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 7c2ab1f2fbea..a77750b8954d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -405,10 +405,10 @@ static int cma_comp_exch(struct rdma_id_private *id_priv,
 	/*
 	 * The FSM uses a funny double locking where state is protected by both
 	 * the handler_mutex and the spinlock. State is not allowed to change
-	 * away from a handler_mutex protected value without also holding
+	 * to/from a handler_mutex protected value without also holding
 	 * handler_mutex.
 	 */
-	if (comp == RDMA_CM_CONNECT)
+	if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT)
 		lockdep_assert_held(&id_priv->handler_mutex);
 
 	spin_lock_irqsave(&id_priv->lock, flags);
@@ -4038,17 +4038,23 @@ out:
 	return ret;
 }
 
-int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
+/**
+ * rdma_connect_locked - Initiate an active connection request.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
+ *
+ * Same as rdma_connect() but can only be called from the
+ * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback.
+ */
+int rdma_connect_locked(struct rdma_cm_id *id,
+			struct rdma_conn_param *conn_param)
 {
 	struct rdma_id_private *id_priv =
 		container_of(id, struct rdma_id_private, id);
 	int ret;
 
-	mutex_lock(&id_priv->handler_mutex);
-	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) {
-		ret = -EINVAL;
-		goto err_unlock;
-	}
+	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
+		return -EINVAL;
 
 	if (!id->qp) {
 		id_priv->qp_num = conn_param->qp_num;
@@ -4066,11 +4072,33 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 		ret = -ENOSYS;
 	if (ret)
 		goto err_state;
-	mutex_unlock(&id_priv->handler_mutex);
 	return 0;
 err_state:
 	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
-err_unlock:
+	return ret;
+}
+EXPORT_SYMBOL(rdma_connect_locked);
+
+/**
+ * rdma_connect - Initiate an active connection request.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
+ *
+ * Users must have resolved a route for the rdma_cm_id to connect with by
+ * having called rdma_resolve_route before calling this routine.
+ *
+ * This call will either connect to a remote QP or obtain remote QP
+ * information for unconnected rdma_cm_id's. The actual operation is
+ * based on the rdma_cm_id's port space.
+ */
+int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
+{
+	struct rdma_id_private *id_priv =
+		container_of(id, struct rdma_id_private, id);
+	int ret;
+
+	mutex_lock(&id_priv->handler_mutex);
+	ret = rdma_connect_locked(id, conn_param);
 	mutex_unlock(&id_priv->handler_mutex);
 	return ret;
 }
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 2f3ebc0a75d9..2bd18b006893 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -620,7 +620,7 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
 	conn_param.private_data = (void *)&req_hdr;
 	conn_param.private_data_len = sizeof(struct iser_cm_hdr);
 
-	ret = rdma_connect(cma_id, &conn_param);
+	ret = rdma_connect_locked(cma_id, &conn_param);
 	if (ret) {
 		iser_err("failure connecting: %d\n", ret);
 		goto failure;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 776e89231c52..f298adc02acb 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -1674,9 +1674,9 @@ static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
 	uuid_copy(&msg.sess_uuid, &sess->s.uuid);
 	uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
 
-	err = rdma_connect(con->c.cm_id, &param);
+	err = rdma_connect_locked(con->c.cm_id, &param);
 	if (err)
-		rtrs_err(clt, "rdma_connect(): %d\n", err);
+		rtrs_err(clt, "rdma_connect_locked(): %d\n", err);
 
 	return err;
 }
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index aad829a2b50d..8bbc48cc45dc 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1890,10 +1890,10 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
 		priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
 	}
 
-	ret = rdma_connect(queue->cm_id, &param);
+	ret = rdma_connect_locked(queue->cm_id, &param);
 	if (ret) {
 		dev_err(ctrl->ctrl.device,
-			"rdma_connect failed (%d).\n", ret);
+			"rdma_connect_locked failed (%d).\n", ret);
 		goto out_destroy_queue_ib;
 	}
 
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index c672ae1da26b..32a67af18415 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -227,19 +227,9 @@ void rdma_destroy_qp(struct rdma_cm_id *id);
 int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 		       int *qp_attr_mask);
 
-/**
- * rdma_connect - Initiate an active connection request.
- * @id: Connection identifier to connect.
- * @conn_param: Connection information used for connected QPs.
- *
- * Users must have resolved a route for the rdma_cm_id to connect with
- * by having called rdma_resolve_route before calling this routine.
- *
- * This call will either connect to a remote QP or obtain remote QP
- * information for unconnected rdma_cm_id's. The actual operation is
- * based on the rdma_cm_id's port space.
- */
 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
+int rdma_connect_locked(struct rdma_cm_id *id,
+			struct rdma_conn_param *conn_param);
 int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
 		     struct rdma_ucm_ece *ece);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 06603dd1c8aa..b36b60668b1d 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -956,9 +956,10 @@ int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6)
 	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp,
 				  conn->c_proposed_version,
 				  UINT_MAX, UINT_MAX, isv6);
-	ret = rdma_connect(cm_id, &conn_param);
+	ret = rdma_connect_locked(cm_id, &conn_param);
 	if (ret)
-		rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);
+		rds_ib_conn_error(conn, "rdma_connect_locked failed (%d)\n",
+				  ret);
 
 out:
 	/* Beware - returning non-zero tells the rdma_cm to destroy

From a2267f8a52eea9096861affd463f691be0f0e8c9 Mon Sep 17 00:00:00 2001
From: Alok Prasad
Date: Wed, 21 Oct 2020 11:50:08 +0000
Subject: [PATCH 5/5] RDMA/qedr: Fix memory leak in iWARP CM

Fix a memory leak in the iWARP connection manager:
qedr_iw_destroy_listen() released the qed listener handle and dropped
the cm_id reference, but never freed the listener object itself.
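The alloc/free pairing after this change (abridged from qedr_iw_cm.c,
for illustration):

  qedr_iw_create_listen()
    listener = kzalloc(sizeof(*listener), GFP_KERNEL);
    ...

  qedr_iw_destroy_listen()
    dev->ops->iwarp_destroy_listen(dev->rdma_ctx, listener->qed_handle);
    cm_id->rem_ref(cm_id);
    kfree(listener);    /* added by this patch; previously leaked */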
- */ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param); +int rdma_connect_locked(struct rdma_cm_id *id, + struct rdma_conn_param *conn_param); int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, struct rdma_ucm_ece *ece); diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index 06603dd1c8aa..b36b60668b1d 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@ -956,9 +956,10 @@ int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6) rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, conn->c_proposed_version, UINT_MAX, UINT_MAX, isv6); - ret = rdma_connect(cm_id, &conn_param); + ret = rdma_connect_locked(cm_id, &conn_param); if (ret) - rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret); + rds_ib_conn_error(conn, "rdma_connect_locked failed (%d)\n", + ret); out: /* Beware - returning non-zero tells the rdma_cm to destroy From a2267f8a52eea9096861affd463f691be0f0e8c9 Mon Sep 17 00:00:00 2001 From: Alok Prasad Date: Wed, 21 Oct 2020 11:50:08 +0000 Subject: [PATCH 5/5] RDMA/qedr: Fix memory leak in iWARP CM Fixes memory leak in iWARP CM Fixes: e411e0587e0d ("RDMA/qedr: Add iWARP connection management functions") Link: https://lore.kernel.org/r/20201021115008.28138-1-palok@marvell.com Signed-off-by: Michal Kalderon Signed-off-by: Igor Russkikh Signed-off-by: Alok Prasad Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/qedr_iw_cm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c index c7169d2c69e5..c4bc58736e48 100644 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c @@ -727,6 +727,7 @@ int qedr_iw_destroy_listen(struct iw_cm_id *cm_id) listener->qed_handle); cm_id->rem_ref(cm_id); + kfree(listener); return rc; }