RDMA: Change MAD processing function to remove extra casting and parameter

All users of process_mad() convert the input pointers from ib_mad_hdr to
ib_mad, so update the function declaration to use ib_mad directly.

Also remove the unused input MAD size parameter.
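
For illustration only (a minimal sketch, not code from this patch: the foo_
driver name and body are made up, and the usual <rdma/ib_verbs.h> and
<rdma/ib_mad.h> includes are assumed), a .process_mad implementation under
the new prototype receives struct ib_mad pointers directly, so the per-driver
casts and the input size argument disappear:

    static int foo_process_mad(struct ib_device *ibdev, int mad_flags,
                               u8 port_num, const struct ib_wc *in_wc,
                               const struct ib_grh *in_grh,
                               const struct ib_mad *in, struct ib_mad *out,
                               size_t *out_mad_size, u16 *out_mad_pkey_index)
    {
            /* Header fields are reached through in->mad_hdr; no cast needed. */
            if (in->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
                    return IB_MAD_RESULT_SUCCESS;

            /* A real driver would build its reply in *out here. */
            out->mad_hdr = in->mad_hdr;
            out->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
            return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
    }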

Link: https://lore.kernel.org/r/20191029062745.7932-17-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Tested-By: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Leon Romanovsky authored 2019-10-29 08:27:45 +02:00, committed by Jason Gunthorpe
parent 333ee7e2d0
commit e26e7b88f6
17 files changed, 104 insertions(+), 154 deletions(-)

@@ -913,9 +913,9 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,

         /* No GRH for DR SMP */
         ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL,
-                                      (const struct ib_mad_hdr *)smp, mad_size,
-                                      (struct ib_mad_hdr *)mad_priv->mad,
-                                      &mad_size, &out_mad_pkey_index);
+                                      (const struct ib_mad *)smp,
+                                      (struct ib_mad *)mad_priv->mad, &mad_size,
+                                      &out_mad_pkey_index);
         switch (ret)
         {
         case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
@@ -2321,9 +2321,9 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
         if (port_priv->device->ops.process_mad) {
                 ret = port_priv->device->ops.process_mad(
                         port_priv->device, 0, port_priv->port_num, wc,
-                        &recv->grh, (const struct ib_mad_hdr *)recv->mad,
-                        recv->mad_size, (struct ib_mad_hdr *)response->mad,
-                        &mad_size, &resp_mad_pkey_index);
+                        &recv->grh, (const struct ib_mad *)recv->mad,
+                        (struct ib_mad *)response->mad, &mad_size,
+                        &resp_mad_pkey_index);

                 if (opa)
                         wc->pkey_index = resp_mad_pkey_index;

@@ -497,10 +497,8 @@ static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr,
         if (attr != IB_PMA_CLASS_PORT_INFO)
                 in_mad->data[41] = port_num;    /* PortSelect field */

-        if ((dev->ops.process_mad(dev, IB_MAD_IGNORE_MKEY,
-                                  port_num, NULL, NULL,
-                                  (const struct ib_mad_hdr *)in_mad, mad_size,
-                                  (struct ib_mad_hdr *)out_mad, &mad_size,
+        if ((dev->ops.process_mad(dev, IB_MAD_IGNORE_MKEY, port_num, NULL, NULL,
+                                  in_mad, out_mad, &mad_size,
                                   &out_mad_pkey_index) &
              (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) !=
             (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) {

@@ -4915,11 +4915,10 @@ static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
  */
 int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
                      const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-                     const struct ib_mad_hdr *in_mad, size_t in_mad_size,
-                     struct ib_mad_hdr *out_mad, size_t *out_mad_size,
-                     u16 *out_mad_pkey_index)
+                     const struct ib_mad *in_mad, struct ib_mad *out_mad,
+                     size_t *out_mad_size, u16 *out_mad_pkey_index)
 {
-        switch (in_mad->base_version) {
+        switch (in_mad->mad_hdr.base_version) {
         case OPA_MGMT_BASE_VERSION:
                 return hfi1_process_opa_mad(ibdev, mad_flags, port,
                                             in_wc, in_grh,
@@ -4928,10 +4927,8 @@ int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
                                             out_mad_size,
                                             out_mad_pkey_index);
         case IB_MGMT_BASE_VERSION:
-                return hfi1_process_ib_mad(ibdev, mad_flags, port,
-                                           in_wc, in_grh,
-                                           (const struct ib_mad *)in_mad,
-                                           (struct ib_mad *)out_mad);
+                return hfi1_process_ib_mad(ibdev, mad_flags, port, in_wc,
+                                           in_grh, in_mad, out_mad);
         default:
                 break;
         }

@@ -330,9 +330,8 @@ void hfi1_sys_guid_chg(struct hfi1_ibport *ibp);
 void hfi1_node_desc_chg(struct hfi1_ibport *ibp);
 int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
                      const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-                     const struct ib_mad_hdr *in_mad, size_t in_mad_size,
-                     struct ib_mad_hdr *out_mad, size_t *out_mad_size,
-                     u16 *out_mad_pkey_index);
+                     const struct ib_mad *in_mad, struct ib_mad *out_mad,
+                     size_t *out_mad_size, u16 *out_mad_pkey_index);

 /*
  * The PSN_MASK and PSN_SHIFT allow for

@@ -983,13 +983,10 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,

 int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                         const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-                        const struct ib_mad_hdr *in, size_t in_mad_size,
-                        struct ib_mad_hdr *out, size_t *out_mad_size,
-                        u16 *out_mad_pkey_index)
+                        const struct ib_mad *in, struct ib_mad *out,
+                        size_t *out_mad_size, u16 *out_mad_pkey_index)
 {
         struct mlx4_ib_dev *dev = to_mdev(ibdev);
-        const struct ib_mad *in_mad = (const struct ib_mad *)in;
-        struct ib_mad *out_mad = (struct ib_mad *)out;
         enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);

         /* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA
@@ -997,20 +994,20 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
          */
         if (link == IB_LINK_LAYER_INFINIBAND) {
                 if (mlx4_is_slave(dev->dev) &&
-                    (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
-                     (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
-                      in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
-                      in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
-                        return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
-                                                in_grh, in_mad, out_mad);
+                    (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+                     (in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
+                      in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
+                      in->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
+                        return iboe_process_mad(ibdev, mad_flags, port_num,
+                                                in_wc, in_grh, in, out);

-                return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
-                                      in_grh, in_mad, out_mad);
+                return ib_process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
+                                      in, out);
         }

         if (link == IB_LINK_LAYER_ETHERNET)
                 return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
-                                        in_grh, in_mad, out_mad);
+                                        in_grh, in, out);

         return -EINVAL;
 }

@@ -786,11 +786,10 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
                  int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                  const void *in_mad, void *response_mad);
 int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                         const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-                        const struct ib_mad_hdr *in, size_t in_mad_size,
-                        struct ib_mad_hdr *out, size_t *out_mad_size,
-                        u16 *out_mad_pkey_index);
+                        const struct ib_mad *in, struct ib_mad *out,
+                        size_t *out_mad_size, u16 *out_mad_pkey_index);
 int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
 void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);

@@ -219,15 +219,12 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
 int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                         const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-                        const struct ib_mad_hdr *in, size_t in_mad_size,
-                        struct ib_mad_hdr *out, size_t *out_mad_size,
-                        u16 *out_mad_pkey_index)
+                        const struct ib_mad *in, struct ib_mad *out,
+                        size_t *out_mad_size, u16 *out_mad_pkey_index)
 {
         struct mlx5_ib_dev *dev = to_mdev(ibdev);
-        const struct ib_mad *in_mad = (const struct ib_mad *)in;
-        struct ib_mad *out_mad = (struct ib_mad *)out;
-        u8 mgmt_class = in_mad->mad_hdr.mgmt_class;
-        u8 method = in_mad->mad_hdr.method;
+        u8 mgmt_class = in->mad_hdr.mgmt_class;
+        u8 method = in->mad_hdr.method;
         u16 slid;
         int err;
@@ -247,13 +244,13 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                 /* Don't process SMInfo queries -- the SMA can't handle them.
                  */
-                if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
+                if (in->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
                         return IB_MAD_RESULT_SUCCESS;
         } break;
         case IB_MGMT_CLASS_PERF_MGMT:
                 if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
                     method == IB_MGMT_METHOD_GET)
-                        return process_pma_cmd(dev, port_num, in_mad, out_mad);
+                        return process_pma_cmd(dev, port_num, in, out);
                 /* fallthrough */
         case MLX5_IB_VENDOR_CLASS1:
                 /* fallthrough */
@@ -267,16 +264,15 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                 return IB_MAD_RESULT_SUCCESS;
         }

-        err = mlx5_MAD_IFC(to_mdev(ibdev),
-                           mad_flags & IB_MAD_IGNORE_MKEY,
-                           mad_flags & IB_MAD_IGNORE_BKEY,
-                           port_num, in_wc, in_grh, in_mad, out_mad);
+        err = mlx5_MAD_IFC(to_mdev(ibdev), mad_flags & IB_MAD_IGNORE_MKEY,
+                           mad_flags & IB_MAD_IGNORE_BKEY, port_num, in_wc,
+                           in_grh, in, out);
         if (err)
                 return IB_MAD_RESULT_FAILURE;

         /* set return bit in status of directed route responses */
         if (mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
-                out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
+                out->mad_hdr.status |= cpu_to_be16(1 << 15);

         if (method == IB_MGMT_METHOD_TRAP_REPRESS)
                 /* no response for trap repress */

@@ -1192,9 +1192,8 @@ int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
                          unsigned int *meta_sg_offset);
 int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                         const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-                        const struct ib_mad_hdr *in, size_t in_mad_size,
-                        struct ib_mad_hdr *out, size_t *out_mad_size,
-                        u16 *out_mad_pkey_index);
+                        const struct ib_mad *in, struct ib_mad *out,
+                        size_t *out_mad_size, u16 *out_mad_pkey_index);
 struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
                                    struct ib_udata *udata);
 int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);

@@ -576,14 +576,10 @@ enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port);
 int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
 int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-int mthca_process_mad(struct ib_device *ibdev,
-                      int mad_flags,
-                      u8 port_num,
-                      const struct ib_wc *in_wc,
-                      const struct ib_grh *in_grh,
-                      const struct ib_mad_hdr *in, size_t in_mad_size,
-                      struct ib_mad_hdr *out, size_t *out_mad_size,
-                      u16 *out_mad_pkey_index);
+int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+                      const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+                      const struct ib_mad *in, struct ib_mad *out,
+                      size_t *out_mad_size, u16 *out_mad_pkey_index);
 int mthca_create_agents(struct mthca_dev *dev);
 void mthca_free_agents(struct mthca_dev *dev);

@@ -196,26 +196,19 @@ static void forward_trap(struct mthca_dev *dev,
                 }
         }

-int mthca_process_mad(struct ib_device *ibdev,
-                      int mad_flags,
-                      u8 port_num,
-                      const struct ib_wc *in_wc,
-                      const struct ib_grh *in_grh,
-                      const struct ib_mad_hdr *in, size_t in_mad_size,
-                      struct ib_mad_hdr *out, size_t *out_mad_size,
-                      u16 *out_mad_pkey_index)
+int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+                      const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+                      const struct ib_mad *in, struct ib_mad *out,
+                      size_t *out_mad_size, u16 *out_mad_pkey_index)
 {
         int err;
         u16 slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
         u16 prev_lid = 0;
         struct ib_port_attr pattr;
-        const struct ib_mad *in_mad = (const struct ib_mad *)in;
-        struct ib_mad *out_mad = (struct ib_mad *)out;

         /* Forward locally generated traps to the SM */
-        if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
-            slid == 0) {
-                forward_trap(to_mdev(ibdev), port_num, in_mad);
+        if (in->mad_hdr.method == IB_MGMT_METHOD_TRAP && !slid) {
+                forward_trap(to_mdev(ibdev), port_num, in);
                 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
         }
@@ -225,40 +218,39 @@ int mthca_process_mad(struct ib_device *ibdev,
          * Only handle PMA and Mellanox vendor-specific class gets and
          * sets for other classes.
          */
-        if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
-            in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
-                if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
-                    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
-                    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
+        if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+            in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+                if (in->mad_hdr.method != IB_MGMT_METHOD_GET &&
+                    in->mad_hdr.method != IB_MGMT_METHOD_SET &&
+                    in->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
                         return IB_MAD_RESULT_SUCCESS;

                 /*
                  * Don't process SMInfo queries or vendor-specific
                  * MADs -- the SMA can't handle them.
                  */
-                if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
-                    ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
+                if (in->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
+                    ((in->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
                      IB_SMP_ATTR_VENDOR_MASK))
                         return IB_MAD_RESULT_SUCCESS;
-        } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
-                   in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 ||
-                   in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
-                if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
-                    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
+        } else if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
+                   in->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 ||
+                   in->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
+                if (in->mad_hdr.method != IB_MGMT_METHOD_GET &&
+                    in->mad_hdr.method != IB_MGMT_METHOD_SET)
                         return IB_MAD_RESULT_SUCCESS;
         } else
                 return IB_MAD_RESULT_SUCCESS;

-        if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
-             in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
-            in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
-            in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
+        if ((in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+             in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
+            in->mad_hdr.method == IB_MGMT_METHOD_SET &&
+            in->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
             !ib_query_port(ibdev, port_num, &pattr))
                 prev_lid = ib_lid_cpu16(pattr.lid);

-        err = mthca_MAD_IFC(to_mdev(ibdev),
-                            mad_flags & IB_MAD_IGNORE_MKEY,
-                            mad_flags & IB_MAD_IGNORE_BKEY,
-                            port_num, in_wc, in_grh, in_mad, out_mad);
+        err = mthca_MAD_IFC(to_mdev(ibdev), mad_flags & IB_MAD_IGNORE_MKEY,
+                            mad_flags & IB_MAD_IGNORE_BKEY, port_num, in_wc,
+                            in_grh, in, out);
         if (err == -EBADMSG)
                 return IB_MAD_RESULT_SUCCESS;
         else if (err) {
@@ -266,16 +258,16 @@ int mthca_process_mad(struct ib_device *ibdev,
                 return IB_MAD_RESULT_FAILURE;
         }

-        if (!out_mad->mad_hdr.status) {
-                smp_snoop(ibdev, port_num, in_mad, prev_lid);
-                node_desc_override(ibdev, out_mad);
+        if (!out->mad_hdr.status) {
+                smp_snoop(ibdev, port_num, in, prev_lid);
+                node_desc_override(ibdev, out);
         }

         /* set return bit in status of directed route responses */
-        if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
-                out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
+        if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+                out->mad_hdr.status |= cpu_to_be16(1 << 15);

-        if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
+        if (in->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
                 /* no response for trap repress */
                 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

@@ -247,23 +247,18 @@ int ocrdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
         return 0;
 }

-int ocrdma_process_mad(struct ib_device *ibdev,
-                       int process_mad_flags,
-                       u8 port_num,
-                       const struct ib_wc *in_wc,
-                       const struct ib_grh *in_grh,
-                       const struct ib_mad_hdr *in, size_t in_mad_size,
-                       struct ib_mad_hdr *out, size_t *out_mad_size,
+int ocrdma_process_mad(struct ib_device *ibdev, int process_mad_flags,
+                       u8 port_num, const struct ib_wc *in_wc,
+                       const struct ib_grh *in_grh, const struct ib_mad *in,
+                       struct ib_mad *out, size_t *out_mad_size,
                        u16 *out_mad_pkey_index)
 {
         int status = IB_MAD_RESULT_SUCCESS;
         struct ocrdma_dev *dev;
-        const struct ib_mad *in_mad = (const struct ib_mad *)in;
-        struct ib_mad *out_mad = (struct ib_mad *)out;

-        if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
+        if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
                 dev = get_ocrdma_dev(ibdev);
-                ocrdma_pma_counters(dev, out_mad);
+                ocrdma_pma_counters(dev, out);
                 status |= IB_MAD_RESULT_REPLY;
         }

@@ -56,12 +56,9 @@ int ocrdma_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
 void ocrdma_destroy_ah(struct ib_ah *ah, u32 flags);
 int ocrdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);

-int ocrdma_process_mad(struct ib_device *,
-                       int process_mad_flags,
-                       u8 port_num,
-                       const struct ib_wc *in_wc,
-                       const struct ib_grh *in_grh,
-                       const struct ib_mad_hdr *in, size_t in_mad_size,
-                       struct ib_mad_hdr *out, size_t *out_mad_size,
+int ocrdma_process_mad(struct ib_device *dev, int process_mad_flags,
+                       u8 port_num, const struct ib_wc *in_wc,
+                       const struct ib_grh *in_grh, const struct ib_mad *in,
+                       struct ib_mad *out, size_t *out_mad_size,
                        u16 *out_mad_pkey_index);
 #endif                          /* __OCRDMA_AH_H__ */

@@ -4346,19 +4346,10 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 }

 int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
-                     u8 port_num,
-                     const struct ib_wc *in_wc,
-                     const struct ib_grh *in_grh,
-                     const struct ib_mad_hdr *mad_hdr,
-                     size_t in_mad_size, struct ib_mad_hdr *out_mad,
-                     size_t *out_mad_size, u16 *out_mad_pkey_index)
+                     u8 port_num, const struct ib_wc *in_wc,
+                     const struct ib_grh *in_grh, const struct ib_mad *in,
+                     struct ib_mad *out_mad, size_t *out_mad_size,
+                     u16 *out_mad_pkey_index)
 {
-        struct qedr_dev *dev = get_qedr_dev(ibdev);
-
-        DP_DEBUG(dev, QEDR_MSG_GSI,
-                 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
-                 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
-                 mad_hdr->class_specific, mad_hdr->class_version,
-                 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
         return IB_MAD_RESULT_SUCCESS;
 }

@@ -92,10 +92,9 @@ int qedr_post_recv(struct ib_qp *, const struct ib_recv_wr *,
                    const struct ib_recv_wr **bad_wr);
 int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
                      u8 port_num, const struct ib_wc *in_wc,
-                     const struct ib_grh *in_grh,
-                     const struct ib_mad_hdr *in_mad,
-                     size_t in_mad_size, struct ib_mad_hdr *out_mad,
-                     size_t *out_mad_size, u16 *out_mad_pkey_index);
+                     const struct ib_grh *in_grh, const struct ib_mad *in_mad,
+                     struct ib_mad *out_mad, size_t *out_mad_size,
+                     u16 *out_mad_pkey_index);

 int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
                         struct ib_port_immutable *immutable);

@@ -2386,24 +2386,21 @@ static int process_cc(struct ib_device *ibdev, int mad_flags,
  */
 int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
                     const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-                    const struct ib_mad_hdr *in, size_t in_mad_size,
-                    struct ib_mad_hdr *out, size_t *out_mad_size,
-                    u16 *out_mad_pkey_index)
+                    const struct ib_mad *in, struct ib_mad *out,
+                    size_t *out_mad_size, u16 *out_mad_pkey_index)
 {
         int ret;
         struct qib_ibport *ibp = to_iport(ibdev, port);
         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
-        const struct ib_mad *in_mad = (const struct ib_mad *)in;
-        struct ib_mad *out_mad = (struct ib_mad *)out;

-        switch (in_mad->mad_hdr.mgmt_class) {
+        switch (in->mad_hdr.mgmt_class) {
         case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
         case IB_MGMT_CLASS_SUBN_LID_ROUTED:
-                ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
+                ret = process_subn(ibdev, mad_flags, port, in, out);
                 goto bail;

         case IB_MGMT_CLASS_PERF_MGMT:
-                ret = process_perf(ibdev, port, in_mad, out_mad);
+                ret = process_perf(ibdev, port, in, out);
                 goto bail;

         case IB_MGMT_CLASS_CONG_MGMT:
@@ -2412,7 +2409,7 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
                         ret = IB_MAD_RESULT_SUCCESS;
                         goto bail;
                 }
-                ret = process_cc(ibdev, mad_flags, port, in_mad, out_mad);
+                ret = process_cc(ibdev, mad_flags, port, in, out);
                 goto bail;

         default:

@@ -245,9 +245,8 @@ void qib_sys_guid_chg(struct qib_ibport *ibp);
 void qib_node_desc_chg(struct qib_ibport *ibp);
 int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                     const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-                    const struct ib_mad_hdr *in, size_t in_mad_size,
-                    struct ib_mad_hdr *out, size_t *out_mad_size,
-                    u16 *out_mad_pkey_index);
+                    const struct ib_mad *in, struct ib_mad *out,
+                    size_t *out_mad_size, u16 *out_mad_pkey_index);
 void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx);
 void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx);

@@ -2123,7 +2123,7 @@ struct ib_flow_action {
         atomic_t                usecnt;
 };

-struct ib_mad_hdr;
+struct ib_mad;
 struct ib_grh;

 enum ib_process_mad_flags {
@@ -2301,9 +2301,8 @@ struct ib_device_ops {
         int (*process_mad)(struct ib_device *device, int process_mad_flags,
                            u8 port_num, const struct ib_wc *in_wc,
                            const struct ib_grh *in_grh,
-                           const struct ib_mad_hdr *in_mad, size_t in_mad_size,
-                           struct ib_mad_hdr *out_mad, size_t *out_mad_size,
-                           u16 *out_mad_pkey_index);
+                           const struct ib_mad *in_mad, struct ib_mad *out_mad,
+                           size_t *out_mad_size, u16 *out_mad_pkey_index);
         int (*query_device)(struct ib_device *device,
                             struct ib_device_attr *device_attr,
                             struct ib_udata *udata);