RDMA/bnxt_re: Add bnxt_re RoCE driver

This patch introduces the RoCE driver for the Broadcom
NetXtreme-E 10/25/40/50G RoCE HCAs.

The RoCE driver is a two-part driver that relies on the parent
bnxt_en NIC driver to operate.  The changes needed in the bnxt_en
driver have already been incorporated via Dave Miller's net tree
into the mainline kernel.

The vendor's official git repository for this driver is available
on GitHub at:
https://github.com/Broadcom/linux-rdma-nxt/

Signed-off-by: Eddie Wai <eddie.wai@broadcom.com>
Signed-off-by: Devesh Sharma <devesh.sharma@broadcom.com>
Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Author: Selvin Xavier <selvin.xavier@broadcom.com>
Date: 2017-02-10 03:19:33 -08:00
Committer: Doug Ledford <dledford@redhat.com>
parent 24dc831b77
commit 1ac5a40479
14 changed files with 13347 additions and 0 deletions

drivers/infiniband/hw/bnxt_re/bnxt_re.h

@@ -0,0 +1,146 @@
/*
* Broadcom NetXtreme-E RoCE driver.
*
* Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* BSD license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Description: Slow Path Operators (header)
*
*/
#ifndef __BNXT_RE_H__
#define __BNXT_RE_H__
#define ROCE_DRV_MODULE_NAME "bnxt_re"
#define ROCE_DRV_MODULE_VERSION "1.0.0"
#define BNXT_RE_DESC "Broadcom NetXtreme-C/E RoCE Driver"
#define BNXT_RE_PAGE_SIZE_4K BIT(12)
#define BNXT_RE_PAGE_SIZE_8K BIT(13)
#define BNXT_RE_PAGE_SIZE_64K BIT(16)
#define BNXT_RE_PAGE_SIZE_2M BIT(21)
#define BNXT_RE_PAGE_SIZE_8M BIT(23)
#define BNXT_RE_PAGE_SIZE_1G BIT(30)
#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
#define BNXT_RE_MAX_MRW_COUNT (64 * 1024)
#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
struct bnxt_re_work {
struct work_struct work;
unsigned long event;
struct bnxt_re_dev *rdev;
struct net_device *vlan_dev;
};
struct bnxt_re_sqp_entries {
struct bnxt_qplib_sge sge;
u64 wrid;
/* For storing the actual qp1 cqe */
struct bnxt_qplib_cqe cqe;
struct bnxt_re_qp *qp1_qp;
};
#define BNXT_RE_MIN_MSIX 2
#define BNXT_RE_MAX_MSIX 16
#define BNXT_RE_AEQ_IDX 0
#define BNXT_RE_NQ_IDX 1
struct bnxt_re_dev {
struct ib_device ibdev;
struct list_head list;
unsigned long flags;
#define BNXT_RE_FLAG_NETDEV_REGISTERED 0
#define BNXT_RE_FLAG_IBDEV_REGISTERED 1
#define BNXT_RE_FLAG_GOT_MSIX 2
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 8
#define BNXT_RE_FLAG_QOS_WORK_REG 16
struct net_device *netdev;
unsigned int version, major, minor;
struct bnxt_en_dev *en_dev;
struct bnxt_msix_entry msix_entries[BNXT_RE_MAX_MSIX];
int num_msix;
int id;
struct delayed_work worker;
u8 cur_prio_map;
/* FP Notification Queue (CQ & SRQ) */
struct tasklet_struct nq_task;
/* RCFW Channel */
struct bnxt_qplib_rcfw rcfw;
/* NQ */
struct bnxt_qplib_nq nq;
/* Device Resources */
struct bnxt_qplib_dev_attr dev_attr;
struct bnxt_qplib_ctx qplib_ctx;
struct bnxt_qplib_res qplib_res;
struct bnxt_qplib_dpi dpi_privileged;
atomic_t qp_count;
struct mutex qp_lock; /* protect qp list */
struct list_head qp_list;
atomic_t cq_count;
atomic_t srq_count;
atomic_t mr_count;
atomic_t mw_count;
/* Max of 2 lossless traffic classes supported per port */
u16 cosq[2];
/* QP for handling QP1 packets */
u32 sqp_id;
struct bnxt_re_qp *qp1_sqp;
struct bnxt_re_ah *sqp_ah;
struct bnxt_re_sqp_entries sqp_tbl[1024];
};
#define to_bnxt_re_dev(ptr, member) \
container_of((ptr), struct bnxt_re_dev, member)
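/*
* Illustrative use of to_bnxt_re_dev() (not part of the patch itself):
* recover the driver-private device from the embedded ib_device inside a
* verbs callback:
*
*	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
*
* container_of() subtracts the offset of @member, so the second argument
* names whichever bnxt_re_dev field the pointer refers to.
*/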
#define BNXT_RE_ROCE_V1_PACKET 0
#define BNXT_RE_ROCEV2_IPV4_PACKET 2
#define BNXT_RE_ROCEV2_IPV6_PACKET 3
static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
{
if (rdev)
return &rdev->ibdev.dev;
return NULL;
}
#endif

[File diff suppressed because it is too large]

drivers/infiniband/hw/bnxt_re/ib_verbs.h

@@ -0,0 +1,197 @@
/*
* Broadcom NetXtreme-E RoCE driver.
*
* Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* BSD license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Description: IB Verbs interpreter (header)
*/
#ifndef __BNXT_RE_IB_VERBS_H__
#define __BNXT_RE_IB_VERBS_H__
struct bnxt_re_gid_ctx {
u32 idx;
u32 refcnt;
};
struct bnxt_re_pd {
struct bnxt_re_dev *rdev;
struct ib_pd ib_pd;
struct bnxt_qplib_pd qplib_pd;
struct bnxt_qplib_dpi dpi;
};
struct bnxt_re_ah {
struct bnxt_re_dev *rdev;
struct ib_ah ib_ah;
struct bnxt_qplib_ah qplib_ah;
};
struct bnxt_re_qp {
struct list_head list;
struct bnxt_re_dev *rdev;
struct ib_qp ib_qp;
spinlock_t sq_lock; /* protect sq */
struct bnxt_qplib_qp qplib_qp;
struct ib_umem *sumem;
struct ib_umem *rumem;
/* QP1 */
u32 send_psn;
struct ib_ud_header qp1_hdr;
};
struct bnxt_re_cq {
struct bnxt_re_dev *rdev;
spinlock_t cq_lock; /* protect cq */
u16 cq_count;
u16 cq_period;
struct ib_cq ib_cq;
struct bnxt_qplib_cq qplib_cq;
struct bnxt_qplib_cqe *cql;
#define MAX_CQL_PER_POLL 1024
u32 max_cql;
struct ib_umem *umem;
};
struct bnxt_re_mr {
struct bnxt_re_dev *rdev;
struct ib_mr ib_mr;
struct ib_umem *ib_umem;
struct bnxt_qplib_mrw qplib_mr;
u32 npages;
u64 *pages;
struct bnxt_qplib_frpl qplib_frpl;
};
struct bnxt_re_frpl {
struct bnxt_re_dev *rdev;
struct bnxt_qplib_frpl qplib_frpl;
u64 *page_list;
};
struct bnxt_re_fmr {
struct bnxt_re_dev *rdev;
struct ib_fmr ib_fmr;
struct bnxt_qplib_mrw qplib_fmr;
};
struct bnxt_re_mw {
struct bnxt_re_dev *rdev;
struct ib_mw ib_mw;
struct bnxt_qplib_mrw qplib_mw;
};
struct bnxt_re_ucontext {
struct bnxt_re_dev *rdev;
struct ib_ucontext ib_uctx;
struct bnxt_qplib_dpi *dpi;
void *shpg;
spinlock_t sh_lock; /* protect shpg */
};
struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num);
int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_device_attr *ib_attr,
struct ib_udata *udata);
int bnxt_re_modify_device(struct ib_device *ibdev,
int device_modify_mask,
struct ib_device_modify *device_modify);
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr *port_attr);
int bnxt_re_modify_port(struct ib_device *ibdev, u8 port_num,
int port_modify_mask,
struct ib_port_modify *port_modify);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable);
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
u16 index, u16 *pkey);
int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
unsigned int index, void **context);
int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
unsigned int index, const union ib_gid *gid,
const struct ib_gid_attr *attr, void **context);
int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
int index, union ib_gid *gid);
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
u8 port_num);
struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_udata *udata);
int bnxt_re_dealloc_pd(struct ib_pd *pd);
struct ib_ah *bnxt_re_create_ah(struct ib_pd *pd,
struct ib_ah_attr *ah_attr,
struct ib_udata *udata);
int bnxt_re_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
int bnxt_re_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
int bnxt_re_destroy_ah(struct ib_ah *ah);
struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata);
int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_udata *udata);
int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int bnxt_re_destroy_qp(struct ib_qp *qp);
int bnxt_re_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
struct ib_send_wr **bad_send_wr);
int bnxt_re_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
struct ib_recv_wr **bad_recv_wr);
struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata);
int bnxt_re_destroy_cq(struct ib_cq *cq);
int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
u32 max_num_sg);
int bnxt_re_dereg_mr(struct ib_mr *mr);
struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
struct ib_fmr_attr *fmr_attr);
int bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len,
u64 iova);
int bnxt_re_unmap_fmr(struct list_head *fmr_list);
int bnxt_re_dealloc_fmr(struct ib_fmr *fmr);
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
struct ib_udata *udata);
struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata);
int bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
#endif /* __BNXT_RE_IB_VERBS_H__ */

[File diff suppressed because it is too large]

[File diff suppressed because it is too large]

drivers/infiniband/hw/bnxt_re/qplib_fp.h

@@ -0,0 +1,439 @@
/*
* Broadcom NetXtreme-E RoCE driver.
*
* Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* BSD license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Description: Fast Path Operators (header)
*/
#ifndef __BNXT_QPLIB_FP_H__
#define __BNXT_QPLIB_FP_H__
struct bnxt_qplib_sge {
u64 addr;
u32 lkey;
u32 size;
};
#define BNXT_QPLIB_MAX_SQE_ENTRY_SIZE sizeof(struct sq_send)
#define SQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_SQE_ENTRY_SIZE)
#define SQE_MAX_IDX_PER_PG (SQE_CNT_PER_PG - 1)
static inline u32 get_sqe_pg(u32 val)
{
return ((val & ~SQE_MAX_IDX_PER_PG) / SQE_CNT_PER_PG);
}
static inline u32 get_sqe_idx(u32 val)
{
return (val & SQE_MAX_IDX_PER_PG);
}
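/*
* Worked example of the split above, with illustrative numbers: assuming
* PAGE_SIZE is 4096 and sizeof(struct sq_send) were 128 (hypothetical),
* SQE_CNT_PER_PG = 32 and SQE_MAX_IDX_PER_PG = 31, so for producer index
* 70, get_sqe_pg(70) = (70 & ~31) / 32 = 2 and get_sqe_idx(70) = 6 --
* the 7th slot of the 3rd page. The mask trick requires SQE_CNT_PER_PG
* to be a power of two.
*/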
#define BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE sizeof(struct sq_psn_search)
#define PSNE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE)
#define PSNE_MAX_IDX_PER_PG (PSNE_CNT_PER_PG - 1)
static inline u32 get_psne_pg(u32 val)
{
return ((val & ~PSNE_MAX_IDX_PER_PG) / PSNE_CNT_PER_PG);
}
static inline u32 get_psne_idx(u32 val)
{
return (val & PSNE_MAX_IDX_PER_PG);
}
#define BNXT_QPLIB_QP_MAX_SGL 6
struct bnxt_qplib_swq {
u64 wr_id;
u8 type;
u8 flags;
u32 start_psn;
u32 next_psn;
struct sq_psn_search *psn_search;
};
struct bnxt_qplib_swqe {
/* General */
u64 wr_id;
u8 reqs_type;
u8 type;
#define BNXT_QPLIB_SWQE_TYPE_SEND 0
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM 1
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV 2
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE 4
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM 5
#define BNXT_QPLIB_SWQE_TYPE_RDMA_READ 6
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP 8
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD 11
#define BNXT_QPLIB_SWQE_TYPE_LOCAL_INV 12
#define BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR 13
#define BNXT_QPLIB_SWQE_TYPE_REG_MR 13
#define BNXT_QPLIB_SWQE_TYPE_BIND_MW 14
#define BNXT_QPLIB_SWQE_TYPE_RECV 128
#define BNXT_QPLIB_SWQE_TYPE_RECV_RDMA_IMM 129
u8 flags;
#define BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP BIT(0)
#define BNXT_QPLIB_SWQE_FLAGS_RD_ATOMIC_FENCE BIT(1)
#define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE BIT(2)
#define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT BIT(3)
#define BNXT_QPLIB_SWQE_FLAGS_INLINE BIT(4)
struct bnxt_qplib_sge sg_list[BNXT_QPLIB_QP_MAX_SGL];
int num_sge;
/* Max inline data is 96 bytes */
u32 inline_len;
#define BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH 96
u8 inline_data[BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH];
union {
/* Send, with imm, inval key */
struct {
union {
__be32 imm_data;
u32 inv_key;
};
u32 q_key;
u32 dst_qp;
u16 avid;
} send;
/* Send Raw Ethernet and QP1 */
struct {
u16 lflags;
u16 cfa_action;
u32 cfa_meta;
} rawqp1;
/* RDMA write, with imm, read */
struct {
union {
__be32 imm_data;
u32 inv_key;
};
u64 remote_va;
u32 r_key;
} rdma;
/* Atomic cmp/swap, fetch/add */
struct {
u64 remote_va;
u32 r_key;
u64 swap_data;
u64 cmp_data;
} atomic;
/* Local Invalidate */
struct {
u32 inv_l_key;
} local_inv;
/* FR-PMR */
struct {
u8 access_cntl;
u8 pg_sz_log;
bool zero_based;
u32 l_key;
u32 length;
u8 pbl_pg_sz_log;
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4K 0
#define BNXT_QPLIB_SWQE_PAGE_SIZE_8K 1
#define BNXT_QPLIB_SWQE_PAGE_SIZE_64K 4
#define BNXT_QPLIB_SWQE_PAGE_SIZE_256K 6
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1M 8
#define BNXT_QPLIB_SWQE_PAGE_SIZE_2M 9
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4M 10
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1G 18
u8 levels;
#define PAGE_SHIFT_4K 12
__le64 *pbl_ptr;
dma_addr_t pbl_dma_ptr;
u64 *page_list;
u16 page_list_len;
u64 va;
} frmr;
/* Bind */
struct {
u8 access_cntl;
#define BNXT_QPLIB_BIND_SWQE_ACCESS_LOCAL_WRITE BIT(0)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_READ BIT(1)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_WRITE BIT(2)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_ATOMIC BIT(3)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_WINDOW_BIND BIT(4)
bool zero_based;
u8 mw_type;
u32 parent_l_key;
u32 r_key;
u64 va;
u32 length;
} bind;
};
};
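/*
* Illustrative population of a swqe for an RDMA write (the real mapping
* from ib_send_wr lives in the suppressed ib_verbs.c, so the surrounding
* variables here are assumptions):
*
*	struct bnxt_qplib_swqe wqe = {};
*
*	wqe.type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
*	wqe.flags = BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
*	wqe.wr_id = wr->wr_id;
*	wqe.num_sge = ...;	// copy the SGEs into wqe.sg_list[]
*	wqe.rdma.remote_va = rdma_wr(wr)->remote_addr;
*	wqe.rdma.r_key = rdma_wr(wr)->rkey;
*	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
*/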
#define BNXT_QPLIB_MAX_RQE_ENTRY_SIZE sizeof(struct rq_wqe)
#define RQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_RQE_ENTRY_SIZE)
#define RQE_MAX_IDX_PER_PG (RQE_CNT_PER_PG - 1)
#define RQE_PG(x) (((x) & ~RQE_MAX_IDX_PER_PG) / RQE_CNT_PER_PG)
#define RQE_IDX(x) ((x) & RQE_MAX_IDX_PER_PG)
struct bnxt_qplib_q {
struct bnxt_qplib_hwq hwq;
struct bnxt_qplib_swq *swq;
struct scatterlist *sglist;
u32 nmap;
u32 max_wqe;
u16 max_sge;
u32 psn;
bool flush_in_progress;
};
struct bnxt_qplib_qp {
struct bnxt_qplib_pd *pd;
struct bnxt_qplib_dpi *dpi;
u64 qp_handle;
u32 id;
u8 type;
u8 sig_type;
u32 modify_flags;
u8 state;
u8 cur_qp_state;
u32 max_inline_data;
u32 mtu;
u8 path_mtu;
bool en_sqd_async_notify;
u16 pkey_index;
u32 qkey;
u32 dest_qp_id;
u8 access;
u8 timeout;
u8 retry_cnt;
u8 rnr_retry;
u32 min_rnr_timer;
u32 max_rd_atomic;
u32 max_dest_rd_atomic;
u32 dest_qpn;
u8 smac[6];
u16 vlan_id;
u8 nw_type;
struct bnxt_qplib_ah ah;
#define BTH_PSN_MASK ((1 << 24) - 1)
/* SQ */
struct bnxt_qplib_q sq;
/* RQ */
struct bnxt_qplib_q rq;
/* SRQ */
struct bnxt_qplib_srq *srq;
/* CQ */
struct bnxt_qplib_cq *scq;
struct bnxt_qplib_cq *rcq;
/* IRRQ and ORRQ */
struct bnxt_qplib_hwq irrq;
struct bnxt_qplib_hwq orrq;
/* Header buffer for QP1 */
int sq_hdr_buf_size;
int rq_hdr_buf_size;
/*
* Buffer space for ETH(14), IP or GRH(40), UDP header(8)
* and ib_bth + ib_deth (20).
* Max required is 82 when RoCE V2 is enabled; the define below is 86,
* leaving 4 bytes of slack (presumably for the iCRC, as on the RQ side)
*/
#define BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2 86
/* Ethernet header = 14 */
/* ib_grh = 40 (provided by MAD) */
/* ib_bth + ib_deth = 20 */
/* MAD = 256 (provided by MAD) */
/* iCRC = 4 */
#define BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE 14
#define BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2 512
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 20
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 40
#define BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE 20
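/*
* Worked total from the byte counts above:
* 14 (ETH) + 40 (GRH) + 20 (BTH + DETH) + 256 (MAD) + 4 (iCRC) = 334
* bytes, which BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2 rounds up to 512.
*/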
void *sq_hdr_buf;
dma_addr_t sq_hdr_buf_map;
void *rq_hdr_buf;
dma_addr_t rq_hdr_buf_map;
};
#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)
#define CQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_CQE_ENTRY_SIZE)
#define CQE_MAX_IDX_PER_PG (CQE_CNT_PER_PG - 1)
#define CQE_PG(x) (((x) & ~CQE_MAX_IDX_PER_PG) / CQE_CNT_PER_PG)
#define CQE_IDX(x) ((x) & CQE_MAX_IDX_PER_PG)
#define ROCE_CQE_CMP_V 0
#define CQE_CMP_VALID(hdr, raw_cons, cp_bit) \
(!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \
!((raw_cons) & (cp_bit)))
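/*
* The toggle compare implements a phase-bit scheme: cp_bit is the ring
* size, so (raw_cons & cp_bit) flips each time the consumer index wraps,
* and the expected polarity of CQ_BASE_TOGGLE flips with it. A stale
* entry left over from the previous pass therefore never compares as
* valid: on the first pass entries are valid while the toggle is set,
* after one wrap while it is clear, and so on.
*/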
struct bnxt_qplib_cqe {
u8 status;
u8 type;
u8 opcode;
u32 length;
u64 wr_id;
union {
__be32 immdata;
u32 invrkey;
};
u64 qp_handle;
u64 mr_handle;
u16 flags;
u8 smac[6];
u32 src_qp;
u16 raweth_qp1_flags;
u16 raweth_qp1_errors;
u16 raweth_qp1_cfa_code;
u32 raweth_qp1_flags2;
u32 raweth_qp1_metadata;
u8 raweth_qp1_payload_offset;
u16 pkey_index;
};
#define BNXT_QPLIB_QUEUE_START_PERIOD 0x01
struct bnxt_qplib_cq {
struct bnxt_qplib_dpi *dpi;
void __iomem *dbr_base;
u32 max_wqe;
u32 id;
u16 count;
u16 period;
struct bnxt_qplib_hwq hwq;
u32 cnq_hw_ring_id;
bool resize_in_progress;
struct scatterlist *sghead;
u32 nmap;
u64 cq_handle;
#define CQ_RESIZE_WAIT_TIME_MS 500
unsigned long flags;
#define CQ_FLAGS_RESIZE_IN_PROG 1
wait_queue_head_t waitq;
};
#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
#define BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE sizeof(struct xrrq_orrq)
#define IRD_LIMIT_TO_IRRQ_SLOTS(x) (2 * (x) + 2)
#define IRRQ_SLOTS_TO_IRD_LIMIT(s) (((s) >> 1) - 1)
#define ORD_LIMIT_TO_ORRQ_SLOTS(x) ((x) + 1)
#define ORRQ_SLOTS_TO_ORD_LIMIT(s) ((s) - 1)
#define BNXT_QPLIB_MAX_NQE_ENTRY_SIZE sizeof(struct nq_base)
#define NQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_NQE_ENTRY_SIZE)
#define NQE_MAX_IDX_PER_PG (NQE_CNT_PER_PG - 1)
#define NQE_PG(x) (((x) & ~NQE_MAX_IDX_PER_PG) / NQE_CNT_PER_PG)
#define NQE_IDX(x) ((x) & NQE_MAX_IDX_PER_PG)
#define NQE_CMP_VALID(hdr, raw_cons, cp_bit) \
(!!(le32_to_cpu((hdr)->info63_v[0]) & NQ_BASE_V) == \
!((raw_cons) & (cp_bit)))
#define BNXT_QPLIB_NQE_MAX_CNT (128 * 1024)
#define NQ_CONS_PCI_BAR_REGION 2
#define NQ_DB_KEY_CP (0x2 << CMPL_DOORBELL_KEY_SFT)
#define NQ_DB_IDX_VALID CMPL_DOORBELL_IDX_VALID
#define NQ_DB_IRQ_DIS CMPL_DOORBELL_MASK
#define NQ_DB_CP_FLAGS_REARM (NQ_DB_KEY_CP | \
NQ_DB_IDX_VALID)
#define NQ_DB_CP_FLAGS (NQ_DB_KEY_CP | \
NQ_DB_IDX_VALID | \
NQ_DB_IRQ_DIS)
#define NQ_DB_REARM(db, raw_cons, cp_bit) \
writel(NQ_DB_CP_FLAGS_REARM | ((raw_cons) & ((cp_bit) - 1)), db)
#define NQ_DB(db, raw_cons, cp_bit) \
writel(NQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
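/*
* Sketch of how these doorbells are meant to be used (the NQ service
* loop itself lives in the suppressed qplib_fp.c, so the names below are
* assumptions): after draining entries, write
*
*	NQ_DB_REARM(nq->bar_reg_iomem, hwq->cons, hwq->max_elements);
*
* to publish the new consumer index and re-enable the interrupt, or
* NQ_DB() to publish it while keeping the interrupt masked during
* continued polling.
*/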
struct bnxt_qplib_nq {
struct pci_dev *pdev;
int vector;
int budget;
bool requested;
struct tasklet_struct worker;
struct bnxt_qplib_hwq hwq;
u16 bar_reg;
u16 bar_reg_off;
u16 ring_id;
void __iomem *bar_reg_iomem;
int (*cqn_handler)
(struct bnxt_qplib_nq *nq,
struct bnxt_qplib_cq *cq);
int (*srqn_handler)
(struct bnxt_qplib_nq *nq,
void *srq,
u8 event);
};
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
int msix_vector, int bar_reg_offset,
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
struct bnxt_qplib_cq *cq),
int (*srqn_handler)(struct bnxt_qplib_nq *nq,
void *srq,
u8 event));
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_sge *sge);
void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_sge *sge);
u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp);
dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp,
u32 index);
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_swqe *wqe);
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
int num);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
#endif /* __BNXT_QPLIB_FP_H__ */

drivers/infiniband/hw/bnxt_re/qplib_rcfw.c

@@ -0,0 +1,694 @@
/*
* Broadcom NetXtreme-E RoCE driver.
*
* Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* BSD license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Description: RDMA Controller HW interface
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/prefetch.h>
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
static void bnxt_qplib_service_creq(unsigned long data);
/* Hardware communication channel */
int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
u16 cbit;
int rc;
cookie &= RCFW_MAX_COOKIE_VALUE;
cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
if (!test_bit(cbit, rcfw->cmdq_bitmap))
dev_warn(&rcfw->pdev->dev,
"QPLIB: CMD bit %d for cookie 0x%x is not set?",
cbit, cookie);
rc = wait_event_timeout(rcfw->waitq,
!test_bit(cbit, rcfw->cmdq_bitmap),
msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
if (!rc) {
dev_warn(&rcfw->pdev->dev,
"QPLIB: Bono Error: timeout %d msec, msg {0x%x}\n",
RCFW_CMD_WAIT_TIME_MS, cookie);
}
return rc;
}
int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
u32 count = -1;
u16 cbit;
cookie &= RCFW_MAX_COOKIE_VALUE;
cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
if (!test_bit(cbit, rcfw->cmdq_bitmap))
goto done;
do {
bnxt_qplib_service_creq((unsigned long)rcfw);
} while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
done:
return count;
}
void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
struct cmdq_base *req, void **crsbe,
u8 is_block)
{
struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
struct bnxt_qplib_hwq *crsb = &rcfw->crsb;
struct bnxt_qplib_crsqe *crsqe = NULL;
struct bnxt_qplib_crsbe **crsb_ptr;
u32 sw_prod, cmdq_prod;
u8 retry_cnt = 0xFF;
dma_addr_t dma_addr;
unsigned long flags;
u32 size, opcode;
u16 cookie, cbit;
int pg, idx;
u8 *preq;
retry:
opcode = req->opcode;
if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
(opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW)) {
dev_err(&rcfw->pdev->dev,
"QPLIB: RCFW not initialized, reject opcode 0x%x",
opcode);
return NULL;
}
if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
return NULL;
}
/* The cmdq is made up of 16-byte units; each request can consume one or
* more cmdqe
*/
spin_lock_irqsave(&cmdq->lock, flags);
if (req->cmd_size > cmdq->max_elements -
((HWQ_CMP(cmdq->prod, cmdq) - HWQ_CMP(cmdq->cons, cmdq)) &
(cmdq->max_elements - 1))) {
dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
spin_unlock_irqrestore(&cmdq->lock, flags);
if (!retry_cnt--)
return NULL;
goto retry;
}
retry_cnt = 0xFF;
cookie = atomic_inc_return(&rcfw->seq_num) & RCFW_MAX_COOKIE_VALUE;
cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
if (is_block)
cookie |= RCFW_CMD_IS_BLOCKING;
req->cookie = cpu_to_le16(cookie);
if (test_and_set_bit(cbit, rcfw->cmdq_bitmap)) {
dev_err(&rcfw->pdev->dev,
"QPLIB: RCFW MAX outstanding cmd reached!");
atomic_dec(&rcfw->seq_num);
spin_unlock_irqrestore(&cmdq->lock, flags);
if (!retry_cnt--)
return NULL;
goto retry;
}
/* Reserve a resp buffer slot if requested */
if (req->resp_size && crsbe) {
spin_lock(&crsb->lock);
sw_prod = HWQ_CMP(crsb->prod, crsb);
crsb_ptr = (struct bnxt_qplib_crsbe **)crsb->pbl_ptr;
*crsbe = (void *)&crsb_ptr[get_crsb_pg(sw_prod)]
[get_crsb_idx(sw_prod)];
bnxt_qplib_crsb_dma_next(crsb->pbl_dma_ptr, sw_prod, &dma_addr);
req->resp_addr = cpu_to_le64(dma_addr);
crsb->prod++;
spin_unlock(&crsb->lock);
req->resp_size = (sizeof(struct bnxt_qplib_crsbe) +
BNXT_QPLIB_CMDQE_UNITS - 1) /
BNXT_QPLIB_CMDQE_UNITS;
}
cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
preq = (u8 *)req;
size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
do {
pg = 0;
idx = 0;
/* Locate the next cmdq slot */
sw_prod = HWQ_CMP(cmdq->prod, cmdq);
cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod)][get_cmdq_idx(sw_prod)];
if (!cmdqe) {
dev_err(&rcfw->pdev->dev,
"QPLIB: RCFW request failed with no cmdqe!");
goto done;
}
/* Copy a segment of the req cmd to the cmdq */
memset(cmdqe, 0, sizeof(*cmdqe));
memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe)));
preq += min_t(u32, size, sizeof(*cmdqe));
size -= min_t(u32, size, sizeof(*cmdqe));
cmdq->prod++;
} while (size > 0);
cmdq_prod = cmdq->prod;
if (rcfw->flags & FIRMWARE_FIRST_FLAG) {
/* The very first doorbell write is required to set this flag
* which prompts the FW to reset its internal pointers
*/
cmdq_prod |= FIRMWARE_FIRST_FLAG;
rcfw->flags &= ~FIRMWARE_FIRST_FLAG;
}
sw_prod = HWQ_CMP(crsq->prod, crsq);
crsqe = &crsq->crsq[sw_prod];
memset(crsqe, 0, sizeof(*crsqe));
crsq->prod++;
crsqe->req_size = req->cmd_size;
/* ring CMDQ DB */
writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
rcfw->cmdq_bar_reg_prod_off);
writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
rcfw->cmdq_bar_reg_trig_off);
done:
spin_unlock_irqrestore(&cmdq->lock, flags);
/* Return the CREQ response pointer */
return crsqe ? &crsqe->qp_event : NULL;
}
/* Completions */
static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
struct creq_func_event *func_event)
{
switch (func_event->event) {
case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
break;
case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
break;
case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
break;
case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
break;
case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
break;
case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
break;
case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
break;
case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
/* SRQ ctx error, call srq_handler??
* But there's no SRQ handle!
*/
break;
case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
break;
case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
break;
case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
break;
case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
break;
case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
break;
default:
return -EINVAL;
}
return 0;
}
static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
struct creq_qp_event *qp_event)
{
struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
struct bnxt_qplib_crsqe *crsqe;
u16 cbit, cookie, blocked = 0;
unsigned long flags;
u32 sw_cons;
switch (qp_event->event) {
case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
dev_dbg(&rcfw->pdev->dev,
"QPLIB: Received QP error notification");
break;
default:
/* Command Response */
spin_lock_irqsave(&cmdq->lock, flags);
sw_cons = HWQ_CMP(crsq->cons, crsq);
crsqe = &crsq->crsq[sw_cons];
crsq->cons++;
memcpy(&crsqe->qp_event, qp_event, sizeof(crsqe->qp_event));
cookie = le16_to_cpu(crsqe->qp_event.cookie);
blocked = cookie & RCFW_CMD_IS_BLOCKING;
cookie &= RCFW_MAX_COOKIE_VALUE;
cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
dev_warn(&rcfw->pdev->dev,
"QPLIB: CMD bit %d was not requested", cbit);
cmdq->cons += crsqe->req_size;
spin_unlock_irqrestore(&cmdq->lock, flags);
if (!blocked)
wake_up(&rcfw->waitq);
break;
}
return 0;
}
/* SP - CREQ Completion handlers */
static void bnxt_qplib_service_creq(unsigned long data)
{
struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data;
struct bnxt_qplib_hwq *creq = &rcfw->creq;
struct creq_base *creqe, **creq_ptr;
u32 sw_cons, raw_cons;
unsigned long flags;
u32 type;
/* Service the CREQ until empty */
spin_lock_irqsave(&creq->lock, flags);
raw_cons = creq->cons;
while (1) {
sw_cons = HWQ_CMP(raw_cons, creq);
creq_ptr = (struct creq_base **)creq->pbl_ptr;
creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements))
break;
type = creqe->type & CREQ_BASE_TYPE_MASK;
switch (type) {
case CREQ_BASE_TYPE_QP_EVENT:
if (!bnxt_qplib_process_qp_event
(rcfw, (struct creq_qp_event *)creqe))
rcfw->creq_qp_event_processed++;
else {
dev_warn(&rcfw->pdev->dev, "QPLIB: crsqe with");
dev_warn(&rcfw->pdev->dev,
"QPLIB: type = 0x%x not handled",
type);
}
break;
case CREQ_BASE_TYPE_FUNC_EVENT:
if (!bnxt_qplib_process_func_event
(rcfw, (struct creq_func_event *)creqe))
rcfw->creq_func_event_processed++;
else
dev_warn
(&rcfw->pdev->dev, "QPLIB: aeqe: %#x not handled",
type);
break;
default:
dev_warn(&rcfw->pdev->dev, "QPLIB: creqe with ");
dev_warn(&rcfw->pdev->dev,
"QPLIB: op_event = 0x%x not handled", type);
break;
}
raw_cons++;
}
if (creq->cons != raw_cons) {
creq->cons = raw_cons;
CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
creq->max_elements);
}
spin_unlock_irqrestore(&creq->lock, flags);
}
static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
{
struct bnxt_qplib_rcfw *rcfw = dev_instance;
struct bnxt_qplib_hwq *creq = &rcfw->creq;
struct creq_base **creq_ptr;
u32 sw_cons;
/* Prefetch the CREQ element */
sw_cons = HWQ_CMP(creq->cons, creq);
creq_ptr = (struct creq_base **)rcfw->creq.pbl_ptr;
prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]);
tasklet_schedule(&rcfw->worker);
return IRQ_HANDLED;
}
/* RCFW */
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
{
struct creq_deinitialize_fw_resp *resp;
struct cmdq_deinitialize_fw req;
u16 cmd_flags = 0;
RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
resp = (struct creq_deinitialize_fw_resp *)
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
NULL, 0);
if (!resp)
return -EINVAL;
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie)))
return -ETIMEDOUT;
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie))
return -EFAULT;
clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
return 0;
}
static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
{
return (pbl->pg_size == ROCE_PG_SIZE_4K ?
CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K :
pbl->pg_size == ROCE_PG_SIZE_8K ?
CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K :
pbl->pg_size == ROCE_PG_SIZE_64K ?
CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K :
pbl->pg_size == ROCE_PG_SIZE_2M ?
CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M :
pbl->pg_size == ROCE_PG_SIZE_8M ?
CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M :
pbl->pg_size == ROCE_PG_SIZE_1G ?
CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G :
CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K);
}
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx, int is_virtfn)
{
struct creq_initialize_fw_resp *resp;
struct cmdq_initialize_fw req;
u16 cmd_flags = 0, level;
RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
/*
* VFs need not set up the HW context area; the PF
* sets up this area on behalf of the VF, so skip the
* HW programming here.
*/
if (is_virtfn)
goto skip_ctx_setup;
level = ctx->qpc_tbl.level;
req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
__get_pbl_pg_idx(&ctx->qpc_tbl.pbl[level]);
level = ctx->mrw_tbl.level;
req.mrw_pg_size_mrw_lvl = (level << CMDQ_INITIALIZE_FW_MRW_LVL_SFT) |
__get_pbl_pg_idx(&ctx->mrw_tbl.pbl[level]);
level = ctx->srqc_tbl.level;
req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
__get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
level = ctx->cq_tbl.level;
req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
__get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
level = ctx->tim_tbl.level;
req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) |
__get_pbl_pg_idx(&ctx->tim_tbl.pbl[level]);
level = ctx->tqm_pde_level;
req.tqm_pg_size_tqm_lvl = (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
__get_pbl_pg_idx(&ctx->tqm_pde.pbl[level]);
req.qpc_page_dir =
cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
req.mrw_page_dir =
cpu_to_le64(ctx->mrw_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
req.srq_page_dir =
cpu_to_le64(ctx->srqc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
req.cq_page_dir =
cpu_to_le64(ctx->cq_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
req.tim_page_dir =
cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
req.tqm_page_dir =
cpu_to_le64(ctx->tqm_pde.pbl[PBL_LVL_0].pg_map_arr[0]);
req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);
req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);
skip_ctx_setup:
req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
resp = (struct creq_initialize_fw_resp *)
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
NULL, 0);
if (!resp) {
dev_err(&rcfw->pdev->dev,
"QPLIB: RCFW: INITIALIZE_FW send failed");
return -EINVAL;
}
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
/* Cmd timed out */
dev_err(&rcfw->pdev->dev,
"QPLIB: RCFW: INITIALIZE_FW timed out");
return -ETIMEDOUT;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&rcfw->pdev->dev,
"QPLIB: RCFW: INITIALIZE_FW failed");
return -EINVAL;
}
set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
return 0;
}
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->crsb);
kfree(rcfw->crsq.crsq);
bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
rcfw->pdev = NULL;
}
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw)
{
rcfw->pdev = pdev;
rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL, 0,
&rcfw->creq.max_elements,
BNXT_QPLIB_CREQE_UNITS, 0, PAGE_SIZE,
HWQ_TYPE_L2_CMPL)) {
dev_err(&rcfw->pdev->dev,
"QPLIB: HW channel CREQ allocation failed");
goto fail;
}
rcfw->cmdq.max_elements = BNXT_QPLIB_CMDQE_MAX_CNT;
if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->cmdq, NULL, 0,
&rcfw->cmdq.max_elements,
BNXT_QPLIB_CMDQE_UNITS, 0, PAGE_SIZE,
HWQ_TYPE_CTX)) {
dev_err(&rcfw->pdev->dev,
"QPLIB: HW channel CMDQ allocation failed");
goto fail;
}
rcfw->crsq.max_elements = rcfw->cmdq.max_elements;
rcfw->crsq.crsq = kcalloc(rcfw->crsq.max_elements,
sizeof(*rcfw->crsq.crsq), GFP_KERNEL);
if (!rcfw->crsq.crsq)
goto fail;
rcfw->crsb.max_elements = BNXT_QPLIB_CRSBE_MAX_CNT;
if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->crsb, NULL, 0,
&rcfw->crsb.max_elements,
BNXT_QPLIB_CRSBE_UNITS, 0, PAGE_SIZE,
HWQ_TYPE_CTX)) {
dev_err(&rcfw->pdev->dev,
"QPLIB: HW channel CRSB allocation failed");
goto fail;
}
return 0;
fail:
bnxt_qplib_free_rcfw_channel(rcfw);
return -ENOMEM;
}
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
unsigned long indx;
/* Make sure the HW channel is stopped! */
synchronize_irq(rcfw->vector);
tasklet_disable(&rcfw->worker);
tasklet_kill(&rcfw->worker);
if (rcfw->requested) {
free_irq(rcfw->vector, rcfw);
rcfw->requested = false;
}
if (rcfw->cmdq_bar_reg_iomem)
iounmap(rcfw->cmdq_bar_reg_iomem);
rcfw->cmdq_bar_reg_iomem = NULL;
if (rcfw->creq_bar_reg_iomem)
iounmap(rcfw->creq_bar_reg_iomem);
rcfw->creq_bar_reg_iomem = NULL;
indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size);
if (indx != rcfw->bmap_size)
dev_err(&rcfw->pdev->dev,
"QPLIB: disabling RCFW with pending cmd-bit %lx", indx);
kfree(rcfw->cmdq_bitmap);
rcfw->bmap_size = 0;
rcfw->aeq_handler = NULL;
rcfw->vector = 0;
}
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw,
int msix_vector,
int cp_bar_reg_off, int virt_fn,
int (*aeq_handler)(struct bnxt_qplib_rcfw *,
struct creq_func_event *))
{
resource_size_t res_base;
struct cmdq_init init;
u16 bmap_size;
int rc;
/* General */
atomic_set(&rcfw->seq_num, 0);
rcfw->flags = FIRMWARE_FIRST_FLAG;
/* Bitmap size in bytes: one bit per outstanding command */
bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD) *
sizeof(unsigned long);
rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
if (!rcfw->cmdq_bitmap)
return -ENOMEM;
rcfw->bmap_size = bmap_size;
/* CMDQ */
rcfw->cmdq_bar_reg = RCFW_COMM_PCI_BAR_REGION;
res_base = pci_resource_start(pdev, rcfw->cmdq_bar_reg);
if (!res_base)
return -ENOMEM;
rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base +
RCFW_COMM_BASE_OFFSET,
RCFW_COMM_SIZE);
if (!rcfw->cmdq_bar_reg_iomem) {
dev_err(&rcfw->pdev->dev,
"QPLIB: CMDQ BAR region %d mapping failed",
rcfw->cmdq_bar_reg);
return -ENOMEM;
}
rcfw->cmdq_bar_reg_prod_off = virt_fn ? RCFW_VF_COMM_PROD_OFFSET :
RCFW_PF_COMM_PROD_OFFSET;
rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;
/* CRSQ */
rcfw->crsq.prod = 0;
rcfw->crsq.cons = 0;
/* CREQ */
rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
if (!res_base)
dev_err(&rcfw->pdev->dev,
"QPLIB: CREQ BAR region %d resc start is 0!",
rcfw->creq_bar_reg);
rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
4);
if (!rcfw->creq_bar_reg_iomem) {
dev_err(&rcfw->pdev->dev,
"QPLIB: CREQ BAR region %d mapping failed",
rcfw->creq_bar_reg);
return -ENOMEM;
}
rcfw->creq_qp_event_processed = 0;
rcfw->creq_func_event_processed = 0;
rcfw->vector = msix_vector;
if (aeq_handler)
rcfw->aeq_handler = aeq_handler;
tasklet_init(&rcfw->worker, bnxt_qplib_service_creq,
(unsigned long)rcfw);
rcfw->requested = false;
rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
"bnxt_qplib_creq", rcfw);
if (rc) {
dev_err(&rcfw->pdev->dev,
"QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
bnxt_qplib_disable_rcfw_channel(rcfw);
return rc;
}
rcfw->requested = true;
init_waitqueue_head(&rcfw->waitq);
CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements);
init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
init.cmdq_size_cmdq_lvl = cpu_to_le16(
((BNXT_QPLIB_CMDQE_MAX_CNT << CMDQ_INIT_CMDQ_SIZE_SFT) &
CMDQ_INIT_CMDQ_SIZE_MASK) |
((rcfw->cmdq.level << CMDQ_INIT_CMDQ_LVL_SFT) &
CMDQ_INIT_CMDQ_LVL_MASK));
init.creq_ring_id = cpu_to_le16(rcfw->creq_ring_id);
/* Write to the Bono mailbox register */
__iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
return 0;
}

drivers/infiniband/hw/bnxt_re/qplib_rcfw.h

@@ -0,0 +1,231 @@
/*
* Broadcom NetXtreme-E RoCE driver.
*
* Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* BSD license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Description: RDMA Controller HW interface (header)
*/
#ifndef __BNXT_QPLIB_RCFW_H__
#define __BNXT_QPLIB_RCFW_H__
#define RCFW_CMDQ_TRIG_VAL 1
#define RCFW_COMM_PCI_BAR_REGION 0
#define RCFW_COMM_CONS_PCI_BAR_REGION 2
#define RCFW_COMM_BASE_OFFSET 0x600
#define RCFW_PF_COMM_PROD_OFFSET 0xc
#define RCFW_VF_COMM_PROD_OFFSET 0xc
#define RCFW_COMM_TRIG_OFFSET 0x100
#define RCFW_COMM_SIZE 0x104
#define RCFW_DBR_PCI_BAR_REGION 2
#define RCFW_CMD_PREP(req, CMD, cmd_flags) \
do { \
memset(&(req), 0, sizeof((req))); \
(req).opcode = CMDQ_BASE_OPCODE_##CMD; \
(req).cmd_size = (sizeof((req)) + \
BNXT_QPLIB_CMDQE_UNITS - 1) / \
BNXT_QPLIB_CMDQE_UNITS; \
(req).flags = cpu_to_le16(cmd_flags); \
} while (0)
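/*
* Illustrative command flow built on this macro (the same pattern as
* bnxt_qplib_deinit_rcfw() in qplib_rcfw.c); DESTROY_CQ is only an
* example opcode, pasted onto CMDQ_BASE_OPCODE_ by the macro:
*
*	struct cmdq_destroy_cq req;
*	u16 cmd_flags = 0;
*
*	RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
*	resp = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 0);
*	if (!resp)
*		return -EINVAL;
*	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie)))
*		return -ETIMEDOUT;
*/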
#define RCFW_CMD_WAIT_TIME_MS 20000 /* 20 Seconds timeout */
/* CMDQ elements */
#define BNXT_QPLIB_CMDQE_MAX_CNT 256
#define BNXT_QPLIB_CMDQE_UNITS sizeof(struct bnxt_qplib_cmdqe)
#define BNXT_QPLIB_CMDQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_CMDQE_UNITS)
#define MAX_CMDQ_IDX (BNXT_QPLIB_CMDQE_MAX_CNT - 1)
#define MAX_CMDQ_IDX_PER_PG (BNXT_QPLIB_CMDQE_CNT_PER_PG - 1)
#define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT
#define RCFW_MAX_COOKIE_VALUE 0x7FFF
#define RCFW_CMD_IS_BLOCKING 0x8000
/* Cmdq contains a fixed number of 16-byte slots */
struct bnxt_qplib_cmdqe {
u8 data[16];
};
static inline u32 get_cmdq_pg(u32 val)
{
return (val & ~MAX_CMDQ_IDX_PER_PG) / BNXT_QPLIB_CMDQE_CNT_PER_PG;
}
static inline u32 get_cmdq_idx(u32 val)
{
return val & MAX_CMDQ_IDX_PER_PG;
}
/* Each CRSQ response buffer (crsbe) is 1024 bytes */
struct bnxt_qplib_crsbe {
u8 data[1024];
};
/* CRSQ SB */
#define BNXT_QPLIB_CRSBE_MAX_CNT 4
#define BNXT_QPLIB_CRSBE_UNITS sizeof(struct bnxt_qplib_crsbe)
#define BNXT_QPLIB_CRSBE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_CRSBE_UNITS)
#define MAX_CRSB_IDX (BNXT_QPLIB_CRSBE_MAX_CNT - 1)
#define MAX_CRSB_IDX_PER_PG (BNXT_QPLIB_CRSBE_CNT_PER_PG - 1)
static inline u32 get_crsb_pg(u32 val)
{
return (val & ~MAX_CRSB_IDX_PER_PG) / BNXT_QPLIB_CRSBE_CNT_PER_PG;
}
static inline u32 get_crsb_idx(u32 val)
{
return val & MAX_CRSB_IDX_PER_PG;
}
static inline void bnxt_qplib_crsb_dma_next(dma_addr_t *pg_map_arr,
u32 prod, dma_addr_t *dma_addr)
{
*dma_addr = pg_map_arr[(prod) / BNXT_QPLIB_CRSBE_CNT_PER_PG];
*dma_addr += ((prod) % BNXT_QPLIB_CRSBE_CNT_PER_PG) *
BNXT_QPLIB_CRSBE_UNITS;
}
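/*
* Example with 4K pages: BNXT_QPLIB_CRSBE_CNT_PER_PG = 4096 / 1024 = 4,
* so prod = 5 resolves to pg_map_arr[1] plus one 1024-byte slot, i.e.
* the 2nd buffer of the 2nd page.
*/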
/* CREQ */
/* Allocate 1 per QP for async error notification for now */
#define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024)
#define BNXT_QPLIB_CREQE_UNITS 16 /* 16-Bytes per prod unit */
#define BNXT_QPLIB_CREQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_CREQE_UNITS)
#define MAX_CREQ_IDX (BNXT_QPLIB_CREQE_MAX_CNT - 1)
#define MAX_CREQ_IDX_PER_PG (BNXT_QPLIB_CREQE_CNT_PER_PG - 1)
static inline u32 get_creq_pg(u32 val)
{
return (val & ~MAX_CREQ_IDX_PER_PG) / BNXT_QPLIB_CREQE_CNT_PER_PG;
}
static inline u32 get_creq_idx(u32 val)
{
return val & MAX_CREQ_IDX_PER_PG;
}
#define BNXT_QPLIB_CREQE_PER_PG (PAGE_SIZE / sizeof(struct creq_base))
#define CREQ_CMP_VALID(hdr, raw_cons, cp_bit) \
(!!((hdr)->v & CREQ_BASE_V) == \
!((raw_cons) & (cp_bit)))
#define CREQ_DB_KEY_CP (0x2 << CMPL_DOORBELL_KEY_SFT)
#define CREQ_DB_IDX_VALID CMPL_DOORBELL_IDX_VALID
#define CREQ_DB_IRQ_DIS CMPL_DOORBELL_MASK
#define CREQ_DB_CP_FLAGS_REARM (CREQ_DB_KEY_CP | \
CREQ_DB_IDX_VALID)
#define CREQ_DB_CP_FLAGS (CREQ_DB_KEY_CP | \
CREQ_DB_IDX_VALID | \
CREQ_DB_IRQ_DIS)
#define CREQ_DB_REARM(db, raw_cons, cp_bit) \
writel(CREQ_DB_CP_FLAGS_REARM | ((raw_cons) & ((cp_bit) - 1)), db)
#define CREQ_DB(db, raw_cons, cp_bit) \
writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
/* HWQ */
struct bnxt_qplib_crsqe {
struct creq_qp_event qp_event;
u32 req_size;
};
struct bnxt_qplib_crsq {
struct bnxt_qplib_crsqe *crsq;
u32 prod;
u32 cons;
u32 max_elements;
};
/* RCFW Communication Channels */
struct bnxt_qplib_rcfw {
struct pci_dev *pdev;
int vector;
struct tasklet_struct worker;
bool requested;
unsigned long *cmdq_bitmap;
u32 bmap_size;
unsigned long flags;
#define FIRMWARE_INITIALIZED_FLAG 1
#define FIRMWARE_FIRST_FLAG BIT(31)
wait_queue_head_t waitq;
int (*aeq_handler)(struct bnxt_qplib_rcfw *,
struct creq_func_event *);
atomic_t seq_num;
/* Bar region info */
void __iomem *cmdq_bar_reg_iomem;
u16 cmdq_bar_reg;
u16 cmdq_bar_reg_prod_off;
u16 cmdq_bar_reg_trig_off;
u16 creq_ring_id;
u16 creq_bar_reg;
void __iomem *creq_bar_reg_iomem;
/* Cmd-Resp and Async Event notification queue */
struct bnxt_qplib_hwq creq;
u64 creq_qp_event_processed;
u64 creq_func_event_processed;
/* Actual Cmd and Resp Queues */
struct bnxt_qplib_hwq cmdq;
struct bnxt_qplib_crsq crsq;
struct bnxt_qplib_hwq crsb;
};
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw);
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw,
int msix_vector,
int cp_bar_reg_off, int virt_fn,
int (*aeq_handler)
(struct bnxt_qplib_rcfw *,
struct creq_func_event *));
int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie);
int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie);
void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
struct cmdq_base *req, void **crsbe,
u8 is_block);
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx, int is_virtfn);
#endif /* __BNXT_QPLIB_RCFW_H__ */

drivers/infiniband/hw/bnxt_re/qplib_res.c

@@ -0,0 +1,825 @@
/*
* Broadcom NetXtreme-E RoCE driver.
*
* Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* BSD license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Description: QPLib resource manager
*/
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
struct bnxt_qplib_stats *stats);
/* PBL */
static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
bool is_umem)
{
int i;
if (!is_umem) {
for (i = 0; i < pbl->pg_count; i++) {
if (pbl->pg_arr[i])
dma_free_coherent(&pdev->dev, pbl->pg_size,
(void *)((unsigned long)
pbl->pg_arr[i] &
PAGE_MASK),
pbl->pg_map_arr[i]);
else
dev_warn(&pdev->dev,
"QPLIB: PBL free pg_arr[%d] empty?!",
i);
pbl->pg_arr[i] = NULL;
}
}
kfree(pbl->pg_arr);
pbl->pg_arr = NULL;
kfree(pbl->pg_map_arr);
pbl->pg_map_arr = NULL;
pbl->pg_count = 0;
pbl->pg_size = 0;
}
static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
struct scatterlist *sghead, u32 pages, u32 pg_size)
{
struct scatterlist *sg;
bool is_umem = false;
int i;
/* page ptr arrays */
pbl->pg_arr = kcalloc(pages, sizeof(void *), GFP_KERNEL);
if (!pbl->pg_arr)
return -ENOMEM;
pbl->pg_map_arr = kcalloc(pages, sizeof(dma_addr_t), GFP_KERNEL);
if (!pbl->pg_map_arr) {
kfree(pbl->pg_arr);
pbl->pg_arr = NULL;
return -ENOMEM;
}
pbl->pg_count = 0;
pbl->pg_size = pg_size;
if (!sghead) {
for (i = 0; i < pages; i++) {
pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
pbl->pg_size,
&pbl->pg_map_arr[i],
GFP_KERNEL);
if (!pbl->pg_arr[i])
goto fail;
memset(pbl->pg_arr[i], 0, pbl->pg_size);
pbl->pg_count++;
}
} else {
i = 0;
is_umem = true;
for_each_sg(sghead, sg, pages, i) {
pbl->pg_map_arr[i] = sg_dma_address(sg);
pbl->pg_arr[i] = sg_virt(sg);
if (!pbl->pg_arr[i])
goto fail;
pbl->pg_count++;
}
}
return 0;
fail:
__free_pbl(pdev, pbl, is_umem);
return -ENOMEM;
}
/* HWQ */
void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq)
{
int i;
if (!hwq->max_elements)
return;
if (hwq->level >= PBL_LVL_MAX)
return;
for (i = 0; i < hwq->level + 1; i++) {
if (i == hwq->level)
__free_pbl(pdev, &hwq->pbl[i], hwq->is_user);
else
__free_pbl(pdev, &hwq->pbl[i], false);
}
hwq->level = PBL_LVL_MAX;
hwq->max_elements = 0;
hwq->element_size = 0;
hwq->prod = 0;
hwq->cons = 0;
hwq->cp_bit = 0;
}
/* All HWQs are power of 2 in size */
int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
struct scatterlist *sghead, int nmap,
u32 *elements, u32 element_size, u32 aux,
u32 pg_size, enum bnxt_qplib_hwq_type hwq_type)
{
u32 pages, slots, size, aux_pages = 0, aux_size = 0;
dma_addr_t *src_phys_ptr, **dst_virt_ptr;
int i, rc;
hwq->level = PBL_LVL_MAX;
slots = roundup_pow_of_two(*elements);
if (aux) {
aux_size = roundup_pow_of_two(aux);
aux_pages = (slots * aux_size) / pg_size;
if ((slots * aux_size) % pg_size)
aux_pages++;
}
size = roundup_pow_of_two(element_size);
if (!sghead) {
hwq->is_user = false;
pages = (slots * size) / pg_size + aux_pages;
if ((slots * size) % pg_size)
pages++;
if (!pages)
return -EINVAL;
} else {
hwq->is_user = true;
pages = nmap;
}
/* Alloc the 1st memory block; can be a PDL/PTL/PBL */
if (sghead && (pages == MAX_PBL_LVL_0_PGS))
rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], sghead,
pages, pg_size);
else
rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL, 1, pg_size);
if (rc)
goto fail;
hwq->level = PBL_LVL_0;
if (pages > MAX_PBL_LVL_0_PGS) {
if (pages > MAX_PBL_LVL_1_PGS) {
/* 2 levels of indirection */
rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], NULL,
MAX_PBL_LVL_1_PGS_FOR_LVL_2, pg_size);
if (rc)
goto fail;
/* Fill in lvl0 PBL */
dst_virt_ptr =
(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
src_phys_ptr[i] | PTU_PDE_VALID;
hwq->level = PBL_LVL_1;
rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_2], sghead,
pages, pg_size);
if (rc)
goto fail;
/* Fill in lvl1 PBL */
dst_virt_ptr =
(dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
src_phys_ptr[i] | PTU_PTE_VALID;
}
if (hwq_type == HWQ_TYPE_QUEUE) {
/* Mark the last and next-to-last pages of the queue */
i = hwq->pbl[PBL_LVL_2].pg_count;
dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
PTU_PTE_LAST;
if (i > 1)
dst_virt_ptr[PTR_PG(i - 2)]
[PTR_IDX(i - 2)] |=
PTU_PTE_NEXT_TO_LAST;
}
hwq->level = PBL_LVL_2;
} else {
u32 flag = hwq_type == HWQ_TYPE_L2_CMPL ? 0 :
PTU_PTE_VALID;
/* 1 level of indirection */
rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], sghead,
pages, pg_size);
if (rc)
goto fail;
/* Fill in lvl0 PBL */
dst_virt_ptr =
(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) {
dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
src_phys_ptr[i] | flag;
}
if (hwq_type == HWQ_TYPE_QUEUE) {
/* Mark the last and next-to-last pages of the queue */
i = hwq->pbl[PBL_LVL_1].pg_count;
dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
PTU_PTE_LAST;
if (i > 1)
dst_virt_ptr[PTR_PG(i - 2)]
[PTR_IDX(i - 2)] |=
PTU_PTE_NEXT_TO_LAST;
}
hwq->level = PBL_LVL_1;
}
}
hwq->pdev = pdev;
spin_lock_init(&hwq->lock);
hwq->prod = 0;
hwq->cons = 0;
*elements = hwq->max_elements = slots;
hwq->element_size = size;
/* For direct access to the elements */
hwq->pbl_ptr = hwq->pbl[hwq->level].pg_arr;
hwq->pbl_dma_ptr = hwq->pbl[hwq->level].pg_map_arr;
return 0;
fail:
bnxt_qplib_free_hwq(pdev, hwq);
return -ENOMEM;
}
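/*
 * Level-selection example (illustrative, using the MAX_PBL_* constants
 * from qplib_res.h): a table of 64K entries of 448 bytes each with
 * pg_size = 4096 needs (65536 * 448) / 4096 = 7168 data pages. Since
 * 7168 > MAX_PBL_LVL_1_PGS (512), a two-level tree is built:
 *	PBL_LVL_0 - 1 PDE page pointing at the LVL_1 pages
 *	PBL_LVL_1 - up to 256 PTE pages pointing at the data pages
 *	PBL_LVL_2 - the 7168 data pages themselves
 * A table of 2..512 pages stops at PBL_LVL_1, and a single page is
 * served directly from PBL_LVL_0.
 */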
/* Context Tables */
void bnxt_qplib_free_ctx(struct pci_dev *pdev,
struct bnxt_qplib_ctx *ctx)
{
int i;
bnxt_qplib_free_hwq(pdev, &ctx->qpc_tbl);
bnxt_qplib_free_hwq(pdev, &ctx->mrw_tbl);
bnxt_qplib_free_hwq(pdev, &ctx->srqc_tbl);
bnxt_qplib_free_hwq(pdev, &ctx->cq_tbl);
bnxt_qplib_free_hwq(pdev, &ctx->tim_tbl);
for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
bnxt_qplib_free_hwq(pdev, &ctx->tqm_tbl[i]);
bnxt_qplib_free_hwq(pdev, &ctx->tqm_pde);
bnxt_qplib_free_stats_ctx(pdev, &ctx->stats);
}
/*
 * Routine: bnxt_qplib_alloc_ctx
 * Description:
 *     Context tables are memory regions that are used by the chip fw.
 *     The 6 tables defined are:
 *             QPC ctx - holds QP states
 *             MRW ctx - holds memory region and window
 *             SRQ ctx - holds shared RQ states
 *             CQ ctx - holds completion queue states
 *             TQM ctx - holds Tx Queue Manager context
 *             TIM ctx - holds timer context
 *     Depending on the size of the table requested, either a 1-page Page
 *     Buffer List or a 1- or 2-level indirect Page Directory List plus
 *     one PBL is used instead.
 *     The indirection level is chosen as follows:
 *             For 0      < ctx size <= 1 page, 0 levels of indirection are used
 *             For 1 page < ctx size <= 512 pages, 1 level of indirection is used
 *             For 512    < ctx size <= MAX, 2 levels of indirection are used
 * Returns:
 *     0 if success, else -ERRORS
 */
int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
struct bnxt_qplib_ctx *ctx,
bool virt_fn)
{
int i, j, k, rc = 0;
int fnz_idx = -1;
__le64 **pbl_ptr;
if (virt_fn)
goto stats_alloc;
/* QPC Tables */
ctx->qpc_tbl.max_elements = ctx->qpc_count;
rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL, 0,
&ctx->qpc_tbl.max_elements,
BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_CTX);
if (rc)
goto fail;
/* MRW Tables */
ctx->mrw_tbl.max_elements = ctx->mrw_count;
rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL, 0,
&ctx->mrw_tbl.max_elements,
BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_CTX);
if (rc)
goto fail;
/* SRQ Tables */
ctx->srqc_tbl.max_elements = ctx->srqc_count;
rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL, 0,
&ctx->srqc_tbl.max_elements,
BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_CTX);
if (rc)
goto fail;
/* CQ Tables */
ctx->cq_tbl.max_elements = ctx->cq_count;
rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL, 0,
&ctx->cq_tbl.max_elements,
BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_CTX);
if (rc)
goto fail;
/* TQM Buffer */
ctx->tqm_pde.max_elements = 512;
rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL, 0,
&ctx->tqm_pde.max_elements, sizeof(u64),
0, PAGE_SIZE, HWQ_TYPE_CTX);
if (rc)
goto fail;
for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
if (!ctx->tqm_count[i])
continue;
ctx->tqm_tbl[i].max_elements = ctx->qpc_count *
ctx->tqm_count[i];
rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL, 0,
&ctx->tqm_tbl[i].max_elements, 1,
0, PAGE_SIZE, HWQ_TYPE_CTX);
if (rc)
goto fail;
}
pbl_ptr = (__le64 **)ctx->tqm_pde.pbl_ptr;
for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
if (!ctx->tqm_tbl[i].max_elements)
continue;
if (fnz_idx == -1)
fnz_idx = i;
switch (ctx->tqm_tbl[i].level) {
case PBL_LVL_2:
for (k = 0; k < ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_count;
k++)
pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)] =
cpu_to_le64(
ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_map_arr[k]
| PTU_PTE_VALID);
break;
case PBL_LVL_1:
case PBL_LVL_0:
default:
pbl_ptr[PTR_PG(j)][PTR_IDX(j)] = cpu_to_le64(
ctx->tqm_tbl[i].pbl[PBL_LVL_0].pg_map_arr[0] |
PTU_PTE_VALID);
break;
}
}
if (fnz_idx == -1)
fnz_idx = 0;
ctx->tqm_pde_level = ctx->tqm_tbl[fnz_idx].level == PBL_LVL_2 ?
PBL_LVL_2 : ctx->tqm_tbl[fnz_idx].level + 1;
/* TIM Buffer */
ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL, 0,
&ctx->tim_tbl.max_elements, 1,
0, PAGE_SIZE, HWQ_TYPE_CTX);
if (rc)
goto fail;
stats_alloc:
/* Stats */
rc = bnxt_qplib_alloc_stats_ctx(pdev, &ctx->stats);
if (rc)
goto fail;
return 0;
fail:
bnxt_qplib_free_ctx(pdev, ctx);
return rc;
}
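/*
 * TQM PDE layout sketch (illustrative): the single tqm_pde page holds
 * 512 u64 slots, carved into MAX_TQM_ALLOC_REQ (32) blocks of
 * MAX_TQM_ALLOC_BLK_SIZE (8) entries. For each non-empty tqm_tbl[i] the
 * loop above writes into block i either the table's lone LVL_0 page
 * address or, for a two-level table, each of its LVL_1 page addresses,
 * all tagged with PTU_PTE_VALID.
 */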
/* GUID */
void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid)
{
u8 mac[ETH_ALEN];
/* MAC-48 to EUI-64 mapping */
memcpy(mac, dev_addr, ETH_ALEN);
guid[0] = mac[0] ^ 2;
guid[1] = mac[1];
guid[2] = mac[2];
guid[3] = 0xff;
guid[4] = 0xfe;
guid[5] = mac[3];
guid[6] = mac[4];
guid[7] = mac[5];
}
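/*
 * Worked example (illustrative): dev_addr 00:10:18:ab:cd:ef yields the
 * GUID 02:10:18:ff:fe:ab:cd:ef - the universal/local bit of the first
 * octet is flipped and ff:fe is inserted between the OUI and the
 * device-specific half, per the usual MAC-48 to EUI-64 expansion.
 */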
static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
kfree(sgid_tbl->tbl);
kfree(sgid_tbl->hw_id);
kfree(sgid_tbl->ctx);
sgid_tbl->tbl = NULL;
sgid_tbl->hw_id = NULL;
sgid_tbl->ctx = NULL;
sgid_tbl->max = 0;
sgid_tbl->active = 0;
}
static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl,
u16 max)
{
sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL);
if (!sgid_tbl->tbl)
return -ENOMEM;
sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
if (!sgid_tbl->hw_id)
goto out_free1;
sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
if (!sgid_tbl->ctx)
goto out_free2;
sgid_tbl->max = max;
return 0;
out_free2:
kfree(sgid_tbl->hw_id);
sgid_tbl->hw_id = NULL;
out_free1:
kfree(sgid_tbl->tbl);
sgid_tbl->tbl = NULL;
return -ENOMEM;
}
static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
int i;
for (i = 0; i < sgid_tbl->max; i++) {
if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
sizeof(bnxt_qplib_gid_zero)))
bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true);
}
memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
sgid_tbl->active = 0;
}
static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct net_device *netdev)
{
memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}
static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
if (!pkey_tbl->tbl)
dev_dbg(&res->pdev->dev, "QPLIB: PKEY tbl not present");
else
kfree(pkey_tbl->tbl);
pkey_tbl->tbl = NULL;
pkey_tbl->max = 0;
pkey_tbl->active = 0;
}
static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl,
u16 max)
{
pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
if (!pkey_tbl->tbl)
return -ENOMEM;
pkey_tbl->max = max;
return 0;
}
/* PDs */
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
{
u32 bit_num;
bit_num = find_first_bit(pdt->tbl, pdt->max);
if (bit_num == pdt->max)
return -ENOMEM;
/* Found unused PD */
clear_bit(bit_num, pdt->tbl);
pd->id = bit_num;
return 0;
}
int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
struct bnxt_qplib_pd_tbl *pdt,
struct bnxt_qplib_pd *pd)
{
if (test_and_set_bit(pd->id, pdt->tbl)) {
dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d",
pd->id);
return -EINVAL;
}
pd->id = 0;
return 0;
}
static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
{
kfree(pdt->tbl);
pdt->tbl = NULL;
pdt->max = 0;
}
static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_pd_tbl *pdt,
u32 max)
{
	u32 bytes;
	/* One bit per PD; round up to whole longs so the bitmap helpers
	 * never read past the end of the allocation.
	 */
	bytes = BITS_TO_LONGS(max) * sizeof(unsigned long);
	pdt->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!pdt->tbl)
		return -ENOMEM;
	pdt->max = max;
	/* A set bit marks a free PD id */
	memset(pdt->tbl, 0xFF, bytes);
return 0;
}
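/*
 * PD bitmap convention (illustrative summary): a set bit marks a free
 * id, hence the all-ones initialization above. bnxt_qplib_alloc_pd()
 * claims the lowest free id with find_first_bit()/clear_bit(), and
 * bnxt_qplib_dealloc_pd() returns it with test_and_set_bit(), which
 * also catches double frees.
 */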
/* DPIs */
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
struct bnxt_qplib_dpi *dpi,
void *app)
{
u32 bit_num;
bit_num = find_first_bit(dpit->tbl, dpit->max);
if (bit_num == dpit->max)
return -ENOMEM;
/* Found unused DPI */
clear_bit(bit_num, dpit->tbl);
dpit->app_tbl[bit_num] = app;
dpi->dpi = bit_num;
dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);
return 0;
}
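/*
 * DPI mapping sketch (illustrative): each DPI owns one PAGE_SIZE slice
 * of the doorbell BAR. dpi->dbr is the ioremapped kernel address of
 * that slice, while dpi->umdbr keeps the raw bus address so the same
 * page can later be mmap()ed into a user process.
 */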
int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
struct bnxt_qplib_dpi_tbl *dpit,
struct bnxt_qplib_dpi *dpi)
{
if (dpi->dpi >= dpit->max) {
dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d", dpi->dpi);
return -EINVAL;
}
if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d",
dpi->dpi);
return -EINVAL;
}
if (dpit->app_tbl)
dpit->app_tbl[dpi->dpi] = NULL;
memset(dpi, 0, sizeof(*dpi));
return 0;
}
static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_dpi_tbl *dpit)
{
kfree(dpit->tbl);
kfree(dpit->app_tbl);
if (dpit->dbr_bar_reg_iomem)
pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
memset(dpit, 0, sizeof(*dpit));
}
static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_dpi_tbl *dpit,
u32 dbr_offset)
{
u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
resource_size_t bar_reg_base;
u32 dbr_len, bytes;
if (dpit->dbr_bar_reg_iomem) {
dev_err(&res->pdev->dev,
"QPLIB: DBR BAR region %d already mapped", dbr_bar_reg);
return -EALREADY;
}
bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
if (!bar_reg_base) {
dev_err(&res->pdev->dev,
"QPLIB: BAR region %d resc start failed", dbr_bar_reg);
return -ENOMEM;
}
dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
dev_err(&res->pdev->dev, "QPLIB: Invalid DBR length %d",
dbr_len);
return -ENOMEM;
}
dpit->dbr_bar_reg_iomem = ioremap_nocache(bar_reg_base + dbr_offset,
dbr_len);
if (!dpit->dbr_bar_reg_iomem) {
dev_err(&res->pdev->dev,
"QPLIB: FP: DBR BAR region %d mapping failed",
dbr_bar_reg);
return -ENOMEM;
}
dpit->unmapped_dbr = bar_reg_base + dbr_offset;
dpit->max = dbr_len / PAGE_SIZE;
dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
if (!dpit->app_tbl) {
pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
dev_err(&res->pdev->dev,
"QPLIB: DPI app tbl allocation failed");
return -ENOMEM;
}
	/* One bit per DPI; round up to whole longs for the bitmap helpers */
	bytes = BITS_TO_LONGS(dpit->max) * sizeof(unsigned long);
dpit->tbl = kmalloc(bytes, GFP_KERNEL);
if (!dpit->tbl) {
pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
kfree(dpit->app_tbl);
dpit->app_tbl = NULL;
dev_err(&res->pdev->dev,
"QPLIB: DPI tbl allocation failed for size = %d",
bytes);
return -ENOMEM;
}
memset((u8 *)dpit->tbl, 0xFF, bytes);
return 0;
}
/* PKEYs */
static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
pkey_tbl->active = 0;
}
static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
u16 pkey = 0xFFFF;
memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
/* pkey default = 0xFFFF */
bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false);
}
/* Stats */
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
struct bnxt_qplib_stats *stats)
{
if (stats->dma) {
dma_free_coherent(&pdev->dev, stats->size,
stats->dma, stats->dma_map);
}
memset(stats, 0, sizeof(*stats));
stats->fw_id = -1;
}
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
struct bnxt_qplib_stats *stats)
{
memset(stats, 0, sizeof(*stats));
stats->fw_id = -1;
stats->size = sizeof(struct ctx_hw_stats);
stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
&stats->dma_map, GFP_KERNEL);
if (!stats->dma) {
dev_err(&pdev->dev, "QPLIB: Stats DMA allocation failed");
return -ENOMEM;
}
return 0;
}
void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl);
bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}
int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl);
return 0;
}
void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl);
bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
bnxt_qplib_free_pd_tbl(&res->pd_tbl);
bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
res->netdev = NULL;
res->pdev = NULL;
}
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
struct net_device *netdev,
struct bnxt_qplib_dev_attr *dev_attr)
{
int rc = 0;
res->pdev = pdev;
res->netdev = netdev;
rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
if (rc)
goto fail;
rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey);
if (rc)
goto fail;
rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
if (rc)
goto fail;
rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
if (rc)
goto fail;
return 0;
fail:
bnxt_qplib_free_res(res);
return rc;
}
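/*
 * Resource lifecycle sketch (illustrative): a typical bring-up and
 * tear-down sequence is
 *	bnxt_qplib_alloc_res()   - size sgid/pkey/pd/dpi tables from caps
 *	bnxt_qplib_init_res()    - zero the SGIDs, add the default 0xFFFF pkey
 *	...
 *	bnxt_qplib_cleanup_res() - delete live SGIDs, clear the pkeys
 *	bnxt_qplib_free_res()    - release all four tables
 */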


@@ -0,0 +1,223 @@
/*
* Broadcom NetXtreme-E RoCE driver.
*
* Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* BSD license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Description: QPLib resource manager (header)
*/
#ifndef __BNXT_QPLIB_RES_H__
#define __BNXT_QPLIB_RES_H__
extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
#define PTR_CNT_PER_PG (PAGE_SIZE / sizeof(void *))
#define PTR_MAX_IDX_PER_PG (PTR_CNT_PER_PG - 1)
#define PTR_PG(x) (((x) & ~PTR_MAX_IDX_PER_PG) / PTR_CNT_PER_PG)
#define PTR_IDX(x) ((x) & PTR_MAX_IDX_PER_PG)
#define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1))
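/* Worked examples (illustrative), assuming PAGE_SIZE = 4096 on a 64-bit
 * build so that PTR_CNT_PER_PG = 512: a flat PBL index of 600 maps to
 * PTR_PG(600) = 1 and PTR_IDX(600) = 88, i.e. pg_arr[1][88]; for a HWQ
 * with max_elements = 1024, HWQ_CMP(1030, hwq) wraps the raw producer
 * index to slot 6.
 */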
enum bnxt_qplib_hwq_type {
HWQ_TYPE_CTX,
HWQ_TYPE_QUEUE,
HWQ_TYPE_L2_CMPL
};
#define MAX_PBL_LVL_0_PGS 1
#define MAX_PBL_LVL_1_PGS 512
#define MAX_PBL_LVL_1_PGS_SHIFT 9
#define MAX_PBL_LVL_1_PGS_FOR_LVL_2 256
#define MAX_PBL_LVL_2_PGS (256 * 512)
enum bnxt_qplib_pbl_lvl {
PBL_LVL_0,
PBL_LVL_1,
PBL_LVL_2,
PBL_LVL_MAX
};
#define ROCE_PG_SIZE_4K (4 * 1024)
#define ROCE_PG_SIZE_8K (8 * 1024)
#define ROCE_PG_SIZE_64K (64 * 1024)
#define ROCE_PG_SIZE_2M (2 * 1024 * 1024)
#define ROCE_PG_SIZE_8M (8 * 1024 * 1024)
#define ROCE_PG_SIZE_1G (1024 * 1024 * 1024)
struct bnxt_qplib_pbl {
u32 pg_count;
u32 pg_size;
void **pg_arr;
dma_addr_t *pg_map_arr;
};
struct bnxt_qplib_hwq {
struct pci_dev *pdev;
/* lock to protect qplib_hwq */
spinlock_t lock;
struct bnxt_qplib_pbl pbl[PBL_LVL_MAX];
enum bnxt_qplib_pbl_lvl level; /* 0, 1, or 2 */
/* ptr for easy access to the PBL entries */
void **pbl_ptr;
/* ptr for easy access to the dma_addr */
dma_addr_t *pbl_dma_ptr;
u32 max_elements;
u16 element_size; /* Size of each entry */
u32 prod; /* raw */
u32 cons; /* raw */
u8 cp_bit;
u8 is_user;
};
/* Tables */
struct bnxt_qplib_pd_tbl {
unsigned long *tbl;
u32 max;
};
struct bnxt_qplib_sgid_tbl {
struct bnxt_qplib_gid *tbl;
u16 *hw_id;
u16 max;
u16 active;
void *ctx;
};
struct bnxt_qplib_pkey_tbl {
u16 *tbl;
u16 max;
u16 active;
};
struct bnxt_qplib_dpi {
u32 dpi;
void __iomem *dbr;
u64 umdbr;
};
struct bnxt_qplib_dpi_tbl {
void **app_tbl;
unsigned long *tbl;
u16 max;
void __iomem *dbr_bar_reg_iomem;
u64 unmapped_dbr;
};
struct bnxt_qplib_stats {
dma_addr_t dma_map;
void *dma;
u32 size;
u32 fw_id;
};
struct bnxt_qplib_vf_res {
u32 max_qp_per_vf;
u32 max_mrw_per_vf;
u32 max_srq_per_vf;
u32 max_cq_per_vf;
u32 max_gid_per_vf;
};
#define BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE 448
#define BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE 64
#define BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE 64
#define BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE 128
struct bnxt_qplib_ctx {
u32 qpc_count;
struct bnxt_qplib_hwq qpc_tbl;
u32 mrw_count;
struct bnxt_qplib_hwq mrw_tbl;
u32 srqc_count;
struct bnxt_qplib_hwq srqc_tbl;
u32 cq_count;
struct bnxt_qplib_hwq cq_tbl;
struct bnxt_qplib_hwq tim_tbl;
#define MAX_TQM_ALLOC_REQ 32
#define MAX_TQM_ALLOC_BLK_SIZE 8
u8 tqm_count[MAX_TQM_ALLOC_REQ];
struct bnxt_qplib_hwq tqm_pde;
u32 tqm_pde_level;
struct bnxt_qplib_hwq tqm_tbl[MAX_TQM_ALLOC_REQ];
struct bnxt_qplib_stats stats;
struct bnxt_qplib_vf_res vf_res;
};
struct bnxt_qplib_res {
struct pci_dev *pdev;
struct net_device *netdev;
struct bnxt_qplib_rcfw *rcfw;
struct bnxt_qplib_pd_tbl pd_tbl;
struct bnxt_qplib_sgid_tbl sgid_tbl;
struct bnxt_qplib_pkey_tbl pkey_tbl;
struct bnxt_qplib_dpi_tbl dpi_tbl;
};
#define to_bnxt_qplib(ptr, type, member) \
container_of(ptr, type, member)
struct bnxt_qplib_pd;
struct bnxt_qplib_dev_attr;
void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq);
int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
struct scatterlist *sl, int nmap, u32 *elements,
u32 elements_per_page, u32 aux, u32 pg_size,
enum bnxt_qplib_hwq_type hwq_type);
void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid);
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pd_tbl,
struct bnxt_qplib_pd *pd);
int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
struct bnxt_qplib_pd_tbl *pd_tbl,
struct bnxt_qplib_pd *pd);
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
struct bnxt_qplib_dpi *dpi,
void *app);
int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
struct bnxt_qplib_dpi_tbl *dpi_tbl,
struct bnxt_qplib_dpi *dpi);
void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res);
int bnxt_qplib_init_res(struct bnxt_qplib_res *res);
void bnxt_qplib_free_res(struct bnxt_qplib_res *res);
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
struct net_device *netdev,
struct bnxt_qplib_dev_attr *dev_attr);
void bnxt_qplib_free_ctx(struct pci_dev *pdev,
struct bnxt_qplib_ctx *ctx);
int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
struct bnxt_qplib_ctx *ctx,
bool virt_fn);
#endif /* __BNXT_QPLIB_RES_H__ */


@@ -0,0 +1,838 @@
/*
* Broadcom NetXtreme-E RoCE driver.
*
* Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* BSD license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Description: Slow Path Operators
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0 } };
/* Device */
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_dev_attr *attr)
{
struct cmdq_query_func req;
struct creq_query_func_resp *resp;
struct creq_query_func_resp_sb *sb;
u16 cmd_flags = 0;
u32 temp;
u8 *tqm_alloc;
int i;
RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags);
req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
resp = (struct creq_query_func_resp *)
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void **)&sb,
0);
if (!resp) {
dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC send failed");
return -EINVAL;
}
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
/* Cmd timed out */
dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC timed out");
return -ETIMEDOUT;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC failed ");
dev_err(&rcfw->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
return -EINVAL;
}
/* Extract the context from the side buffer */
attr->max_qp = le32_to_cpu(sb->max_qp);
attr->max_qp_rd_atom =
sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
attr->max_qp_init_rd_atom =
sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
attr->max_qp_sges = sb->max_sge;
attr->max_cq = le32_to_cpu(sb->max_cq);
attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
attr->max_cq_sges = attr->max_qp_sges;
attr->max_mr = le32_to_cpu(sb->max_mr);
attr->max_mw = le32_to_cpu(sb->max_mw);
attr->max_mr_size = le64_to_cpu(sb->max_mr_size);
attr->max_pd = 64 * 1024;
attr->max_raw_ethy_qp = le32_to_cpu(sb->max_raw_eth_qp);
attr->max_ah = le32_to_cpu(sb->max_ah);
attr->max_fmr = le32_to_cpu(sb->max_fmr);
attr->max_map_per_fmr = sb->max_map_per_fmr;
attr->max_srq = le16_to_cpu(sb->max_srq);
attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
attr->max_srq_sges = sb->max_srq_sge;
/* Bono only reports 1 PKEY for now, but it can support > 1 */
attr->max_pkey = le32_to_cpu(sb->max_pkeys);
attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE;
attr->max_sgid = le32_to_cpu(sb->max_gid);
strlcpy(attr->fw_ver, "20.6.28.0", sizeof(attr->fw_ver));
for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
tqm_alloc = (u8 *)&temp;
attr->tqm_alloc_reqs[i * 4] = *tqm_alloc;
attr->tqm_alloc_reqs[i * 4 + 1] = *(++tqm_alloc);
attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc);
attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
}
return 0;
}
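/*
 * tqm_alloc_reqs unpacking (illustrative): each of the eight u32 words
 * in the side buffer packs four one-byte TQM allocation requests, so
 * the loop above fans sb->tqm_alloc_reqs[0..7] out into the 32-entry
 * attr->tqm_alloc_reqs[] array, least-significant byte first on a
 * little-endian host.
 */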
/* SGID */
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
struct bnxt_qplib_gid *gid)
{
	if (index >= sgid_tbl->max) {
dev_err(&res->pdev->dev,
"QPLIB: Index %d exceeded SGID table max (%d)",
index, sgid_tbl->max);
return -EINVAL;
}
memcpy(gid, &sgid_tbl->tbl[index], sizeof(*gid));
return 0;
}
int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, bool update)
{
struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
struct bnxt_qplib_res,
sgid_tbl);
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
int index;
if (!sgid_tbl) {
dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated");
return -EINVAL;
}
/* Do we need a sgid_lock here? */
if (!sgid_tbl->active) {
dev_err(&res->pdev->dev,
"QPLIB: SGID table has no active entries");
return -ENOMEM;
}
for (index = 0; index < sgid_tbl->max; index++) {
if (!memcmp(&sgid_tbl->tbl[index], gid, sizeof(*gid)))
break;
}
if (index == sgid_tbl->max) {
dev_warn(&res->pdev->dev, "GID not found in the SGID table");
return 0;
}
/* Remove GID from the SGID table */
if (update) {
struct cmdq_delete_gid req;
struct creq_delete_gid_resp *resp;
u16 cmd_flags = 0;
RCFW_CMD_PREP(req, DELETE_GID, cmd_flags);
if (sgid_tbl->hw_id[index] == 0xFFFF) {
dev_err(&res->pdev->dev,
"QPLIB: GID entry contains an invalid HW id");
return -EINVAL;
}
req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
resp = (struct creq_delete_gid_resp *)
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL,
0);
if (!resp) {
dev_err(&res->pdev->dev,
"QPLIB: SP: DELETE_GID send failed");
return -EINVAL;
}
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw,
le16_to_cpu(req.cookie))) {
/* Cmd timed out */
dev_err(&res->pdev->dev,
"QPLIB: SP: DELETE_GID timed out");
return -ETIMEDOUT;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&res->pdev->dev,
"QPLIB: SP: DELETE_GID failed ");
dev_err(&res->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
return -EINVAL;
}
}
memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
sizeof(bnxt_qplib_gid_zero));
sgid_tbl->active--;
dev_dbg(&res->pdev->dev,
"QPLIB: SGID deleted hw_id[0x%x] = 0x%x active = 0x%x",
index, sgid_tbl->hw_id[index], sgid_tbl->active);
sgid_tbl->hw_id[index] = (u16)-1;
/* unlock */
return 0;
}
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u8 *smac, u16 vlan_id,
bool update, u32 *index)
{
struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
struct bnxt_qplib_res,
sgid_tbl);
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
int i, free_idx, rc = 0;
if (!sgid_tbl) {
dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated");
return -EINVAL;
}
/* Do we need a sgid_lock here? */
if (sgid_tbl->active == sgid_tbl->max) {
dev_err(&res->pdev->dev, "QPLIB: SGID table is full");
return -ENOMEM;
}
free_idx = sgid_tbl->max;
for (i = 0; i < sgid_tbl->max; i++) {
if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid))) {
dev_dbg(&res->pdev->dev,
"QPLIB: SGID entry already exist in entry %d!",
i);
*index = i;
return -EALREADY;
} else if (!memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
sizeof(bnxt_qplib_gid_zero)) &&
free_idx == sgid_tbl->max) {
free_idx = i;
}
}
if (free_idx == sgid_tbl->max) {
dev_err(&res->pdev->dev,
"QPLIB: SGID table is FULL but count is not MAX??");
return -ENOMEM;
}
if (update) {
struct cmdq_add_gid req;
struct creq_add_gid_resp *resp;
u16 cmd_flags = 0;
u32 temp32[4];
u16 temp16[3];
RCFW_CMD_PREP(req, ADD_GID, cmd_flags);
memcpy(temp32, gid->data, sizeof(struct bnxt_qplib_gid));
req.gid[0] = cpu_to_be32(temp32[3]);
req.gid[1] = cpu_to_be32(temp32[2]);
req.gid[2] = cpu_to_be32(temp32[1]);
req.gid[3] = cpu_to_be32(temp32[0]);
if (vlan_id != 0xFFFF)
req.vlan = cpu_to_le16((vlan_id &
CMDQ_ADD_GID_VLAN_VLAN_ID_MASK) |
CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
CMDQ_ADD_GID_VLAN_VLAN_EN);
/* MAC in network format */
memcpy(temp16, smac, 6);
req.src_mac[0] = cpu_to_be16(temp16[0]);
req.src_mac[1] = cpu_to_be16(temp16[1]);
req.src_mac[2] = cpu_to_be16(temp16[2]);
resp = (struct creq_add_gid_resp *)
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
NULL, 0);
if (!resp) {
dev_err(&res->pdev->dev,
"QPLIB: SP: ADD_GID send failed");
return -EINVAL;
}
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw,
le16_to_cpu(req.cookie))) {
/* Cmd timed out */
dev_err(&res->pdev->dev,
"QPIB: SP: ADD_GID timed out");
return -ETIMEDOUT;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&res->pdev->dev, "QPLIB: SP: ADD_GID failed ");
dev_err(&res->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
return -EINVAL;
}
sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp->xid);
}
/* Add GID to the sgid_tbl */
memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
sgid_tbl->active++;
dev_dbg(&res->pdev->dev,
"QPLIB: SGID added hw_id[0x%x] = 0x%x active = 0x%x",
free_idx, sgid_tbl->hw_id[free_idx], sgid_tbl->active);
*index = free_idx;
/* unlock */
return rc;
}
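/*
 * Byte-order note for the ADD_GID request above (an interpretation, not
 * from firmware documentation): the 16-byte GID is loaded into four host
 * u32s and stored to req.gid[] in reversed word order via cpu_to_be32(),
 * so on a little-endian host the firmware sees the GID fully
 * byte-reversed relative to its in-memory layout.
 */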
/* pkeys */
int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
u16 *pkey)
{
if (index == 0xFFFF) {
*pkey = 0xFFFF;
return 0;
}
	if (index >= pkey_tbl->max) {
dev_err(&res->pdev->dev,
"QPLIB: Index %d exceeded PKEY table max (%d)",
index, pkey_tbl->max);
return -EINVAL;
}
memcpy(pkey, &pkey_tbl->tbl[index], sizeof(*pkey));
return 0;
}
int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
bool update)
{
int i, rc = 0;
if (!pkey_tbl) {
dev_err(&res->pdev->dev, "QPLIB: PKEY table not allocated");
return -EINVAL;
}
/* Do we need a pkey_lock here? */
if (!pkey_tbl->active) {
dev_err(&res->pdev->dev,
"QPLIB: PKEY table has no active entries");
return -ENOMEM;
}
for (i = 0; i < pkey_tbl->max; i++) {
if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey)))
break;
}
if (i == pkey_tbl->max) {
dev_err(&res->pdev->dev,
"QPLIB: PKEY 0x%04x not found in the pkey table",
*pkey);
return -ENOMEM;
}
memset(&pkey_tbl->tbl[i], 0, sizeof(*pkey));
pkey_tbl->active--;
/* unlock */
return rc;
}
int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
bool update)
{
int i, free_idx, rc = 0;
if (!pkey_tbl) {
dev_err(&res->pdev->dev, "QPLIB: PKEY table not allocated");
return -EINVAL;
}
/* Do we need a pkey_lock here? */
if (pkey_tbl->active == pkey_tbl->max) {
dev_err(&res->pdev->dev, "QPLIB: PKEY table is full");
return -ENOMEM;
}
free_idx = pkey_tbl->max;
for (i = 0; i < pkey_tbl->max; i++) {
if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey)))
return -EALREADY;
else if (!pkey_tbl->tbl[i] && free_idx == pkey_tbl->max)
free_idx = i;
}
if (free_idx == pkey_tbl->max) {
dev_err(&res->pdev->dev,
"QPLIB: PKEY table is FULL but count is not MAX??");
return -ENOMEM;
}
/* Add PKEY to the pkey_tbl */
memcpy(&pkey_tbl->tbl[free_idx], pkey, sizeof(*pkey));
pkey_tbl->active++;
/* unlock */
return rc;
}
/* AH */
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_create_ah req;
struct creq_create_ah_resp *resp;
u16 cmd_flags = 0;
u32 temp32[4];
u16 temp16[3];
RCFW_CMD_PREP(req, CREATE_AH, cmd_flags);
memcpy(temp32, ah->dgid.data, sizeof(struct bnxt_qplib_gid));
req.dgid[0] = cpu_to_le32(temp32[0]);
req.dgid[1] = cpu_to_le32(temp32[1]);
req.dgid[2] = cpu_to_le32(temp32[2]);
req.dgid[3] = cpu_to_le32(temp32[3]);
req.type = ah->nw_type;
req.hop_limit = ah->hop_limit;
req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id[ah->sgid_index]);
req.dest_vlan_id_flow_label = cpu_to_le32((ah->flow_label &
CMDQ_CREATE_AH_FLOW_LABEL_MASK) |
CMDQ_CREATE_AH_DEST_VLAN_ID_MASK);
req.pd_id = cpu_to_le32(ah->pd->id);
req.traffic_class = ah->traffic_class;
/* MAC in network format */
memcpy(temp16, ah->dmac, 6);
req.dest_mac[0] = cpu_to_le16(temp16[0]);
req.dest_mac[1] = cpu_to_le16(temp16[1]);
req.dest_mac[2] = cpu_to_le16(temp16[2]);
resp = (struct creq_create_ah_resp *)
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
NULL, 1);
if (!resp) {
dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH send failed");
return -EINVAL;
}
if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) {
/* Cmd timed out */
dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH timed out");
return -ETIMEDOUT;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH failed ");
dev_err(&rcfw->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
return -EINVAL;
}
ah->id = le32_to_cpu(resp->xid);
return 0;
}
int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_destroy_ah req;
struct creq_destroy_ah_resp *resp;
u16 cmd_flags = 0;
/* Clean up the AH table in the device */
RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags);
req.ah_cid = cpu_to_le32(ah->id);
resp = (struct creq_destroy_ah_resp *)
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
NULL, 1);
if (!resp) {
dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH send failed");
return -EINVAL;
}
if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) {
/* Cmd timed out */
dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH timed out");
return -ETIMEDOUT;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH failed ");
dev_err(&rcfw->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
return -EINVAL;
}
return 0;
}
/* MRW */
int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_deallocate_key req;
struct creq_deallocate_key_resp *resp;
u16 cmd_flags = 0;
if (mrw->lkey == 0xFFFFFFFF) {
dev_info(&res->pdev->dev,
"QPLIB: SP: Free a reserved lkey MRW");
return 0;
}
RCFW_CMD_PREP(req, DEALLOCATE_KEY, cmd_flags);
req.mrw_flags = mrw->type;
if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
(mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
(mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
req.key = cpu_to_le32(mrw->rkey);
else
req.key = cpu_to_le32(mrw->lkey);
resp = (struct creq_deallocate_key_resp *)
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
NULL, 0);
if (!resp) {
dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR send failed");
return -EINVAL;
}
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
/* Cmd timed out */
dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR timed out");
return -ETIMEDOUT;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR failed ");
dev_err(&res->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
return -EINVAL;
}
/* Free the qplib's MRW memory */
if (mrw->hwq.max_elements)
bnxt_qplib_free_hwq(res->pdev, &mrw->hwq);
return 0;
}
int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_allocate_mrw req;
struct creq_allocate_mrw_resp *resp;
u16 cmd_flags = 0;
unsigned long tmp;
RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags);
req.pd_id = cpu_to_le32(mrw->pd->id);
req.mrw_flags = mrw->type;
if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR &&
mrw->flags & BNXT_QPLIB_FR_PMR) ||
mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A ||
mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)
req.access = CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY;
tmp = (unsigned long)mrw;
req.mrw_handle = cpu_to_le64(tmp);
resp = (struct creq_allocate_mrw_resp *)
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
NULL, 0);
if (!resp) {
dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW send failed");
return -EINVAL;
}
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
/* Cmd timed out */
dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW timed out");
return -ETIMEDOUT;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW failed ");
dev_err(&rcfw->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
return -EINVAL;
}
if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
(mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
(mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
mrw->rkey = le32_to_cpu(resp->xid);
else
mrw->lkey = le32_to_cpu(resp->xid);
return 0;
}
int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
bool block)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_deregister_mr req;
struct creq_deregister_mr_resp *resp;
u16 cmd_flags = 0;
int rc;
RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags);
req.lkey = cpu_to_le32(mrw->lkey);
resp = (struct creq_deregister_mr_resp *)
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
NULL, block);
if (!resp) {
dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR send failed");
return -EINVAL;
}
if (block)
rc = bnxt_qplib_rcfw_block_for_resp(rcfw,
le16_to_cpu(req.cookie));
else
rc = bnxt_qplib_rcfw_wait_for_resp(rcfw,
le16_to_cpu(req.cookie));
if (!rc) {
/* Cmd timed out */
dev_err(&res->pdev->dev, "QPLIB: SP: DEREG_MR timed out");
return -ETIMEDOUT;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR failed ");
dev_err(&rcfw->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
return -EINVAL;
}
/* Free the qplib's MR memory */
if (mrw->hwq.max_elements) {
mrw->va = 0;
mrw->total_size = 0;
bnxt_qplib_free_hwq(res->pdev, &mrw->hwq);
}
return 0;
}
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
u64 *pbl_tbl, int num_pbls, bool block)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_register_mr req;
struct creq_register_mr_resp *resp;
u16 cmd_flags = 0, level;
int pg_ptrs, pages, i, rc;
dma_addr_t **pbl_ptr;
u32 pg_size;
if (num_pbls) {
pg_ptrs = roundup_pow_of_two(num_pbls);
pages = pg_ptrs >> MAX_PBL_LVL_1_PGS_SHIFT;
if (!pages)
pages++;
if (pages > MAX_PBL_LVL_1_PGS) {
dev_err(&res->pdev->dev, "QPLIB: SP: Reg MR pages ");
dev_err(&res->pdev->dev,
"requested (0x%x) exceeded max (0x%x)",
pages, MAX_PBL_LVL_1_PGS);
return -ENOMEM;
}
		/* Free the hwq if it already exists; this must be a re-registration */
if (mr->hwq.max_elements)
bnxt_qplib_free_hwq(res->pdev, &mr->hwq);
mr->hwq.max_elements = pages;
rc = bnxt_qplib_alloc_init_hwq(res->pdev, &mr->hwq, NULL, 0,
&mr->hwq.max_elements,
PAGE_SIZE, 0, PAGE_SIZE,
HWQ_TYPE_CTX);
if (rc) {
dev_err(&res->pdev->dev,
"SP: Reg MR memory allocation failed");
return -ENOMEM;
}
/* Write to the hwq */
pbl_ptr = (dma_addr_t **)mr->hwq.pbl_ptr;
for (i = 0; i < num_pbls; i++)
pbl_ptr[PTR_PG(i)][PTR_IDX(i)] =
(pbl_tbl[i] & PAGE_MASK) | PTU_PTE_VALID;
}
RCFW_CMD_PREP(req, REGISTER_MR, cmd_flags);
/* Configure the request */
if (mr->hwq.level == PBL_LVL_MAX) {
level = 0;
req.pbl = 0;
pg_size = PAGE_SIZE;
} else {
level = mr->hwq.level + 1;
req.pbl = cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
pg_size = mr->hwq.pbl[PBL_LVL_0].pg_size;
}
req.log2_pg_size_lvl = (level << CMDQ_REGISTER_MR_LVL_SFT) |
((ilog2(pg_size) <<
CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT) &
CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK);
req.access = (mr->flags & 0xFFFF);
req.va = cpu_to_le64(mr->va);
req.key = cpu_to_le32(mr->lkey);
req.mr_size = cpu_to_le64(mr->total_size);
resp = (struct creq_register_mr_resp *)
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
NULL, block);
if (!resp) {
dev_err(&res->pdev->dev, "SP: REG_MR send failed");
rc = -EINVAL;
goto fail;
}
if (block)
rc = bnxt_qplib_rcfw_block_for_resp(rcfw,
le16_to_cpu(req.cookie));
else
rc = bnxt_qplib_rcfw_wait_for_resp(rcfw,
le16_to_cpu(req.cookie));
if (!rc) {
/* Cmd timed out */
dev_err(&res->pdev->dev, "SP: REG_MR timed out");
rc = -ETIMEDOUT;
goto fail;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&res->pdev->dev, "QPLIB: SP: REG_MR failed ");
dev_err(&res->pdev->dev,
"QPLIB: SP: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
rc = -EINVAL;
goto fail;
}
return 0;
fail:
if (mr->hwq.max_elements)
bnxt_qplib_free_hwq(res->pdev, &mr->hwq);
return rc;
}
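/*
 * Sizing example for bnxt_qplib_reg_mr() (illustrative): num_pbls = 1000
 * rounds up to pg_ptrs = 1024 page pointers, which fit in 1024 >> 9 = 2
 * PBL pages. The resulting hwq lands at PBL_LVL_1, so the REGISTER_MR
 * request is built with level = hwq.level + 1 = 2 and the LVL_0 page as
 * the PBL root.
 */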
int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
struct bnxt_qplib_frpl *frpl,
int max_pg_ptrs)
{
int pg_ptrs, pages, rc;
/* Re-calculate the max to fit the HWQ allocation model */
pg_ptrs = roundup_pow_of_two(max_pg_ptrs);
pages = pg_ptrs >> MAX_PBL_LVL_1_PGS_SHIFT;
if (!pages)
pages++;
if (pages > MAX_PBL_LVL_1_PGS)
return -ENOMEM;
frpl->hwq.max_elements = pages;
rc = bnxt_qplib_alloc_init_hwq(res->pdev, &frpl->hwq, NULL, 0,
&frpl->hwq.max_elements, PAGE_SIZE, 0,
PAGE_SIZE, HWQ_TYPE_CTX);
if (!rc)
frpl->max_pg_ptrs = pg_ptrs;
return rc;
}
int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
struct bnxt_qplib_frpl *frpl)
{
bnxt_qplib_free_hwq(res->pdev, &frpl->hwq);
return 0;
}
int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_map_tc_to_cos req;
struct creq_map_tc_to_cos_resp *resp;
u16 cmd_flags = 0;
int tleft;
RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags);
req.cos0 = cpu_to_le16(cids[0]);
req.cos1 = cpu_to_le16(cids[1]);
resp = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 0);
if (!resp) {
dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS send failed");
return -EINVAL;
}
tleft = bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie));
if (!tleft) {
dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS timed out");
return -ETIMEDOUT;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS failed ");
dev_err(&res->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
return -EINVAL;
}
return 0;
}


@@ -0,0 +1,160 @@
/*
* Broadcom NetXtreme-E RoCE driver.
*
* Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* BSD license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Description: Slow Path Operators (header)
*
*/
#ifndef __BNXT_QPLIB_SP_H__
#define __BNXT_QPLIB_SP_H__
struct bnxt_qplib_dev_attr {
char fw_ver[32];
u16 max_sgid;
u16 max_mrw;
u32 max_qp;
#define BNXT_QPLIB_MAX_OUT_RD_ATOM 126
u32 max_qp_rd_atom;
u32 max_qp_init_rd_atom;
u32 max_qp_wqes;
u32 max_qp_sges;
u32 max_cq;
u32 max_cq_wqes;
u32 max_cq_sges;
u32 max_mr;
u64 max_mr_size;
u32 max_pd;
u32 max_mw;
u32 max_raw_ethy_qp;
u32 max_ah;
u32 max_fmr;
u32 max_map_per_fmr;
u32 max_srq;
u32 max_srq_wqes;
u32 max_srq_sges;
u32 max_pkey;
u32 max_inline_data;
u32 l2_db_size;
u8 tqm_alloc_reqs[MAX_TQM_ALLOC_REQ];
};
struct bnxt_qplib_pd {
u32 id;
};
struct bnxt_qplib_gid {
u8 data[16];
};
struct bnxt_qplib_ah {
struct bnxt_qplib_gid dgid;
struct bnxt_qplib_pd *pd;
u32 id;
u8 sgid_index;
	/* For Query AH, if the hw table and SW table are different */
u8 host_sgid_index;
u8 traffic_class;
u32 flow_label;
u8 hop_limit;
u8 sl;
u8 dmac[6];
u16 vlan_id;
u8 nw_type;
};
struct bnxt_qplib_mrw {
struct bnxt_qplib_pd *pd;
int type;
u32 flags;
#define BNXT_QPLIB_FR_PMR 0x80000000
u32 lkey;
u32 rkey;
#define BNXT_QPLIB_RSVD_LKEY 0xFFFFFFFF
u64 va;
u64 total_size;
u32 npages;
u64 mr_handle;
struct bnxt_qplib_hwq hwq;
};
struct bnxt_qplib_frpl {
int max_pg_ptrs;
struct bnxt_qplib_hwq hwq;
};
#define BNXT_QPLIB_ACCESS_LOCAL_WRITE BIT(0)
#define BNXT_QPLIB_ACCESS_REMOTE_READ BIT(1)
#define BNXT_QPLIB_ACCESS_REMOTE_WRITE BIT(2)
#define BNXT_QPLIB_ACCESS_REMOTE_ATOMIC BIT(3)
#define BNXT_QPLIB_ACCESS_MW_BIND BIT(4)
#define BNXT_QPLIB_ACCESS_ZERO_BASED BIT(5)
#define BNXT_QPLIB_ACCESS_ON_DEMAND BIT(6)
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
struct bnxt_qplib_gid *gid);
int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, bool update);
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id,
bool update, u32 *index);
int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
u16 *pkey);
int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
bool update);
int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
bool update);
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_dev_attr *attr);
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah);
int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah);
int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res,
struct bnxt_qplib_mrw *mrw);
int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
bool block);
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
u64 *pbl_tbl, int num_pbls, bool block);
int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr);
int bnxt_qplib_alloc_fast_reg_mr(struct bnxt_qplib_res *res,
struct bnxt_qplib_mrw *mr, int max);
int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
struct bnxt_qplib_frpl *frpl, int max);
int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
struct bnxt_qplib_frpl *frpl);
int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids);
#endif /* __BNXT_QPLIB_SP_H__ */

File diff suppressed because it is too large.


@@ -0,0 +1,89 @@
/*
* Broadcom NetXtreme-E RoCE driver.
*
* Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* BSD license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Description: Uverbs ABI header file
*/
#ifndef __BNXT_RE_UVERBS_ABI_H__
#define __BNXT_RE_UVERBS_ABI_H__
#define BNXT_RE_ABI_VERSION 1
struct bnxt_re_uctx_resp {
__u32 dev_id;
__u32 max_qp;
__u32 pg_size;
__u32 cqe_sz;
__u32 max_cqd;
__u32 rsvd;
};
struct bnxt_re_pd_resp {
__u32 pdid;
__u32 dpi;
__u64 dbr;
};
struct bnxt_re_cq_req {
__u64 cq_va;
__u64 cq_handle;
};
struct bnxt_re_cq_resp {
__u32 cqid;
__u32 tail;
__u32 phase;
__u32 rsvd;
};
struct bnxt_re_qp_req {
__u64 qpsva;
__u64 qprva;
__u64 qp_handle;
};
struct bnxt_re_qp_resp {
__u32 qpid;
__u32 rsvd;
};
enum bnxt_re_shpg_offt {
BNXT_RE_BEG_RESV_OFFT = 0x00,
BNXT_RE_AVID_OFFT = 0x10,
BNXT_RE_AVID_SIZE = 0x04,
BNXT_RE_END_RESV_OFFT = 0xFF0
};
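/*
 * Shared-page layout sketch (illustrative): userspace maps a single page
 * and reads the 4-byte AH id at byte offset 0x10 (BNXT_RE_AVID_OFFT);
 * bytes 0x00-0x0f and everything from 0xff0 on are reserved.
 */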
#endif /* __BNXT_RE_UVERBS_ABI_H__ */