qed: Add module with basic common support

The QLogic Everest Driver (qed) is the backend module for the QL4xxx
Ethernet products by QLogic.

This module serves two main purposes:
 1. It contains all the common code that will be shared between the
    various drivers used with this line of products. Flows such as chip
    initialization and de-initialization fall under this category.

 2. It abstracts the protocol-specific HW & FW components, giving the
    protocol drivers a clean API whose slowpath configuration is detached
    from the actual HSI.

This patch adds a very basic module without any protocol-specific bits,
i.e., a basic implementation that falls almost entirely under the first
category.

Signed-off-by: Yuval Mintz <Yuval.Mintz@qlogic.com>
Signed-off-by: Ariel Elior <Ariel.Elior@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Yuval Mintz, 2015-10-26 11:02:25 +02:00; committed by David S. Miller
commit fe56b9e6a8 (parent 8941faa161)
27 changed files with 16664 additions and 0 deletions


@@ -8540,6 +8540,16 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qlge/
QLOGIC QL4xxx ETHERNET DRIVER
M: Yuval Mintz <Yuval.Mintz@qlogic.com>
M: Ariel Elior <Ariel.Elior@qlogic.com>
M: everest-linux-l2@qlogic.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qed/
F: include/linux/qed/
F: drivers/net/ethernet/qlogic/qede/
QNX4 FILESYSTEM
M: Anders Larsen <al@alarsen.net>
W: http://www.alarsen.net/linux/qnx4fs/


@@ -91,4 +91,10 @@ config NETXEN_NIC
---help---
This enables the support for NetXen's Gigabit Ethernet card.
config QED
tristate "QLogic QED 25/40/100Gb core driver"
depends on PCI
---help---
This enables the support for ...
endif # NET_VENDOR_QLOGIC


@@ -6,3 +6,4 @@ obj-$(CONFIG_QLA3XXX) += qla3xxx.o
obj-$(CONFIG_QLCNIC) += qlcnic/
obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_NETXEN_NIC) += netxen/
obj-$(CONFIG_QED) += qed/


@@ -0,0 +1,4 @@
obj-$(CONFIG_QED) := qed.o
qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o


@@ -0,0 +1,448 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_H
#define _QED_H
#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/zlib.h>
#include <linux/hashtable.h>
#include <linux/qed/qed_if.h>
#include "qed_hsi.h"
#define DRV_MODULE_VERSION "8.4.0.0"
#define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16
#define VER_SIZE 16
/* cau states */
enum qed_coalescing_mode {
QED_COAL_MODE_DISABLE,
QED_COAL_MODE_ENABLE
};
struct qed_eth_cb_ops;
struct qed_dev_info;
/* helpers */
static inline u32 qed_db_addr(u32 cid, u32 DEMS)
{
u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
return db_addr;
}
#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
~((1 << (p_hwfn->cdev->cache_shift)) - 1))
#define for_each_hwfn(cdev, i) for (i = 0; i < cdev->num_hwfns; i++)
#define D_TRINE(val, cond1, cond2, true1, true2, def) \
(val == (cond1) ? true1 : \
(val == (cond2) ? true2 : def))
/* forward */
struct qed_ptt_pool;
struct qed_spq;
struct qed_sb_info;
struct qed_sb_attn_info;
struct qed_cxt_mngr;
struct qed_sb_sp_info;
struct qed_mcp_info;
struct qed_rt_data {
u32 init_val;
bool b_valid;
};
/* The PCI personality is not quite synonymous with the protocol ID:
* 1. All personalities need CORE connections
* 2. The Ethernet personality may also support the RoCE protocol
*/
enum qed_pci_personality {
QED_PCI_ETH,
QED_PCI_DEFAULT /* default in shmem */
};
/* All VFs are symmetric, all counters are PF + all VFs */
struct qed_qm_iids {
u32 cids;
u32 vf_cids;
u32 tids;
};
enum QED_RESOURCES {
QED_SB,
QED_VPORT,
QED_PQ,
QED_RL,
QED_ILT,
QED_MAX_RESC,
};
struct qed_hw_info {
/* PCI personality */
enum qed_pci_personality personality;
/* Resource Allocation scheme results */
u32 resc_start[QED_MAX_RESC];
u32 resc_num[QED_MAX_RESC];
#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
u8 num_tc;
u8 offload_tc;
u8 non_offload_tc;
u32 concrete_fid;
u16 opaque_fid;
u16 ovlan;
u32 part_num[4];
u32 vendor_id;
u32 device_id;
unsigned char hw_mac_addr[ETH_ALEN];
struct qed_igu_info *p_igu_info;
u32 port_mode;
u32 hw_mode;
};
struct qed_hw_cid_data {
u32 cid;
bool b_cid_allocated;
/* Additional identifiers */
u16 opaque_fid;
u8 vport_id;
};
/* maximum size of read/write commands (HW limit) */
#define DMAE_MAX_RW_SIZE 0x2000
struct qed_dmae_info {
/* Mutex for synchronizing access to functions */
struct mutex mutex;
u8 channel;
dma_addr_t completion_word_phys_addr;
/* The memory location where the DMAE writes the completion
* value when an operation is finished on this context.
*/
u32 *p_completion_word;
dma_addr_t intermediate_buffer_phys_addr;
/* An intermediate buffer for DMAE operations that use virtual
* addresses - data is DMA'd to/from this buffer and then
* memcpy'd to/from the virtual address
*/
u32 *p_intermediate_buffer;
dma_addr_t dmae_cmd_phys_addr;
struct dmae_cmd *p_dmae_cmd;
};
struct qed_qm_info {
struct init_qm_pq_params *qm_pq_params;
struct init_qm_vport_params *qm_vport_params;
struct init_qm_port_params *qm_port_params;
u16 start_pq;
u8 start_vport;
u8 pure_lb_pq;
u8 offload_pq;
u8 pure_ack_pq;
u8 vf_queues_offset;
u16 num_pqs;
u16 num_vf_pqs;
u8 num_vports;
u8 max_phys_tcs_per_port;
bool pf_rl_en;
bool pf_wfq_en;
bool vport_rl_en;
bool vport_wfq_en;
u8 pf_wfq;
u32 pf_rl;
};
struct qed_fw_data {
const u8 *modes_tree_buf;
union init_op *init_ops;
const u32 *arr_data;
u32 init_ops_size;
};
struct qed_simd_fp_handler {
void *token;
void (*func)(void *);
};
struct qed_hwfn {
struct qed_dev *cdev;
u8 my_id; /* ID inside the PF */
#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
u8 rel_pf_id; /* Relative to engine*/
u8 abs_pf_id;
#define QED_PATH_ID(_p_hwfn) ((_p_hwfn)->abs_pf_id & 1)
u8 port_id;
bool b_active;
u32 dp_module;
u8 dp_level;
char name[NAME_SIZE];
bool first_on_engine;
bool hw_init_done;
/* BAR access */
void __iomem *regview;
void __iomem *doorbells;
u64 db_phys_addr;
unsigned long db_size;
/* PTT pool */
struct qed_ptt_pool *p_ptt_pool;
/* HW info */
struct qed_hw_info hw_info;
/* rt_array (for init-tool) */
struct qed_rt_data *rt_data;
/* SPQ */
struct qed_spq *p_spq;
/* EQ */
struct qed_eq *p_eq;
/* Consolidate Q*/
struct qed_consq *p_consq;
/* Slow-Path definitions */
struct tasklet_struct *sp_dpc;
bool b_sp_dpc_enabled;
struct qed_ptt *p_main_ptt;
struct qed_ptt *p_dpc_ptt;
struct qed_sb_sp_info *p_sp_sb;
struct qed_sb_attn_info *p_sb_attn;
/* Protocol related */
struct qed_pf_params pf_params;
/* Array of sb_info of all status blocks */
struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
u16 num_sbs;
struct qed_cxt_mngr *p_cxt_mngr;
/* Flag indicating whether interrupts are enabled or not*/
bool b_int_enabled;
struct qed_mcp_info *mcp_info;
struct qed_dmae_info dmae_info;
/* QM init */
struct qed_qm_info qm_info;
/* Buffer for unzipping firmware data */
void *unzip_buf;
struct qed_simd_fp_handler simd_proto_handler[64];
struct z_stream_s *stream;
};
struct pci_params {
int pm_cap;
unsigned long mem_start;
unsigned long mem_end;
unsigned int irq;
u8 pf_num;
};
struct qed_int_param {
u32 int_mode;
u8 num_vectors;
u8 min_msix_cnt; /* for minimal functionality */
};
struct qed_int_params {
struct qed_int_param in;
struct qed_int_param out;
struct msix_entry *msix_table;
bool fp_initialized;
u8 fp_msix_base;
u8 fp_msix_cnt;
};
struct qed_dev {
u32 dp_module;
u8 dp_level;
char name[NAME_SIZE];
u8 type;
#define QED_DEV_TYPE_BB_A0 (0 << 0)
#define QED_DEV_TYPE_MASK (0x3)
#define QED_DEV_TYPE_SHIFT (0)
u16 chip_num;
#define CHIP_NUM_MASK 0xffff
#define CHIP_NUM_SHIFT 16
u16 chip_rev;
#define CHIP_REV_MASK 0xf
#define CHIP_REV_SHIFT 12
u16 chip_metal;
#define CHIP_METAL_MASK 0xff
#define CHIP_METAL_SHIFT 4
u16 chip_bond_id;
#define CHIP_BOND_ID_MASK 0xf
#define CHIP_BOND_ID_SHIFT 0
u8 num_engines;
u8 num_ports_in_engines;
u8 num_funcs_in_port;
u8 path_id;
enum mf_mode mf_mode;
#define IS_MF(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode != SF)
#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == MF_NPAR)
#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == MF_OVLAN)
int pcie_width;
int pcie_speed;
u8 ver_str[VER_SIZE];
/* Add MF related configuration */
u8 mcp_rev;
u8 boot_mode;
u8 wol;
u32 int_mode;
enum qed_coalescing_mode int_coalescing_mode;
u8 rx_coalesce_usecs;
u8 tx_coalesce_usecs;
/* Start Bar offset of first hwfn */
void __iomem *regview;
void __iomem *doorbells;
u64 db_phys_addr;
unsigned long db_size;
/* PCI */
u8 cache_shift;
/* Init */
const struct iro *iro_arr;
#define IRO (p_hwfn->cdev->iro_arr)
/* HW functions */
u8 num_hwfns;
struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
u32 drv_type;
struct qed_eth_stats *reset_stats;
struct qed_fw_data *fw_data;
u32 mcp_nvm_resp;
/* Linux specific here */
struct qede_dev *edev;
struct pci_dev *pdev;
int msg_enable;
struct pci_params pci_params;
struct qed_int_params int_params;
u8 protocol;
#define IS_QED_ETH_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_ETH)
const struct firmware *firmware;
};
#define QED_GET_TYPE(dev) (((dev)->type & QED_DEV_TYPE_MASK) >> \
QED_DEV_TYPE_SHIFT)
#define QED_IS_BB_A0(dev) (QED_GET_TYPE(dev) == QED_DEV_TYPE_BB_A0)
#define QED_IS_BB(dev) (QED_IS_BB_A0(dev))
#define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB
#define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB
/**
* @brief qed_concrete_to_sw_fid - get the sw function id from
* the concrete value.
*
* @param concrete_fid
*
* @return inline u8
*/
static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
u32 concrete_fid)
{
u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
return pfid;
}
#define PURE_LB_TC 8
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
/* Other Linux specific common definitions */
#define DP_NAME(cdev) ((cdev)->name)
#define REG_ADDR(cdev, offset) (void __iomem *)((u8 __iomem *)\
(cdev->regview) + \
(offset))
#define REG_RD(cdev, offset) readl(REG_ADDR(cdev, offset))
#define REG_WR(cdev, offset, val) writel((u32)val, REG_ADDR(cdev, offset))
#define REG_WR16(cdev, offset, val) writew((u16)val, REG_ADDR(cdev, offset))
#define DOORBELL(cdev, db_addr, val) \
writel((u32)val, (void __iomem *)((u8 __iomem *)\
(cdev->doorbells) + (db_addr)))
/* Prototypes */
int qed_fill_dev_info(struct qed_dev *cdev,
struct qed_dev_info *dev_info);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
u32 input_len, u8 *input_buf,
u32 max_size, u8 *unzip_buf);
#define QED_ETH_INTERFACE_VERSION 300
#endif /* _QED_H */
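
For orientation, here is a minimal usage sketch that is not part of this patch; it only exercises definitions visible in qed.h above (for_each_hwfn(), QED_LEADING_HWFN(), DP_NAME() and the qed_hwfn ID fields), and the helper name qed_example_dump_ids() is invented for the illustration:

#include <linux/printk.h>
#include "qed.h"

/* Hypothetical helper: walk every HW-function of a device and log its IDs. */
static void qed_example_dump_ids(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		pr_info("%s: hwfn %d: rel_pf_id %d abs_pf_id %d port %d\n",
			DP_NAME(cdev), p_hwfn->my_id, p_hwfn->rel_pf_id,
			p_hwfn->abs_pf_id, p_hwfn->port_id);
	}

	/* The first hwfn acts as the leading one for device-wide flows */
	pr_info("%s: leading hwfn at %p\n",
		DP_NAME(cdev), QED_LEADING_HWFN(cdev));
}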


@@ -0,0 +1,847 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES PROTOCOLID_COMMON
#define NUM_TASK_TYPES 2
#define NUM_TASK_PF_SEGMENTS 4
/* QM constants */
#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
/* Doorbell-Queue constants */
#define DQ_RANGE_SHIFT 4
#define DQ_RANGE_ALIGN BIT(DQ_RANGE_SHIFT)
/* ILT constants */
#define ILT_DEFAULT_HW_P_SIZE 3
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
/* ILT entry structure */
#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL
#define ILT_ENTRY_PHY_ADDR_SHIFT 0
#define ILT_ENTRY_VALID_MASK 0x1ULL
#define ILT_ENTRY_VALID_SHIFT 52
#define ILT_ENTRY_IN_REGS 2
#define ILT_REG_SIZE_IN_BYTES 4
/* connection context union */
union conn_context {
struct core_conn_context core_ctx;
struct eth_conn_context eth_ctx;
};
#define CONN_CXT_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
/* PF per protocol configuration object */
struct qed_conn_type_cfg {
u32 cid_count;
u32 cid_start;
};
/* ILT Client configuration, Per connection type (protocol) resources. */
#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
#define CDUC_BLK (0)
enum ilt_clients {
ILT_CLI_CDUC,
ILT_CLI_QM,
ILT_CLI_MAX
};
struct ilt_cfg_pair {
u32 reg;
u32 val;
};
struct qed_ilt_cli_blk {
u32 total_size; /* 0 means not active */
u32 real_size_in_page;
u32 start_line;
};
struct qed_ilt_client_cfg {
bool active;
/* ILT boundaries */
struct ilt_cfg_pair first;
struct ilt_cfg_pair last;
struct ilt_cfg_pair p_size;
/* ILT client blocks for PF */
struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
u32 pf_total_lines;
};
/* Per Path -
* ILT shadow table
* Protocol acquired CID lists
* PF start line in ILT
*/
struct qed_dma_mem {
dma_addr_t p_phys;
void *p_virt;
size_t size;
};
struct qed_cid_acquired_map {
u32 start_cid;
u32 max_count;
unsigned long *cid_map;
};
struct qed_cxt_mngr {
/* Per protocol configuration */
struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
/* computed ILT structure */
struct qed_ilt_client_cfg clients[ILT_CLI_MAX];
/* Acquired CIDs */
struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
/* ILT shadow table */
struct qed_dma_mem *ilt_shadow;
u32 pf_start_line;
};
static u32 qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr)
{
u32 type, pf_cids = 0;
for (type = 0; type < MAX_CONN_TYPES; type++)
pf_cids += p_mngr->conn_cfg[type].cid_count;
return pf_cids;
}
static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
struct qed_qm_iids *iids)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
int type;
for (type = 0; type < MAX_CONN_TYPES; type++)
iids->cids += p_mngr->conn_cfg[type].cid_count;
DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x\n", iids->cids);
}
/* set the iids count per protocol */
static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type,
u32 cid_count)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
}
static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
struct qed_ilt_cli_blk *p_blk,
u32 start_line, u32 total_size,
u32 elem_size)
{
u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
/* verify that it's called only once for each block */
if (p_blk->total_size)
return;
p_blk->total_size = total_size;
p_blk->real_size_in_page = 0;
if (elem_size)
p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
p_blk->start_line = start_line;
}
static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
struct qed_ilt_client_cfg *p_cli,
struct qed_ilt_cli_blk *p_blk,
u32 *p_line, enum ilt_clients client_id)
{
if (!p_blk->total_size)
return;
if (!p_cli->active)
p_cli->first.val = *p_line;
p_cli->active = true;
*p_line += DIV_ROUND_UP(p_blk->total_size,
p_blk->real_size_in_page);
p_cli->last.val = *p_line - 1;
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
"ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
client_id, p_cli->first.val,
p_cli->last.val, p_blk->total_size,
p_blk->real_size_in_page, p_blk->start_line);
}
int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_ilt_client_cfg *p_cli;
struct qed_ilt_cli_blk *p_blk;
u32 curr_line, total, pf_cids;
struct qed_qm_iids qm_iids;
memset(&qm_iids, 0, sizeof(qm_iids));
p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
"hwfn [%d] - Set context manager starting line to be 0x%08x\n",
p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
/* CDUC */
p_cli = &p_mngr->clients[ILT_CLI_CDUC];
curr_line = p_mngr->pf_start_line;
p_cli->pf_total_lines = 0;
/* get the counters for the CDUC and QM clients */
pf_cids = qed_cxt_cdu_iids(p_mngr);
p_blk = &p_cli->pf_blks[CDUC_BLK];
total = pf_cids * CONN_CXT_SIZE(p_hwfn);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total, CONN_CXT_SIZE(p_hwfn));
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
p_cli->pf_total_lines = curr_line - p_blk->start_line;
/* QM */
p_cli = &p_mngr->clients[ILT_CLI_QM];
p_blk = &p_cli->pf_blks[0];
qed_cxt_qm_iids(p_hwfn, &qm_iids);
total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 0, 0,
p_hwfn->qm_info.num_pqs, 0);
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
"QM ILT Info, (cids=%d, num_pqs=%d, memory_size=%d)\n",
qm_iids.cids, p_hwfn->qm_info.num_pqs, total);
qed_ilt_cli_blk_fill(p_cli, p_blk,
curr_line, total * 0x1000,
QM_PQ_ELEMENT_SIZE);
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
p_cli->pf_total_lines = curr_line - p_blk->start_line;
if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
RESC_NUM(p_hwfn, QED_ILT)) {
DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
return -EINVAL;
}
return 0;
}
#define for_each_ilt_valid_client(pos, clients) \
for (pos = 0; pos < ILT_CLI_MAX; pos++)
/* Total number of ILT lines used by this PF */
static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
{
u32 size = 0;
u32 i;
for_each_ilt_valid_client(i, ilt_clients) {
if (!ilt_clients[i].active)
continue;
size += (ilt_clients[i].last.val -
ilt_clients[i].first.val + 1);
}
return size;
}
static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
{
struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 ilt_size, i;
ilt_size = qed_cxt_ilt_shadow_size(p_cli);
for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
if (p_dma->p_virt)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
p_dma->size, p_dma->p_virt,
p_dma->p_phys);
p_dma->p_virt = NULL;
}
kfree(p_mngr->ilt_shadow);
}
static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
struct qed_ilt_cli_blk *p_blk,
enum ilt_clients ilt_client,
u32 start_line_offset)
{
struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
u32 lines, line, sz_left;
if (!p_blk->total_size)
return 0;
sz_left = p_blk->total_size;
lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page);
line = p_blk->start_line + start_line_offset -
p_hwfn->p_cxt_mngr->pf_start_line;
for (; lines; lines--) {
dma_addr_t p_phys;
void *p_virt;
u32 size;
size = min_t(u32, sz_left,
p_blk->real_size_in_page);
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
size,
&p_phys,
GFP_KERNEL);
if (!p_virt)
return -ENOMEM;
memset(p_virt, 0, size);
ilt_shadow[line].p_phys = p_phys;
ilt_shadow[line].p_virt = p_virt;
ilt_shadow[line].size = size;
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
"ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
line, (u64)p_phys, p_virt, size);
sz_left -= size;
line++;
}
return 0;
}
static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_ilt_client_cfg *clients = p_mngr->clients;
struct qed_ilt_cli_blk *p_blk;
u32 size, i, j;
int rc;
size = qed_cxt_ilt_shadow_size(clients);
p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
GFP_KERNEL);
if (!p_mngr->ilt_shadow) {
DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n");
rc = -ENOMEM;
goto ilt_shadow_fail;
}
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
"Allocated 0x%x bytes for ilt shadow\n",
(u32)(size * sizeof(struct qed_dma_mem)));
for_each_ilt_valid_client(i, clients) {
if (!clients[i].active)
continue;
for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
p_blk = &clients[i].pf_blks[j];
rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
if (rc != 0)
goto ilt_shadow_fail;
}
}
return 0;
ilt_shadow_fail:
qed_ilt_shadow_free(p_hwfn);
return rc;
}
static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 type;
for (type = 0; type < MAX_CONN_TYPES; type++) {
kfree(p_mngr->acquired[type].cid_map);
p_mngr->acquired[type].max_count = 0;
p_mngr->acquired[type].start_cid = 0;
}
}
static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 start_cid = 0;
u32 type;
for (type = 0; type < MAX_CONN_TYPES; type++) {
u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
u32 size;
if (cid_cnt == 0)
continue;
size = DIV_ROUND_UP(cid_cnt,
sizeof(unsigned long) * BITS_PER_BYTE) *
sizeof(unsigned long);
p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
if (!p_mngr->acquired[type].cid_map)
goto cid_map_fail;
p_mngr->acquired[type].max_count = cid_cnt;
p_mngr->acquired[type].start_cid = start_cid;
p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
DP_VERBOSE(p_hwfn, QED_MSG_CXT,
"Type %08x start: %08x count %08x\n",
type, p_mngr->acquired[type].start_cid,
p_mngr->acquired[type].max_count);
start_cid += cid_cnt;
}
return 0;
cid_map_fail:
qed_cid_map_free(p_hwfn);
return -ENOMEM;
}
int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr;
u32 i;
p_mngr = kzalloc(sizeof(*p_mngr), GFP_ATOMIC);
if (!p_mngr) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n");
return -ENOMEM;
}
/* Initialize ILT client registers */
p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
/* default ILT page size for all clients is 32K */
for (i = 0; i < ILT_CLI_MAX; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
/* Set the cxt manager pointer prior to further allocations */
p_hwfn->p_cxt_mngr = p_mngr;
return 0;
}
int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
{
int rc;
/* Allocate the ILT shadow table */
rc = qed_ilt_shadow_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n");
goto tables_alloc_fail;
}
/* Allocate and initialize the acquired cids bitmaps */
rc = qed_cid_map_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n");
goto tables_alloc_fail;
}
return 0;
tables_alloc_fail:
qed_cxt_mngr_free(p_hwfn);
return rc;
}
void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
{
if (!p_hwfn->p_cxt_mngr)
return;
qed_cid_map_free(p_hwfn);
qed_ilt_shadow_free(p_hwfn);
kfree(p_hwfn->p_cxt_mngr);
p_hwfn->p_cxt_mngr = NULL;
}
void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
int type;
/* Reset acquired cids */
for (type = 0; type < MAX_CONN_TYPES; type++) {
u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
if (cid_cnt == 0)
continue;
memset(p_mngr->acquired[type].cid_map, 0,
DIV_ROUND_UP(cid_cnt,
sizeof(unsigned long) * BITS_PER_BYTE) *
sizeof(unsigned long));
}
}
/* CDU Common */
#define CDUC_CXT_SIZE_SHIFT \
CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT
#define CDUC_CXT_SIZE_MASK \
(CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)
#define CDUC_BLOCK_WASTE_SHIFT \
CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT
#define CDUC_BLOCK_WASTE_MASK \
(CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)
#define CDUC_NCIB_SHIFT \
CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT
#define CDUC_NCIB_MASK \
(CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
{
u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
/* CDUC - connection configuration */
page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
cxt_size = CONN_CXT_SIZE(p_hwfn);
elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
}
void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
{
struct qed_qm_pf_rt_init_params params;
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct qed_qm_iids iids;
memset(&iids, 0, sizeof(iids));
qed_cxt_qm_iids(p_hwfn, &iids);
memset(&params, 0, sizeof(params));
params.port_id = p_hwfn->port_id;
params.pf_id = p_hwfn->rel_pf_id;
params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
params.is_first_pf = p_hwfn->first_on_engine;
params.num_pf_cids = iids.cids;
params.start_pq = qm_info->start_pq;
params.num_pf_pqs = qm_info->num_pqs;
params.start_vport = qm_info->num_vports;
params.pf_wfq = qm_info->pf_wfq;
params.pf_rl = qm_info->pf_rl;
params.pq_params = qm_info->qm_pq_params;
params.vport_params = qm_info->qm_vport_params;
qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params);
}
/* CM PF */
static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
{
union qed_qm_pq_params pq_params;
u16 pq;
/* XCM pure-LB queue */
memset(&pq_params, 0, sizeof(pq_params));
pq_params.core.tc = LB_TC;
pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);
return 0;
}
/* DQ PF */
static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 dq_pf_max_cid = 0;
dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
/* 5 - PF */
dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
}
static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
{
struct qed_ilt_client_cfg *ilt_clients;
int i;
ilt_clients = p_hwfn->p_cxt_mngr->clients;
for_each_ilt_valid_client(i, ilt_clients) {
if (!ilt_clients[i].active)
continue;
STORE_RT_REG(p_hwfn,
ilt_clients[i].first.reg,
ilt_clients[i].first.val);
STORE_RT_REG(p_hwfn,
ilt_clients[i].last.reg,
ilt_clients[i].last.val);
STORE_RT_REG(p_hwfn,
ilt_clients[i].p_size.reg,
ilt_clients[i].p_size.val);
}
}
/* ILT (PSWRQ2) PF */
static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
{
struct qed_ilt_client_cfg *clients;
struct qed_cxt_mngr *p_mngr;
struct qed_dma_mem *p_shdw;
u32 line, rt_offst, i;
qed_ilt_bounds_init(p_hwfn);
p_mngr = p_hwfn->p_cxt_mngr;
p_shdw = p_mngr->ilt_shadow;
clients = p_hwfn->p_cxt_mngr->clients;
for_each_ilt_valid_client(i, clients) {
if (!clients[i].active)
continue;
/* Client's 1st val and RT array are absolute, ILT shadow's
* lines are relative.
*/
line = clients[i].first.val - p_mngr->pf_start_line;
rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
clients[i].first.val * ILT_ENTRY_IN_REGS;
for (; line <= clients[i].last.val - p_mngr->pf_start_line;
line++, rt_offst += ILT_ENTRY_IN_REGS) {
u64 ilt_hw_entry = 0;
/* p_virt could be NULL in case of dynamic
* allocation
*/
if (p_shdw[line].p_virt) {
SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
(p_shdw[line].p_phys >> 12));
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
"Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
rt_offst, line, i,
(u64)(p_shdw[line].p_phys >> 12));
}
STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
}
}
}
void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
{
qed_cdu_init_common(p_hwfn);
}
void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
{
qed_qm_init_pf(p_hwfn);
qed_cm_init_pf(p_hwfn);
qed_dq_init_pf(p_hwfn);
qed_ilt_init_pf(p_hwfn);
}
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
enum protocol_type type,
u32 *p_cid)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 rel_cid;
if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
return -EINVAL;
}
rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
p_mngr->acquired[type].max_count);
if (rel_cid >= p_mngr->acquired[type].max_count) {
DP_NOTICE(p_hwfn, "no CID available for protocol %d\n",
type);
return -EINVAL;
}
__set_bit(rel_cid, p_mngr->acquired[type].cid_map);
*p_cid = rel_cid + p_mngr->acquired[type].start_cid;
return 0;
}
static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
u32 cid,
enum protocol_type *p_type)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_cid_acquired_map *p_map;
enum protocol_type p;
u32 rel_cid;
/* Iterate over protocols and find matching cid range */
for (p = 0; p < MAX_CONN_TYPES; p++) {
p_map = &p_mngr->acquired[p];
if (!p_map->cid_map)
continue;
if (cid >= p_map->start_cid &&
cid < p_map->start_cid + p_map->max_count)
break;
}
*p_type = p;
if (p == MAX_CONN_TYPES) {
DP_NOTICE(p_hwfn, "Invalid CID %d", cid);
return false;
}
rel_cid = cid - p_map->start_cid;
if (!test_bit(rel_cid, p_map->cid_map)) {
DP_NOTICE(p_hwfn, "CID %d not acquired", cid);
return false;
}
return true;
}
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
u32 cid)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
enum protocol_type type;
bool b_acquired;
u32 rel_cid;
/* Test acquired and find matching per-protocol map */
b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);
if (!b_acquired)
return;
rel_cid = cid - p_mngr->acquired[type].start_cid;
__clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
}
int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
struct qed_cxt_info *p_info)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
enum protocol_type type;
bool b_acquired;
/* Test acquired and find matching per-protocol map */
b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
if (!b_acquired)
return -EINVAL;
/* set the protocol type */
p_info->type = type;
/* compute context virtual pointer */
hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
line = p_info->iid / cxts_per_p;
/* Make sure context is allocated (dynamic allocation) */
if (!p_mngr->ilt_shadow[line].p_virt)
return -EINVAL;
p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
p_info->iid % cxts_per_p * conn_cxt_size;
DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
"Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);
return 0;
}
int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
{
struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params;
/* Set the number of required CORE connections */
u32 core_cids = 1; /* SPQ */
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids);
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
p_params->num_cons);
return 0;
}
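
A brief worked example of the sizing math above (not part of the patch): with the default ILT_DEFAULT_HW_P_SIZE of 3, ILT_PAGE_IN_BYTES(3) is 1 << (3 + 12) = 32768 bytes, i.e. the 32K default page mentioned in qed_cxt_mngr_alloc(). The 320-byte context size below is an assumption chosen for illustration, not a value taken from the HSI:

#include <linux/kernel.h>
#include <linux/types.h>

/* Mirrors the arithmetic of qed_ilt_cli_blk_fill()/qed_cxt_cfg_ilt_compute():
 * a 32K ILT page holds 32768 / 320 = 102 contexts, so the usable part of the
 * page is 102 * 320 = 32640 bytes and a block of num_cids contexts needs
 * DIV_ROUND_UP(num_cids * 320, 32640) ILT lines.
 */
static u32 qed_example_ilt_lines(u32 num_cids)
{
	u32 cxt_size = 320;			/* assumed for the example */
	u32 ilt_page = 1U << (3 + 12);		/* ILT_PAGE_IN_BYTES(3) */
	u32 elems_per_page = ilt_page / cxt_size;
	u32 real_size_in_page = elems_per_page * cxt_size;

	return DIV_ROUND_UP(num_cids * cxt_size, real_size_in_page);
}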


@@ -0,0 +1,139 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_CXT_H
#define _QED_CXT_H
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/qed/qed_if.h>
#include "qed_hsi.h"
#include "qed.h"
struct qed_cxt_info {
void *p_cxt;
u32 iid;
enum protocol_type type;
};
/**
* @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type
*
* @param p_hwfn
* @param type
* @param p_cid
*
* @return int
*/
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
enum protocol_type type,
u32 *p_cid);
/**
* @brief qed_cxt_get_cid_info - Returns the context info for a specific cid
*
*
* @param p_hwfn
* @param p_info in/out
*
* @return int
*/
int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
struct qed_cxt_info *p_info);
enum qed_cxt_elem_type {
QED_ELEM_CXT,
QED_ELEM_TASK
};
/**
* @brief qed_cxt_set_pf_params - Set the PF params for cxt init
*
* @param p_hwfn
*
* @return int
*/
int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn);
/**
* @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters
*
* @param p_hwfn
*
* @return int
*/
int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn);
/**
* @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct
*
* @param p_hwfn
*
* @return int
*/
int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn);
/**
* @brief qed_cxt_mngr_free
*
* @param p_hwfn
*/
void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn);
/**
* @brief qed_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map
*
* @param p_hwfn
*
* @return int
*/
int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn);
/**
* @brief qed_cxt_mngr_setup - Reset the acquired CIDs
*
* @param p_hwfn
*/
void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn);
/**
* @brief qed_cxt_hw_init_common - Initialize ILT and DQ, common phase, per path.
*
*
*
* @param p_hwfn
*/
void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);
/**
* @brief qed_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
*
*
*
* @param p_hwfn
*/
void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn);
/**
* @brief qed_qm_init_pf - Initialize the QM PF phase, per path
*
* @param p_hwfn
*/
void qed_qm_init_pf(struct qed_hwfn *p_hwfn);
/**
* @brief qed_cxt_release - Release a cid
*
* @param p_hwfn
* @param cid
*/
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
u32 cid);
#endif
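
A minimal usage sketch of the CID API declared above (not part of this patch); it assumes the context manager and its tables have already been set up by the allocation flows in qed_cxt.c, and the helper name is invented for the example:

#include "qed.h"
#include "qed_cxt.h"

/* Acquire an Ethernet CID, look up its context pointer, then release it. */
static int qed_example_cid_roundtrip(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_info cxt_info;
	u32 cid;
	int rc;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid);
	if (rc)
		return rc;

	cxt_info.iid = cid;
	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
	if (!rc)
		DP_VERBOSE(p_hwfn, QED_MSG_CXT,
			   "cid %u -> context %p\n", cid, cxt_info.p_cxt);

	qed_cxt_release_cid(p_hwfn, cid);
	return rc;
}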

(File diff suppressed because it is too large.)


@@ -0,0 +1,222 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_DEV_API_H
#define _QED_DEV_API_H
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qed_int.h"
/**
* @brief qed_init_dp - initialize the debug level
*
* @param cdev
* @param dp_module
* @param dp_level
*/
void qed_init_dp(struct qed_dev *cdev,
u32 dp_module,
u8 dp_level);
/**
* @brief qed_init_struct - initialize the device structure to
* its defaults
*
* @param cdev
*/
void qed_init_struct(struct qed_dev *cdev);
/**
* @brief qed_resc_free -
*
* @param cdev
*/
void qed_resc_free(struct qed_dev *cdev);
/**
* @brief qed_resc_alloc -
*
* @param cdev
*
* @return int
*/
int qed_resc_alloc(struct qed_dev *cdev);
/**
* @brief qed_resc_setup -
*
* @param cdev
*/
void qed_resc_setup(struct qed_dev *cdev);
/**
* @brief qed_hw_init -
*
* @param cdev
* @param b_hw_start
* @param int_mode - interrupt mode [msix, inta, etc.] to use.
* @param allow_npar_tx_switch - npar tx switching to be used
* for vports configured for tx-switching.
* @param bin_fw_data - binary fw data pointer in binary fw file.
* Pass NULL if not using binary fw file.
*
* @return int
*/
int qed_hw_init(struct qed_dev *cdev,
bool b_hw_start,
enum qed_int_mode int_mode,
bool allow_npar_tx_switch,
const u8 *bin_fw_data);
/**
* @brief qed_hw_stop -
*
* @param cdev
*
* @return int
*/
int qed_hw_stop(struct qed_dev *cdev);
/**
* @brief qed_hw_reset -
*
* @param cdev
*
* @return int
*/
int qed_hw_reset(struct qed_dev *cdev);
/**
* @brief qed_hw_prepare -
*
* @param cdev
* @param personality - personality to initialize
*
* @return int
*/
int qed_hw_prepare(struct qed_dev *cdev,
int personality);
/**
* @brief qed_hw_remove -
*
* @param cdev
*/
void qed_hw_remove(struct qed_dev *cdev);
/**
* @brief qed_ptt_acquire - Allocate a PTT window
*
* Should be called at the entry point to the driver (at the beginning of an
* exported function)
*
* @param p_hwfn
*
* @return struct qed_ptt
*/
struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
/**
* @brief qed_ptt_release - Release PTT Window
*
* Should be called at the end of a flow - at the end of the function that
* acquired the PTT.
*
*
* @param p_hwfn
* @param p_ptt
*/
void qed_ptt_release(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
enum qed_dmae_address_type_t {
QED_DMAE_ADDRESS_HOST_VIRT,
QED_DMAE_ADDRESS_HOST_PHYS,
QED_DMAE_ADDRESS_GRC
};
/* Values for the flags field: if QED_DMAE_FLAG_RW_REPL_SRC is set and the
* source is a block of length DMAE_MAX_RW_SIZE and the
* destination is larger, the source block will be duplicated as
* many times as required to fill the destination block. This is
* used mostly to write a zeroed buffer to destination address
* using DMA
*/
#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001
#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
struct qed_dmae_params {
u32 flags; /* consists of QED_DMAE_FLAG_* values */
};
/**
* @brief qed_dmae_host2grc - copy data from source addr to
* dmae registers using the given ptt
*
* @param p_hwfn
* @param p_ptt
* @param source_addr
* @param grc_addr (dmae_data_offset)
* @param size_in_dwords
* @param flags (one of the flags defined above)
*/
int
qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u64 source_addr,
u32 grc_addr,
u32 size_in_dwords,
u32 flags);
/**
* @brief qed_chain_alloc - Allocate and initialize a chain
*
* @param p_hwfn
* @param intended_use
* @param mode
* @param num_elems
* @param elem_size
* @param p_chain
*
* @return int
*/
int
qed_chain_alloc(struct qed_dev *cdev,
enum qed_chain_use_mode intended_use,
enum qed_chain_mode mode,
u16 num_elems,
size_t elem_size,
struct qed_chain *p_chain);
/**
* @brief qed_chain_free - Free chain DMA memory
*
* @param p_hwfn
* @param p_chain
*/
void qed_chain_free(struct qed_dev *cdev,
struct qed_chain *p_chain);
/**
* @brief qed_final_cleanup - Cleanup of previous driver remains prior to load
*
* @param p_hwfn
* @param p_ptt
* @param id - For PF, engine-relative. For VF, PF-relative.
*
* @return int
*/
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 id);
#endif
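
The comments above spell out the PTT acquire/release discipline and the DMAE host-to-GRC copy; the sketch below (not part of this patch) shows how a caller would combine the two. EXAMPLE_GRC_ADDR, the zero-filled buffer and the helper name are placeholders chosen for the illustration:

#include <linux/kernel.h>
#include <linux/types.h>
#include "qed.h"
#include "qed_dev_api.h"

#define EXAMPLE_GRC_ADDR 0x1000	/* placeholder, not a real register offset */

/* Copy a small host buffer into GRC space via DMAE, bracketing the access
 * with qed_ptt_acquire()/qed_ptt_release() as required above.
 */
static int qed_example_write_grc(struct qed_hwfn *p_hwfn)
{
	u32 buf[4] = { 0 };
	struct qed_ptt *p_ptt;
	int rc;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)buf,
			       EXAMPLE_GRC_ADDR, ARRAY_SIZE(buf), 0);

	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}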

(File diff suppressed because it is too large.)


@@ -0,0 +1,776 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_chain.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_reg_addr.h"
#define QED_BAR_ACQUIRE_TIMEOUT 1000
/* Invalid values */
#define QED_BAR_INVALID_OFFSET (cpu_to_le32(-1))
struct qed_ptt {
struct list_head list_entry;
unsigned int idx;
struct pxp_ptt_entry pxp;
};
struct qed_ptt_pool {
struct list_head free_list;
spinlock_t lock; /* ptt synchronized access */
struct qed_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};
int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool),
GFP_ATOMIC);
int i;
if (!p_pool)
return -ENOMEM;
INIT_LIST_HEAD(&p_pool->free_list);
for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
p_pool->ptts[i].idx = i;
p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
p_pool->ptts[i].pxp.pretend.control = 0;
if (i >= RESERVED_PTT_MAX)
list_add(&p_pool->ptts[i].list_entry,
&p_pool->free_list);
}
p_hwfn->p_ptt_pool = p_pool;
spin_lock_init(&p_pool->lock);
return 0;
}
void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
{
struct qed_ptt *p_ptt;
int i;
for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
}
}
void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
{
kfree(p_hwfn->p_ptt_pool);
p_hwfn->p_ptt_pool = NULL;
}
struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
{
struct qed_ptt *p_ptt;
unsigned int i;
/* Take the free PTT from the list */
for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list,
struct qed_ptt, list_entry);
list_del(&p_ptt->list_entry);
spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"allocated ptt %d\n", p_ptt->idx);
return p_ptt;
}
spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
usleep_range(1000, 2000);
}
DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
return NULL;
}
void qed_ptt_release(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
}
u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
/* The HW is using DWORDS and we need to translate it to Bytes */
return le32_to_cpu(p_ptt->pxp.offset) << 2;
}
static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt)
{
return PXP_PF_WINDOW_ADMIN_PER_PF_START +
p_ptt->idx * sizeof(struct pxp_ptt_entry);
}
u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
{
return PXP_EXTERNAL_BAR_PF_WINDOW_START +
p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
}
void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 new_hw_addr)
{
u32 prev_hw_addr;
prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
if (new_hw_addr == prev_hw_addr)
return;
/* Update PTT entry in admin window */
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"Updating PTT entry %d to offset 0x%x\n",
p_ptt->idx, new_hw_addr);
/* The HW is using DWORDS and the address is in Bytes */
p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2);
REG_WR(p_hwfn,
qed_ptt_config_addr(p_ptt) +
offsetof(struct pxp_ptt_entry, offset),
le32_to_cpu(p_ptt->pxp.offset));
}
static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 hw_addr)
{
u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
u32 offset;
offset = hw_addr - win_hw_addr;
/* Verify the address is within the window */
if (hw_addr < win_hw_addr ||
offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
qed_ptt_set_win(p_hwfn, p_ptt, hw_addr);
offset = 0;
}
return qed_ptt_get_bar_addr(p_ptt) + offset;
}
struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
enum reserved_ptts ptt_idx)
{
if (ptt_idx >= RESERVED_PTT_MAX) {
DP_NOTICE(p_hwfn,
"Requested PTT %d is out of range\n", ptt_idx);
return NULL;
}
return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
}
void qed_wr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 hw_addr, u32 val)
{
u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
REG_WR(p_hwfn, bar_addr, val);
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
bar_addr, hw_addr, val);
}
u32 qed_rd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 hw_addr)
{
u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
u32 val = REG_RD(p_hwfn, bar_addr);
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
bar_addr, hw_addr, val);
return val;
}
static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
void *addr,
u32 hw_addr,
size_t n,
bool to_device)
{
u32 dw_count, *host_addr, hw_offset;
size_t quota, done = 0;
u32 __iomem *reg_addr;
while (done < n) {
quota = min_t(size_t, n - done,
PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
hw_offset = qed_ptt_get_bar_addr(p_ptt);
dw_count = quota / 4;
host_addr = (u32 *)((u8 *)addr + done);
reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset);
if (to_device)
while (dw_count--)
DIRECT_REG_WR(reg_addr++, *host_addr++);
else
while (dw_count--)
*host_addr++ = DIRECT_REG_RD(reg_addr++);
done += quota;
}
}
void qed_memcpy_from(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
void *dest, u32 hw_addr, size_t n)
{
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
hw_addr, dest, hw_addr, (unsigned long)n);
qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
}
void qed_memcpy_to(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 hw_addr, void *src, size_t n)
{
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
hw_addr, hw_addr, src, (unsigned long)n);
qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}
void qed_fid_pretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 fid)
{
u16 control = 0;
SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
/* Every pretend undoes previous pretends, including
* previous port pretend.
*/
SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
p_ptt->pxp.pretend.control = cpu_to_le16(control);
p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);
REG_WR(p_hwfn,
qed_ptt_config_addr(p_ptt) +
offsetof(struct pxp_ptt_entry, pretend),
*(u32 *)&p_ptt->pxp.pretend);
}
void qed_port_pretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 port_id)
{
u16 control = 0;
SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
p_ptt->pxp.pretend.control = cpu_to_le16(control);
REG_WR(p_hwfn,
qed_ptt_config_addr(p_ptt) +
offsetof(struct pxp_ptt_entry, pretend),
*(u32 *)&p_ptt->pxp.pretend);
}
void qed_port_unpretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
u16 control = 0;
SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
p_ptt->pxp.pretend.control = cpu_to_le16(control);
REG_WR(p_hwfn,
qed_ptt_config_addr(p_ptt) +
offsetof(struct pxp_ptt_entry, pretend),
*(u32 *)&p_ptt->pxp.pretend);
}
/* DMAE */
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
const u8 is_src_type_grc,
const u8 is_dst_type_grc,
struct qed_dmae_params *p_params)
{
u32 opcode = 0;
u16 opcodeB = 0;
/* Whether the source is the PCIe or the GRC.
* 0- The source is the PCIe
* 1- The source is the GRC.
*/
opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
: DMAE_CMD_SRC_MASK_PCIE) <<
DMAE_CMD_SRC_SHIFT;
opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
DMAE_CMD_SRC_PF_ID_SHIFT);
/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
: DMAE_CMD_DST_MASK_PCIE) <<
DMAE_CMD_DST_SHIFT;
opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
DMAE_CMD_DST_PF_ID_SHIFT);
/* Whether to write a completion word to the completion destination:
* 0-Do not write a completion word
* 1-Write the completion word
*/
opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);
opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
DMAE_CMD_SRC_ADDR_RESET_SHIFT);
if (p_params->flags & QED_DMAE_FLAG_COMPLETION_DST)
opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);
opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);
opcode |= ((p_hwfn->port_id) << DMAE_CMD_PORT_ID_SHIFT);
/* reset source address in next go */
opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
DMAE_CMD_SRC_ADDR_RESET_SHIFT);
/* reset dest address in next go */
opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
DMAE_CMD_DST_ADDR_RESET_SHIFT);
opcodeB |= (DMAE_CMD_SRC_VF_ID_MASK <<
DMAE_CMD_SRC_VF_ID_SHIFT);
opcodeB |= (DMAE_CMD_DST_VF_ID_MASK <<
DMAE_CMD_DST_VF_ID_SHIFT);
p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB);
}
u32 qed_dmae_idx_to_go_cmd(u8 idx)
{
/* All the DMAE 'go' registers form an array in internal memory */
return DMAE_REG_GO_C0 + (idx << 2);
}
static int
qed_dmae_post_command(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd;
u8 idx_cmd = p_hwfn->dmae_info.channel, i;
int qed_status = 0;
/* verify address is not NULL */
if ((((command->dst_addr_lo == 0) && (command->dst_addr_hi == 0)) ||
((command->src_addr_lo == 0) && (command->src_addr_hi == 0)))) {
DP_NOTICE(p_hwfn,
"source or destination address 0 idx_cmd=%d\n"
"opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
idx_cmd,
le32_to_cpu(command->opcode),
le16_to_cpu(command->opcode_b),
le16_to_cpu(command->length),
le32_to_cpu(command->src_addr_hi),
le32_to_cpu(command->src_addr_lo),
le32_to_cpu(command->dst_addr_hi),
le32_to_cpu(command->dst_addr_lo));
return -EINVAL;
}
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
"Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
idx_cmd,
le32_to_cpu(command->opcode),
le16_to_cpu(command->opcode_b),
le16_to_cpu(command->length),
le32_to_cpu(command->src_addr_hi),
le32_to_cpu(command->src_addr_lo),
le32_to_cpu(command->dst_addr_hi),
le32_to_cpu(command->dst_addr_lo));
/* Copy the command to DMAE - need to do it before every call
* for source/dest address no reset.
* The first 9 DWs are the command registers, the 10 DW is the
* GO register, and the rest are result registers
* (which are read only by the client).
*/
for (i = 0; i < DMAE_CMD_SIZE; i++) {
u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
*(((u32 *)command) + i) : 0;
qed_wr(p_hwfn, p_ptt,
DMAE_REG_CMD_MEM +
(idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
(i * sizeof(u32)), data);
}
qed_wr(p_hwfn, p_ptt,
qed_dmae_idx_to_go_cmd(idx_cmd),
DMAE_GO_VALUE);
return qed_status;
}
int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
{
dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
*p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(u32),
p_addr,
GFP_KERNEL);
if (!*p_comp) {
DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n");
goto err;
}
p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
*p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(struct dmae_cmd),
p_addr, GFP_KERNEL);
if (!*p_cmd) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct dmae_cmd'\n");
goto err;
}
p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
*p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(u32) * DMAE_MAX_RW_SIZE,
p_addr, GFP_KERNEL);
if (!*p_buff) {
DP_NOTICE(p_hwfn, "Failed to allocate `intermediate_buffer'\n");
goto err;
}
p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
return 0;
err:
qed_dmae_info_free(p_hwfn);
return -ENOMEM;
}
void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
{
dma_addr_t p_phys;
/* Just make sure no one is in the middle */
mutex_lock(&p_hwfn->dmae_info.mutex);
if (p_hwfn->dmae_info.p_completion_word) {
p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(u32),
p_hwfn->dmae_info.p_completion_word,
p_phys);
p_hwfn->dmae_info.p_completion_word = NULL;
}
if (p_hwfn->dmae_info.p_dmae_cmd) {
p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(struct dmae_cmd),
p_hwfn->dmae_info.p_dmae_cmd,
p_phys);
p_hwfn->dmae_info.p_dmae_cmd = NULL;
}
if (p_hwfn->dmae_info.p_intermediate_buffer) {
p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(u32) * DMAE_MAX_RW_SIZE,
p_hwfn->dmae_info.p_intermediate_buffer,
p_phys);
p_hwfn->dmae_info.p_intermediate_buffer = NULL;
}
mutex_unlock(&p_hwfn->dmae_info.mutex);
}
static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
{
u32 wait_cnt = 0;
u32 wait_cnt_limit = 10000;
int qed_status = 0;
barrier();
while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
udelay(DMAE_MIN_WAIT_TIME);
if (++wait_cnt > wait_cnt_limit) {
DP_NOTICE(p_hwfn->cdev,
"Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
*p_hwfn->dmae_info.p_completion_word,
DMAE_COMPLETION_VAL);
qed_status = -EBUSY;
break;
}
/* to sync the completion_word since we are not
* using the volatile keyword for p_completion_word
*/
barrier();
}
if (qed_status == 0)
*p_hwfn->dmae_info.p_completion_word = 0;
return qed_status;
}
static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u64 src_addr,
u64 dst_addr,
u8 src_type,
u8 dst_type,
u32 length)
{
dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
int qed_status = 0;
switch (src_type) {
case QED_DMAE_ADDRESS_GRC:
case QED_DMAE_ADDRESS_HOST_PHYS:
cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr));
cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr));
break;
/* for virtual source addresses we use the intermediate buffer. */
case QED_DMAE_ADDRESS_HOST_VIRT:
cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys));
cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
(void *)(uintptr_t)src_addr,
length * sizeof(u32));
break;
default:
return -EINVAL;
}
switch (dst_type) {
case QED_DMAE_ADDRESS_GRC:
case QED_DMAE_ADDRESS_HOST_PHYS:
cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr));
cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr));
break;
/* for virtual destination addresses we use the intermediate buffer. */
case QED_DMAE_ADDRESS_HOST_VIRT:
cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys));
cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys));
break;
default:
return -EINVAL;
}
cmd->length = cpu_to_le16((u16)length);
qed_dmae_post_command(p_hwfn, p_ptt);
qed_status = qed_dmae_operation_wait(p_hwfn);
if (qed_status) {
DP_NOTICE(p_hwfn,
"qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n",
src_addr,
dst_addr,
length);
return qed_status;
}
if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
memcpy((void *)(uintptr_t)(dst_addr),
&p_hwfn->dmae_info.p_intermediate_buffer[0],
length * sizeof(u32));
return 0;
}
static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u64 src_addr, u64 dst_addr,
u8 src_type, u8 dst_type,
u32 size_in_dwords,
struct qed_dmae_params *p_params)
{
dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
u64 src_addr_split = 0, dst_addr_split = 0;
u16 length_limit = DMAE_MAX_RW_SIZE;
int qed_status = 0;
u32 offset = 0;
qed_dmae_opcode(p_hwfn,
(src_type == QED_DMAE_ADDRESS_GRC),
(dst_type == QED_DMAE_ADDRESS_GRC),
p_params);
cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys));
cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys));
cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL);
/* Check if the grc_addr is valid, e.g. < MAX_GRC_OFFSET */
cnt_split = size_in_dwords / length_limit;
length_mod = size_in_dwords % length_limit;
src_addr_split = src_addr;
dst_addr_split = dst_addr;
for (i = 0; i <= cnt_split; i++) {
offset = length_limit * i;
if (!(p_params->flags & QED_DMAE_FLAG_RW_REPL_SRC)) {
if (src_type == QED_DMAE_ADDRESS_GRC)
src_addr_split = src_addr + offset;
else
src_addr_split = src_addr + (offset * 4);
}
if (dst_type == QED_DMAE_ADDRESS_GRC)
dst_addr_split = dst_addr + offset;
else
dst_addr_split = dst_addr + (offset * 4);
length_cur = (cnt_split == i) ? length_mod : length_limit;
/* might be zero on last iteration */
if (!length_cur)
continue;
qed_status = qed_dmae_execute_sub_operation(p_hwfn,
p_ptt,
src_addr_split,
dst_addr_split,
src_type,
dst_type,
length_cur);
if (qed_status) {
DP_NOTICE(p_hwfn,
"qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
qed_status,
src_addr,
dst_addr,
length_cur);
break;
}
}
return qed_status;
}
int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u64 source_addr,
u32 grc_addr,
u32 size_in_dwords,
u32 flags)
{
u32 grc_addr_in_dw = grc_addr / sizeof(u32);
struct qed_dmae_params params;
int rc;
memset(&params, 0, sizeof(struct qed_dmae_params));
params.flags = flags;
mutex_lock(&p_hwfn->dmae_info.mutex);
rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
grc_addr_in_dw,
QED_DMAE_ADDRESS_HOST_VIRT,
QED_DMAE_ADDRESS_GRC,
size_in_dwords, &params);
mutex_unlock(&p_hwfn->dmae_info.mutex);
return rc;
}
u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
enum protocol_type proto,
union qed_qm_pq_params *p_params)
{
u16 pq_id = 0;
if ((proto == PROTOCOLID_CORE || proto == PROTOCOLID_ETH) &&
!p_params) {
DP_NOTICE(p_hwfn,
"Protocol %d received NULL PQ params\n",
proto);
return 0;
}
switch (proto) {
case PROTOCOLID_CORE:
if (p_params->core.tc == LB_TC)
pq_id = p_hwfn->qm_info.pure_lb_pq;
else
pq_id = p_hwfn->qm_info.offload_pq;
break;
case PROTOCOLID_ETH:
pq_id = p_params->eth.tc;
break;
default:
pq_id = 0;
}
pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);
return pq_id;
}

View File

@ -0,0 +1,263 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_HW_H
#define _QED_HW_H
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_dev_api.h"
/* Forward declaration */
struct qed_ptt;
enum reserved_ptts {
RESERVED_PTT_EDIAG,
RESERVED_PTT_USER_SPACE,
RESERVED_PTT_MAIN,
RESERVED_PTT_DPC,
RESERVED_PTT_MAX
};
enum _dmae_cmd_dst_mask {
DMAE_CMD_DST_MASK_NONE = 0,
DMAE_CMD_DST_MASK_PCIE = 1,
DMAE_CMD_DST_MASK_GRC = 2
};
enum _dmae_cmd_src_mask {
DMAE_CMD_SRC_MASK_PCIE = 0,
DMAE_CMD_SRC_MASK_GRC = 1
};
enum _dmae_cmd_crc_mask {
DMAE_CMD_COMP_CRC_EN_MASK_NONE = 0,
DMAE_CMD_COMP_CRC_EN_MASK_SET = 1
};
/* definitions for DMA constants */
#define DMAE_GO_VALUE 0x1
#define DMAE_COMPLETION_VAL 0xD1AE
#define DMAE_CMD_ENDIANITY 0x2
#define DMAE_CMD_SIZE 14
#define DMAE_CMD_SIZE_TO_FILL (DMAE_CMD_SIZE - 5)
#define DMAE_MIN_WAIT_TIME 0x2
#define DMAE_MAX_CLIENTS 32
/**
* @brief qed_gtt_init - Initialize GTT windows
*
* @param p_hwfn
*/
void qed_gtt_init(struct qed_hwfn *p_hwfn);
/**
* @brief qed_ptt_invalidate - Forces all ptt entries to be re-configured
*
* @param p_hwfn
*/
void qed_ptt_invalidate(struct qed_hwfn *p_hwfn);
/**
* @brief qed_ptt_pool_alloc - Allocate and initialize PTT pool
*
* @param p_hwfn
*
* @return struct _qed_status - success (0), negative - error.
*/
int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn);
/**
* @brief qed_ptt_pool_free - Free the PTT pool
*
* @param p_hwfn
*/
void qed_ptt_pool_free(struct qed_hwfn *p_hwfn);
/**
* @brief qed_ptt_get_hw_addr - Get PTT's GRC/HW address
*
* @param p_hwfn
* @param p_ptt
*
* @return u32
*/
u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
* @brief qed_ptt_get_bar_addr - Get PTT's external BAR address
*
* @param p_hwfn
* @param p_ptt
*
* @return u32
*/
u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt);
/**
* @brief qed_ptt_set_win - Set PTT Window's GRC BAR address
*
* @param p_hwfn
* @param new_hw_addr
* @param p_ptt
*/
void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 new_hw_addr);
/**
* @brief qed_get_reserved_ptt - Get a specific reserved PTT
*
* @param p_hwfn
* @param ptt_idx
*
* @return struct qed_ptt *
*/
struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
enum reserved_ptts ptt_idx);
/**
* @brief qed_wr - Write value to BAR using the given ptt
*
* @param p_hwfn
* @param p_ptt
* @param val
* @param hw_addr
*/
void qed_wr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 hw_addr,
u32 val);
/**
* @brief qed_rd - Read value from BAR using the given ptt
*
* @param p_hwfn
* @param p_ptt
* @param hw_addr
*/
u32 qed_rd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 hw_addr);
/**
* @brief qed_memcpy_from - copy n bytes from BAR using the given
* ptt
*
* @param p_hwfn
* @param p_ptt
* @param dest
* @param hw_addr
* @param n
*/
void qed_memcpy_from(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
void *dest,
u32 hw_addr,
size_t n);
/**
* @brief qed_memcpy_to - copy n bytes to BAR using the given
* ptt
*
* @param p_hwfn
* @param p_ptt
* @param hw_addr
* @param src
* @param n
*/
void qed_memcpy_to(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 hw_addr,
void *src,
size_t n);
/**
* @brief qed_fid_pretend - pretend to another function when
* accessing the ptt window. There is no way to unpretend
* a function. The only way to cancel a pretend is to
* pretend back to the original function.
*
* @param p_hwfn
* @param p_ptt
* @param fid - fid field of pxp_pretend structure. Can contain
* either pf / vf, port/path fields are don't care.
*/
void qed_fid_pretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 fid);
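/* Usage sketch (illustrative only; other_fid/own_fid are hypothetical
 * placeholders, not driver symbols): since there is no explicit "unpretend",
 * a caller cancels a pretend by pretending back to its own fid:
 *
 *	qed_fid_pretend(p_hwfn, p_ptt, other_fid);
 *	// GRC accesses through p_ptt now apply to other_fid
 *	qed_fid_pretend(p_hwfn, p_ptt, own_fid);
 *
 * where the fid values are built as described in the @param fid note above.
 */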
/**
* @brief qed_port_pretend - pretend to another port when
* accessing the ptt window
*
* @param p_hwfn
* @param p_ptt
* @param port_id - the port to pretend to
*/
void qed_port_pretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 port_id);
/**
* @brief qed_port_unpretend - cancel any previously set port
* pretend
*
* @param p_hwfn
* @param p_ptt
*/
void qed_port_unpretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
* @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd
* this is declared here since other files will require it.
* @param idx
*/
u32 qed_dmae_idx_to_go_cmd(u8 idx);
/**
* @brief qed_dmae_info_alloc - Init the dmae_info structure
* which is part of p_hwfn.
* @param p_hwfn
*/
int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn);
/**
* @brief qed_dmae_info_free - Free the dmae_info structure
* which is part of p_hwfn
*
* @param p_hwfn
*/
void qed_dmae_info_free(struct qed_hwfn *p_hwfn);
union qed_qm_pq_params {
struct {
u8 tc;
} core;
struct {
u8 is_vf;
u8 vf_id;
u8 tc;
} eth;
};
u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
enum protocol_type proto,
union qed_qm_pq_params *params);
int qed_init_fw_data(struct qed_dev *cdev,
const u8 *fw_data);
#endif

View File

@ -0,0 +1,798 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
enum cminterface {
MCM_SEC,
MCM_PRI,
UCM_SEC,
UCM_PRI,
TCM_SEC,
TCM_PRI,
YCM_SEC,
YCM_PRI,
XCM_SEC,
XCM_PRI,
NUM_OF_CM_INTERFACES
};
/* general constants */
#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
QM_PQ_ELEMENT_SIZE, \
0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, \
0x100) - 1 : 0)
#define QM_INVALID_PQ_ID 0xffff
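/* Illustrative arithmetic for the PQ sizing macros above (values chosen
 * only for illustration): for a PQ with 1023 elements,
 * QM_PQ_MEM_4KB(1023) = DIV_ROUND_UP((1023 + 1) * QM_PQ_ELEMENT_SIZE, 0x1000)
 *                     = DIV_ROUND_UP(4096, 4096) = 1,
 * i.e. one 4KB page of PQ memory, while
 * QM_PQ_SIZE_256B(1024) = DIV_ROUND_UP(1024, 0x100) - 1 = 3
 * is the size value later stored to the QM_REG_MAXPQSIZE runtime registers.
 */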
/* feature enable */
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1
/* other PQ constants */
#define QM_OTHER_PQS_PER_PF 4
/* WFQ constants */
#define QM_WFQ_UPPER_BOUND 6250000
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
#define QM_WFQ_VP_PQ_PF_SHIFT 5
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
#define QM_WFQ_MAX_INC_VAL 4375000
#define QM_WFQ_INIT_CRD(inc_val) (2 * (inc_val))
/* RL constants */
#define QM_RL_UPPER_BOUND 6250000
#define QM_RL_PERIOD 5 /* in us */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
#define QM_RL_INC_VAL(rate) max_t(u32, \
(((rate ? rate : 1000000) \
* QM_RL_PERIOD) / 8), 1)
#define QM_RL_MAX_INC_VAL 4375000
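/* Illustrative arithmetic for the increment macros above (sketch only):
 * QM_WFQ_INC_VAL(100) = 100 * 0x9000 = 3686400, which is below
 * QM_WFQ_MAX_INC_VAL and is therefore accepted by qed_pf_wfq_rt_init().
 * QM_RL_INC_VAL(1000000) = max((1000000 * QM_RL_PERIOD) / 8, 1) = 625000,
 * which is below QM_RL_MAX_INC_VAL and is therefore accepted by the rate
 * limiter init routines.
 */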
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1
#define EAGLE_WORKAROUND_TC 7
/* Command Queue constants */
#define PBF_CMDQ_PURE_LB_LINES 150
#define PBF_CMDQ_EAGLE_WORKAROUND_LINES 8
#define PBF_CMDQ_LINES_RT_OFFSET(voq) ( \
PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
(PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) ( \
PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) ((((pbf_cmd_lines) - \
4) * \
2) | QM_LINE_CRD_REG_SIGN_BIT)
/* BTB: blocks constants (block size = 256B) */
#define BTB_JUMBO_PKT_BLOCKS 38
#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
#define BTB_EAGLE_WORKAROUND_BLOCKS 4
#define BTB_PURE_LB_FACTOR 10
#define BTB_PURE_LB_RATIO 7
/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH 32
#define QM_STOP_CMD_ADDR 0x2
#define QM_STOP_CMD_STRUCT_SIZE 2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
#define QM_STOP_CMD_PAUSE_MASK_MASK -1
#define QM_STOP_CMD_GROUP_ID_OFFSET 1
#define QM_STOP_CMD_GROUP_ID_SHIFT 16
#define QM_STOP_CMD_GROUP_ID_MASK 15
#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
#define QM_STOP_CMD_PQ_TYPE_MASK 1
#define QM_STOP_CMD_MAX_POLL_COUNT 100
#define QM_STOP_CMD_POLL_PERIOD_US 500
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd) cmd ## _STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, \
value) SET_FIELD(var[cmd ## _ ## field ## \
_OFFSET], \
cmd ## _ ## field, \
value)
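/* For reference, a sketch of how the command macro above expands:
 * QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK, pq_mask) becomes
 * SET_FIELD(cmd_arr[QM_STOP_CMD_PAUSE_MASK_OFFSET],
 *           QM_STOP_CMD_PAUSE_MASK, pq_mask),
 * i.e. the field is written into dword 0 of the command buffer using the
 * _SHIFT/_MASK definitions above (see qed_send_qm_stop_cmd() below).
 */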
/* QM: VOQ macros */
#define PHYS_VOQ(port, tc, max_phy_tcs_pr_port) ((port) * \
(max_phy_tcs_pr_port) \
+ (tc))
#define LB_VOQ(port) ( \
MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phy_tcs_pr_port) \
((tc) < \
LB_TC ? PHYS_VOQ(port, \
tc, \
max_phy_tcs_pr_port) \
: LB_VOQ(port))
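/* Illustrative mapping (sketch only): with max_phy_tcs_pr_port = 4 and a
 * physical TC of 2 (i.e. tc < LB_TC), VOQ(1, 2, 4) resolves to
 * PHYS_VOQ(1, 2, 4) = 1 * 4 + 2 = 6, while the pure-LB TC on the same port,
 * VOQ(1, LB_TC, 4), resolves to LB_VOQ(1) = MAX_PHYS_VOQS + 1.
 */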
/******************** INTERNAL IMPLEMENTATION *********************/
/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
bool pf_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
/* enable RLs for all VOQs */
STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
(1 << MAX_NUM_VOQS) - 1);
/* write RL period */
STORE_RT_REG(p_hwfn,
QM_REG_RLPFPERIOD_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
STORE_RT_REG(p_hwfn,
QM_REG_RLPFPERIODTIMER_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
/* set credit threshold for QM bypass flow */
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
QM_RL_UPPER_BOUND);
}
}
/* Prepare PF WFQ enable/disable runtime init values */
static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn,
bool pf_wfq_en)
{
STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
/* set credit threshold for QM bypass flow */
if (pf_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
QM_WFQ_UPPER_BOUND);
}
/* Prepare VPORT RL enable/disable runtime init values */
static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn,
bool vport_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
vport_rl_en ? 1 : 0);
if (vport_rl_en) {
/* write RL period (use timer 0 only) */
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
/* set credit threshold for QM bypass flow */
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
QM_RL_UPPER_BOUND);
}
}
/* Prepare VPORT WFQ enable/disable runtime init values */
static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn,
bool vport_wfq_en)
{
STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
vport_wfq_en ? 1 : 0);
/* set credit threshold for QM bypass flow */
if (vport_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
QM_WFQ_UPPER_BOUND);
}
/* Prepare runtime init values to allocate PBF command queue lines for
* the specified VOQ
*/
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
u8 voq,
u16 cmdq_lines)
{
u32 qm_line_crd;
/* In A0, limit the size of the PBF queue so that it can hold only 511
* commands with the minimum command size of 4 (the FCoE minimum size).
*/
bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
if (is_bb_a0)
cmdq_lines = min_t(u32, cmdq_lines, 1022);
qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
(u32)cmdq_lines);
STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
qm_line_crd);
}
/* Prepare runtime init values to allocate PBF command queue lines. */
static void qed_cmdq_lines_rt_init(
struct qed_hwfn *p_hwfn,
u8 max_ports_per_engine,
u8 max_phys_tcs_per_port,
struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
u8 tc, voq, port_id;
/* clear PBF lines for all VOQs */
for (voq = 0; voq < MAX_NUM_VOQS; voq++)
STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
if (port_params[port_id].active) {
u16 phys_lines, phys_lines_per_tc;
u8 phys_tcs = port_params[port_id].num_active_phys_tcs;
/* find #lines to divide between the active
* physical TCs.
*/
phys_lines = port_params[port_id].num_pbf_cmd_lines -
PBF_CMDQ_PURE_LB_LINES;
/* find #lines per active physical TC */
phys_lines_per_tc = phys_lines / phys_tcs;
/* init registers per active TC */
for (tc = 0; tc < phys_tcs; tc++) {
voq = PHYS_VOQ(port_id, tc,
max_phys_tcs_per_port);
qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
phys_lines_per_tc);
}
/* init registers for pure LB TC */
qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
PBF_CMDQ_PURE_LB_LINES);
}
}
}
static void qed_btb_blocks_rt_init(
struct qed_hwfn *p_hwfn,
u8 max_ports_per_engine,
u8 max_phys_tcs_per_port,
struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
u32 usable_blocks, pure_lb_blocks, phys_blocks;
u8 tc, voq, port_id;
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
u32 temp;
u8 phys_tcs;
if (!port_params[port_id].active)
continue;
phys_tcs = port_params[port_id].num_active_phys_tcs;
/* subtract headroom blocks */
usable_blocks = port_params[port_id].num_btb_blocks -
BTB_HEADROOM_BLOCKS;
/* find blocks per physical TC. Use a factor to avoid
* floating-point arithmetic.
*/
pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
(phys_tcs * BTB_PURE_LB_FACTOR +
BTB_PURE_LB_RATIO);
pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
pure_lb_blocks / BTB_PURE_LB_FACTOR);
phys_blocks = (usable_blocks - pure_lb_blocks) / phys_tcs;
/* init physical TCs */
for (tc = 0; tc < phys_tcs; tc++) {
voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port);
STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
phys_blocks);
}
/* init pure LB TC */
temp = LB_VOQ(port_id);
STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
pure_lb_blocks);
}
}
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void qed_tx_pq_map_rt_init(
struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_qm_pf_rt_init_params *p_params,
u32 base_mem_addr_4kb)
{
struct init_qm_vport_params *vport_params = p_params->vport_params;
u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
QM_PF_QUEUE_GROUP_SIZE;
bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
u16 i, pq_id, pq_group;
/* a bit per Tx PQ indicating if the PQ is associated with a VF */
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
u32 mem_addr_4kb = base_mem_addr_4kb;
/* set mapping from PQ group to PF */
for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
(u32)(p_params->pf_id));
/* set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
QM_PQ_SIZE_256B(p_params->num_pf_cids));
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
QM_PQ_SIZE_256B(p_params->num_vf_cids));
/* go over all Tx PQs */
for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
p_params->max_phys_tcs_per_port);
bool is_vf_pq = (i >= p_params->num_pf_pqs);
struct qm_rf_pq_map tx_pq_map;
/* update first Tx PQ of VPORT/TC */
u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
p_params->start_vport;
u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];
if (first_tx_pq_id == QM_INVALID_PQ_ID) {
/* create new VP PQ */
pq_ids[p_params->pq_params[i].tc_id] = pq_id;
first_tx_pq_id = pq_id;
/* map VP PQ to VOQ and PF */
STORE_RT_REG(p_hwfn,
QM_REG_WFQVPMAP_RT_OFFSET +
first_tx_pq_id,
(voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
(p_params->pf_id <<
QM_WFQ_VP_PQ_PF_SHIFT));
}
/* fill PQ map entry */
memset(&tx_pq_map, 0, sizeof(tx_pq_map));
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
is_vf_pq ? 1 : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
is_vf_pq ? p_params->pq_params[i].vport_id : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
p_params->pq_params[i].wrr_group);
/* write PQ map entry to CAM */
STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
*((u32 *)&tx_pq_map));
/* set base address */
STORE_RT_REG(p_hwfn,
QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
/* check if VF PQ */
if (is_vf_pq) {
/* if PQ is associated with a VF, add indication
* to PQ VF mask
*/
tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
(1 << (pq_id % tx_pq_vf_mask_width));
mem_addr_4kb += vport_pq_mem_4kb;
} else {
mem_addr_4kb += pq_mem_4kb;
}
}
/* store Tx PQ VF mask to size select register */
for (i = 0; i < num_tx_pq_vf_masks; i++) {
if (tx_pq_vf_mask[i]) {
if (is_bb_a0) {
u32 curr_mask = 0, addr;
addr = QM_REG_MAXPQSIZETXSEL_0 + (i * 4);
if (!p_params->is_first_pf)
curr_mask = qed_rd(p_hwfn, p_ptt,
addr);
addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
STORE_RT_REG(p_hwfn, addr,
curr_mask | tx_pq_vf_mask[i]);
} else {
u32 addr;
addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
STORE_RT_REG(p_hwfn, addr,
tx_pq_vf_mask[i]);
}
}
}
}
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
u8 port_id,
u8 pf_id,
u32 num_pf_cids,
u32 num_tids,
u32 base_mem_addr_4kb)
{
u16 i, pq_id;
/* a single other PQ group is used in each PF,
* where PQ group i is used in PF i.
*/
u16 pq_group = pf_id;
u32 pq_size = num_pf_cids + num_tids;
u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
u32 mem_addr_4kb = base_mem_addr_4kb;
/* map PQ group to PF */
STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
(u32)(pf_id));
/* set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
QM_PQ_SIZE_256B(pq_size));
/* set base address */
for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
STORE_RT_REG(p_hwfn,
QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
mem_addr_4kb += pq_mem_4kb;
}
}
/* Prepare PF WFQ runtime init values for the specified PF.
* Return -1 on error.
*/
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
struct qed_qm_pf_rt_init_params *p_params)
{
u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
u32 crd_reg_offset;
u32 inc_val;
u16 i;
if (p_params->pf_id < MAX_NUM_PFS_BB)
crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
else
crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
(p_params->pf_id % MAX_NUM_PFS_BB);
inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
if (inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
return -1;
}
STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
inc_val);
STORE_RT_REG(p_hwfn,
QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
for (i = 0; i < num_tx_pqs; i++) {
u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
p_params->max_phys_tcs_per_port);
OVERWRITE_RT_REG(p_hwfn,
crd_reg_offset + voq * MAX_NUM_PFS_BB,
QM_WFQ_INIT_CRD(inc_val) |
QM_WFQ_CRD_REG_SIGN_BIT);
}
return 0;
}
/* Prepare PF RL runtime init values for the specified PF.
* Return -1 on error.
*/
static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn,
u8 pf_id,
u32 pf_rl)
{
u32 inc_val = QM_RL_INC_VAL(pf_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
return -1;
}
STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
return 0;
}
/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
* Return -1 on error.
*/
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
u8 start_vport,
u8 num_vports,
struct init_qm_vport_params *vport_params)
{
u8 tc, i, vport_id;
u32 inc_val;
/* go over all PF VPORTs */
for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
u32 temp = QM_REG_WFQVPUPPERBOUND_RT_OFFSET;
u16 *pq_ids = &vport_params[i].first_tx_pq_id[0];
if (!vport_params[i].vport_wfq)
continue;
inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
if (inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn,
"Invalid VPORT WFQ weight configuration");
return -1;
}
/* each VPORT can have several VPORT PQ IDs for
* different TCs
*/
for (tc = 0; tc < NUM_OF_TCS; tc++) {
u16 vport_pq_id = pq_ids[tc];
if (vport_pq_id != QM_INVALID_PQ_ID) {
STORE_RT_REG(p_hwfn,
QM_REG_WFQVPWEIGHT_RT_OFFSET +
vport_pq_id, inc_val);
STORE_RT_REG(p_hwfn, temp + vport_pq_id,
QM_WFQ_UPPER_BOUND |
QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn,
QM_REG_WFQVPCRD_RT_OFFSET +
vport_pq_id,
QM_WFQ_INIT_CRD(inc_val) |
QM_WFQ_CRD_REG_SIGN_BIT);
}
}
}
return 0;
}
static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
u8 start_vport,
u8 num_vports,
struct init_qm_vport_params *vport_params)
{
u8 i, vport_id;
/* go over all PF VPORTs */
for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn,
"Invalid VPORT rate-limit configuration");
return -1;
}
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
inc_val);
}
return 0;
}
static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
u32 reg_val, i;
for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
i++) {
udelay(QM_STOP_CMD_POLL_PERIOD_US);
reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
}
/* check if timeout while waiting for SDM command ready */
if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"Timeout when waiting for QM SDM command ready signal\n");
return false;
}
return true;
}
static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 cmd_addr,
u32 cmd_data_lsb,
u32 cmd_data_msb)
{
if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
return false;
qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}
/******************** INTERFACE IMPLEMENTATION *********************/
u32 qed_qm_pf_mem_size(u8 pf_id,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 num_pf_pqs,
u16 num_vf_pqs)
{
return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
int qed_qm_common_rt_init(
struct qed_hwfn *p_hwfn,
struct qed_qm_common_rt_init_params *p_params)
{
/* init AFullOprtnstcCrdMask */
u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
(QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
(p_params->pf_wfq_en <<
QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
(p_params->vport_wfq_en <<
QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
(p_params->pf_rl_en <<
QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
(p_params->vport_rl_en <<
QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
(QM_OPPOR_FW_STOP_DEF <<
QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
(QM_OPPOR_PQ_EMPTY_DEF <<
QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
qed_cmdq_lines_rt_init(p_hwfn,
p_params->max_ports_per_engine,
p_params->max_phys_tcs_per_port,
p_params->port_params);
qed_btb_blocks_rt_init(p_hwfn,
p_params->max_ports_per_engine,
p_params->max_phys_tcs_per_port,
p_params->port_params);
return 0;
}
int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_qm_pf_rt_init_params *p_params)
{
struct init_qm_vport_params *vport_params = p_params->vport_params;
u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
p_params->num_tids) *
QM_OTHER_PQS_PER_PF;
u8 tc, i;
/* clear first Tx PQ ID array for each VPORT */
for (i = 0; i < p_params->num_vports; i++)
for (tc = 0; tc < NUM_OF_TCS; tc++)
vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
/* map Other PQs (if any) */
qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
p_params->num_pf_cids, p_params->num_tids, 0);
/* map Tx PQs */
qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
if (p_params->pf_wfq)
if (qed_pf_wfq_rt_init(p_hwfn, p_params))
return -1;
if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
return -1;
if (qed_vp_wfq_rt_init(p_hwfn, p_params->start_vport,
p_params->num_vports, vport_params))
return -1;
if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
p_params->num_vports, vport_params))
return -1;
return 0;
}
int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 pf_id,
u32 pf_rl)
{
u32 inc_val = QM_RL_INC_VAL(pf_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
return -1;
}
qed_wr(p_hwfn, p_ptt,
QM_REG_RLPFCRD + pf_id * 4,
QM_RL_CRD_REG_SIGN_BIT);
qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
return 0;
}
int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 vport_id,
u32 vport_rl)
{
u32 inc_val = QM_RL_INC_VAL(vport_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
return -1;
}
qed_wr(p_hwfn, p_ptt,
QM_REG_RLGLBLCRD + vport_id * 4,
QM_RL_CRD_REG_SIGN_BIT);
qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
return 0;
}
bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool is_release_cmd,
bool is_tx_pq,
u16 start_pq,
u16 num_pqs)
{
u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
/* set command's PQ type */
QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
/* set PQ bit in mask (stop command only) */
if (!is_release_cmd)
pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
/* if last PQ or end of PQ mask, write command */
if ((pq_id == last_pq) ||
(pq_id % QM_STOP_PQ_MASK_WIDTH ==
(QM_STOP_PQ_MASK_WIDTH - 1))) {
QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
PAUSE_MASK, pq_mask);
QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
GROUP_ID,
pq_id / QM_STOP_PQ_MASK_WIDTH);
if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
cmd_arr[0], cmd_arr[1]))
return false;
pq_mask = 0;
}
}
return true;
}

View File

@ -0,0 +1,531 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#define QED_INIT_MAX_POLL_COUNT 100
#define QED_INIT_POLL_PERIOD_US 500
static u32 pxp_global_win[] = {
0,
0,
0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
0,
0,
0,
0,
0,
0,
0,
};
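/* As the per-entry comments above indicate, each non-zero entry is the
 * window's GRC address in 4KB units (address = entry << 12); zero entries
 * leave the corresponding global window unused.
 */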
void qed_init_iro_array(struct qed_dev *cdev)
{
cdev->iro_arr = iro_arr;
}
/* Runtime configuration helpers */
void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
{
int i;
for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
p_hwfn->rt_data[i].b_valid = false;
}
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
u32 rt_offset,
u32 val)
{
p_hwfn->rt_data[rt_offset].init_val = val;
p_hwfn->rt_data[rt_offset].b_valid = true;
}
void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
u32 rt_offset,
u32 *val,
size_t size)
{
size_t i;
for (i = 0; i < size / sizeof(u32); i++) {
p_hwfn->rt_data[rt_offset + i].init_val = val[i];
p_hwfn->rt_data[rt_offset + i].b_valid = true;
}
}
static void qed_init_rt(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 addr,
u32 rt_offset,
u32 size)
{
struct qed_rt_data *rt_data = p_hwfn->rt_data + rt_offset;
u32 i;
for (i = 0; i < size; i++) {
if (!rt_data[i].b_valid)
continue;
qed_wr(p_hwfn, p_ptt, addr + (i << 2), rt_data[i].init_val);
}
}
int qed_init_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_rt_data *rt_data;
rt_data = kzalloc(sizeof(*rt_data) * RUNTIME_ARRAY_SIZE, GFP_ATOMIC);
if (!rt_data)
return -ENOMEM;
p_hwfn->rt_data = rt_data;
return 0;
}
void qed_init_free(struct qed_hwfn *p_hwfn)
{
kfree(p_hwfn->rt_data);
p_hwfn->rt_data = NULL;
}
static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 addr,
u32 dmae_data_offset,
u32 size,
const u32 *buf,
bool b_must_dmae,
bool b_can_dmae)
{
int rc = 0;
/* Perform DMAE only for lengthy enough sections or for wide-bus */
if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
const u32 *data = buf + dmae_data_offset;
u32 i;
for (i = 0; i < size; i++)
qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
} else {
rc = qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(buf + dmae_data_offset),
addr, size, 0);
}
return rc;
}
static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 addr,
u32 fill,
u32 fill_count)
{
static u32 zero_buffer[DMAE_MAX_RW_SIZE];
memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
/* invoke the DMAE virtual/physical buffer API with
* 1. DMAE init channel
* 2. addr,
* 3. the local zero_buffer,
* 4. fill_count
*/
return qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(&zero_buffer[0]),
addr, fill_count,
QED_DMAE_FLAG_RW_REPL_SRC);
}
static void qed_init_fill(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 addr,
u32 fill,
u32 fill_count)
{
u32 i;
for (i = 0; i < fill_count; i++, addr += sizeof(u32))
qed_wr(p_hwfn, p_ptt, addr, fill);
}
static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct init_write_op *cmd,
bool b_must_dmae,
bool b_can_dmae)
{
u32 data = le32_to_cpu(cmd->data);
u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
u32 offset, output_len, input_len, max_size;
struct qed_dev *cdev = p_hwfn->cdev;
union init_array_hdr *hdr;
const u32 *array_data;
int rc = 0;
u32 size;
array_data = cdev->fw_data->arr_data;
hdr = (union init_array_hdr *)(array_data +
dmae_array_offset);
data = le32_to_cpu(hdr->raw.data);
switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
case INIT_ARR_ZIPPED:
offset = dmae_array_offset + 1;
input_len = GET_FIELD(data,
INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
max_size = MAX_ZIPPED_SIZE * 4;
memset(p_hwfn->unzip_buf, 0, max_size);
output_len = qed_unzip_data(p_hwfn, input_len,
(u8 *)&array_data[offset],
max_size, (u8 *)p_hwfn->unzip_buf);
if (output_len) {
rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0,
output_len,
p_hwfn->unzip_buf,
b_must_dmae, b_can_dmae);
} else {
DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n");
rc = -EINVAL;
}
break;
case INIT_ARR_PATTERN:
{
u32 repeats = GET_FIELD(data,
INIT_ARRAY_PATTERN_HDR_REPETITIONS);
u32 i;
size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
for (i = 0; i < repeats; i++, addr += size << 2) {
rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
dmae_array_offset + 1,
size, array_data,
b_must_dmae, b_can_dmae);
if (rc)
break;
}
break;
}
case INIT_ARR_STANDARD:
size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
dmae_array_offset + 1,
size, array_data,
b_must_dmae, b_can_dmae);
break;
}
return rc;
}
/* init_ops write command */
static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct init_write_op *cmd,
bool b_can_dmae)
{
u32 data = le32_to_cpu(cmd->data);
u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
union init_write_args *arg = &cmd->args;
int rc = 0;
/* Sanitize */
if (b_must_dmae && !b_can_dmae) {
DP_NOTICE(p_hwfn,
"Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
addr);
return -EINVAL;
}
switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
case INIT_SRC_INLINE:
qed_wr(p_hwfn, p_ptt, addr,
le32_to_cpu(arg->inline_val));
break;
case INIT_SRC_ZEROS:
if (b_must_dmae ||
(b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64)))
rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0,
le32_to_cpu(arg->zeros_count));
else
qed_init_fill(p_hwfn, p_ptt, addr, 0,
le32_to_cpu(arg->zeros_count));
break;
case INIT_SRC_ARRAY:
rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd,
b_must_dmae, b_can_dmae);
break;
case INIT_SRC_RUNTIME:
qed_init_rt(p_hwfn, p_ptt, addr,
le16_to_cpu(arg->runtime.offset),
le16_to_cpu(arg->runtime.size));
break;
}
return rc;
}
static inline bool comp_eq(u32 val, u32 expected_val)
{
return val == expected_val;
}
static inline bool comp_and(u32 val, u32 expected_val)
{
return (val & expected_val) == expected_val;
}
static inline bool comp_or(u32 val, u32 expected_val)
{
return (val | expected_val) > 0;
}
/* init_ops read/poll commands */
static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct init_read_op *cmd)
{
u32 data = le32_to_cpu(cmd->op_data);
u32 addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
bool (*comp_check)(u32 val,
u32 expected_val);
u32 delay = QED_INIT_POLL_PERIOD_US, val;
val = qed_rd(p_hwfn, p_ptt, addr);
data = le32_to_cpu(cmd->op_data);
if (GET_FIELD(data, INIT_READ_OP_POLL)) {
int i;
switch (GET_FIELD(data, INIT_READ_OP_POLL_COMP)) {
case INIT_COMPARISON_EQ:
comp_check = comp_eq;
break;
case INIT_COMPARISON_OR:
comp_check = comp_or;
break;
case INIT_COMPARISON_AND:
comp_check = comp_and;
break;
default:
comp_check = NULL;
DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
data);
return;
}
for (i = 0;
i < QED_INIT_MAX_POLL_COUNT &&
!comp_check(val, le32_to_cpu(cmd->expected_val));
i++) {
udelay(delay);
val = qed_rd(p_hwfn, p_ptt, addr);
}
if (i == QED_INIT_MAX_POLL_COUNT)
DP_ERR(p_hwfn,
"Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
addr, le32_to_cpu(cmd->expected_val),
val, data);
}
}
/* init_ops callbacks entry point */
static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct init_callback_op *p_cmd)
{
DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
}
static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
u16 *offset,
int modes)
{
struct qed_dev *cdev = p_hwfn->cdev;
const u8 *modes_tree_buf;
u8 arg1, arg2, tree_val;
modes_tree_buf = cdev->fw_data->modes_tree_buf;
tree_val = modes_tree_buf[(*offset)++];
switch (tree_val) {
case INIT_MODE_OP_NOT:
return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1;
case INIT_MODE_OP_OR:
arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
return arg1 | arg2;
case INIT_MODE_OP_AND:
arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
return arg1 & arg2;
default:
tree_val -= MAX_INIT_MODE_OPS;
return (modes & (1 << tree_val)) ? 1 : 0;
}
}
static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
struct init_if_mode_op *p_cmd,
int modes)
{
u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);
if (qed_init_cmd_mode_match(p_hwfn, &offset, modes))
return 0;
else
return GET_FIELD(le32_to_cpu(p_cmd->op_data),
INIT_IF_MODE_OP_CMD_OFFSET);
}
static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
struct init_if_phase_op *p_cmd,
u32 phase,
u32 phase_id)
{
u32 data = le32_to_cpu(p_cmd->phase_data);
u32 op_data = le32_to_cpu(p_cmd->op_data);
if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
else
return 0;
}
int qed_init_run(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
int phase,
int phase_id,
int modes)
{
struct qed_dev *cdev = p_hwfn->cdev;
u32 cmd_num, num_init_ops;
union init_op *init_ops;
bool b_dmae = false;
int rc = 0;
num_init_ops = cdev->fw_data->init_ops_size;
init_ops = cdev->fw_data->init_ops;
p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
if (!p_hwfn->unzip_buf) {
DP_NOTICE(p_hwfn, "Failed to allocate unzip buffer\n");
return -ENOMEM;
}
for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
union init_op *cmd = &init_ops[cmd_num];
u32 data = le32_to_cpu(cmd->raw.op_data);
switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
case INIT_OP_WRITE:
rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
b_dmae);
break;
case INIT_OP_READ:
qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
break;
case INIT_OP_IF_MODE:
cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode,
modes);
break;
case INIT_OP_IF_PHASE:
cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
phase, phase_id);
b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
break;
case INIT_OP_DELAY:
/* qed_init_run is always invoked from
* sleep-able context
*/
udelay(le32_to_cpu(cmd->delay.delay));
break;
case INIT_OP_CALLBACK:
qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
break;
}
if (rc)
break;
}
kfree(p_hwfn->unzip_buf);
return rc;
}
void qed_gtt_init(struct qed_hwfn *p_hwfn)
{
u32 gtt_base;
u32 i;
/* Set the global windows */
gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;
for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++)
if (pxp_global_win[i])
REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
pxp_global_win[i]);
}
int qed_init_fw_data(struct qed_dev *cdev,
const u8 *data)
{
struct qed_fw_data *fw = cdev->fw_data;
struct bin_buffer_hdr *buf_hdr;
u32 offset, len;
if (!data) {
DP_NOTICE(cdev, "Invalid fw data\n");
return -EINVAL;
}
buf_hdr = (struct bin_buffer_hdr *)data;
offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
fw->init_ops = (union init_op *)(data + offset);
offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
fw->arr_data = (u32 *)(data + offset);
offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
fw->modes_tree_buf = (u8 *)(data + offset);
len = buf_hdr[BIN_BUF_INIT_CMD].length;
fw->init_ops_size = len / sizeof(struct init_raw_op);
return 0;
}

View File

@ -0,0 +1,110 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_INIT_OPS_H
#define _QED_INIT_OPS_H
#include <linux/types.h>
#include <linux/slab.h>
#include "qed.h"
/**
* @brief qed_init_iro_array - init iro_arr.
*
*
* @param cdev
*/
void qed_init_iro_array(struct qed_dev *cdev);
/**
* @brief qed_init_run - Run the init-sequence.
*
*
* @param p_hwfn
* @param p_ptt
* @param phase
* @param phase_id
* @param modes
* @return _qed_status_t
*/
int qed_init_run(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
int phase,
int phase_id,
int modes);
/**
* @brief qed_init_alloc - Allocate RT array, Store 'values' ptrs.
*
*
* @param p_hwfn
*
* @return _qed_status_t
*/
int qed_init_alloc(struct qed_hwfn *p_hwfn);
/**
* @brief qed_init_free - Free the allocated RT array.
*
*
* @param p_hwfn
*/
void qed_init_free(struct qed_hwfn *p_hwfn);
/**
* @brief qed_init_clear_rt_data - Clears the runtime init array.
*
*
* @param p_hwfn
*/
void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn);
/**
* @brief qed_init_store_rt_reg - Store a configuration value in the RT array.
*
*
* @param p_hwfn
* @param rt_offset
* @param val
*/
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
u32 rt_offset,
u32 val);
#define STORE_RT_REG(hwfn, offset, val) \
qed_init_store_rt_reg(hwfn, offset, val)
#define OVERWRITE_RT_REG(hwfn, offset, val) \
qed_init_store_rt_reg(hwfn, offset, val)
/**
* @brief qed_init_store_rt_agg - Store an array of values in the RT array.
*
*
* @param p_hwfn
* @param rt_offset
* @param val
* @param size
*/
void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
u32 rt_offset,
u32 *val,
size_t size);
#define STORE_RT_REG_AGG(hwfn, offset, val) \
qed_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
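/* Usage sketch: the aggregate form stores a whole structure as consecutive
 * u32 RT entries, e.g. (as done for CAU status-block entries in qed_int.c):
 *
 *	STORE_RT_REG_AGG(p_hwfn,
 *			 CAU_REG_SB_VAR_MEMORY_RT_OFFSET + igu_sb_id * 2,
 *			 sb_entry);
 */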
/**
* @brief
* Initialize GTT global windows and set admin window
* related params of GTT/PTT to default values.
*
* @param p_hwfn
*/
void qed_gtt_init(struct qed_hwfn *p_hwfn);
#endif

View File

@ -0,0 +1,802 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
struct qed_pi_info {
qed_int_comp_cb_t comp_cb;
void *cookie;
};
struct qed_sb_sp_info {
struct qed_sb_info sb_info;
/* per protocol index data */
struct qed_pi_info pi_info_arr[PIS_PER_SB];
};
void qed_int_sp_dpc(unsigned long hwfn_cookie)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
struct qed_pi_info *pi_info = NULL;
struct qed_sb_info *sb_info;
int arr_size;
u16 rc = 0;
if (!p_hwfn) {
DP_ERR(p_hwfn->cdev, "DPC called - no hwfn!\n");
return;
}
if (!p_hwfn->p_sp_sb) {
DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
return;
}
sb_info = &p_hwfn->p_sp_sb->sb_info;
arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
if (!sb_info) {
DP_ERR(p_hwfn->cdev,
"Status block is NULL - cannot ack interrupts\n");
return;
}
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
p_hwfn, p_hwfn->my_id);
/* Disable ack for def status block. Required both for msix and
* for inta in non-mask mode; in inta it does no harm.
*/
qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);
/* Gather Interrupts/Attentions information */
if (!sb_info->sb_virt) {
DP_ERR(
p_hwfn->cdev,
"Interrupt Status block is NULL - cannot check for new interrupts!\n");
} else {
u32 tmp_index = sb_info->sb_ack;
rc = qed_sb_update_sb_idx(sb_info);
DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
"Interrupt indices: 0x%08x --> 0x%08x\n",
tmp_index, sb_info->sb_ack);
}
/* Check if we expect interrupts at this time. if not just ack them */
if (!(rc & QED_SB_EVENT_MASK)) {
qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
return;
}
/* Check the validity of the DPC ptt. If not ack interrupts and fail */
if (!p_hwfn->p_dpc_ptt) {
DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
return;
}
if (rc & QED_SB_IDX) {
int pi;
/* Invoke the callbacks registered for the protocol indices */
for (pi = 0; pi < arr_size; pi++) {
pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
if (pi_info->comp_cb)
pi_info->comp_cb(p_hwfn, pi_info->cookie);
}
}
qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}
/* coalescing timeout = timeset << (timer_res + 1) */
#define QED_CAU_DEF_RX_USECS 24
#define QED_CAU_DEF_TX_USECS 48
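/* Worked example (sketch, assuming a timer resolution of 1 as noted in
 * qed_init_cau_sb_entry() below): for the default RX coalescing of 24 usecs,
 * timeset = 24 >> (1 + 1) = 6, and the resulting coalescing timeout is
 * 6 << (1 + 1) = 24 usecs, matching QED_CAU_DEF_RX_USECS.
 */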
void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
struct cau_sb_entry *p_sb_entry,
u8 pf_id,
u16 vf_number,
u8 vf_valid)
{
u32 cau_state;
memset(p_sb_entry, 0, sizeof(*p_sb_entry));
SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
/* setting the timer resolution to a fixed value (= 1) */
SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
QED_CAU_DEF_RX_TIMER_RES);
SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
QED_CAU_DEF_TX_TIMER_RES);
cau_state = CAU_HC_DISABLE_STATE;
if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
cau_state = CAU_HC_ENABLE_STATE;
if (!p_hwfn->cdev->rx_coalesce_usecs)
p_hwfn->cdev->rx_coalesce_usecs =
QED_CAU_DEF_RX_USECS;
if (!p_hwfn->cdev->tx_coalesce_usecs)
p_hwfn->cdev->tx_coalesce_usecs =
QED_CAU_DEF_TX_USECS;
}
SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}
void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
dma_addr_t sb_phys,
u16 igu_sb_id,
u16 vf_number,
u8 vf_valid)
{
struct cau_sb_entry sb_entry;
u32 val;
qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
vf_number, vf_valid);
if (p_hwfn->hw_init_done) {
val = CAU_REG_SB_ADDR_MEMORY + igu_sb_id * sizeof(u64);
qed_wr(p_hwfn, p_ptt, val, lower_32_bits(sb_phys));
qed_wr(p_hwfn, p_ptt, val + sizeof(u32),
upper_32_bits(sb_phys));
val = CAU_REG_SB_VAR_MEMORY + igu_sb_id * sizeof(u64);
qed_wr(p_hwfn, p_ptt, val, sb_entry.data);
qed_wr(p_hwfn, p_ptt, val + sizeof(u32), sb_entry.params);
} else {
/* Initialize Status Block Address */
STORE_RT_REG_AGG(p_hwfn,
CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
igu_sb_id * 2,
sb_phys);
STORE_RT_REG_AGG(p_hwfn,
CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
igu_sb_id * 2,
sb_entry);
}
/* Configure pi coalescing if set */
if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
u8 timeset = p_hwfn->cdev->rx_coalesce_usecs >>
(QED_CAU_DEF_RX_TIMER_RES + 1);
u8 num_tc = 1, i;
qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
QED_COAL_RX_STATE_MACHINE,
timeset);
timeset = p_hwfn->cdev->tx_coalesce_usecs >>
(QED_CAU_DEF_TX_TIMER_RES + 1);
for (i = 0; i < num_tc; i++) {
qed_int_cau_conf_pi(p_hwfn, p_ptt,
igu_sb_id, TX_PI(i),
QED_COAL_TX_STATE_MACHINE,
timeset);
}
}
}
void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 igu_sb_id,
u32 pi_index,
enum qed_coalescing_fsm coalescing_fsm,
u8 timeset)
{
struct cau_pi_entry pi_entry;
u32 sb_offset;
u32 pi_offset;
sb_offset = igu_sb_id * PIS_PER_SB;
memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
else
SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
pi_offset = sb_offset + pi_index;
if (p_hwfn->hw_init_done) {
qed_wr(p_hwfn, p_ptt,
CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
*((u32 *)&(pi_entry)));
} else {
STORE_RT_REG(p_hwfn,
CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
*((u32 *)&(pi_entry)));
}
}
void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_sb_info *sb_info)
{
/* zero status block and ack counter */
sb_info->sb_ack = 0;
memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
sb_info->igu_sb_id, 0, 0);
}
/**
* @brief qed_get_igu_sb_id - given a sw sb_id return the
* igu_sb_id
*
* @param p_hwfn
* @param sb_id
*
* @return u16
*/
static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
u16 sb_id)
{
u16 igu_sb_id;
/* Assuming a contiguous set of IGU SBs dedicated to the given PF */
if (sb_id == QED_SP_SB_ID)
igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
else
igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
(sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
return igu_sb_id;
}
int qed_int_sb_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_sb_info *sb_info,
void *sb_virt_addr,
dma_addr_t sb_phy_addr,
u16 sb_id)
{
sb_info->sb_virt = sb_virt_addr;
sb_info->sb_phys = sb_phy_addr;
sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
if (sb_id != QED_SP_SB_ID) {
p_hwfn->sbs_info[sb_id] = sb_info;
p_hwfn->num_sbs++;
}
sb_info->cdev = p_hwfn->cdev;
/* The igu address will hold the absolute address that needs to be
* written to for a specific status block
*/
sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
GTT_BAR0_MAP_REG_IGU_CMD +
(sb_info->igu_sb_id << 3);
sb_info->flags |= QED_SB_INFO_INIT;
qed_int_sb_setup(p_hwfn, p_ptt, sb_info);
return 0;
}
int qed_int_sb_release(struct qed_hwfn *p_hwfn,
struct qed_sb_info *sb_info,
u16 sb_id)
{
if (sb_id == QED_SP_SB_ID) {
DP_ERR(p_hwfn, "Do not free the SP SB using this function");
return -EINVAL;
}
/* zero status block and ack counter */
sb_info->sb_ack = 0;
memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
p_hwfn->sbs_info[sb_id] = NULL;
p_hwfn->num_sbs--;
return 0;
}
static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
{
struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
if (p_sb) {
if (p_sb->sb_info.sb_virt)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
SB_ALIGNED_SIZE(p_hwfn),
p_sb->sb_info.sb_virt,
p_sb->sb_info.sb_phys);
kfree(p_sb);
}
}
static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
struct qed_sb_sp_info *p_sb;
dma_addr_t p_phys = 0;
void *p_virt;
/* SB struct */
p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC);
if (!p_sb) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n");
return -ENOMEM;
}
/* SB ring */
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
SB_ALIGNED_SIZE(p_hwfn),
&p_phys, GFP_KERNEL);
if (!p_virt) {
DP_NOTICE(p_hwfn, "Failed to allocate status block\n");
kfree(p_sb);
return -ENOMEM;
}
/* Status Block setup */
p_hwfn->p_sp_sb = p_sb;
qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
p_phys, QED_SP_SB_ID);
memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
return 0;
}
static void qed_int_sp_sb_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
if (!p_hwfn)
return;
if (p_hwfn->p_sp_sb)
qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
else
DP_NOTICE(p_hwfn->cdev,
"Failed to setup Slow path status block - NULL pointer\n");
}
int qed_int_register_cb(struct qed_hwfn *p_hwfn,
qed_int_comp_cb_t comp_cb,
void *cookie,
u8 *sb_idx,
__le16 **p_fw_cons)
{
struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
int qed_status = -ENOMEM;
u8 pi;
/* Look for a free index */
for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
if (!p_sp_sb->pi_info_arr[pi].comp_cb) {
p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
p_sp_sb->pi_info_arr[pi].cookie = cookie;
*sb_idx = pi;
*p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
qed_status = 0;
break;
}
}
return qed_status;
}
int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
{
struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
int qed_status = -ENOMEM;
if (p_sp_sb->pi_info_arr[pi].comp_cb) {
p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
p_sp_sb->pi_info_arr[pi].cookie = NULL;
qed_status = 0;
}
return qed_status;
}
u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
{
return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
}
void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_int_mode int_mode)
{
u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
p_hwfn->cdev->int_mode = int_mode;
switch (p_hwfn->cdev->int_mode) {
case QED_INT_MODE_INTA:
igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
break;
case QED_INT_MODE_MSI:
igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
break;
case QED_INT_MODE_MSIX:
igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
break;
case QED_INT_MODE_POLL:
break;
}
qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}
void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_int_mode int_mode)
{
int i;
p_hwfn->b_int_enabled = 1;
/* Mask non-link attentions */
for (i = 0; i < 9; i++)
qed_wr(p_hwfn, p_ptt,
MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0);
/* Enable interrupt Generation */
qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
/* Flush the writes to IGU */
mmiowb();
}
void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
p_hwfn->b_int_enabled = 0;
qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}
#define IGU_CLEANUP_SLEEP_LENGTH (1000)
void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 sb_id,
bool cleanup_set,
u16 opaque_fid
)
{
u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
u32 data = 0;
u32 cmd_ctrl = 0;
u32 val = 0;
u32 sb_bit = 0;
u32 sb_bit_addr = 0;
/* Set the data field */
SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
/* Set the control register */
SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
barrier();
qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
/* Flush the write to IGU */
mmiowb();
/* calculate where to read the status bit from */
sb_bit = 1 << (sb_id % 32);
sb_bit_addr = sb_id / 32 * sizeof(u32);
sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;
/* Now wait for the command to complete */
do {
val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);
if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
break;
usleep_range(5000, 10000);
} while (--sleep_cnt);
if (!sleep_cnt)
DP_NOTICE(p_hwfn,
"Timeout waiting for clear status 0x%08x [for sb %d]\n",
val, sb_id);
}
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 sb_id,
u16 opaque,
bool b_set)
{
int pi;
/* Set */
if (b_set)
qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
/* Clear */
qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
/* Clear the CAU for the SB */
for (pi = 0; pi < 12; pi++)
qed_wr(p_hwfn, p_ptt,
CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
}
void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool b_set,
bool b_slowpath)
{
u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
u32 sb_id = 0;
u32 val = 0;
val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
"IGU cleaning SBs [%d,...,%d]\n",
igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
p_hwfn->hw_info.opaque_fid,
b_set);
if (b_slowpath) {
sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
"IGU cleaning slowpath SB [%d]\n", sb_id);
qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
p_hwfn->hw_info.opaque_fid,
b_set);
}
}
int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
struct qed_igu_info *p_igu_info;
struct qed_igu_block *blk;
u32 val;
u16 sb_id;
u16 prev_sb_id = 0xFF;
p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_ATOMIC);
if (!p_hwfn->hw_info.p_igu_info)
return -ENOMEM;
p_igu_info = p_hwfn->hw_info.p_igu_info;
/* Initialize base sb / sb cnt for PFs */
p_igu_info->igu_base_sb = 0xffff;
p_igu_info->igu_sb_cnt = 0;
p_igu_info->igu_dsb_id = 0xffff;
p_igu_info->igu_base_sb_iov = 0xffff;
for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
sb_id++) {
blk = &p_igu_info->igu_map.igu_blocks[sb_id];
val = qed_rd(p_hwfn, p_ptt,
IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
/* stop scanning when hit first invalid PF entry */
if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
break;
blk->status = QED_IGU_STATUS_VALID;
blk->function_id = GET_FIELD(val,
IGU_MAPPING_LINE_FUNCTION_NUMBER);
blk->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
blk->vector_number = GET_FIELD(val,
IGU_MAPPING_LINE_VECTOR_NUMBER);
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
"IGU_BLOCK[sb_id]:%x:func_id = %d is_pf = %d vector_num = 0x%x\n",
val, blk->function_id, blk->is_pf,
blk->vector_number);
if (blk->is_pf) {
if (blk->function_id == p_hwfn->rel_pf_id) {
blk->status |= QED_IGU_STATUS_PF;
if (blk->vector_number == 0) {
if (p_igu_info->igu_dsb_id == 0xffff)
p_igu_info->igu_dsb_id = sb_id;
} else {
if (p_igu_info->igu_base_sb ==
0xffff) {
p_igu_info->igu_base_sb = sb_id;
} else if (prev_sb_id != sb_id - 1) {
DP_NOTICE(p_hwfn->cdev,
"consecutive igu vectors for HWFN %x broken",
p_hwfn->rel_pf_id);
break;
}
prev_sb_id = sb_id;
/* we don't count the default */
(p_igu_info->igu_sb_cnt)++;
}
}
}
}
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
"IGU igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
p_igu_info->igu_base_sb,
p_igu_info->igu_sb_cnt,
p_igu_info->igu_dsb_id);
if (p_igu_info->igu_base_sb == 0xffff ||
p_igu_info->igu_dsb_id == 0xffff ||
p_igu_info->igu_sb_cnt == 0) {
DP_NOTICE(p_hwfn,
"IGU CAM returned invalid values igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
p_igu_info->igu_base_sb,
p_igu_info->igu_sb_cnt,
p_igu_info->igu_dsb_id);
return -EINVAL;
}
return 0;
}
/**
* @brief Initialize igu runtime registers
*
* @param p_hwfn
*/
void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
{
u32 igu_pf_conf = 0;
igu_pf_conf |= IGU_PF_CONF_FUNC_EN;
STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
}
u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
{
u64 intr_status = 0;
u32 intr_status_lo = 0;
u32 intr_status_hi = 0;
u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
IGU_CMD_INT_ACK_BASE;
u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
IGU_CMD_INT_ACK_BASE;
intr_status_lo = REG_RD(p_hwfn,
GTT_BAR0_MAP_REG_IGU_CMD +
lsb_igu_cmd_addr * 8);
intr_status_hi = REG_RD(p_hwfn,
GTT_BAR0_MAP_REG_IGU_CMD +
msb_igu_cmd_addr * 8);
intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
return intr_status;
}
static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
{
tasklet_init(p_hwfn->sp_dpc,
qed_int_sp_dpc, (unsigned long)p_hwfn);
p_hwfn->b_sp_dpc_enabled = true;
}
static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
{
p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_ATOMIC);
if (!p_hwfn->sp_dpc)
return -ENOMEM;
return 0;
}
static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
{
kfree(p_hwfn->sp_dpc);
}
int qed_int_alloc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
int rc = 0;
rc = qed_int_sp_dpc_alloc(p_hwfn);
if (rc) {
DP_ERR(p_hwfn->cdev, "Failed to allocate sp dpc mem\n");
return rc;
}
rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
if (rc) {
DP_ERR(p_hwfn->cdev, "Failed to allocate sp sb mem\n");
return rc;
}
return rc;
}
void qed_int_free(struct qed_hwfn *p_hwfn)
{
qed_int_sp_sb_free(p_hwfn);
qed_int_sp_dpc_free(p_hwfn);
}
void qed_int_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
qed_int_sp_sb_setup(p_hwfn, p_ptt);
qed_int_sp_dpc_setup(p_hwfn);
}
int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
int *p_iov_blks)
{
struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;
if (!info)
return 0;
if (p_iov_blks)
*p_iov_blks = info->free_blks;
return info->igu_sb_cnt;
}

View File

@ -0,0 +1,391 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_INT_H
#define _QED_INT_H
#include <linux/types.h>
#include <linux/slab.h>
#include "qed.h"
/* Fields of IGU PF CONFIGURATION REGISTER */
#define IGU_PF_CONF_FUNC_EN (0x1 << 0) /* function enable */
#define IGU_PF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
#define IGU_PF_CONF_INT_LINE_EN (0x1 << 2) /* INT enable */
#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */
#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */
/* Igu control commands
*/
enum igu_ctrl_cmd {
IGU_CTRL_CMD_TYPE_RD,
IGU_CTRL_CMD_TYPE_WR,
MAX_IGU_CTRL_CMD
};
/* Control register for the IGU command register
*/
struct igu_ctrl_reg {
u32 ctrl_data;
#define IGU_CTRL_REG_FID_MASK 0xFFFF /* Opaque_FID */
#define IGU_CTRL_REG_FID_SHIFT 0
#define IGU_CTRL_REG_PXP_ADDR_MASK 0xFFF /* Command address */
#define IGU_CTRL_REG_PXP_ADDR_SHIFT 16
#define IGU_CTRL_REG_RESERVED_MASK 0x1
#define IGU_CTRL_REG_RESERVED_SHIFT 28
#define IGU_CTRL_REG_TYPE_MASK 0x1 /* use enum igu_ctrl_cmd */
#define IGU_CTRL_REG_TYPE_SHIFT 31
};
enum qed_coalescing_fsm {
QED_COAL_RX_STATE_MACHINE,
QED_COAL_TX_STATE_MACHINE
};
/**
* @brief qed_int_cau_conf_pi - configure cau for a given
* status block
*
* @param p_hwfn
* @param p_ptt
* @param igu_sb_id
* @param pi_index
 * @param coalescing_fsm
* @param timeset
*/
void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 igu_sb_id,
u32 pi_index,
enum qed_coalescing_fsm coalescing_fsm,
u8 timeset);
/**
* @brief qed_int_igu_enable_int - enable device interrupts
*
* @param p_hwfn
* @param p_ptt
* @param int_mode - interrupt mode to use
*/
void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_int_mode int_mode);
/**
* @brief qed_int_igu_disable_int - disable device interrupts
*
* @param p_hwfn
* @param p_ptt
*/
void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
* @brief qed_int_igu_read_sisr_reg - Reads the single isr multiple dpc
* register from igu.
*
* @param p_hwfn
*
* @return u64
*/
u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn);
#define QED_SP_SB_ID 0xffff
/**
* @brief qed_int_sb_init - Initializes the sb_info structure.
*
 * Once the structure is initialized, it can be passed to SB-related functions.
*
* @param p_hwfn
* @param p_ptt
* @param sb_info points to an uninitialized (but
* allocated) sb_info structure
* @param sb_virt_addr
* @param sb_phy_addr
* @param sb_id the sb_id to be used (zero based in driver)
* should use QED_SP_SB_ID for SP Status block
*
* @return int
*/
int qed_int_sb_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_sb_info *sb_info,
void *sb_virt_addr,
dma_addr_t sb_phy_addr,
u16 sb_id);
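/* Illustrative usage sketch (guarded out; the helper and local names below
 * are assumptions, not part of this interface): a protocol driver would
 * allocate a coherent buffer for the status block and hand it to
 * qed_int_sb_init().
 */
#if 0
static int example_sb_setup(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    struct qed_sb_info *sb_info,
			    u16 sb_id)
{
	dma_addr_t sb_phys;
	void *sb_virt;

	sb_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     sizeof(struct status_block),
				     &sb_phys, GFP_KERNEL);
	if (!sb_virt)
		return -ENOMEM;

	/* Binds the buffer to sb_info and programs CAU/IGU for this SB */
	return qed_int_sb_init(p_hwfn, p_ptt, sb_info,
			       sb_virt, sb_phys, sb_id);
}
#endif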
/**
* @brief qed_int_sb_setup - Setup the sb.
*
* @param p_hwfn
* @param p_ptt
* @param sb_info initialized sb_info structure
*/
void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_sb_info *sb_info);
/**
* @brief qed_int_sb_release - releases the sb_info structure.
*
 * Once the structure is released, its memory can be freed.
*
* @param p_hwfn
* @param sb_info points to an allocated sb_info structure
* @param sb_id the sb_id to be used (zero based in driver)
* should never be equal to QED_SP_SB_ID
* (SP Status block)
*
* @return int
*/
int qed_int_sb_release(struct qed_hwfn *p_hwfn,
struct qed_sb_info *sb_info,
u16 sb_id);
/**
* @brief qed_int_sp_dpc - To be called when an interrupt is received on the
* default status block.
*
* @param p_hwfn - pointer to hwfn
*
*/
void qed_int_sp_dpc(unsigned long hwfn_cookie);
/**
* @brief qed_int_get_num_sbs - get the number of status
 *        blocks configured for this function in the igu.
*
* @param p_hwfn
* @param p_iov_blks - configured free blks for vfs
*
* @return int - number of status blocks configured
*/
int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
int *p_iov_blks);
/**
* @file
*
* @brief Interrupt handler
*/
#define QED_CAU_DEF_RX_TIMER_RES 0
#define QED_CAU_DEF_TX_TIMER_RES 0
#define QED_SB_ATT_IDX 0x0001
#define QED_SB_EVENT_MASK 0x0003
#define SB_ALIGNED_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
struct qed_igu_block {
u8 status;
#define QED_IGU_STATUS_FREE 0x01
#define QED_IGU_STATUS_VALID 0x02
#define QED_IGU_STATUS_PF 0x04
u8 vector_number;
u8 function_id;
u8 is_pf;
};
struct qed_igu_map {
struct qed_igu_block igu_blocks[MAX_TOT_SB_PER_PATH];
};
struct qed_igu_info {
struct qed_igu_map igu_map;
u16 igu_dsb_id;
u16 igu_base_sb;
u16 igu_base_sb_iov;
u16 igu_sb_cnt;
u16 igu_sb_cnt_iov;
u16 free_blks;
};
/* TODO - Names of these functions may change... */
void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool b_set,
bool b_slowpath);
void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn);
/**
* @brief qed_int_igu_read_cam - Reads the IGU CAM.
* This function needs to be called during hardware
* prepare. It reads the info from igu cam to know which
* status block is the default / base status block etc.
*
* @param p_hwfn
* @param p_ptt
*
* @return int
*/
int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
typedef int (*qed_int_comp_cb_t)(struct qed_hwfn *p_hwfn,
void *cookie);
/**
* @brief qed_int_register_cb - Register callback func for
* slowhwfn statusblock.
*
* Every protocol that uses the slowhwfn status block
* should register a callback function that will be called
* once there is an update of the sp status block.
*
* @param p_hwfn
* @param comp_cb - function to be called when there is an
* interrupt on the sp sb
*
* @param cookie - passed to the callback function
* @param sb_idx - OUT parameter which gives the chosen index
* for this protocol.
* @param p_fw_cons - pointer to the actual address of the
* consumer for this protocol.
*
* @return int
*/
int qed_int_register_cb(struct qed_hwfn *p_hwfn,
qed_int_comp_cb_t comp_cb,
void *cookie,
u8 *sb_idx,
__le16 **p_fw_cons);
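/* Illustrative sketch of registering a slowpath-SB callback (guarded out;
 * the callback name and the use of p_hwfn as the cookie are assumptions).
 */
#if 0
static int example_sp_event(struct qed_hwfn *p_hwfn, void *cookie)
{
	/* Handle the protocol's slowpath completions here */
	return 0;
}

static int example_register_sp_cb(struct qed_hwfn *p_hwfn)
{
	__le16 *p_fw_cons;
	u8 sb_idx;

	return qed_int_register_cb(p_hwfn, example_sp_event, p_hwfn,
				   &sb_idx, &p_fw_cons);
}
#endif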
/**
* @brief qed_int_unregister_cb - Unregisters callback
* function from sp sb.
* Partner of qed_int_register_cb -> should be called
* when no longer required.
*
* @param p_hwfn
* @param pi
*
* @return int
*/
int qed_int_unregister_cb(struct qed_hwfn *p_hwfn,
u8 pi);
/**
* @brief qed_int_get_sp_sb_id - Get the slowhwfn sb id.
*
* @param p_hwfn
*
* @return u16
*/
u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);
/**
* @brief Status block cleanup. Should be called for each status
* block that will be used -> both PF / VF
*
* @param p_hwfn
* @param p_ptt
* @param sb_id - igu status block id
* @param cleanup_set - set(1) / clear(0)
* @param opaque_fid - the function for which to perform
* cleanup, for example a PF on behalf of
* its VFs.
*/
void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 sb_id,
bool cleanup_set,
u16 opaque_fid);
/**
* @brief Status block cleanup. Should be called for each status
* block that will be used -> both PF / VF
*
* @param p_hwfn
* @param p_ptt
* @param sb_id - igu status block id
* @param opaque - opaque fid of the sb owner.
 * @param b_set - set(1) / clear(0)
*/
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 sb_id,
u16 opaque,
bool b_set);
/**
 * @brief qed_int_cau_conf_sb - configure cau for a given status
* block
*
* @param p_hwfn
 * @param p_ptt
* @param sb_phys
* @param igu_sb_id
* @param vf_number
* @param vf_valid
*/
void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
dma_addr_t sb_phys,
u16 igu_sb_id,
u16 vf_number,
u8 vf_valid);
/**
* @brief qed_int_alloc
*
* @param p_hwfn
* @param p_ptt
*
* @return int
*/
int qed_int_alloc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
* @brief qed_int_free
*
* @param p_hwfn
*/
void qed_int_free(struct qed_hwfn *p_hwfn);
/**
* @brief qed_int_setup
*
* @param p_hwfn
* @param p_ptt
*/
void qed_int_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
* @brief - Enable Interrupt & Attention for hw function
*
* @param p_hwfn
* @param p_ptt
* @param int_mode
*/
void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_int_mode int_mode);
/**
* @brief - Initialize CAU status block entry
*
* @param p_hwfn
* @param p_sb_entry
* @param pf_id
* @param vf_number
* @param vf_valid
*/
void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
struct cau_sb_entry *p_sb_entry,
u8 pf_id,
u16 vf_number,
u8 vf_valid);
#define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev))
#endif

View File

@ -0,0 +1,948 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/qed/qed_if.h>
#include "qed.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_mcp.h"
#include "qed_hw.h"
static const char version[] =
"QLogic QL4xxx 40G/100G Ethernet Driver qed " DRV_MODULE_VERSION "\n";
MODULE_DESCRIPTION("QLogic 25G/40G/50G/100G Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
#define FW_FILE_VERSION \
__stringify(FW_MAJOR_VERSION) "." \
__stringify(FW_MINOR_VERSION) "." \
__stringify(FW_REVISION_VERSION) "." \
__stringify(FW_ENGINEERING_VERSION)
#define QED_FW_FILE_NAME \
"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"
static int __init qed_init(void)
{
pr_notice("qed_init called\n");
pr_info("%s", version);
return 0;
}
static void __exit qed_cleanup(void)
{
pr_notice("qed_cleanup called\n");
}
module_init(qed_init);
module_exit(qed_cleanup);
/* Check if the DMA controller on the machine can properly handle the DMA
* addressing required by the device.
*/
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
struct device *dev = &cdev->pdev->dev;
if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
DP_NOTICE(cdev,
"Can't request 64-bit consistent allocations\n");
return -EIO;
}
} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
return -EIO;
}
return 0;
}
static void qed_free_pci(struct qed_dev *cdev)
{
struct pci_dev *pdev = cdev->pdev;
if (cdev->doorbells)
iounmap(cdev->doorbells);
if (cdev->regview)
iounmap(cdev->regview);
if (atomic_read(&pdev->enable_cnt) == 1)
pci_release_regions(pdev);
pci_disable_device(pdev);
}
/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
*/
static int qed_init_pci(struct qed_dev *cdev,
struct pci_dev *pdev)
{
int rc;
cdev->pdev = pdev;
rc = pci_enable_device(pdev);
if (rc) {
DP_NOTICE(cdev, "Cannot enable PCI device\n");
goto err0;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
DP_NOTICE(cdev, "No memory region found in bar #0\n");
rc = -EIO;
goto err1;
}
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
DP_NOTICE(cdev, "No memory region found in bar #2\n");
rc = -EIO;
goto err1;
}
if (atomic_read(&pdev->enable_cnt) == 1) {
rc = pci_request_regions(pdev, "qed");
if (rc) {
DP_NOTICE(cdev,
"Failed to request PCI memory resources\n");
goto err1;
}
pci_set_master(pdev);
pci_save_state(pdev);
}
if (!pci_is_pcie(pdev)) {
DP_NOTICE(cdev, "The bus is not PCI Express\n");
rc = -EIO;
goto err2;
}
cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (cdev->pci_params.pm_cap == 0)
DP_NOTICE(cdev, "Cannot find power management capability\n");
rc = qed_set_coherency_mask(cdev);
if (rc)
goto err2;
cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
cdev->pci_params.irq = pdev->irq;
cdev->regview = pci_ioremap_bar(pdev, 0);
if (!cdev->regview) {
DP_NOTICE(cdev, "Cannot map register space, aborting\n");
rc = -ENOMEM;
goto err2;
}
cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
cdev->db_size = pci_resource_len(cdev->pdev, 2);
cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
if (!cdev->doorbells) {
DP_NOTICE(cdev, "Cannot map doorbell space\n");
return -ENOMEM;
}
return 0;
err2:
pci_release_regions(pdev);
err1:
pci_disable_device(pdev);
err0:
return rc;
}
int qed_fill_dev_info(struct qed_dev *cdev,
struct qed_dev_info *dev_info)
{
memset(dev_info, 0, sizeof(struct qed_dev_info));
dev_info->num_hwfns = cdev->num_hwfns;
dev_info->pci_mem_start = cdev->pci_params.mem_start;
dev_info->pci_mem_end = cdev->pci_params.mem_end;
dev_info->pci_irq = cdev->pci_params.irq;
dev_info->is_mf = IS_MF(&cdev->hwfns[0]);
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
dev_info->fw_major = FW_MAJOR_VERSION;
dev_info->fw_minor = FW_MINOR_VERSION;
dev_info->fw_rev = FW_REVISION_VERSION;
dev_info->fw_eng = FW_ENGINEERING_VERSION;
dev_info->mf_mode = cdev->mf_mode;
qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev);
return 0;
}
static void qed_free_cdev(struct qed_dev *cdev)
{
kfree((void *)cdev);
}
static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
struct qed_dev *cdev;
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
return cdev;
qed_init_struct(cdev);
return cdev;
}
/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev,
pci_power_t state)
{
if (!cdev)
return -ENODEV;
DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
return 0;
}
/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
enum qed_protocol protocol,
u32 dp_module,
u8 dp_level)
{
struct qed_dev *cdev;
int rc;
cdev = qed_alloc_cdev(pdev);
if (!cdev)
goto err0;
cdev->protocol = protocol;
qed_init_dp(cdev, dp_module, dp_level);
rc = qed_init_pci(cdev, pdev);
if (rc) {
DP_ERR(cdev, "init pci failed\n");
goto err1;
}
DP_INFO(cdev, "PCI init completed successfully\n");
rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
if (rc) {
DP_ERR(cdev, "hw prepare failed\n");
goto err2;
}
DP_INFO(cdev, "qed_probe completed successffuly\n");
return cdev;
err2:
qed_free_pci(cdev);
err1:
qed_free_cdev(cdev);
err0:
return NULL;
}
static void qed_remove(struct qed_dev *cdev)
{
if (!cdev)
return;
qed_hw_remove(cdev);
qed_free_pci(cdev);
qed_set_power_state(cdev, PCI_D3hot);
qed_free_cdev(cdev);
}
static void qed_disable_msix(struct qed_dev *cdev)
{
if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
pci_disable_msix(cdev->pdev);
kfree(cdev->int_params.msix_table);
} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
pci_disable_msi(cdev->pdev);
}
memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}
static int qed_enable_msix(struct qed_dev *cdev,
struct qed_int_params *int_params)
{
int i, rc, cnt;
cnt = int_params->in.num_vectors;
for (i = 0; i < cnt; i++)
int_params->msix_table[i].entry = i;
rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
int_params->in.min_msix_cnt, cnt);
if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
(rc % cdev->num_hwfns)) {
pci_disable_msix(cdev->pdev);
/* If fastpath is initialized, we need at least one interrupt
* per hwfn [and the slow path interrupts]. New requested number
* should be a multiple of the number of hwfns.
*/
cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
DP_NOTICE(cdev,
"Trying to enable MSI-X with less vectors (%d out of %d)\n",
cnt, int_params->in.num_vectors);
rc = pci_enable_msix_exact(cdev->pdev,
int_params->msix_table, cnt);
if (!rc)
rc = cnt;
}
if (rc > 0) {
/* MSI-x configuration was achieved */
int_params->out.int_mode = QED_INT_MODE_MSIX;
int_params->out.num_vectors = rc;
rc = 0;
} else {
DP_NOTICE(cdev,
"Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
cnt, rc);
}
return rc;
}
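/* Worked example of the rounding above (numbers are illustrative): with
 * two hwfns and a request for 10 vectors, a grant of 7 is rounded down to
 * (7 / 2) * 2 == 6, so both hwfns keep an equal share of vectors.
 */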
/* This function outputs the int mode and the number of enabled MSI-X vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
struct qed_int_params *int_params = &cdev->int_params;
struct msix_entry *tbl;
int rc = 0, cnt;
switch (int_params->in.int_mode) {
case QED_INT_MODE_MSIX:
/* Allocate MSIX table */
cnt = int_params->in.num_vectors;
int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
if (!int_params->msix_table) {
rc = -ENOMEM;
goto out;
}
/* Enable MSIX */
rc = qed_enable_msix(cdev, int_params);
if (!rc)
goto out;
DP_NOTICE(cdev, "Failed to enable MSI-X\n");
kfree(int_params->msix_table);
if (force_mode)
goto out;
/* Fallthrough */
case QED_INT_MODE_MSI:
rc = pci_enable_msi(cdev->pdev);
if (!rc) {
int_params->out.int_mode = QED_INT_MODE_MSI;
goto out;
}
DP_NOTICE(cdev, "Failed to enable MSI\n");
if (force_mode)
goto out;
/* Fallthrough */
case QED_INT_MODE_INTA:
int_params->out.int_mode = QED_INT_MODE_INTA;
rc = 0;
goto out;
default:
DP_NOTICE(cdev, "Unknown int_mode value %d\n",
int_params->in.int_mode);
rc = -EINVAL;
}
out:
cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
return rc;
}
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
int index, void(*handler)(void *))
{
struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
int relative_idx = index / cdev->num_hwfns;
hwfn->simd_proto_handler[relative_idx].func = handler;
hwfn->simd_proto_handler[relative_idx].token = token;
}
static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
int relative_idx = index / cdev->num_hwfns;
memset(&hwfn->simd_proto_handler[relative_idx], 0,
sizeof(struct qed_simd_fp_handler));
}
static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
tasklet_schedule((struct tasklet_struct *)tasklet);
return IRQ_HANDLED;
}
static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
struct qed_dev *cdev = (struct qed_dev *)dev_instance;
struct qed_hwfn *hwfn;
irqreturn_t rc = IRQ_NONE;
u64 status;
int i, j;
for (i = 0; i < cdev->num_hwfns; i++) {
status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);
if (!status)
continue;
hwfn = &cdev->hwfns[i];
/* Slowpath interrupt */
if (unlikely(status & 0x1)) {
tasklet_schedule(hwfn->sp_dpc);
status &= ~0x1;
rc = IRQ_HANDLED;
}
/* Fastpath interrupts */
for (j = 0; j < 64; j++) {
if ((0x2ULL << j) & status) {
hwfn->simd_proto_handler[j].func(
hwfn->simd_proto_handler[j].token);
status &= ~(0x2ULL << j);
rc = IRQ_HANDLED;
}
}
if (unlikely(status))
DP_VERBOSE(hwfn, NETIF_MSG_INTR,
"got an unknown interrupt status 0x%llx\n",
status);
}
return rc;
}
static int qed_slowpath_irq_req(struct qed_dev *cdev)
{
int i = 0, rc = 0;
if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
/* Request all the slowpath MSI-X vectors */
for (i = 0; i < cdev->num_hwfns; i++) {
snprintf(cdev->hwfns[i].name, NAME_SIZE,
"sp-%d-%02x:%02x.%02x",
i, cdev->pdev->bus->number,
PCI_SLOT(cdev->pdev->devfn),
cdev->hwfns[i].abs_pf_id);
rc = request_irq(cdev->int_params.msix_table[i].vector,
qed_msix_sp_int, 0,
cdev->hwfns[i].name,
cdev->hwfns[i].sp_dpc);
if (rc)
break;
DP_VERBOSE(&cdev->hwfns[i],
(NETIF_MSG_INTR | QED_MSG_SP),
"Requested slowpath MSI-X\n");
}
if (i != cdev->num_hwfns) {
			/* Free already requested MSI-X vectors */
for (i--; i >= 0; i--) {
unsigned int vec =
cdev->int_params.msix_table[i].vector;
synchronize_irq(vec);
free_irq(cdev->int_params.msix_table[i].vector,
cdev->hwfns[i].sp_dpc);
}
}
} else {
unsigned long flags = 0;
snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
PCI_FUNC(cdev->pdev->devfn));
if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
flags |= IRQF_SHARED;
rc = request_irq(cdev->pdev->irq, qed_single_int,
flags, cdev->name, cdev);
}
return rc;
}
static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
int i;
if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
for_each_hwfn(cdev, i) {
synchronize_irq(cdev->int_params.msix_table[i].vector);
free_irq(cdev->int_params.msix_table[i].vector,
cdev->hwfns[i].sp_dpc);
}
} else {
free_irq(cdev->pdev->irq, cdev);
}
}
static int qed_nic_stop(struct qed_dev *cdev)
{
int i, rc;
rc = qed_hw_stop(cdev);
for (i = 0; i < cdev->num_hwfns; i++) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
if (p_hwfn->b_sp_dpc_enabled) {
tasklet_disable(p_hwfn->sp_dpc);
p_hwfn->b_sp_dpc_enabled = false;
DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
"Disabled sp taskelt [hwfn %d] at %p\n",
i, p_hwfn->sp_dpc);
}
}
return rc;
}
static int qed_nic_reset(struct qed_dev *cdev)
{
int rc;
rc = qed_hw_reset(cdev);
if (rc)
return rc;
qed_resc_free(cdev);
return 0;
}
static int qed_nic_setup(struct qed_dev *cdev)
{
int rc;
rc = qed_resc_alloc(cdev);
if (rc)
return rc;
DP_INFO(cdev, "Allocated qed resources\n");
qed_resc_setup(cdev);
return rc;
}
static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
int limit = 0;
/* Mark the fastpath as free/used */
cdev->int_params.fp_initialized = cnt ? true : false;
if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
limit = cdev->num_hwfns * 63;
else if (cdev->int_params.fp_msix_cnt)
limit = cdev->int_params.fp_msix_cnt;
if (!limit)
return -ENOMEM;
return min_t(int, cnt, limit);
}
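/* Worked example (illustrative): with two hwfns and MSI/INTA interrupts the
 * limit above is 2 * 63 == 126 fastpath SBs; with MSI-X it is whatever
 * remains after the slowpath vectors were reserved.
 */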
static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
memset(info, 0, sizeof(struct qed_int_info));
if (!cdev->int_params.fp_initialized) {
DP_INFO(cdev,
"Protocol driver requested interrupt information, but its support is not yet configured\n");
return -EINVAL;
}
/* Need to expose only MSI-X information; Single IRQ is handled solely
* by qed.
*/
if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
int msix_base = cdev->int_params.fp_msix_base;
info->msix_cnt = cdev->int_params.fp_msix_cnt;
info->msix = &cdev->int_params.msix_table[msix_base];
}
return 0;
}
static int qed_slowpath_setup_int(struct qed_dev *cdev,
enum qed_int_mode int_mode)
{
int rc, i;
u8 num_vectors = 0;
memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
cdev->int_params.in.int_mode = int_mode;
for_each_hwfn(cdev, i)
num_vectors += qed_int_get_num_sbs(&cdev->hwfns[i], NULL) + 1;
cdev->int_params.in.num_vectors = num_vectors;
/* We want a minimum of one slowpath and one fastpath vector per hwfn */
cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
rc = qed_set_int_mode(cdev, false);
if (rc) {
DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
return rc;
}
cdev->int_params.fp_msix_base = cdev->num_hwfns;
cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
cdev->num_hwfns;
return 0;
}
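/* Worked example of the accounting above (illustrative numbers): two hwfns
 * exposing 8 status blocks each request 2 * (8 + 1) == 18 vectors, at least
 * 2 * 2 == 4 must be granted, the first two granted vectors serve the
 * slowpath and the rest are left for the fastpath.
 */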
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
int rc;
p_hwfn->stream->next_in = input_buf;
p_hwfn->stream->avail_in = input_len;
p_hwfn->stream->next_out = unzip_buf;
p_hwfn->stream->avail_out = max_size;
rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);
if (rc != Z_OK) {
DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
rc);
return 0;
}
rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
zlib_inflateEnd(p_hwfn->stream);
if (rc != Z_OK && rc != Z_STREAM_END) {
DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
p_hwfn->stream->msg, rc);
return 0;
}
return p_hwfn->stream->total_out / 4;
}
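/* Illustrative sketch of a qed_unzip_data() caller (guarded out; the helper
 * name and buffer parameters are assumptions for the example only).
 */
#if 0
static int example_unzip(struct qed_hwfn *p_hwfn,
			 u8 *zipped, u32 zipped_len,
			 u8 *out_buf, u32 out_size)
{
	u32 dwords;

	/* Returns the number of decompressed dwords, or 0 on failure */
	dwords = qed_unzip_data(p_hwfn, zipped_len, zipped,
				out_size, out_buf);
	return dwords ? 0 : -EINVAL;
}
#endif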
static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
int i;
void *workspace;
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
if (!p_hwfn->stream)
return -ENOMEM;
workspace = vzalloc(zlib_inflate_workspacesize());
if (!workspace)
return -ENOMEM;
p_hwfn->stream->workspace = workspace;
}
return 0;
}
static void qed_free_stream_mem(struct qed_dev *cdev)
{
int i;
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
if (!p_hwfn->stream)
return;
vfree(p_hwfn->stream->workspace);
kfree(p_hwfn->stream);
}
}
static void qed_update_pf_params(struct qed_dev *cdev,
struct qed_pf_params *params)
{
int i;
for (i = 0; i < cdev->num_hwfns; i++) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
p_hwfn->pf_params = *params;
}
}
static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_slowpath_params *params)
{
struct qed_mcp_drv_version drv_version;
const u8 *data = NULL;
struct qed_hwfn *hwfn;
int rc;
rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
&cdev->pdev->dev);
if (rc) {
DP_NOTICE(cdev,
"Failed to find fw file - /lib/firmware/%s\n",
QED_FW_FILE_NAME);
goto err;
}
rc = qed_nic_setup(cdev);
if (rc)
goto err;
rc = qed_slowpath_setup_int(cdev, params->int_mode);
if (rc)
goto err1;
/* Request the slowpath IRQ */
rc = qed_slowpath_irq_req(cdev);
if (rc)
goto err2;
/* Allocate stream for unzipping */
rc = qed_alloc_stream_mem(cdev);
if (rc) {
DP_NOTICE(cdev, "Failed to allocate stream memory\n");
goto err3;
}
/* Start the slowpath */
data = cdev->firmware->data;
rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
true, data);
if (rc)
goto err3;
DP_INFO(cdev,
"HW initialization and function start completed successfully\n");
hwfn = QED_LEADING_HWFN(cdev);
drv_version.version = (params->drv_major << 24) |
(params->drv_minor << 16) |
(params->drv_rev << 8) |
(params->drv_eng);
strlcpy(drv_version.name, params->name,
MCP_DRV_VER_STR_SIZE - 4);
rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
&drv_version);
if (rc) {
DP_NOTICE(cdev, "Failed sending drv version command\n");
return rc;
}
return 0;
err3:
qed_free_stream_mem(cdev);
qed_slowpath_irq_free(cdev);
err2:
qed_disable_msix(cdev);
err1:
qed_resc_free(cdev);
err:
release_firmware(cdev->firmware);
return rc;
}
static int qed_slowpath_stop(struct qed_dev *cdev)
{
if (!cdev)
return -ENODEV;
qed_free_stream_mem(cdev);
qed_nic_stop(cdev);
qed_slowpath_irq_free(cdev);
qed_disable_msix(cdev);
qed_nic_reset(cdev);
release_firmware(cdev->firmware);
return 0;
}
static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
char ver_str[VER_SIZE])
{
int i;
memcpy(cdev->name, name, NAME_SIZE);
for_each_hwfn(cdev, i)
snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
memcpy(cdev->ver_str, ver_str, VER_SIZE);
cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
}
static u32 qed_sb_init(struct qed_dev *cdev,
struct qed_sb_info *sb_info,
void *sb_virt_addr,
dma_addr_t sb_phy_addr, u16 sb_id,
enum qed_sb_type type)
{
struct qed_hwfn *p_hwfn;
int hwfn_index;
u16 rel_sb_id;
u8 n_hwfns;
u32 rc;
/* RoCE uses single engine and CMT uses two engines. When using both
* we force only a single engine. Storage uses only engine 0 too.
*/
if (type == QED_SB_TYPE_L2_QUEUE)
n_hwfns = cdev->num_hwfns;
else
n_hwfns = 1;
hwfn_index = sb_id % n_hwfns;
p_hwfn = &cdev->hwfns[hwfn_index];
rel_sb_id = sb_id / n_hwfns;
DP_VERBOSE(cdev, NETIF_MSG_INTR,
"hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
hwfn_index, rel_sb_id, sb_id);
rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
sb_virt_addr, sb_phy_addr, rel_sb_id);
return rc;
}
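/* Worked example of the mapping above (illustrative numbers): on a CMT
 * device with two hwfns, an L2-queue sb_id of 5 maps to hwfn_index
 * 5 % 2 == 1 and rel_sb_id 5 / 2 == 2, i.e. the third status block of the
 * second engine.
 */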
static u32 qed_sb_release(struct qed_dev *cdev,
struct qed_sb_info *sb_info,
u16 sb_id)
{
struct qed_hwfn *p_hwfn;
int hwfn_index;
u16 rel_sb_id;
u32 rc;
hwfn_index = sb_id % cdev->num_hwfns;
p_hwfn = &cdev->hwfns[hwfn_index];
rel_sb_id = sb_id / cdev->num_hwfns;
DP_VERBOSE(cdev, NETIF_MSG_INTR,
"hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
hwfn_index, rel_sb_id, sb_id);
rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
return rc;
}
static int qed_drain(struct qed_dev *cdev)
{
struct qed_hwfn *hwfn;
struct qed_ptt *ptt;
int i, rc;
for_each_hwfn(cdev, i) {
hwfn = &cdev->hwfns[i];
ptt = qed_ptt_acquire(hwfn);
if (!ptt) {
DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
return -EBUSY;
}
rc = qed_mcp_drain(hwfn, ptt);
if (rc)
return rc;
qed_ptt_release(hwfn, ptt);
}
return 0;
}
const struct qed_common_ops qed_common_ops_pass = {
.probe = &qed_probe,
.remove = &qed_remove,
.set_power_state = &qed_set_power_state,
.set_id = &qed_set_id,
.update_pf_params = &qed_update_pf_params,
.slowpath_start = &qed_slowpath_start,
.slowpath_stop = &qed_slowpath_stop,
.set_fp_int = &qed_set_int_fp,
.get_fp_int = &qed_get_int_fp,
.sb_init = &qed_sb_init,
.sb_release = &qed_sb_release,
.simd_handler_config = &qed_simd_handler_config,
.simd_handler_clean = &qed_simd_handler_clean,
.drain = &qed_drain,
.update_msglvl = &qed_init_dp,
.chain_alloc = &qed_chain_alloc,
.chain_free = &qed_chain_free,
};
u32 qed_get_protocol_version(enum qed_protocol protocol)
{
switch (protocol) {
case QED_PROTOCOL_ETH:
return QED_ETH_INTERFACE_VERSION;
default:
return 0;
}
}
EXPORT_SYMBOL(qed_get_protocol_version);

View File

@ -0,0 +1,549 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#define CHIP_MCP_RESP_ITER_US 10
#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
#define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
_val)
#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
offsetof(struct public_drv_mb, _field), _val)
#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
offsetof(struct public_drv_mb, _field))
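/* Illustrative expansion of the accessors above: reading the firmware
 * mailbox header, for example, becomes a single register read at the
 * per-PF driver-mailbox address plus the field offset:
 *
 *   DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header)
 *     == qed_rd(p_hwfn, p_ptt,
 *               p_hwfn->mcp_info->drv_mb_addr +
 *               offsetof(struct public_drv_mb, fw_mb_header));
 */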
#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
DRV_ID_PDA_COMP_VER_SHIFT)
#define MCP_BYTES_PER_MBIT_SHIFT 17
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
{
if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
return false;
return true;
}
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
PUBLIC_PORT);
u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);
p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
MFW_PORT(p_hwfn));
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"port_addr = 0x%x, port_id 0x%02x\n",
p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
u32 tmp, i;
if (!p_hwfn->mcp_info->public_base)
return;
for (i = 0; i < length; i++) {
tmp = qed_rd(p_hwfn, p_ptt,
p_hwfn->mcp_info->mfw_mb_addr +
(i << 2) + sizeof(u32));
/* The MB data is actually BE; Need to force it to cpu */
((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
be32_to_cpu((__force __be32)tmp);
}
}
int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
if (p_hwfn->mcp_info) {
kfree(p_hwfn->mcp_info->mfw_mb_cur);
kfree(p_hwfn->mcp_info->mfw_mb_shadow);
}
kfree(p_hwfn->mcp_info);
return 0;
}
static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
struct qed_mcp_info *p_info = p_hwfn->mcp_info;
u32 drv_mb_offsize, mfw_mb_offsize;
u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
if (!p_info->public_base)
return 0;
p_info->public_base |= GRCBASE_MCP;
/* Calculate the driver and MFW mailbox address */
drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
SECTION_OFFSIZE_ADDR(p_info->public_base,
PUBLIC_DRV_MB));
p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
/* Set the MFW MB address */
mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
SECTION_OFFSIZE_ADDR(p_info->public_base,
PUBLIC_MFW_MB));
p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
/* Get the current driver mailbox sequence before sending
* the first command
*/
p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK;
/* Get current FW pulse sequence */
p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
DRV_PULSE_SEQ_MASK;
p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
return 0;
}
int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
struct qed_mcp_info *p_info;
u32 size;
/* Allocate mcp_info structure */
p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_ATOMIC);
if (!p_hwfn->mcp_info)
goto err;
p_info = p_hwfn->mcp_info;
if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
DP_NOTICE(p_hwfn, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
* the MCP is not initialized
*/
return 0;
}
size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
p_info->mfw_mb_cur = kzalloc(size, GFP_ATOMIC);
p_info->mfw_mb_shadow =
kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS(
p_info->mfw_mb_length), GFP_ATOMIC);
if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
goto err;
/* Initialize the MFW mutex */
mutex_init(&p_info->mutex);
return 0;
err:
DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n");
qed_mcp_free(p_hwfn);
return -ENOMEM;
}
int qed_mcp_reset(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
u8 delay = CHIP_MCP_RESP_ITER_US;
u32 org_mcp_reset_seq, cnt = 0;
int rc = 0;
/* Set drv command along with the updated sequence */
org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
(DRV_MSG_CODE_MCP_RESET | seq));
do {
/* Wait for MFW response */
udelay(delay);
		/* Give the FW up to 500 msec (50*1000 iterations of 10 usec) */
} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
MISCS_REG_GENERIC_POR_0)) &&
(cnt++ < QED_MCP_RESET_RETRIES));
if (org_mcp_reset_seq !=
qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"MCP was reset after %d usec\n", cnt * delay);
} else {
DP_ERR(p_hwfn, "Failed to reset MCP\n");
rc = -EAGAIN;
}
return rc;
}
static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 cmd,
u32 param,
u32 *o_mcp_resp,
u32 *o_mcp_param)
{
u8 delay = CHIP_MCP_RESP_ITER_US;
u32 seq, cnt = 1, actual_mb_seq;
int rc = 0;
/* Get actual driver mailbox sequence */
actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK;
/* Use MCP history register to check if MCP reset occurred between
* init time and now.
*/
if (p_hwfn->mcp_info->mcp_hist !=
qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
qed_load_mcp_offsets(p_hwfn, p_ptt);
qed_mcp_cmd_port_init(p_hwfn, p_ptt);
}
seq = ++p_hwfn->mcp_info->drv_mb_seq;
/* Set drv param */
DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
/* Set drv command along with the updated sequence */
DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"wrote command (%x) to MFW MB param 0x%08x\n",
(cmd | seq), param);
do {
/* Wait for MFW response */
udelay(delay);
*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
		/* Give the FW up to 5 seconds (500*1000 iterations of 10 usec) */
} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
(cnt++ < QED_DRV_MB_MAX_RETRIES));
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"[after %d ms] read (%x) seq is (%x) from FW MB\n",
cnt * delay, *o_mcp_resp, seq);
/* Is this a reply to our command? */
if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
*o_mcp_resp &= FW_MSG_CODE_MASK;
/* Get the MCP param */
*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
} else {
/* FW BUG! */
DP_ERR(p_hwfn, "MFW failed to respond!\n");
*o_mcp_resp = 0;
rc = -EAGAIN;
}
return rc;
}
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 cmd,
u32 param,
u32 *o_mcp_resp,
u32 *o_mcp_param)
{
int rc = 0;
/* MCP not initialized */
if (!qed_mcp_is_init(p_hwfn)) {
DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
return -EBUSY;
}
/* Lock Mutex to ensure only single thread is
* accessing the MCP at one time
*/
mutex_lock(&p_hwfn->mcp_info->mutex);
rc = qed_do_mcp_cmd(p_hwfn, p_ptt, cmd, param,
o_mcp_resp, o_mcp_param);
/* Release Mutex */
mutex_unlock(&p_hwfn->mcp_info->mutex);
return rc;
}
static void qed_mcp_set_drv_ver(struct qed_dev *cdev,
struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
u32 i;
/* Copy version string to MCP */
for (i = 0; i < MCP_DRV_VER_STR_SIZE_DWORD; i++)
DRV_MB_WR(p_hwfn, p_ptt, union_data.ver_str[i],
*(u32 *)&cdev->ver_str[i * sizeof(u32)]);
}
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *p_load_code)
{
struct qed_dev *cdev = p_hwfn->cdev;
u32 param;
int rc;
if (!qed_mcp_is_init(p_hwfn)) {
DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
return -EBUSY;
}
/* Save driver's version to shmem */
qed_mcp_set_drv_ver(cdev, p_hwfn, p_ptt);
DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
p_hwfn->mcp_info->drv_mb_seq,
p_hwfn->mcp_info->drv_pulse_seq);
/* Load Request */
rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ,
(PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
cdev->drv_type),
p_load_code, &param);
/* if mcp fails to respond we must abort */
if (rc) {
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
return rc;
}
	/* If the MFW refused the load request we must abort. This can happen
	 * in the following cases:
	 * - Other port is in diagnostic mode
	 * - Previously loaded function on the engine is not compliant with
	 *   the requester.
	 * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
	 */
if (!(*p_load_code) ||
((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
return -EBUSY;
}
return 0;
}
int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
u32 *p_mfw_ver)
{
struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
struct qed_ptt *p_ptt;
u32 global_offsize;
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt)
return -EBUSY;
global_offsize = qed_rd(p_hwfn, p_ptt,
SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
public_base,
PUBLIC_GLOBAL));
*p_mfw_ver = qed_rd(p_hwfn, p_ptt,
SECTION_ADDR(global_offsize, 0) +
offsetof(struct public_global, mfw_ver));
qed_ptt_release(p_hwfn, p_ptt);
return 0;
}
static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct public_func *p_data,
int pfid)
{
u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
PUBLIC_FUNC);
u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
u32 i, size;
memset(p_data, 0, sizeof(*p_data));
size = min_t(u32, sizeof(*p_data),
QED_SECTION_SIZE(mfw_path_offsize));
for (i = 0; i < size / sizeof(u32); i++)
((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
func_addr + (i << 2));
return size;
}
static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
struct public_func *p_info,
enum qed_pci_personality *p_proto)
{
int rc = 0;
switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
case FUNC_MF_CFG_PROTOCOL_ETHERNET:
*p_proto = QED_PCI_ETH;
break;
default:
rc = -EINVAL;
}
return rc;
}
int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
struct qed_mcp_function_info *info;
struct public_func shmem_info;
qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
MCP_PF_ID(p_hwfn));
info = &p_hwfn->mcp_info->func_info;
info->pause_on_host = (shmem_info.config &
FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info,
&info->protocol)) {
DP_ERR(p_hwfn, "Unknown personality %08x\n",
(u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
return -EINVAL;
}
if (p_hwfn->cdev->mf_mode != SF) {
info->bandwidth_min = (shmem_info.config &
FUNC_MF_CFG_MIN_BW_MASK) >>
FUNC_MF_CFG_MIN_BW_SHIFT;
if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
DP_INFO(p_hwfn,
"bandwidth minimum out of bounds [%02x]. Set to 1\n",
info->bandwidth_min);
info->bandwidth_min = 1;
}
info->bandwidth_max = (shmem_info.config &
FUNC_MF_CFG_MAX_BW_MASK) >>
FUNC_MF_CFG_MAX_BW_SHIFT;
if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
DP_INFO(p_hwfn,
"bandwidth maximum out of bounds [%02x]. Set to 100\n",
info->bandwidth_max);
info->bandwidth_max = 100;
}
}
if (shmem_info.mac_upper || shmem_info.mac_lower) {
info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
info->mac[1] = (u8)(shmem_info.mac_upper);
info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
info->mac[5] = (u8)(shmem_info.mac_lower);
} else {
DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
}
info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
(((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
(((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
"Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n",
info->pause_on_host, info->protocol,
info->bandwidth_min, info->bandwidth_max,
info->mac[0], info->mac[1], info->mac[2],
info->mac[3], info->mac[4], info->mac[5],
info->wwn_port, info->wwn_node, info->ovlan);
return 0;
}
int qed_mcp_drain(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
u32 resp = 0, param = 0;
int rc;
rc = qed_mcp_cmd(p_hwfn, p_ptt,
DRV_MSG_CODE_NIG_DRAIN, 100,
&resp, &param);
/* Wait for the drain to complete before returning */
msleep(120);
return rc;
}
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_mcp_drv_version *p_ver)
{
int rc = 0;
u32 param = 0, reply = 0, i;
if (!qed_mcp_is_init(p_hwfn)) {
DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
return -EBUSY;
}
DRV_MB_WR(p_hwfn, p_ptt, union_data.drv_version.version,
p_ver->version);
/* Copy version string to shmem */
for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / 4; i++) {
DRV_MB_WR(p_hwfn, p_ptt,
union_data.drv_version.name[i * sizeof(u32)],
*(u32 *)&p_ver->name[i * sizeof(u32)]);
}
rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0, &reply,
&param);
if (rc) {
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
return rc;
}
return 0;
}

View File

@ -0,0 +1,232 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_MCP_H
#define _QED_MCP_H
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include "qed_hsi.h"
struct qed_mcp_function_info {
u8 pause_on_host;
enum qed_pci_personality protocol;
u8 bandwidth_min;
u8 bandwidth_max;
u8 mac[ETH_ALEN];
u64 wwn_port;
u64 wwn_node;
#define QED_MCP_VLAN_UNSET (0xffff)
u16 ovlan;
};
struct qed_mcp_nvm_common {
u32 offset;
u32 param;
u32 resp;
u32 cmd;
};
struct qed_mcp_drv_version {
u32 version;
u8 name[MCP_DRV_VER_STR_SIZE - 4];
};
/**
* @brief Get the management firmware version value
*
* @param cdev - qed dev pointer
* @param mfw_ver - mfw version value
*
 * @return int - 0 - operation was successful.
*/
int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
u32 *mfw_ver);
/**
 * @brief General function for sending commands to the MCP
 *        mailbox. It acquires a mutex lock for the entire
 *        operation, from sending the request until the MCP
 *        response arrives. The response is polled every
 *        10 usec, for up to 5 seconds.
*
* @param p_hwfn - hw function
* @param p_ptt - PTT required for register access
* @param cmd - command to be sent to the MCP.
* @param param - Optional param
* @param o_mcp_resp - The MCP response code (exclude sequence).
* @param o_mcp_param- Optional parameter provided by the MCP
* response
 * @return int - 0 - operation was successful.
*/
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 cmd,
u32 param,
u32 *o_mcp_resp,
u32 *o_mcp_param);
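/* Illustrative sketch of issuing a mailbox command (guarded out; the helper
 * name and locals are assumptions, while the command itself is one the
 * driver already uses for draining the NIG).
 */
#if 0
static int example_nig_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;

	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN, 100,
			   &resp, &param);
}
#endif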
/**
* @brief - drains the nig, allowing completion to pass in case of pauses.
* (Should be called only from sleepable context)
*
* @param p_hwfn
* @param p_ptt
*/
int qed_mcp_drain(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
* @brief Send driver version to MFW
*
* @param p_hwfn
* @param p_ptt
 * @param p_ver - contains the version value and the protocol driver name
*
 * @return int - 0 - operation was successful.
*/
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_mcp_drv_version *p_ver);
/* Using the hwfn number (and not pf_num) is required since in CMT mode,
 * the same pf_num may be used by two different hwfns.
 * TODO - this shouldn't really be in a .h file, but until all fields
 * required during hw-init are placed in their correct place in shmem
 * we need it in qed_dev.c [for reading the nvram reflection in shmem].
 */
*/
#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (QED_IS_BB((p_hwfn)->cdev) ? \
((rel_pfid) | \
((p_hwfn)->abs_pf_id & 1) << 3) : \
rel_pfid)
#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
/* TODO - this is only correct as long as only BB is supported, and
* no port-swapping is implemented; Afterwards we'll need to fix it.
*/
#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
((_p_hwfn)->cdev->num_ports_in_engines * 2))
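/* Worked example of MCP_PF_ID_BY_REL (illustrative): on a BB device, a hwfn
 * with rel_pf_id 0 and abs_pf_id 1 resolves to 0 | ((1 & 1) << 3) == 8,
 * while a hwfn on the even engine keeps its relative id unchanged.
 */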
struct qed_mcp_info {
struct mutex mutex; /* MCP access lock */
u32 public_base;
u32 drv_mb_addr;
u32 mfw_mb_addr;
u32 port_addr;
u16 drv_mb_seq;
u16 drv_pulse_seq;
struct qed_mcp_function_info func_info;
u8 *mfw_mb_cur;
u8 *mfw_mb_shadow;
u16 mfw_mb_length;
u16 mcp_hist;
};
/**
* @brief Initialize the interface with the MCP
*
* @param p_hwfn - HW func
* @param p_ptt - PTT required for register access
*
* @return int
*/
int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
* @brief Initialize the port interface with the MCP
*
* @param p_hwfn
* @param p_ptt
* Can only be called after `num_ports_in_engines' is set
*/
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
* @brief Releases resources allocated during the init process.
*
* @param p_hwfn - HW func
* @param p_ptt - PTT required for register access
*
* @return int
*/
int qed_mcp_free(struct qed_hwfn *p_hwfn);
/**
 * @brief Sends a LOAD_REQ to the MFW, and in case the operation
 *        succeeds, returns whether this PF is the first on the
* chip/engine/port or function. This function should be
* called when driver is ready to accept MFW events after
* Storms initializations are done.
*
* @param p_hwfn - hw function
* @param p_ptt - PTT required for register access
* @param p_load_code - The MCP response param containing one
* of the following:
* FW_MSG_CODE_DRV_LOAD_ENGINE
* FW_MSG_CODE_DRV_LOAD_PORT
* FW_MSG_CODE_DRV_LOAD_FUNCTION
 * @return int -
 *      0 - Operation was successful.
 *      -EBUSY - Operation failed
*/
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *p_load_code);
/**
* @brief Read the MFW mailbox into Current buffer.
*
* @param p_hwfn
* @param p_ptt
*/
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
* @brief - calls during init to read shmem of all function-related info.
*
* @param p_hwfn
*
 * @return 0 upon success.
*/
int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
* @brief - Reset the MCP using mailbox command.
*
* @param p_hwfn
* @param p_ptt
*
 * @return 0 upon success.
*/
int qed_mcp_reset(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
* @brief indicates whether the MFW objects [under mcp_info] are accessible
*
* @param p_hwfn
*
* @return true iff MFW is running and mcp_info is initialized
*/
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
#endif

View File

@ -0,0 +1,366 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef REG_ADDR_H
#define REG_ADDR_H
#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \
0
#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE ( \
0xfff << 0)
#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT \
12
#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE ( \
0xfff << 12)
#define CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT \
24
#define CDU_REG_CID_ADDR_PARAMS_NCIB ( \
0xff << 24)
#define XSDM_REG_OPERATION_GEN \
0xf80408UL
#define NIG_REG_RX_BRB_OUT_EN \
0x500e18UL
#define NIG_REG_STORM_OUT_EN \
0x500e08UL
#define PSWRQ2_REG_L2P_VALIDATE_VFID \
0x240c50UL
#define PGLUE_B_REG_USE_CLIENTID_IN_TAG \
0x2aae04UL
#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \
0x2aa16cUL
#define BAR0_MAP_REG_MSDM_RAM \
0x1d00000UL
#define BAR0_MAP_REG_USDM_RAM \
0x1d80000UL
#define BAR0_MAP_REG_PSDM_RAM \
0x1f00000UL
#define BAR0_MAP_REG_TSDM_RAM \
0x1c80000UL
#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
0x5011f4UL
#define PRS_REG_SEARCH_TCP \
0x1f0400UL
#define PRS_REG_SEARCH_UDP \
0x1f0404UL
#define PRS_REG_SEARCH_FCOE \
0x1f0408UL
#define PRS_REG_SEARCH_ROCE \
0x1f040cUL
#define PRS_REG_SEARCH_OPENFLOW \
0x1f0434UL
#define TM_REG_PF_ENABLE_CONN \
0x2c043cUL
#define TM_REG_PF_ENABLE_TASK \
0x2c0444UL
#define TM_REG_PF_SCAN_ACTIVE_CONN \
0x2c04fcUL
#define TM_REG_PF_SCAN_ACTIVE_TASK \
0x2c0500UL
#define IGU_REG_LEADING_EDGE_LATCH \
0x18082cUL
#define IGU_REG_TRAILING_EDGE_LATCH \
0x180830UL
#define QM_REG_USG_CNT_PF_TX \
0x2f2eacUL
#define QM_REG_USG_CNT_PF_OTHER \
0x2f2eb0UL
#define DORQ_REG_PF_DB_ENABLE \
0x100508UL
#define QM_REG_PF_EN \
0x2f2ea4UL
#define TCFC_REG_STRONG_ENABLE_PF \
0x2d0708UL
#define CCFC_REG_STRONG_ENABLE_PF \
0x2e0708UL
#define PGLUE_B_REG_PGL_ADDR_88_F0 \
0x2aa404UL
#define PGLUE_B_REG_PGL_ADDR_8C_F0 \
0x2aa408UL
#define PGLUE_B_REG_PGL_ADDR_90_F0 \
0x2aa40cUL
#define PGLUE_B_REG_PGL_ADDR_94_F0 \
0x2aa410UL
#define PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
0x2aa138UL
#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \
0x2aa174UL
#define MISC_REG_GEN_PURP_CR0 \
0x008c80UL
#define MCP_REG_SCRATCH \
0xe20000UL
#define CNIG_REG_NW_PORT_MODE_BB_B0 \
0x218200UL
#define MISCS_REG_CHIP_NUM \
0x00976cUL
#define MISCS_REG_CHIP_REV \
0x009770UL
#define MISCS_REG_CMT_ENABLED_FOR_PAIR \
0x00971cUL
#define MISCS_REG_CHIP_TEST_REG \
0x009778UL
#define MISCS_REG_CHIP_METAL \
0x009774UL
#define BRB_REG_HEADER_SIZE \
0x340804UL
#define BTB_REG_HEADER_SIZE \
0xdb0804UL
#define CAU_REG_LONG_TIMEOUT_THRESHOLD \
0x1c0708UL
#define CCFC_REG_ACTIVITY_COUNTER \
0x2e8800UL
#define CDU_REG_CID_ADDR_PARAMS \
0x580900UL
#define DBG_REG_CLIENT_ENABLE \
0x010004UL
#define DMAE_REG_INIT \
0x00c000UL
#define DORQ_REG_IFEN \
0x100040UL
#define GRC_REG_TIMEOUT_EN \
0x050404UL
#define IGU_REG_BLOCK_CONFIGURATION \
0x180040UL
#define MCM_REG_INIT \
0x1200000UL
#define MCP2_REG_DBG_DWORD_ENABLE \
0x052404UL
#define MISC_REG_PORT_MODE \
0x008c00UL
#define MISCS_REG_CLK_100G_MODE \
0x009070UL
#define MSDM_REG_ENABLE_IN1 \
0xfc0004UL
#define MSEM_REG_ENABLE_IN \
0x1800004UL
#define NIG_REG_CM_HDR \
0x500840UL
#define NCSI_REG_CONFIG \
0x040200UL
#define PBF_REG_INIT \
0xd80000UL
#define PTU_REG_ATC_INIT_ARRAY \
0x560000UL
#define PCM_REG_INIT \
0x1100000UL
#define PGLUE_B_REG_ADMIN_PER_PF_REGION \
0x2a9000UL
#define PRM_REG_DISABLE_PRM \
0x230000UL
#define PRS_REG_SOFT_RST \
0x1f0000UL
#define PSDM_REG_ENABLE_IN1 \
0xfa0004UL
#define PSEM_REG_ENABLE_IN \
0x1600004UL
#define PSWRQ_REG_DBG_SELECT \
0x280020UL
#define PSWRQ2_REG_CDUT_P_SIZE \
0x24000cUL
#define PSWHST_REG_DISCARD_INTERNAL_WRITES \
0x2a0040UL
#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
0x29e050UL
#define PSWRD_REG_DBG_SELECT \
0x29c040UL
#define PSWRD2_REG_CONF11 \
0x29d064UL
#define PSWWR_REG_USDM_FULL_TH \
0x29a040UL
#define PSWWR2_REG_CDU_FULL_TH2 \
0x29b040UL
#define QM_REG_MAXPQSIZE_0 \
0x2f0434UL
#define RSS_REG_RSS_INIT_EN \
0x238804UL
#define RDIF_REG_STOP_ON_ERROR \
0x300040UL
#define SRC_REG_SOFT_RST \
0x23874cUL
#define TCFC_REG_ACTIVITY_COUNTER \
0x2d8800UL
#define TCM_REG_INIT \
0x1180000UL
#define TM_REG_PXP_READ_DATA_FIFO_INIT \
0x2c0014UL
#define TSDM_REG_ENABLE_IN1 \
0xfb0004UL
#define TSEM_REG_ENABLE_IN \
0x1700004UL
#define TDIF_REG_STOP_ON_ERROR \
0x310040UL
#define UCM_REG_INIT \
0x1280000UL
#define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
0x051004UL
#define USDM_REG_ENABLE_IN1 \
0xfd0004UL
#define USEM_REG_ENABLE_IN \
0x1900004UL
#define XCM_REG_INIT \
0x1000000UL
#define XSDM_REG_ENABLE_IN1 \
0xf80004UL
#define XSEM_REG_ENABLE_IN \
0x1400004UL
#define YCM_REG_INIT \
0x1080000UL
#define YSDM_REG_ENABLE_IN1 \
0xf90004UL
#define YSEM_REG_ENABLE_IN \
0x1500004UL
#define XYLD_REG_SCBD_STRICT_PRIO \
0x4c0000UL
#define TMLD_REG_SCBD_STRICT_PRIO \
0x4d0000UL
#define MULD_REG_SCBD_STRICT_PRIO \
0x4e0000UL
#define YULD_REG_SCBD_STRICT_PRIO \
0x4c8000UL
#define MISC_REG_SHARED_MEM_ADDR \
0x008c20UL
#define DMAE_REG_GO_C0 \
0x00c048UL
#define DMAE_REG_GO_C1 \
0x00c04cUL
#define DMAE_REG_GO_C2 \
0x00c050UL
#define DMAE_REG_GO_C3 \
0x00c054UL
#define DMAE_REG_GO_C4 \
0x00c058UL
#define DMAE_REG_GO_C5 \
0x00c05cUL
#define DMAE_REG_GO_C6 \
0x00c060UL
#define DMAE_REG_GO_C7 \
0x00c064UL
#define DMAE_REG_GO_C8 \
0x00c068UL
#define DMAE_REG_GO_C9 \
0x00c06cUL
#define DMAE_REG_GO_C10 \
0x00c070UL
#define DMAE_REG_GO_C11 \
0x00c074UL
#define DMAE_REG_GO_C12 \
0x00c078UL
#define DMAE_REG_GO_C13 \
0x00c07cUL
#define DMAE_REG_GO_C14 \
0x00c080UL
#define DMAE_REG_GO_C15 \
0x00c084UL
#define DMAE_REG_GO_C16 \
0x00c088UL
#define DMAE_REG_GO_C17 \
0x00c08cUL
#define DMAE_REG_GO_C18 \
0x00c090UL
#define DMAE_REG_GO_C19 \
0x00c094UL
#define DMAE_REG_GO_C20 \
0x00c098UL
#define DMAE_REG_GO_C21 \
0x00c09cUL
#define DMAE_REG_GO_C22 \
0x00c0a0UL
#define DMAE_REG_GO_C23 \
0x00c0a4UL
#define DMAE_REG_GO_C24 \
0x00c0a8UL
#define DMAE_REG_GO_C25 \
0x00c0acUL
#define DMAE_REG_GO_C26 \
0x00c0b0UL
#define DMAE_REG_GO_C27 \
0x00c0b4UL
#define DMAE_REG_GO_C28 \
0x00c0b8UL
#define DMAE_REG_GO_C29 \
0x00c0bcUL
#define DMAE_REG_GO_C30 \
0x00c0c0UL
#define DMAE_REG_GO_C31 \
0x00c0c4UL
#define DMAE_REG_CMD_MEM \
0x00c800UL
#define QM_REG_MAXPQSIZETXSEL_0 \
0x2f0440UL
#define QM_REG_SDMCMDREADY \
0x2f1e10UL
#define QM_REG_SDMCMDADDR \
0x2f1e04UL
#define QM_REG_SDMCMDDATALSB \
0x2f1e08UL
#define QM_REG_SDMCMDDATAMSB \
0x2f1e0cUL
#define QM_REG_SDMCMDGO \
0x2f1e14UL
#define QM_REG_RLPFCRD \
0x2f4d80UL
#define QM_REG_RLPFINCVAL \
0x2f4c80UL
#define QM_REG_RLGLBLCRD \
0x2f4400UL
#define QM_REG_RLGLBLINCVAL \
0x2f3400UL
#define IGU_REG_ATTENTION_ENABLE \
0x18083cUL
#define IGU_REG_ATTN_MSG_ADDR_L \
0x180820UL
#define IGU_REG_ATTN_MSG_ADDR_H \
0x180824UL
#define MISC_REG_AEU_GENERAL_ATTN_0 \
0x008400UL
#define CAU_REG_SB_ADDR_MEMORY \
0x1c8000UL
#define CAU_REG_SB_VAR_MEMORY \
0x1c6000UL
#define CAU_REG_PI_MEMORY \
0x1d0000UL
#define IGU_REG_PF_CONFIGURATION \
0x180800UL
#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
0x00849cUL
#define MISC_REG_AEU_MASK_ATTN_IGU \
0x008494UL
#define IGU_REG_CLEANUP_STATUS_0 \
0x180980UL
#define IGU_REG_CLEANUP_STATUS_1 \
0x180a00UL
#define IGU_REG_CLEANUP_STATUS_2 \
0x180a80UL
#define IGU_REG_CLEANUP_STATUS_3 \
0x180b00UL
#define IGU_REG_CLEANUP_STATUS_4 \
0x180b80UL
#define IGU_REG_COMMAND_REG_32LSB_DATA \
0x180840UL
#define IGU_REG_COMMAND_REG_CTRL \
0x180848UL
#define IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN ( \
0x1 << 1)
#define IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN ( \
0x1 << 0)
#define IGU_REG_MAPPING_MEMORY \
0x184000UL
#define MISCS_REG_GENERIC_POR_0 \
0x0096d4UL
#define MCP_REG_NVM_CFG4 \
0xe0642cUL
#define MCP_REG_NVM_CFG4_FLASH_SIZE ( \
0x7 << 0)
#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
0
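/* Illustrative only: the FLASH_SIZE field would typically be extracted from
 * the NVM_CFG4 register via its mask/shift pair, e.g. (assuming a register
 * read helper such as qed_rd() exists elsewhere in the driver):
 *
 *	flash_size = (qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4) &
 *		      MCP_REG_NVM_CFG4_FLASH_SIZE) >>
 *		     MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
 */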
#endif

View File

@ -0,0 +1,333 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_SP_H
#define _QED_SP_H
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/qed/qed_chain.h>
#include "qed.h"
#include "qed_hsi.h"
enum spq_mode {
QED_SPQ_MODE_BLOCK, /* Client will poll a designated mem. address */
QED_SPQ_MODE_CB, /* Client supplies a callback */
QED_SPQ_MODE_EBLOCK, /* QED should block until completion */
};
struct qed_spq_comp_cb {
void (*function)(struct qed_hwfn *,
void *,
union event_ring_data *,
u8 fw_return_code);
void *cookie;
};
union ramrod_data {
struct pf_start_ramrod_data pf_start;
};
#define EQ_MAX_CREDIT 0xffffffff
enum spq_priority {
QED_SPQ_PRIORITY_NORMAL,
QED_SPQ_PRIORITY_HIGH,
};
union qed_spq_req_comp {
struct qed_spq_comp_cb cb;
u64 *done_addr;
};
struct qed_spq_comp_done {
u64 done;
u8 fw_return_code;
};
struct qed_spq_entry {
struct list_head list;
u8 flags;
/* HSI slow path element */
struct slow_path_element elem;
union ramrod_data ramrod;
enum spq_priority priority;
/* pending queue for this entry */
struct list_head *queue;
enum spq_mode comp_mode;
struct qed_spq_comp_cb comp_cb;
struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
};
struct qed_eq {
struct qed_chain chain;
u8 eq_sb_index; /* index within the SB */
__le16 *p_fw_cons; /* ptr to index value */
};
struct qed_consq {
struct qed_chain chain;
};
struct qed_spq {
spinlock_t lock; /* SPQ lock */
struct list_head unlimited_pending;
struct list_head pending;
struct list_head completion_pending;
struct list_head free_pool;
struct qed_chain chain;
/* allocated dma-able memory for spq entries (+ramrod data) */
dma_addr_t p_phys;
struct qed_spq_entry *p_virt;
/* Used as index for completions (returns on EQ by FW) */
u16 echo_idx;
/* Statistics */
u32 unlimited_pending_count;
u32 normal_count;
u32 high_count;
u32 comp_sent_count;
u32 comp_count;
u32 cid;
};
/**
* @brief qed_spq_post - Posts a slowpath request to FW, or queues it on
*        the pending list if the SPQ currently has no room for it.
*
* @param p_hwfn
* @param p_ent
* @param fw_return_code
*
* @return int
*/
int qed_spq_post(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent,
u8 *fw_return_code);
/**
* @brief qed_spq_allocate - Allocates & initializes the SPQ.
*
* @param p_hwfn
*
* @return int
*/
int qed_spq_alloc(struct qed_hwfn *p_hwfn);
/**
* @brief qed_spq_setup - Reset the SPQ to its start state.
*
* @param p_hwfn
*/
void qed_spq_setup(struct qed_hwfn *p_hwfn);
/**
* @brief qed_spq_deallocate - Deallocates the given SPQ struct.
*
* @param p_hwfn
*/
void qed_spq_free(struct qed_hwfn *p_hwfn);
/**
* @brief qed_spq_get_entry - Obtain an entry from the spq
* free pool list.
*
* @param p_hwfn
* @param pp_ent
*
* @return int
*/
int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent);
/**
* @brief qed_spq_return_entry - Return an entry to spq free
* pool list
*
* @param p_hwfn
* @param p_ent
*/
void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent);
/**
* @brief qed_eq_allocate - Allocates & initializes an EQ struct
*
* @param p_hwfn
* @param num_elem number of elements in the eq
*
* @return struct qed_eq* - a newly allocated structure; NULL upon error.
*/
struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
u16 num_elem);
/**
* @brief qed_eq_setup - Reset the EQ to its start state.
*
* @param p_hwfn
* @param p_eq
*/
void qed_eq_setup(struct qed_hwfn *p_hwfn,
struct qed_eq *p_eq);
/**
* @brief qed_eq_deallocate - deallocates the given EQ struct.
*
* @param p_hwfn
* @param p_eq
*/
void qed_eq_free(struct qed_hwfn *p_hwfn,
struct qed_eq *p_eq);
/**
* @brief qed_eq_prod_update - update the FW with default EQ producer
*
* @param p_hwfn
* @param prod
*/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
u16 prod);
/**
* @brief qed_eq_completion - Completes currently pending EQ elements
*
* @param p_hwfn
* @param cookie
*
* @return int
*/
int qed_eq_completion(struct qed_hwfn *p_hwfn,
void *cookie);
/**
* @brief qed_spq_completion - Completes a single event
*
* @param p_hwfn
* @param echo - echo value from cookie (used for determining completion)
* @param p_data - data from cookie (used in callback function if applicable)
*
* @return int
*/
int qed_spq_completion(struct qed_hwfn *p_hwfn,
__le16 echo,
u8 fw_return_code,
union event_ring_data *p_data);
/**
* @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
*
* @param p_hwfn
*
* @return u32 - SPQ CID
*/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);
/**
* @brief qed_consq_alloc - Allocates & initializes a ConsQ
* struct
*
* @param p_hwfn
*
* @return struct qed_consq* - a newly allocated structure; NULL upon error.
*/
struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn);
/**
* @brief qed_consq_setup - Reset the ConsQ to its start
* state.
*
* @param p_hwfn
* @param p_consq
*/
void qed_consq_setup(struct qed_hwfn *p_hwfn,
struct qed_consq *p_consq);
/**
* @brief qed_consq_free - deallocates the given ConsQ struct.
*
* @param p_hwfn
* @param p_consq
*/
void qed_consq_free(struct qed_hwfn *p_hwfn,
struct qed_consq *p_consq);
/**
* @file
*
* @brief Slow-hwfn low-level commands (Ramrods) function definitions.
*/
#define QED_SP_EQ_COMPLETION 0x01
#define QED_SP_CQE_COMPLETION 0x02
struct qed_sp_init_request_params {
size_t ramrod_data_size;
enum spq_mode comp_mode;
struct qed_spq_comp_cb *p_comp_data;
};
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
u32 cid,
u16 opaque_fid,
u8 cmd,
u8 protocol,
struct qed_sp_init_request_params *p_params);
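/* Typical usage (a sketch only; see qed_sp_pf_start()/qed_sp_pf_stop() in
 * qed_sp_commands.c for the real flows):
 *
 *	struct qed_sp_init_request_params params = {0};
 *	struct qed_spq_entry *p_ent;
 *	int rc;
 *
 *	params.ramrod_data_size = sizeof(struct pf_start_ramrod_data);
 *	params.comp_mode = QED_SPQ_MODE_EBLOCK;
 *	rc = qed_sp_init_request(p_hwfn, &p_ent, qed_spq_get_cid(p_hwfn),
 *				 p_hwfn->hw_info.opaque_fid,
 *				 COMMON_RAMROD_PF_START, PROTOCOLID_COMMON,
 *				 &params);
 *	if (!rc)
 *		rc = qed_spq_post(p_hwfn, p_ent, NULL);
 */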
/**
* @brief qed_sp_pf_start - PF Function Start Ramrod
*
* This ramrod is sent to initialize a physical function (PF). It will
* configure the function related parameters and write its completion to the
* event ring specified in the parameters.
*
* Ramrods complete on the common event ring for the PF. This ring is
* allocated by the driver on host memory and its parameters are written
* to the internal RAM of the UStorm by the Function Start Ramrod.
*
* @param p_hwfn
* @param mode
*
* @return int
*/
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
enum mf_mode mode);
/**
* @brief qed_sp_pf_stop - PF Function Stop Ramrod
*
* This ramrod is sent to close a Physical Function (PF). It is the last ramrod
* sent and the last completion written to the PF's Event Ring. This ramrod also
* deletes the context for the slowpath connection on this PF.
*
* @note Not required for first packet.
*
* @param p_hwfn
*
* @return int
*/
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
#endif

View File

@ -0,0 +1,170 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
u32 cid,
u16 opaque_fid,
u8 cmd,
u8 protocol,
struct qed_sp_init_request_params *p_params)
{
int rc = -EINVAL;
struct qed_spq_entry *p_ent = NULL;
u32 opaque_cid = opaque_fid << 16 | cid;
if (!pp_ent)
return -ENOMEM;
rc = qed_spq_get_entry(p_hwfn, pp_ent);
if (rc != 0)
return rc;
p_ent = *pp_ent;
p_ent->elem.hdr.cid = cpu_to_le32(opaque_cid);
p_ent->elem.hdr.cmd_id = cmd;
p_ent->elem.hdr.protocol_id = protocol;
p_ent->priority = QED_SPQ_PRIORITY_NORMAL;
p_ent->comp_mode = p_params->comp_mode;
p_ent->comp_done.done = 0;
switch (p_ent->comp_mode) {
case QED_SPQ_MODE_EBLOCK:
p_ent->comp_cb.cookie = &p_ent->comp_done;
break;
case QED_SPQ_MODE_BLOCK:
if (!p_params->p_comp_data)
return -EINVAL;
p_ent->comp_cb.cookie = p_params->p_comp_data->cookie;
break;
case QED_SPQ_MODE_CB:
if (!p_params->p_comp_data)
p_ent->comp_cb.function = NULL;
else
p_ent->comp_cb = *p_params->p_comp_data;
break;
default:
DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
p_ent->comp_mode);
return -EINVAL;
}
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
opaque_cid, cmd, protocol,
(unsigned long)&p_ent->ramrod,
D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
"MODE_CB"));
if (p_params->ramrod_data_size)
memset(&p_ent->ramrod, 0, p_params->ramrod_data_size);
return 0;
}
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
enum mf_mode mode)
{
struct qed_sp_init_request_params params;
struct pf_start_ramrod_data *p_ramrod = NULL;
u16 sb = qed_int_get_sp_sb_id(p_hwfn);
u8 sb_index = p_hwfn->p_eq->eq_sb_index;
struct qed_spq_entry *p_ent = NULL;
int rc = -EINVAL;
/* update initial eq producer */
qed_eq_prod_update(p_hwfn,
qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));
memset(&params, 0, sizeof(params));
params.ramrod_data_size = sizeof(*p_ramrod);
params.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn,
&p_ent,
qed_spq_get_cid(p_hwfn),
p_hwfn->hw_info.opaque_fid,
COMMON_RAMROD_PF_START,
PROTOCOLID_COMMON,
&params);
if (rc)
return rc;
p_ramrod = &p_ent->ramrod.pf_start;
p_ramrod->event_ring_sb_id = cpu_to_le16(sb);
p_ramrod->event_ring_sb_index = sb_index;
p_ramrod->path_id = QED_PATH_ID(p_hwfn);
p_ramrod->dont_log_ramrods = 0;
p_ramrod->log_type_mask = cpu_to_le16(0xf);
p_ramrod->mf_mode = mode;
p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
/* Place EQ address in RAMROD */
p_ramrod->event_ring_pbl_addr.hi =
DMA_HI_LE(p_hwfn->p_eq->chain.pbl.p_phys_table);
p_ramrod->event_ring_pbl_addr.lo =
DMA_LO_LE(p_hwfn->p_eq->chain.pbl.p_phys_table);
p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt;
p_ramrod->consolid_q_pbl_addr.hi =
DMA_HI_LE(p_hwfn->p_consq->chain.pbl.p_phys_table);
p_ramrod->consolid_q_pbl_addr.lo =
DMA_LO_LE(p_hwfn->p_consq->chain.pbl.p_phys_table);
p_hwfn->hw_info.personality = PERSONALITY_ETH;
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Setting event_ring_sb [id %04x index %02x], mf [%s] outer_tag [%d]\n",
sb, sb_index,
(p_ramrod->mf_mode == SF) ? "SF" : "Multi-Pf",
p_ramrod->outer_tag);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
struct qed_sp_init_request_params params;
struct qed_spq_entry *p_ent = NULL;
int rc = -EINVAL;
memset(&params, 0, sizeof(params));
params.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent, qed_spq_get_cid(p_hwfn),
p_hwfn->hw_info.opaque_fid,
COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
&params);
if (rc)
return rc;
return qed_spq_post(p_hwfn, p_ent, NULL);
}

View File

@ -0,0 +1,831 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
/***************************************************************************
* Structures & Definitions
***************************************************************************/
#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1)
#define SPQ_BLOCK_SLEEP_LENGTH (1000)
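/* With usleep_range(5000, 10000) per iteration below, this bounds the
 * EBLOCK wait to roughly 5-10 seconds before an MCP drain is requested
 * (and the same again after the drain).
 */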
/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
void *cookie,
union event_ring_data *data,
u8 fw_return_code)
{
struct qed_spq_comp_done *comp_done;
comp_done = (struct qed_spq_comp_done *)cookie;
comp_done->done = 0x1;
comp_done->fw_return_code = fw_return_code;
/* make update visible to waiting thread */
smp_wmb();
}
static int qed_spq_block(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent,
u8 *p_fw_ret)
{
int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
struct qed_spq_comp_done *comp_done;
int rc;
comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
while (sleep_count) {
/* validate we receive completion update */
smp_rmb();
if (comp_done->done == 1) {
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return 0;
}
usleep_range(5000, 10000);
sleep_count--;
}
DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
if (rc != 0)
DP_NOTICE(p_hwfn, "MCP drain failed\n");
/* Retry after drain */
sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
while (sleep_count) {
/* validate we receive completion update */
smp_rmb();
if (comp_done->done == 1) {
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return 0;
}
usleep_range(5000, 10000);
sleep_count--;
}
if (comp_done->done == 1) {
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return 0;
}
DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");
return -EBUSY;
}
/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static int
qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent)
{
p_ent->elem.hdr.echo = 0;
p_hwfn->p_spq->echo_idx++;
p_ent->flags = 0;
switch (p_ent->comp_mode) {
case QED_SPQ_MODE_EBLOCK:
case QED_SPQ_MODE_BLOCK:
p_ent->comp_cb.function = qed_spq_blocking_cb;
break;
case QED_SPQ_MODE_CB:
break;
default:
DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
p_ent->comp_mode);
return -EINVAL;
}
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
p_ent->elem.hdr.cid,
p_ent->elem.hdr.cmd_id,
p_ent->elem.hdr.protocol_id,
p_ent->elem.data_ptr.hi,
p_ent->elem.data_ptr.lo,
D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
"MODE_CB"));
return 0;
}
/***************************************************************************
* HSI access
***************************************************************************/
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq)
{
u16 pq;
struct qed_cxt_info cxt_info;
struct core_conn_context *p_cxt;
union qed_qm_pq_params pq_params;
int rc;
cxt_info.iid = p_spq->cid;
rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
if (rc < 0) {
DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
p_spq->cid);
return;
}
p_cxt = cxt_info.p_cxt;
SET_FIELD(p_cxt->xstorm_ag_context.flags10,
XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
SET_FIELD(p_cxt->xstorm_ag_context.flags1,
XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
SET_FIELD(p_cxt->xstorm_ag_context.flags9,
XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
/* QM physical queue */
memset(&pq_params, 0, sizeof(pq_params));
pq_params.core.tc = LB_TC;
pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);
p_cxt->xstorm_st_context.spq_base_lo =
DMA_LO_LE(p_spq->chain.p_phys_addr);
p_cxt->xstorm_st_context.spq_base_hi =
DMA_HI_LE(p_spq->chain.p_phys_addr);
p_cxt->xstorm_st_context.consolid_base_addr.lo =
DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
p_cxt->xstorm_st_context.consolid_base_addr.hi =
DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
}
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq,
struct qed_spq_entry *p_ent)
{
struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
struct slow_path_element *elem;
struct core_db_data db;
elem = qed_chain_produce(p_chain);
if (!elem) {
DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
return -EINVAL;
}
*elem = p_ent->elem; /* struct assignment */
/* send a doorbell on the slow hwfn session */
memset(&db, 0, sizeof(db));
SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
DQ_XCM_CORE_SPQ_PROD_CMD);
db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
/* validate producer is up-to-date */
rmb();
db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
/* do not reorder */
barrier();
DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
/* make sure doorbell is rung */
mmiowb();
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
p_spq->cid, db.params, db.agg_flags,
qed_chain_get_prod_idx(p_chain));
return 0;
}
/***************************************************************************
* Asynchronous events
***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
DP_NOTICE(p_hwfn,
"Unknown Async completion for protocol: %d\n",
p_eqe->protocol_id);
return -EINVAL;
}
/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
u16 prod)
{
u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
REG_WR16(p_hwfn, addr, prod);
/* keep prod updates ordered */
mmiowb();
}
int qed_eq_completion(struct qed_hwfn *p_hwfn,
void *cookie)
{
struct qed_eq *p_eq = cookie;
struct qed_chain *p_chain = &p_eq->chain;
int rc = 0;
/* take a snapshot of the FW consumer */
u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);
DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
/* Need to guarantee the fw_cons index we use points to a usable
* element (to comply with our chain), so our macros would comply
*/
if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
qed_chain_get_usable_per_page(p_chain))
fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);
/* Complete current segment of eq entries */
while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);
if (!p_eqe) {
rc = -EINVAL;
break;
}
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"op %x prot %x res0 %x echo %x fwret %x flags %x\n",
p_eqe->opcode,
p_eqe->protocol_id,
p_eqe->reserved0,
le16_to_cpu(p_eqe->echo),
p_eqe->fw_return_code,
p_eqe->flags);
if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
if (qed_async_event_completion(p_hwfn, p_eqe))
rc = -EINVAL;
} else if (qed_spq_completion(p_hwfn,
p_eqe->echo,
p_eqe->fw_return_code,
&p_eqe->data)) {
rc = -EINVAL;
}
qed_chain_recycle_consumed(p_chain);
}
qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
return rc;
}
struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
u16 num_elem)
{
struct qed_eq *p_eq;
/* Allocate EQ struct */
p_eq = kzalloc(sizeof(*p_eq), GFP_ATOMIC);
if (!p_eq) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
return NULL;
}
/* Allocate and initialize EQ chain */
if (qed_chain_alloc(p_hwfn->cdev,
QED_CHAIN_USE_TO_PRODUCE,
QED_CHAIN_MODE_PBL,
num_elem,
sizeof(union event_ring_element),
&p_eq->chain)) {
DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
goto eq_allocate_fail;
}
/* register EQ completion on the SP SB */
qed_int_register_cb(p_hwfn,
qed_eq_completion,
p_eq,
&p_eq->eq_sb_index,
&p_eq->p_fw_cons);
return p_eq;
eq_allocate_fail:
qed_eq_free(p_hwfn, p_eq);
return NULL;
}
void qed_eq_setup(struct qed_hwfn *p_hwfn,
struct qed_eq *p_eq)
{
qed_chain_reset(&p_eq->chain);
}
void qed_eq_free(struct qed_hwfn *p_hwfn,
struct qed_eq *p_eq)
{
if (!p_eq)
return;
qed_chain_free(p_hwfn->cdev, &p_eq->chain);
kfree(p_eq);
}
/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
struct qed_spq_entry *p_virt = NULL;
dma_addr_t p_phys = 0;
unsigned int i = 0;
INIT_LIST_HEAD(&p_spq->pending);
INIT_LIST_HEAD(&p_spq->completion_pending);
INIT_LIST_HEAD(&p_spq->free_pool);
INIT_LIST_HEAD(&p_spq->unlimited_pending);
spin_lock_init(&p_spq->lock);
/* SPQ empty pool */
p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
p_virt = p_spq->p_virt;
for (i = 0; i < p_spq->chain.capacity; i++) {
p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);
list_add_tail(&p_virt->list, &p_spq->free_pool);
p_virt++;
p_phys += sizeof(struct qed_spq_entry);
}
/* Statistics */
p_spq->normal_count = 0;
p_spq->comp_count = 0;
p_spq->comp_sent_count = 0;
p_spq->unlimited_pending_count = 0;
p_spq->echo_idx = 0;
/* SPQ cid, cannot fail */
qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
qed_spq_hw_initialize(p_hwfn, p_spq);
/* reset the chain itself */
qed_chain_reset(&p_spq->chain);
}
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_spq *p_spq = NULL;
dma_addr_t p_phys = 0;
struct qed_spq_entry *p_virt = NULL;
/* SPQ struct */
p_spq =
kzalloc(sizeof(struct qed_spq), GFP_ATOMIC);
if (!p_spq) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
return -ENOMEM;
}
/* SPQ ring */
if (qed_chain_alloc(p_hwfn->cdev,
QED_CHAIN_USE_TO_PRODUCE,
QED_CHAIN_MODE_SINGLE,
0, /* N/A when the mode is SINGLE */
sizeof(struct slow_path_element),
&p_spq->chain)) {
DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
goto spq_allocate_fail;
}
/* allocate and fill the SPQ elements (incl. ramrod data list) */
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
p_spq->chain.capacity *
sizeof(struct qed_spq_entry),
&p_phys,
GFP_KERNEL);
if (!p_virt)
goto spq_allocate_fail;
p_spq->p_virt = p_virt;
p_spq->p_phys = p_phys;
p_hwfn->p_spq = p_spq;
return 0;
spq_allocate_fail:
qed_chain_free(p_hwfn->cdev, &p_spq->chain);
kfree(p_spq);
return -ENOMEM;
}
void qed_spq_free(struct qed_hwfn *p_hwfn)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
if (!p_spq)
return;
if (p_spq->p_virt)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
p_spq->chain.capacity *
sizeof(struct qed_spq_entry),
p_spq->p_virt,
p_spq->p_phys);
qed_chain_free(p_hwfn->cdev, &p_spq->chain);
kfree(p_spq);
}
int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
struct qed_spq_entry *p_ent = NULL;
int rc = 0;
spin_lock_bh(&p_spq->lock);
if (list_empty(&p_spq->free_pool)) {
p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
if (!p_ent) {
rc = -ENOMEM;
goto out_unlock;
}
p_ent->queue = &p_spq->unlimited_pending;
} else {
p_ent = list_first_entry(&p_spq->free_pool,
struct qed_spq_entry,
list);
list_del(&p_ent->list);
p_ent->queue = &p_spq->pending;
}
*pp_ent = p_ent;
out_unlock:
spin_unlock_bh(&p_spq->lock);
return rc;
}
/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent)
{
list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}
void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent)
{
spin_lock_bh(&p_hwfn->p_spq->lock);
__qed_spq_return_entry(p_hwfn, p_ent);
spin_unlock_bh(&p_hwfn->p_spq->lock);
}
/**
* @brief qed_spq_add_entry - adds a new entry to the pending
* list. Should be used while lock is being held.
*
* Adds an entry to the pending list if there is room (an empty
* element is available in the free_pool), or else places the
* entry in the unlimited_pending pool.
*
* @param p_hwfn
* @param p_ent
* @param priority
*
* @return int
*/
static int
qed_spq_add_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent,
enum spq_priority priority)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
if (p_ent->queue == &p_spq->unlimited_pending) {
struct qed_spq_entry *p_en2;
if (list_empty(&p_spq->free_pool)) {
list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
p_spq->unlimited_pending_count++;
return 0;
}
p_en2 = list_first_entry(&p_spq->free_pool,
struct qed_spq_entry,
list);
list_del(&p_en2->list);
/* Struct assignment */
*p_en2 = *p_ent;
kfree(p_ent);
p_ent = p_en2;
}
/* entry is to be placed in 'pending' queue */
switch (priority) {
case QED_SPQ_PRIORITY_NORMAL:
list_add_tail(&p_ent->list, &p_spq->pending);
p_spq->normal_count++;
break;
case QED_SPQ_PRIORITY_HIGH:
list_add(&p_ent->list, &p_spq->pending);
p_spq->high_count++;
break;
default:
return -EINVAL;
}
return 0;
}
/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
if (!p_hwfn->p_spq)
return 0xffffffff; /* illegal */
return p_hwfn->p_spq->cid;
}
/***************************************************************************
* Posting new Ramrods
***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
struct list_head *head,
u32 keep_reserve)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
int rc;
while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
!list_empty(head)) {
struct qed_spq_entry *p_ent =
list_first_entry(head, struct qed_spq_entry, list);
list_del(&p_ent->list);
list_add_tail(&p_ent->list, &p_spq->completion_pending);
p_spq->comp_sent_count++;
rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
if (rc) {
list_del(&p_ent->list);
__qed_spq_return_entry(p_hwfn, p_ent);
return rc;
}
}
return 0;
}
static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
struct qed_spq_entry *p_ent = NULL;
while (!list_empty(&p_spq->free_pool)) {
if (list_empty(&p_spq->unlimited_pending))
break;
p_ent = list_first_entry(&p_spq->unlimited_pending,
struct qed_spq_entry,
list);
if (!p_ent)
return -EINVAL;
list_del(&p_ent->list);
qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
}
return qed_spq_post_list(p_hwfn, &p_spq->pending,
SPQ_HIGH_PRI_RESERVE_DEFAULT);
}
int qed_spq_post(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent,
u8 *fw_return_code)
{
int rc = 0;
struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
bool b_ret_ent = true;
if (!p_hwfn)
return -EINVAL;
if (!p_ent) {
DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
return -EINVAL;
}
/* Complete the entry */
rc = qed_spq_fill_entry(p_hwfn, p_ent);
spin_lock_bh(&p_spq->lock);
/* Check return value after LOCK is taken for cleaner error flow */
if (rc)
goto spq_post_fail;
/* Add the request to the pending queue */
rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
if (rc)
goto spq_post_fail;
rc = qed_spq_pend_post(p_hwfn);
if (rc) {
/* Since it's possible that pending failed for a different
* entry [although unlikely], the failed entry was already
* dealt with; No need to return it here.
*/
b_ret_ent = false;
goto spq_post_fail;
}
spin_unlock_bh(&p_spq->lock);
if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
/* For entries in QED BLOCK mode, the completion code cannot
* perform the necessary cleanup - if it did, we couldn't
* access p_ent here to see whether it's successful or not.
* Thus, after gaining the answer perform the cleanup here.
*/
rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
if (rc)
goto spq_post_fail2;
/* return to pool */
qed_spq_return_entry(p_hwfn, p_ent);
}
return rc;
spq_post_fail2:
spin_lock_bh(&p_spq->lock);
list_del(&p_ent->list);
qed_chain_return_produced(&p_spq->chain);
spq_post_fail:
/* return to the free pool */
if (b_ret_ent)
__qed_spq_return_entry(p_hwfn, p_ent);
spin_unlock_bh(&p_spq->lock);
return rc;
}
int qed_spq_completion(struct qed_hwfn *p_hwfn,
__le16 echo,
u8 fw_return_code,
union event_ring_data *p_data)
{
struct qed_spq *p_spq;
struct qed_spq_entry *p_ent = NULL;
struct qed_spq_entry *tmp;
struct qed_spq_entry *found = NULL;
int rc;
if (!p_hwfn)
return -EINVAL;
p_spq = p_hwfn->p_spq;
if (!p_spq)
return -EINVAL;
spin_lock_bh(&p_spq->lock);
list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
list) {
if (p_ent->elem.hdr.echo == echo) {
list_del(&p_ent->list);
qed_chain_return_produced(&p_spq->chain);
p_spq->comp_count++;
found = p_ent;
break;
}
}
/* Release lock before callback, as callback may post
* an additional ramrod.
*/
spin_unlock_bh(&p_spq->lock);
if (!found) {
DP_NOTICE(p_hwfn,
"Failed to find an entry this EQE completes\n");
return -EEXIST;
}
DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p)\n",
p_ent->comp_cb.function, p_ent->comp_cb.cookie);
if (found->comp_cb.function)
found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
fw_return_code);
if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
/* EBLOCK is responsible for freeing its own entry */
qed_spq_return_entry(p_hwfn, found);
/* Attempt to post pending requests */
spin_lock_bh(&p_spq->lock);
rc = qed_spq_pend_post(p_hwfn);
spin_unlock_bh(&p_spq->lock);
return rc;
}
struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_consq *p_consq;
/* Allocate ConsQ struct */
p_consq = kzalloc(sizeof(*p_consq), GFP_ATOMIC);
if (!p_consq) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
return NULL;
}
/* Allocate and initialize ConsQ chain */
if (qed_chain_alloc(p_hwfn->cdev,
QED_CHAIN_USE_TO_PRODUCE,
QED_CHAIN_MODE_PBL,
QED_CHAIN_PAGE_SIZE / 0x80,
0x80,
&p_consq->chain)) {
DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
goto consq_allocate_fail;
}
return p_consq;
consq_allocate_fail:
qed_consq_free(p_hwfn, p_consq);
return NULL;
}
void qed_consq_setup(struct qed_hwfn *p_hwfn,
struct qed_consq *p_consq)
{
qed_chain_reset(&p_consq->chain);
}
void qed_consq_free(struct qed_hwfn *p_hwfn,
struct qed_consq *p_consq)
{
if (!p_consq)
return;
qed_chain_free(p_hwfn->cdev, &p_consq->chain);
kfree(p_consq);
}

View File

@ -0,0 +1,607 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef __COMMON_HSI__
#define __COMMON_HSI__
#define FW_MAJOR_VERSION 8
#define FW_MINOR_VERSION 4
#define FW_REVISION_VERSION 2
#define FW_ENGINEERING_VERSION 0
/***********************/
/* COMMON HW CONSTANTS */
/***********************/
/* PCI functions */
#define MAX_NUM_PORTS_K2 (4)
#define MAX_NUM_PORTS_BB (2)
#define MAX_NUM_PORTS (MAX_NUM_PORTS_K2)
#define MAX_NUM_PFS_K2 (16)
#define MAX_NUM_PFS_BB (8)
#define MAX_NUM_PFS (MAX_NUM_PFS_K2)
#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
#define MAX_NUM_VFS_K2 (192)
#define MAX_NUM_VFS_BB (120)
#define MAX_NUM_VFS (MAX_NUM_VFS_K2)
#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS)
#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS)
#define MAX_NUM_VPORTS_K2 (208)
#define MAX_NUM_VPORTS_BB (160)
#define MAX_NUM_VPORTS (MAX_NUM_VPORTS_K2)
#define MAX_NUM_L2_QUEUES_K2 (320)
#define MAX_NUM_L2_QUEUES_BB (256)
#define MAX_NUM_L2_QUEUES (MAX_NUM_L2_QUEUES_K2)
/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
#define NUM_PHYS_TCS_4PORT_K2 (4)
#define NUM_OF_PHYS_TCS (8)
#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1)
#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
#define LB_TC (NUM_OF_PHYS_TCS)
/* Num of possible traffic priority values */
#define NUM_OF_PRIO (8)
#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB)
#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2)
#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
/* CIDs */
#define NUM_OF_CONNECTION_TYPES (8)
#define NUM_OF_LCIDS (320)
#define NUM_OF_LTIDS (320)
/*****************/
/* CDU CONSTANTS */
/*****************/
#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17)
#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff)
/*****************/
/* DQ CONSTANTS */
/*****************/
/* DEMS */
#define DQ_DEMS_LEGACY 0
/* XCM agg val selection */
#define DQ_XCM_AGG_VAL_SEL_WORD2 0
#define DQ_XCM_AGG_VAL_SEL_WORD3 1
#define DQ_XCM_AGG_VAL_SEL_WORD4 2
#define DQ_XCM_AGG_VAL_SEL_WORD5 3
#define DQ_XCM_AGG_VAL_SEL_REG3 4
#define DQ_XCM_AGG_VAL_SEL_REG4 5
#define DQ_XCM_AGG_VAL_SEL_REG5 6
#define DQ_XCM_AGG_VAL_SEL_REG6 7
/* XCM agg val selection */
#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \
DQ_XCM_AGG_VAL_SEL_WORD2
#define DQ_XCM_ETH_TX_BD_CONS_CMD \
DQ_XCM_AGG_VAL_SEL_WORD3
#define DQ_XCM_CORE_TX_BD_CONS_CMD \
DQ_XCM_AGG_VAL_SEL_WORD3
#define DQ_XCM_ETH_TX_BD_PROD_CMD \
DQ_XCM_AGG_VAL_SEL_WORD4
#define DQ_XCM_CORE_TX_BD_PROD_CMD \
DQ_XCM_AGG_VAL_SEL_WORD4
#define DQ_XCM_CORE_SPQ_PROD_CMD \
DQ_XCM_AGG_VAL_SEL_WORD4
#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
/* XCM agg counter flag selection */
#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0
#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1
#define DQ_XCM_AGG_FLG_SHIFT_CF12 2
#define DQ_XCM_AGG_FLG_SHIFT_CF13 3
#define DQ_XCM_AGG_FLG_SHIFT_CF18 4
#define DQ_XCM_AGG_FLG_SHIFT_CF19 5
#define DQ_XCM_AGG_FLG_SHIFT_CF22 6
#define DQ_XCM_AGG_FLG_SHIFT_CF23 7
/* XCM agg counter flag selection */
#define DQ_XCM_ETH_DQ_CF_CMD (1 << \
DQ_XCM_AGG_FLG_SHIFT_CF18)
#define DQ_XCM_CORE_DQ_CF_CMD (1 << \
DQ_XCM_AGG_FLG_SHIFT_CF18)
#define DQ_XCM_ETH_TERMINATE_CMD (1 << \
DQ_XCM_AGG_FLG_SHIFT_CF19)
#define DQ_XCM_CORE_TERMINATE_CMD (1 << \
DQ_XCM_AGG_FLG_SHIFT_CF19)
#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << \
DQ_XCM_AGG_FLG_SHIFT_CF22)
#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << \
DQ_XCM_AGG_FLG_SHIFT_CF22)
#define DQ_XCM_ETH_TPH_EN_CMD (1 << \
DQ_XCM_AGG_FLG_SHIFT_CF23)
/*****************/
/* QM CONSTANTS */
/*****************/
/* number of TX queues in the QM */
#define MAX_QM_TX_QUEUES_K2 512
#define MAX_QM_TX_QUEUES_BB 448
#define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2
/* number of Other queues in the QM */
#define MAX_QM_OTHER_QUEUES_BB 64
#define MAX_QM_OTHER_QUEUES_K2 128
#define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2
/* number of queues in a PF queue group */
#define QM_PF_QUEUE_GROUP_SIZE 8
/* base number of Tx PQs in the CM PQ representation.
* should be used when storing PQ IDs in CM PQ registers and context
*/
#define CM_TX_PQ_BASE 0x200
/* QM registers data */
#define QM_LINE_CRD_REG_WIDTH 16
#define QM_LINE_CRD_REG_SIGN_BIT (1 << (QM_LINE_CRD_REG_WIDTH - 1))
#define QM_BYTE_CRD_REG_WIDTH 24
#define QM_BYTE_CRD_REG_SIGN_BIT (1 << (QM_BYTE_CRD_REG_WIDTH - 1))
#define QM_WFQ_CRD_REG_WIDTH 32
#define QM_WFQ_CRD_REG_SIGN_BIT (1 << (QM_WFQ_CRD_REG_WIDTH - 1))
#define QM_RL_CRD_REG_WIDTH 32
#define QM_RL_CRD_REG_SIGN_BIT (1 << (QM_RL_CRD_REG_WIDTH - 1))
/*****************/
/* CAU CONSTANTS */
/*****************/
#define CAU_FSM_ETH_RX 0
#define CAU_FSM_ETH_TX 1
/* Number of Protocol Indices per Status Block */
#define PIS_PER_SB 12
#define CAU_HC_STOPPED_STATE 3
#define CAU_HC_DISABLE_STATE 4
#define CAU_HC_ENABLE_STATE 0
/*****************/
/* IGU CONSTANTS */
/*****************/
#define MAX_SB_PER_PATH_K2 (368)
#define MAX_SB_PER_PATH_BB (288)
#define MAX_TOT_SB_PER_PATH \
MAX_SB_PER_PATH_K2
#define MAX_SB_PER_PF_MIMD 129
#define MAX_SB_PER_PF_SIMD 64
#define MAX_SB_PER_VF 64
/* Memory addresses on the BAR for the IGU Sub Block */
#define IGU_MEM_BASE 0x0000
#define IGU_MEM_MSIX_BASE 0x0000
#define IGU_MEM_MSIX_UPPER 0x0101
#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff
#define IGU_MEM_PBA_MSIX_BASE 0x0200
#define IGU_MEM_PBA_MSIX_UPPER 0x0202
#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
#define IGU_CMD_INT_ACK_BASE 0x0400
#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
MAX_TOT_SB_PER_PATH - \
1)
#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05f1
#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05f2
#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05f3
#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05f4
#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05f5
#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6
#define IGU_CMD_PROD_UPD_BASE 0x0600
#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\
MAX_TOT_SB_PER_PATH - \
1)
#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff
/*****************/
/* PXP CONSTANTS */
/*****************/
/* PTT and GTT */
#define PXP_NUM_PF_WINDOWS 12
#define PXP_PER_PF_ENTRY_SIZE 8
#define PXP_NUM_GLOBAL_WINDOWS 243
#define PXP_GLOBAL_ENTRY_SIZE 4
#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH 4
#define PXP_PF_WINDOW_ADMIN_START 0
#define PXP_PF_WINDOW_ADMIN_LENGTH 0x1000
#define PXP_PF_WINDOW_ADMIN_END (PXP_PF_WINDOW_ADMIN_START + \
PXP_PF_WINDOW_ADMIN_LENGTH - 1)
#define PXP_PF_WINDOW_ADMIN_PER_PF_START 0
#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH (PXP_NUM_PF_WINDOWS * \
PXP_PER_PF_ENTRY_SIZE)
#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \
PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1)
#define PXP_PF_WINDOW_ADMIN_GLOBAL_START 0x200
#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH (PXP_NUM_GLOBAL_WINDOWS * \
PXP_GLOBAL_ENTRY_SIZE)
#define PXP_PF_WINDOW_ADMIN_GLOBAL_END \
(PXP_PF_WINDOW_ADMIN_GLOBAL_START + \
PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH - 1)
#define PXP_PF_GLOBAL_PRETEND_ADDR 0x1f0
#define PXP_PF_ME_OPAQUE_MASK_ADDR 0xf4
#define PXP_PF_ME_OPAQUE_ADDR 0x1f8
#define PXP_PF_ME_CONCRETE_ADDR 0x1fc
#define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000
#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS
#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000
#define PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH \
(PXP_EXTERNAL_BAR_PF_WINDOW_NUM * \
PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE)
#define PXP_EXTERNAL_BAR_PF_WINDOW_END \
(PXP_EXTERNAL_BAR_PF_WINDOW_START + \
PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH - 1)
#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START \
(PXP_EXTERNAL_BAR_PF_WINDOW_END + 1)
#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM PXP_NUM_GLOBAL_WINDOWS
#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE 0x1000
#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH \
(PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM * \
PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE)
#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_END \
(PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
/* ILT Records */
#define PXP_NUM_ILT_RECORDS_BB 7600
#define PXP_NUM_ILT_RECORDS_K2 11000
#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
/******************/
/* PBF CONSTANTS */
/******************/
/* Number of PBF command queue lines. Each line is 32B. */
#define PBF_MAX_CMD_LINES 3328
/* Number of BTB blocks. Each block is 256B. */
#define BTB_MAX_BLOCKS 1440
/*****************/
/* PRS CONSTANTS */
/*****************/
/* Async data KCQ CQE */
struct async_data {
__le32 cid;
__le16 itid;
u8 error_code;
u8 fw_debug_param;
};
struct regpair {
__le32 lo;
__le32 hi;
};
/* Event Data Union */
union event_ring_data {
u8 bytes[8];
struct async_data async_info;
};
/* Event Ring Entry */
struct event_ring_entry {
u8 protocol_id;
u8 opcode;
__le16 reserved0;
__le16 echo;
u8 fw_return_code;
u8 flags;
#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
union event_ring_data data;
};
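/* The _MASK/_SHIFT pairs throughout this file are consumed via the
 * GET_FIELD()/SET_FIELD() helpers, e.g. qed_spq.c checks
 * GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC) to classify an EQE.
 */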
/* Multi function mode */
enum mf_mode {
SF,
MF_OVLAN,
MF_NPAR,
MAX_MF_MODE
};
/* Per-protocol connection types */
enum protocol_type {
PROTOCOLID_RESERVED1,
PROTOCOLID_RESERVED2,
PROTOCOLID_RESERVED3,
PROTOCOLID_CORE,
PROTOCOLID_ETH,
PROTOCOLID_RESERVED4,
PROTOCOLID_RESERVED5,
PROTOCOLID_PREROCE,
PROTOCOLID_COMMON,
PROTOCOLID_RESERVED6,
MAX_PROTOCOL_TYPE
};
/* status block structure */
struct cau_pi_entry {
u32 prod;
#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF
#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0
#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F
#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1
#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23
#define CAU_PI_ENTRY_RESERVED_MASK 0xFF
#define CAU_PI_ENTRY_RESERVED_SHIFT 24
};
/* status block structure */
struct cau_sb_entry {
u32 data;
#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF
#define CAU_SB_ENTRY_SB_PROD_SHIFT 0
#define CAU_SB_ENTRY_STATE0_MASK 0xF
#define CAU_SB_ENTRY_STATE0_SHIFT 24
#define CAU_SB_ENTRY_STATE1_MASK 0xF
#define CAU_SB_ENTRY_STATE1_SHIFT 28
u32 params;
#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F
#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F
#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3
#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14
#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3
#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16
#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF
#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18
#define CAU_SB_ENTRY_VF_VALID_MASK 0x1
#define CAU_SB_ENTRY_VF_VALID_SHIFT 26
#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF
#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27
#define CAU_SB_ENTRY_TPH_MASK 0x1
#define CAU_SB_ENTRY_TPH_SHIFT 31
};
/* core doorbell data */
struct core_db_data {
u8 params;
#define CORE_DB_DATA_DEST_MASK 0x3
#define CORE_DB_DATA_DEST_SHIFT 0
#define CORE_DB_DATA_AGG_CMD_MASK 0x3
#define CORE_DB_DATA_AGG_CMD_SHIFT 2
#define CORE_DB_DATA_BYPASS_EN_MASK 0x1
#define CORE_DB_DATA_BYPASS_EN_SHIFT 4
#define CORE_DB_DATA_RESERVED_MASK 0x1
#define CORE_DB_DATA_RESERVED_SHIFT 5
#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3
#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
u8 agg_flags;
__le16 spq_prod;
};
/* Enum of doorbell aggregative command selection */
enum db_agg_cmd_sel {
DB_AGG_CMD_NOP,
DB_AGG_CMD_SET,
DB_AGG_CMD_ADD,
DB_AGG_CMD_MAX,
MAX_DB_AGG_CMD_SEL
};
/* Enum of doorbell destination */
enum db_dest {
DB_DEST_XCM,
DB_DEST_UCM,
DB_DEST_TCM,
DB_NUM_DESTINATIONS,
MAX_DB_DEST
};
/* Structure for doorbell address, in legacy mode */
struct db_legacy_addr {
__le32 addr;
#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3
#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
#define DB_LEGACY_ADDR_DEMS_MASK 0x7
#define DB_LEGACY_ADDR_DEMS_SHIFT 2
#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF
#define DB_LEGACY_ADDR_ICID_SHIFT 5
};
/* Igu interrupt command */
enum igu_int_cmd {
IGU_INT_ENABLE = 0,
IGU_INT_DISABLE = 1,
IGU_INT_NOP = 2,
IGU_INT_NOP2 = 3,
MAX_IGU_INT_CMD
};
/* IGU producer or consumer update command */
struct igu_prod_cons_update {
u32 sb_id_and_flags;
#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF
#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0
#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1
#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24
#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3
#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25
#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1
#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1
#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28
#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3
#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29
#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1
#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31
u32 reserved1;
};
/* Igu segments access for default status block only */
enum igu_seg_access {
IGU_SEG_ACCESS_REG = 0,
IGU_SEG_ACCESS_ATTN = 1,
MAX_IGU_SEG_ACCESS
};
struct parsing_and_err_flags {
__le16 flags;
#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3
#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0
#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3
#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2
#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1
#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4
#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5
#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1
#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6
#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7
#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8
#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1
#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9
#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1
#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10
#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11
#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12
#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
};
/* Concrete Function ID. */
struct pxp_concrete_fid {
__le16 fid;
#define PXP_CONCRETE_FID_PFID_MASK 0xF
#define PXP_CONCRETE_FID_PFID_SHIFT 0
#define PXP_CONCRETE_FID_PORT_MASK 0x3
#define PXP_CONCRETE_FID_PORT_SHIFT 4
#define PXP_CONCRETE_FID_PATH_MASK 0x1
#define PXP_CONCRETE_FID_PATH_SHIFT 6
#define PXP_CONCRETE_FID_VFVALID_MASK 0x1
#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
#define PXP_CONCRETE_FID_VFID_MASK 0xFF
#define PXP_CONCRETE_FID_VFID_SHIFT 8
};
struct pxp_pretend_concrete_fid {
__le16 fid;
#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF
#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0
#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7
#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1
#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7
#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF
#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8
};
union pxp_pretend_fid {
struct pxp_pretend_concrete_fid concrete_fid;
__le16 opaque_fid;
};
/* Pxp Pretend Command Register. */
struct pxp_pretend_cmd {
union pxp_pretend_fid fid;
__le16 control;
#define PXP_PRETEND_CMD_PATH_MASK 0x1
#define PXP_PRETEND_CMD_PATH_SHIFT 0
#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1
#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1
#define PXP_PRETEND_CMD_PORT_MASK 0x3
#define PXP_PRETEND_CMD_PORT_SHIFT 2
#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF
#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4
#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF
#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8
#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1
#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12
#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1
#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13
#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1
#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1
#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15
};
/* PTT Record in PXP Admin Window. */
struct pxp_ptt_entry {
__le32 offset;
#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF
#define PXP_PTT_ENTRY_OFFSET_SHIFT 0
#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF
#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
struct pxp_pretend_cmd pretend;
};
/* RSS hash type */
enum rss_hash_type {
RSS_HASH_TYPE_DEFAULT = 0,
RSS_HASH_TYPE_IPV4 = 1,
RSS_HASH_TYPE_TCP_IPV4 = 2,
RSS_HASH_TYPE_IPV6 = 3,
RSS_HASH_TYPE_TCP_IPV6 = 4,
RSS_HASH_TYPE_UDP_IPV4 = 5,
RSS_HASH_TYPE_UDP_IPV6 = 6,
MAX_RSS_HASH_TYPE
};
/* status block structure */
struct status_block {
__le16 pi_array[PIS_PER_SB];
__le32 sb_num;
#define STATUS_BLOCK_SB_NUM_MASK 0x1FF
#define STATUS_BLOCK_SB_NUM_SHIFT 0
#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F
#define STATUS_BLOCK_ZERO_PAD_SHIFT 9
#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF
#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16
__le32 prod_index;
#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF
#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF
#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
};
#endif /* __COMMON_HSI__ */

View File

@ -0,0 +1,539 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_CHAIN_H
#define _QED_CHAIN_H
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
/* dma_addr_t manip */
#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t)
#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
#define HILO_DMA_REGPAIR(regpair) (HILO_DMA(regpair.hi, regpair.lo))
#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
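/* Example (illustrative): a regpair filled with DMA_LO_LE()/DMA_HI_LE(),
 *
 *	r.lo = DMA_LO_LE(phys);
 *	r.hi = DMA_HI_LE(phys);
 *
 * is folded back into the original 64-bit value by HILO_64_REGPAIR(r).
 */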
enum qed_chain_mode {
/* Each Page contains a next pointer at its end */
QED_CHAIN_MODE_NEXT_PTR,
/* Chain is a single page; a next pointer is not required */
QED_CHAIN_MODE_SINGLE,
/* Page pointers are located in a side list */
QED_CHAIN_MODE_PBL,
};
enum qed_chain_use_mode {
QED_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */
QED_CHAIN_USE_TO_CONSUME, /* Chain starts full */
QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */
};
struct qed_chain_next {
struct regpair next_phys;
void *next_virt;
};
struct qed_chain_pbl {
dma_addr_t p_phys_table;
void *p_virt_table;
u16 prod_page_idx;
u16 cons_page_idx;
};
struct qed_chain {
void *p_virt_addr;
dma_addr_t p_phys_addr;
void *p_prod_elem;
void *p_cons_elem;
u16 page_cnt;
enum qed_chain_mode mode;
enum qed_chain_use_mode intended_use; /* used to produce/consume */
u16 capacity; /*< number of _usable_ elements */
u16 size; /* number of elements */
u16 prod_idx;
u16 cons_idx;
u16 elem_per_page;
u16 elem_per_page_mask;
u16 elem_unusable;
u16 usable_per_page;
u16 elem_size;
u16 next_page_mask;
struct qed_chain_pbl pbl;
};
#define QED_CHAIN_PBL_ENTRY_SIZE (8)
#define QED_CHAIN_PAGE_SIZE (0x1000)
#define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size))
#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
((mode == QED_CHAIN_MODE_NEXT_PTR) ? \
(1 + ((sizeof(struct qed_chain_next) - 1) / \
(elem_size))) : 0)
#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
((u32)(ELEMS_PER_PAGE(elem_size) - \
UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
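/* Worked example (illustrative): for 64-byte elements in NEXT_PTR mode,
 * ELEMS_PER_PAGE(64) = 4096 / 64 = 64; one trailing element per page is
 * reserved for struct qed_chain_next, so USABLE_ELEMS_PER_PAGE = 63 and
 * QED_CHAIN_PAGE_CNT(1000, 64, NEXT_PTR) = DIV_ROUND_UP(1000, 63) = 16.
 */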
/* Accessors */
static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
{
return p_chain->prod_idx;
}
static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
{
return p_chain->cons_idx;
}
static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
{
u16 used;
/* we don't need to truncate upon assignment, as we assign u32->u16 */
used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) -
(u32)p_chain->cons_idx;
if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
used -= (used / p_chain->elem_per_page);
return p_chain->capacity - used;
}
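/* Worked example (illustrative): with capacity 100, prod_idx 5 and a wrapped
 * cons_idx of 0xfffe, used = (0x10000 + 5) - 0xfffe = 7, leaving 93 elements.
 */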
static inline u8 qed_chain_is_full(struct qed_chain *p_chain)
{
return qed_chain_get_elem_left(p_chain) == p_chain->capacity;
}
static inline u8 qed_chain_is_empty(struct qed_chain *p_chain)
{
return qed_chain_get_elem_left(p_chain) == 0;
}
static inline u16 qed_chain_get_elem_per_page(
struct qed_chain *p_chain)
{
return p_chain->elem_per_page;
}
static inline u16 qed_chain_get_usable_per_page(
struct qed_chain *p_chain)
{
return p_chain->usable_per_page;
}
static inline u16 qed_chain_get_unusable_per_page(
struct qed_chain *p_chain)
{
return p_chain->elem_unusable;
}
static inline u16 qed_chain_get_size(struct qed_chain *p_chain)
{
return p_chain->size;
}
static inline dma_addr_t
qed_chain_get_pbl_phys(struct qed_chain *p_chain)
{
return p_chain->pbl.p_phys_table;
}
/**
* @brief qed_chain_advance_page -
*
* Advance the next element across pages for a linked chain
*
* @param p_chain
* @param p_next_elem
* @param idx_to_inc
* @param page_to_inc
*/
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
void **p_next_elem,
u16 *idx_to_inc,
u16 *page_to_inc)
{
switch (p_chain->mode) {
case QED_CHAIN_MODE_NEXT_PTR:
{
struct qed_chain_next *p_next = *p_next_elem;
*p_next_elem = p_next->next_virt;
*idx_to_inc += p_chain->elem_unusable;
break;
}
case QED_CHAIN_MODE_SINGLE:
*p_next_elem = p_chain->p_virt_addr;
break;
case QED_CHAIN_MODE_PBL:
/* It is assumed pages are sequential; the next element needs
* to change only when wrapping back to the first page from the last.
*/
if (++(*page_to_inc) == p_chain->page_cnt) {
*page_to_inc = 0;
*p_next_elem = p_chain->p_virt_addr;
}
}
}
#define is_unusable_idx(p, idx) \
(((p)->idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
#define is_unusable_next_idx(p, idx) \
((((p)->idx + 1) & (p)->elem_per_page_mask) == (p)->usable_per_page)
#define test_ans_skip(p, idx) \
do { \
if (is_unusable_idx(p, idx)) { \
(p)->idx += (p)->elem_unusable; \
} \
} while (0)
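/* Illustrative: with 63 usable elements per page (elem_per_page_mask 63,
 * elem_unusable 1), once an index reaches slot 63 of a page test_ans_skip()
 * bumps it past the per-page next-pointer element onto the following page.
 */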
/**
* @brief qed_chain_return_multi_produced -
*
* A chain in which the driver "Produces" elements should use this API
* to indicate previous produced elements are now consumed.
*
* @param p_chain
* @param num
*/
static inline void
qed_chain_return_multi_produced(struct qed_chain *p_chain,
u16 num)
{
p_chain->cons_idx += num;
test_ans_skip(p_chain, cons_idx);
}
/**
* @brief qed_chain_return_produced -
*
* A chain in which the driver "Produces" elements should use this API
* to indicate previous produced elements are now consumed.
*
* @param p_chain
*/
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
p_chain->cons_idx++;
test_ans_skip(p_chain, cons_idx);
}
/**
* @brief qed_chain_produce -
*
* A chain in which the driver "Produces" elements should use this to get
* a pointer to the next element which can be "Produced". It's the driver's
* responsibility to validate that the chain has room for a new element.
*
* @param p_chain
*
* @return void*, a pointer to next element
*/
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
void *ret = NULL;
if ((p_chain->prod_idx & p_chain->elem_per_page_mask) ==
p_chain->next_page_mask) {
qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
&p_chain->prod_idx,
&p_chain->pbl.prod_page_idx);
}
ret = p_chain->p_prod_elem;
p_chain->prod_idx++;
p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
p_chain->elem_size);
return ret;
}
/**
* @brief qed_chain_get_capacity -
*
* Get the maximum number of BDs in chain
*
* @param p_chain
*
* @return u16, the maximum number of BDs in the chain (its capacity)
*/
static inline u16 qed_chain_get_capacity(struct qed_chain *p_chain)
{
return p_chain->capacity;
}
/**
* @brief qed_chain_recycle_consumed -
*
* Returns an element which was previously consumed;
* Increments producers so they could be written to FW.
*
* @param p_chain
*/
static inline void
qed_chain_recycle_consumed(struct qed_chain *p_chain)
{
test_ans_skip(p_chain, prod_idx);
p_chain->prod_idx++;
}
/**
* @brief qed_chain_consume -
*
* A chain in which the driver consumes data written by a different source
* (e.g., the FW) should use this to access the passed buffers.
*
* @param p_chain
*
* @return void*, a pointer to the next buffer written
*/
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
void *ret = NULL;
if ((p_chain->cons_idx & p_chain->elem_per_page_mask) ==
p_chain->next_page_mask) {
qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
&p_chain->cons_idx,
&p_chain->pbl.cons_page_idx);
}
ret = p_chain->p_cons_elem;
p_chain->cons_idx++;
p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
p_chain->elem_size);
return ret;
}
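/* Usage sketch (illustrative only): the consume / recycle_consumed pairing
 * for a chain the FW writes and the driver reads, e.g. a completion ring.
 * handle_elem() is a hypothetical per-element handler supplied by the caller.
 */
static inline void qed_chain_example_consume(struct qed_chain *p_chain,
					     u16 num,
					     void (*handle_elem)(void *elem))
{
	u16 i;

	for (i = 0; i < num; i++)
		handle_elem(qed_chain_consume(p_chain));

	/* Hand the slots back by advancing the producer for the FW */
	for (i = 0; i < num; i++)
		qed_chain_recycle_consumed(p_chain);
}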
/**
* @brief qed_chain_reset - Resets the chain to its start state
*
* @param p_chain pointer to a previously allocated chain
*/
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
int i;
p_chain->prod_idx = 0;
p_chain->cons_idx = 0;
p_chain->p_cons_elem = p_chain->p_virt_addr;
p_chain->p_prod_elem = p_chain->p_virt_addr;
if (p_chain->mode == QED_CHAIN_MODE_PBL) {
p_chain->pbl.prod_page_idx = p_chain->page_cnt - 1;
p_chain->pbl.cons_page_idx = p_chain->page_cnt - 1;
}
switch (p_chain->intended_use) {
case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
case QED_CHAIN_USE_TO_PRODUCE:
/* Do nothing */
break;
case QED_CHAIN_USE_TO_CONSUME:
/* produce empty elements */
for (i = 0; i < p_chain->capacity; i++)
qed_chain_recycle_consumed(p_chain);
break;
}
}
/**
* @brief qed_chain_init - Initializes a basic chain struct
*
* @param p_chain
* @param p_virt_addr virtual address of allocated buffer's beginning
* @param p_phys_addr physical address of allocated buffer's beginning
* @param page_cnt number of pages in the allocated buffer
* @param elem_size size of each element in the chain
* @param intended_use
* @param mode
*/
static inline void qed_chain_init(struct qed_chain *p_chain,
void *p_virt_addr,
dma_addr_t p_phys_addr,
u16 page_cnt,
u8 elem_size,
enum qed_chain_use_mode intended_use,
enum qed_chain_mode mode)
{
/* chain fixed parameters */
p_chain->p_virt_addr = p_virt_addr;
p_chain->p_phys_addr = p_phys_addr;
p_chain->elem_size = elem_size;
p_chain->page_cnt = page_cnt;
p_chain->mode = mode;
p_chain->intended_use = intended_use;
p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
p_chain->usable_per_page =
USABLE_ELEMS_PER_PAGE(elem_size, mode);
p_chain->capacity = p_chain->usable_per_page * page_cnt;
p_chain->size = p_chain->elem_per_page * page_cnt;
p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
p_chain->next_page_mask = (p_chain->usable_per_page &
p_chain->elem_per_page_mask);
if (mode == QED_CHAIN_MODE_NEXT_PTR) {
struct qed_chain_next *p_next;
u16 i;
for (i = 0; i < page_cnt - 1; i++) {
/* Advance the physical address to the next page. */
p_phys_addr += QED_CHAIN_PAGE_SIZE;
/* Initialize the physical address of the next page. */
p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
elem_size *
p_chain->
usable_per_page);
p_next->next_phys.lo = DMA_LO_LE(p_phys_addr);
p_next->next_phys.hi = DMA_HI_LE(p_phys_addr);
/* Initialize the virtual address of the next page. */
p_next->next_virt = (void *)((u8 *)p_virt_addr +
QED_CHAIN_PAGE_SIZE);
/* Move to the next page. */
p_virt_addr = p_next->next_virt;
}
/* Last page's next should point to beginning of the chain */
p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
elem_size *
p_chain->usable_per_page);
p_next->next_phys.lo = DMA_LO_LE(p_chain->p_phys_addr);
p_next->next_phys.hi = DMA_HI_LE(p_chain->p_phys_addr);
p_next->next_virt = p_chain->p_virt_addr;
}
qed_chain_reset(p_chain);
}
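/* Allocation sketch (illustrative only): a single-page chain of 8-byte
 * elements.  Assumes the caller has <linux/dma-mapping.h> and a valid
 * struct device; error handling is reduced to the bare minimum.
 */
static inline int qed_chain_example_alloc_single(struct device *dev,
						 struct qed_chain *p_chain)
{
	dma_addr_t phys;
	void *virt;

	virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	qed_chain_init(p_chain, virt, phys, 1 /* page_cnt */,
		       8 /* elem_size */, QED_CHAIN_USE_TO_CONSUME_PRODUCE,
		       QED_CHAIN_MODE_SINGLE);

	return 0;
}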
/**
* @brief qed_chain_pbl_init - Initializes a basic pbl chain
* struct
* @param p_chain
* @param p_virt_addr virtual address of allocated buffer's beginning
* @param p_phys_addr physical address of allocated buffer's beginning
* @param page_cnt number of pages in the allocated buffer
* @param elem_size size of each element in the chain
* @param use_mode
* @param p_phys_pbl pointer to a pre-allocated side table
* which will hold physical page addresses.
* @param p_virt_pbl pointer to a pre-allocated side table
* which will hold virtual page addresses.
*/
static inline void
qed_chain_pbl_init(struct qed_chain *p_chain,
void *p_virt_addr,
dma_addr_t p_phys_addr,
u16 page_cnt,
u8 elem_size,
enum qed_chain_use_mode use_mode,
dma_addr_t p_phys_pbl,
dma_addr_t *p_virt_pbl)
{
dma_addr_t *p_pbl_dma = p_virt_pbl;
int i;
qed_chain_init(p_chain, p_virt_addr, p_phys_addr, page_cnt,
elem_size, use_mode, QED_CHAIN_MODE_PBL);
p_chain->pbl.p_phys_table = p_phys_pbl;
p_chain->pbl.p_virt_table = p_virt_pbl;
/* Fill the PBL with physical addresses */
for (i = 0; i < page_cnt; i++) {
*p_pbl_dma = p_phys_addr;
p_phys_addr += QED_CHAIN_PAGE_SIZE;
p_pbl_dma++;
}
}
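/* Allocation sketch (illustrative only) for a PBL chain: one contiguous
 * element buffer plus a side table holding the per-page physical addresses.
 * Assumes <linux/dma-mapping.h>; no error unwinding is shown.
 */
static inline int qed_chain_example_alloc_pbl(struct device *dev,
					      struct qed_chain *p_chain,
					      u16 page_cnt, u8 elem_size)
{
	dma_addr_t phys, pbl_phys;
	void *virt, *pbl_virt;

	virt = dma_alloc_coherent(dev, (size_t)page_cnt * QED_CHAIN_PAGE_SIZE,
				  &phys, GFP_KERNEL);
	pbl_virt = dma_alloc_coherent(dev, page_cnt * sizeof(dma_addr_t),
				      &pbl_phys, GFP_KERNEL);
	if (!virt || !pbl_virt)
		return -ENOMEM;	/* a real caller would free what did succeed */

	qed_chain_pbl_init(p_chain, virt, phys, page_cnt, elem_size,
			   QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			   pbl_phys, pbl_virt);

	return 0;
}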
/**
* @brief qed_chain_set_prod - sets the producer index and element
* to the given values
*
* @param p_chain
* @param prod_idx
* @param p_prod_elem
*/
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
u16 prod_idx,
void *p_prod_elem)
{
p_chain->prod_idx = prod_idx;
p_chain->p_prod_elem = p_prod_elem;
}
/**
* @brief qed_chain_sge_get_elem -
*
* Get a pointer to the element represented by the absolute idx
*
* @param p_chain
* @param idx
* @assumption p_chain->size is a power of 2
*
* @return void*, a pointer to the requested element, or NULL if idx is out of range
*/
static inline void *qed_chain_sge_get_elem(struct qed_chain *p_chain,
u16 idx)
{
void *ret = NULL;
if (idx >= p_chain->size)
return NULL;
ret = (u8 *)p_chain->p_virt_addr + p_chain->elem_size * idx;
return ret;
}
/**
* @brief qed_chain_sge_inc_cons_prod
*
* For SGE chains the producer isn't advanced serially; the ring
* is expected to be full at all times. Once elements are
* consumed, they are immediately produced again.
*
* @param p_chain
* @param cnt
*
* @return inline void
*/
static inline void
qed_chain_sge_inc_cons_prod(struct qed_chain *p_chain,
u16 cnt)
{
p_chain->prod_idx += cnt;
p_chain->cons_idx += cnt;
}
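/* Usage sketch (illustrative only): an SGE ring that is kept full.  Each
 * consumed element is refilled in place and the indices are then advanced
 * in lock-step.  refill_sge() is a hypothetical helper; the masking relies
 * on the power-of-2 size assumption stated above.
 */
static inline void qed_chain_example_sge_refill(struct qed_chain *p_chain,
						u16 num,
						void (*refill_sge)(void *sge))
{
	u16 i;

	for (i = 0; i < num; i++) {
		u16 idx = (p_chain->cons_idx + i) & (p_chain->size - 1);

		refill_sge(qed_chain_sge_get_elem(p_chain, idx));
	}

	qed_chain_sge_inc_cons_prod(p_chain, num);
}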
#endif

498
include/linux/qed/qed_if.h Normal file
View File

@ -0,0 +1,498 @@
/* QLogic qed NIC Driver
*
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_IF_H
#define _QED_IF_H
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/qed_chain.h>
#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
(void __iomem *)(reg_addr))
#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
#define QED_COALESCE_MAX 0xFF
/* forward */
struct qed_dev;
struct qed_eth_pf_params {
/* The following parameters are used during HW-init
* and need to be passed as arguments to the update_pf_params
* routine, which is invoked before slowpath start
*/
u16 num_cons;
};
struct qed_pf_params {
struct qed_eth_pf_params eth_pf_params;
};
enum qed_int_mode {
QED_INT_MODE_INTA,
QED_INT_MODE_MSIX,
QED_INT_MODE_MSI,
QED_INT_MODE_POLL,
};
struct qed_sb_info {
struct status_block *sb_virt;
dma_addr_t sb_phys;
u32 sb_ack; /* Last given ack */
u16 igu_sb_id;
void __iomem *igu_addr;
u8 flags;
#define QED_SB_INFO_INIT 0x1
#define QED_SB_INFO_SETUP 0x2
struct qed_dev *cdev;
};
struct qed_dev_info {
unsigned long pci_mem_start;
unsigned long pci_mem_end;
unsigned int pci_irq;
u8 num_hwfns;
u8 hw_mac[ETH_ALEN];
bool is_mf;
/* FW version */
u16 fw_major;
u16 fw_minor;
u16 fw_rev;
u16 fw_eng;
/* MFW version */
u32 mfw_rev;
u32 flash_size;
u8 mf_mode;
};
enum qed_sb_type {
QED_SB_TYPE_L2_QUEUE,
};
enum qed_protocol {
QED_PROTOCOL_ETH,
};
struct qed_link_params {
bool link_up;
#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0)
#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1)
#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2)
#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3)
u32 override_flags;
bool autoneg;
u32 adv_speeds;
u32 forced_speed;
#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0)
#define QED_LINK_PAUSE_RX_ENABLE BIT(1)
#define QED_LINK_PAUSE_TX_ENABLE BIT(2)
u32 pause_config;
};
struct qed_link_output {
bool link_up;
u32 supported_caps; /* In SUPPORTED defs */
u32 advertised_caps; /* In ADVERTISED defs */
u32 lp_caps; /* In ADVERTISED defs */
u32 speed; /* In Mb/s */
u8 duplex; /* In DUPLEX defs */
u8 port; /* In PORT defs */
bool autoneg;
u32 pause_config;
};
#define QED_DRV_VER_STR_SIZE 12
struct qed_slowpath_params {
u32 int_mode;
u8 drv_major;
u8 drv_minor;
u8 drv_rev;
u8 drv_eng;
u8 name[QED_DRV_VER_STR_SIZE];
};
#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */
struct qed_int_info {
struct msix_entry *msix;
u8 msix_cnt;
/* This should be updated by the protocol driver */
u8 used_cnt;
};
struct qed_common_cb_ops {
void (*link_update)(void *dev,
struct qed_link_output *link);
};
struct qed_common_ops {
struct qed_dev* (*probe)(struct pci_dev *dev,
enum qed_protocol protocol,
u32 dp_module, u8 dp_level);
void (*remove)(struct qed_dev *cdev);
int (*set_power_state)(struct qed_dev *cdev,
pci_power_t state);
void (*set_id)(struct qed_dev *cdev,
char name[],
char ver_str[]);
/* Client drivers need to make this call before slowpath_start.
* The PF params required before slowpath_start are documented
* within the qed_pf_params structure definition; see the bring-up
* sketch following this structure.
*/
void (*update_pf_params)(struct qed_dev *cdev,
struct qed_pf_params *params);
int (*slowpath_start)(struct qed_dev *cdev,
struct qed_slowpath_params *params);
int (*slowpath_stop)(struct qed_dev *cdev);
/* Requests to use `cnt' interrupts for fastpath.
* Upon success, returns the number of interrupts allocated for fastpath.
*/
int (*set_fp_int)(struct qed_dev *cdev,
u16 cnt);
/* Fills `info' with pointers required for utilizing interrupts */
int (*get_fp_int)(struct qed_dev *cdev,
struct qed_int_info *info);
u32 (*sb_init)(struct qed_dev *cdev,
struct qed_sb_info *sb_info,
void *sb_virt_addr,
dma_addr_t sb_phy_addr,
u16 sb_id,
enum qed_sb_type type);
u32 (*sb_release)(struct qed_dev *cdev,
struct qed_sb_info *sb_info,
u16 sb_id);
void (*simd_handler_config)(struct qed_dev *cdev,
void *token,
int index,
void (*handler)(void *));
void (*simd_handler_clean)(struct qed_dev *cdev,
int index);
/**
* @brief set_link - set links according to params
*
* @param cdev
* @param params - values used to override the default link configuration
*
* @return 0 on success, error otherwise.
*/
int (*set_link)(struct qed_dev *cdev,
struct qed_link_params *params);
/**
* @brief get_link - returns the current link state.
*
* @param cdev
* @param if_link - structure to be filled with current link configuration.
*/
void (*get_link)(struct qed_dev *cdev,
struct qed_link_output *if_link);
/**
* @brief drain - drains the chip in case Tx completions fail to arrive due to pause.
*
* @param cdev
*/
int (*drain)(struct qed_dev *cdev);
/**
* @brief update_msglvl - update module debug level
*
* @param cdev
* @param dp_module
* @param dp_level
*/
void (*update_msglvl)(struct qed_dev *cdev,
u32 dp_module,
u8 dp_level);
int (*chain_alloc)(struct qed_dev *cdev,
enum qed_chain_use_mode intended_use,
enum qed_chain_mode mode,
u16 num_elems,
size_t elem_size,
struct qed_chain *p_chain);
void (*chain_free)(struct qed_dev *cdev,
struct qed_chain *p_chain);
};
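/* Bring-up sketch (illustrative only): the order in which a protocol driver
 * would typically exercise these callbacks.  How the driver obtains the ops
 * pointer and the exact parameter values are assumptions, not part of this
 * header.
 */
static inline struct qed_dev *
qed_example_bringup(const struct qed_common_ops *ops, struct pci_dev *pdev)
{
	struct qed_slowpath_params sp_params = { 0 };
	struct qed_pf_params pf_params = { 0 };
	struct qed_dev *cdev;

	cdev = ops->probe(pdev, QED_PROTOCOL_ETH, 0 /* dp_module */,
			  0 /* dp_level */);
	if (!cdev)
		return NULL;

	/* Must precede slowpath_start, as documented above */
	pf_params.eth_pf_params.num_cons = 64;	/* arbitrary example value */
	ops->update_pf_params(cdev, &pf_params);

	sp_params.int_mode = QED_INT_MODE_MSIX;	/* version fields omitted */
	if (ops->slowpath_start(cdev, &sp_params)) {
		ops->remove(cdev);
		return NULL;
	}

	return cdev;
}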
/**
* @brief qed_get_protocol_version
*
* @param protocol
*
* @return version supported by qed for given protocol driver
*/
u32 qed_get_protocol_version(enum qed_protocol protocol);
#define MASK_FIELD(_name, _value) \
((_value) &= (_name ## _MASK))
#define FIELD_VALUE(_name, _value) \
((_value & _name ## _MASK) << _name ## _SHIFT)
#define SET_FIELD(value, name, flag) \
do { \
(value) &= ~(name ## _MASK << name ## _SHIFT); \
(value) |= (((u64)flag) << (name ## _SHIFT)); \
} while (0)
#define GET_FIELD(value, name) \
(((value) >> (name ## _SHIFT)) & name ## _MASK)
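/* Usage sketch for the field helpers above.  The EXAMPLE_FLAGS_VLAN_*
 * definitions are hypothetical and only illustrate the MASK/SHIFT naming
 * convention the macros rely on.
 */
#define EXAMPLE_FLAGS_VLAN_MASK		0x1
#define EXAMPLE_FLAGS_VLAN_SHIFT	3

static inline u32 qed_example_field_usage(u32 flags)
{
	SET_FIELD(flags, EXAMPLE_FLAGS_VLAN, 1);	/* sets bit 3 */

	return GET_FIELD(flags, EXAMPLE_FLAGS_VLAN);	/* reads it back: 1 */
}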
/* Debug print definitions */
#define DP_ERR(cdev, fmt, ...) \
pr_err("[%s:%d(%s)]" fmt, \
__func__, __LINE__, \
DP_NAME(cdev) ? DP_NAME(cdev) : "", \
## __VA_ARGS__)
#define DP_NOTICE(cdev, fmt, ...) \
do { \
if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \
pr_notice("[%s:%d(%s)]" fmt, \
__func__, __LINE__, \
DP_NAME(cdev) ? DP_NAME(cdev) : "", \
## __VA_ARGS__); \
\
} \
} while (0)
#define DP_INFO(cdev, fmt, ...) \
do { \
if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) { \
pr_notice("[%s:%d(%s)]" fmt, \
__func__, __LINE__, \
DP_NAME(cdev) ? DP_NAME(cdev) : "", \
## __VA_ARGS__); \
} \
} while (0)
#define DP_VERBOSE(cdev, module, fmt, ...) \
do { \
if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) && \
((cdev)->dp_module & module))) { \
pr_notice("[%s:%d(%s)]" fmt, \
__func__, __LINE__, \
DP_NAME(cdev) ? DP_NAME(cdev) : "", \
## __VA_ARGS__); \
} \
} while (0)
enum DP_LEVEL {
QED_LEVEL_VERBOSE = 0x0,
QED_LEVEL_INFO = 0x1,
QED_LEVEL_NOTICE = 0x2,
QED_LEVEL_ERR = 0x3,
};
#define QED_LOG_LEVEL_SHIFT (30)
#define QED_LOG_VERBOSE_MASK (0x3fffffff)
#define QED_LOG_INFO_MASK (0x40000000)
#define QED_LOG_NOTICE_MASK (0x80000000)
enum DP_MODULE {
QED_MSG_SPQ = 0x10000,
QED_MSG_STATS = 0x20000,
QED_MSG_DCB = 0x40000,
QED_MSG_IOV = 0x80000,
QED_MSG_SP = 0x100000,
QED_MSG_STORAGE = 0x200000,
QED_MSG_CXT = 0x800000,
QED_MSG_ILT = 0x2000000,
QED_MSG_ROCE = 0x4000000,
QED_MSG_DEBUG = 0x8000000,
/* to be added...up to 0x8000000 */
};
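/* Sketch (illustrative only): composing the debug knobs handed to the
 * probe() callback above - a bitmask of DP_MODULE values plus a single
 * DP_LEVEL threshold.
 */
static inline void qed_example_debug_params(u32 *p_dp_module, u8 *p_dp_level)
{
	*p_dp_module = QED_MSG_SPQ | QED_MSG_SP;	/* modules of interest */
	*p_dp_level = QED_LEVEL_VERBOSE;		/* most verbose level */
}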
struct qed_eth_stats {
u64 no_buff_discards;
u64 packet_too_big_discard;
u64 ttl0_discard;
u64 rx_ucast_bytes;
u64 rx_mcast_bytes;
u64 rx_bcast_bytes;
u64 rx_ucast_pkts;
u64 rx_mcast_pkts;
u64 rx_bcast_pkts;
u64 mftag_filter_discards;
u64 mac_filter_discards;
u64 tx_ucast_bytes;
u64 tx_mcast_bytes;
u64 tx_bcast_bytes;
u64 tx_ucast_pkts;
u64 tx_mcast_pkts;
u64 tx_bcast_pkts;
u64 tx_err_drop_pkts;
u64 tpa_coalesced_pkts;
u64 tpa_coalesced_events;
u64 tpa_aborts_num;
u64 tpa_not_coalesced_pkts;
u64 tpa_coalesced_bytes;
/* port */
u64 rx_64_byte_packets;
u64 rx_127_byte_packets;
u64 rx_255_byte_packets;
u64 rx_511_byte_packets;
u64 rx_1023_byte_packets;
u64 rx_1518_byte_packets;
u64 rx_1522_byte_packets;
u64 rx_2047_byte_packets;
u64 rx_4095_byte_packets;
u64 rx_9216_byte_packets;
u64 rx_16383_byte_packets;
u64 rx_crc_errors;
u64 rx_mac_crtl_frames;
u64 rx_pause_frames;
u64 rx_pfc_frames;
u64 rx_align_errors;
u64 rx_carrier_errors;
u64 rx_oversize_packets;
u64 rx_jabbers;
u64 rx_undersize_packets;
u64 rx_fragments;
u64 tx_64_byte_packets;
u64 tx_65_to_127_byte_packets;
u64 tx_128_to_255_byte_packets;
u64 tx_256_to_511_byte_packets;
u64 tx_512_to_1023_byte_packets;
u64 tx_1024_to_1518_byte_packets;
u64 tx_1519_to_2047_byte_packets;
u64 tx_2048_to_4095_byte_packets;
u64 tx_4096_to_9216_byte_packets;
u64 tx_9217_to_16383_byte_packets;
u64 tx_pause_frames;
u64 tx_pfc_frames;
u64 tx_lpi_entry_count;
u64 tx_total_collisions;
u64 brb_truncates;
u64 brb_discards;
u64 rx_mac_bytes;
u64 rx_mac_uc_packets;
u64 rx_mac_mc_packets;
u64 rx_mac_bc_packets;
u64 rx_mac_frames_ok;
u64 tx_mac_bytes;
u64 tx_mac_uc_packets;
u64 tx_mac_mc_packets;
u64 tx_mac_bc_packets;
u64 tx_mac_ctrl_frames;
};
#define QED_SB_IDX 0x0002
#define RX_PI 0
#define TX_PI(tc) (RX_PI + 1 + tc)
static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
{
u32 prod = 0;
u16 rc = 0;
prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
STATUS_BLOCK_PROD_INDEX_MASK;
if (sb_info->sb_ack != prod) {
sb_info->sb_ack = prod;
rc |= QED_SB_IDX;
}
/* Let SB update */
mmiowb();
return rc;
}
/**
*
* @brief This function creates an update command for interrupts that is
* written to the IGU.
*
* @param sb_info - This is the structure allocated and
* initialized per status block. Assumption is
* that it was initialized using qed_sb_init
* @param int_cmd - Enable/Disable/Nop
* @param upd_flg - whether igu consumer should be
* updated.
*
* @return inline void
*/
static inline void qed_sb_ack(struct qed_sb_info *sb_info,
enum igu_int_cmd int_cmd,
u8 upd_flg)
{
struct igu_prod_cons_update igu_ack = { 0 };
igu_ack.sb_id_and_flags =
((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
(upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
(int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
(IGU_SEG_ACCESS_REG <<
IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags);
/* Both segments (interrupts & acks) are written to the same address;
* need to guarantee all commands will be received (in-order) by HW.
*/
mmiowb();
barrier();
}
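/* Interrupt-handling sketch (illustrative only): the typical fastpath
 * sequence is to latch the new status-block producer and then ack the IGU,
 * re-enabling the interrupt.  IGU_INT_ENABLE is assumed to come from
 * common_hsi.h.
 */
static inline u16 qed_example_sb_handle(struct qed_sb_info *sb_info)
{
	u16 rc = qed_sb_update_sb_idx(sb_info);

	/* ... process the rings indicated by this status block ... */

	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1 /* update IGU consumer */);

	return rc;
}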
static inline void __internal_ram_wr(void *p_hwfn,
void __iomem *addr,
int size,
u32 *data)
{
unsigned int i;
for (i = 0; i < size / sizeof(*data); i++)
DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]);
}
static inline void internal_ram_wr(void __iomem *addr,
int size,
u32 *data)
{
__internal_ram_wr(NULL, addr, size, data);
}
#endif