staging: qlge: use qlge_* prefix to avoid namespace clashes with other qlogic drivers

To avoid namespace clashes with other qlogic drivers and also for the
sake of naming consistency, use the "qlge_" prefix as suggested in
drivers/staging/qlge/TODO,
 - For existing ql_ prefix,
   sed -i "s/ql_/qlge_/g" *.{c,h}
 - for structs not having a prefix
   1. get a list of structs
      grep "struct.*{" qlge.
   2. add qlge_ for each struct, e.g.,
      sed -i "s/ib_ae_iocb_rsp/qlge_ib_ae_iocb_rsp/g" *.{c,h}

Link: https://lore.kernel.org/patchwork/patch/1318503/#1516131
Suggested-by: Benjamin Poirier <benjamin.poirier@gmail.com>
Signed-off-by: Coiby Xu <coiby.xu@gmail.com>
Link: https://lore.kernel.org/r/20210123104613.38359-2-coiby.xu@gmail.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Coiby Xu 2021-01-23 18:46:06 +08:00 committed by Greg Kroah-Hartman
parent 684ceb81dc
commit f8c047be54
6 changed files with 1549 additions and 1554 deletions


@ -28,10 +28,6 @@
* the driver has a habit of using runtime checks where compile time checks are
possible (ex. ql_free_rx_buffers(), ql_alloc_rx_buffers())
* reorder struct members to avoid holes if it doesn't impact performance
* in terms of namespace, the driver uses either qlge_, ql_ (used by
other qlogic drivers, with clashes, ex: ql_sem_spinlock) or nothing (with
clashes, ex: struct ob_mac_iocb_req). Rename everything to use the "qlge_"
prefix.
* avoid legacy/deprecated apis (ex. replace pci_dma_*, replace pci_enable_msi,
use pci_iomap)
* some "while" loops could be rewritten with simple "for", ex.


@ -1081,7 +1081,7 @@ struct tx_buf_desc {
#define OPCODE_IB_MPI_IOCB 0x21
#define OPCODE_IB_AE_IOCB 0x3f
struct ob_mac_iocb_req {
struct qlge_ob_mac_iocb_req {
u8 opcode;
u8 flags1;
#define OB_MAC_IOCB_REQ_OI 0x01
@ -1104,7 +1104,7 @@ struct ob_mac_iocb_req {
struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
} __packed;
struct ob_mac_iocb_rsp {
struct qlge_ob_mac_iocb_rsp {
u8 opcode; /* */
u8 flags1; /* */
#define OB_MAC_IOCB_RSP_OI 0x01 /* */
@ -1121,7 +1121,7 @@ struct ob_mac_iocb_rsp {
__le32 reserved[13];
} __packed;
struct ob_mac_tso_iocb_req {
struct qlge_ob_mac_tso_iocb_req {
u8 opcode;
u8 flags1;
#define OB_MAC_TSO_IOCB_OI 0x01
@ -1149,7 +1149,7 @@ struct ob_mac_tso_iocb_req {
struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
} __packed;
struct ob_mac_tso_iocb_rsp {
struct qlge_ob_mac_tso_iocb_rsp {
u8 opcode;
u8 flags1;
#define OB_MAC_TSO_IOCB_RSP_OI 0x01
@ -1166,7 +1166,7 @@ struct ob_mac_tso_iocb_rsp {
__le32 reserved2[13];
} __packed;
struct ib_mac_iocb_rsp {
struct qlge_ib_mac_iocb_rsp {
u8 opcode; /* 0x20 */
u8 flags1;
#define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */
@ -1225,7 +1225,7 @@ struct ib_mac_iocb_rsp {
__le64 hdr_addr; /* */
} __packed;
struct ib_ae_iocb_rsp {
struct qlge_ib_ae_iocb_rsp {
u8 opcode;
u8 flags1;
#define IB_AE_IOCB_RSP_OI 0x01
@ -1250,7 +1250,7 @@ struct ib_ae_iocb_rsp {
* These three structures are for generic
* handling of ib and ob iocbs.
*/
struct ql_net_rsp_iocb {
struct qlge_net_rsp_iocb {
u8 opcode;
u8 flags0;
__le16 length;
@ -1258,7 +1258,7 @@ struct ql_net_rsp_iocb {
__le32 reserved[14];
} __packed;
struct net_req_iocb {
struct qlge_net_req_iocb {
u8 opcode;
u8 flags0;
__le16 flags1;
@ -1346,7 +1346,7 @@ struct ricb {
/* SOFTWARE/DRIVER DATA STRUCTURES. */
struct oal {
struct qlge_oal {
struct tx_buf_desc oal[TX_DESC_PER_OAL];
};
@ -1357,9 +1357,9 @@ struct map_list {
struct tx_ring_desc {
struct sk_buff *skb;
struct ob_mac_iocb_req *queue_entry;
struct qlge_ob_mac_iocb_req *queue_entry;
u32 index;
struct oal oal;
struct qlge_oal oal;
struct map_list map[MAX_SKB_FRAGS + 2];
int map_cnt;
struct tx_ring_desc *next;
@ -1388,7 +1388,7 @@ struct tx_ring {
spinlock_t lock;
atomic_t tx_count; /* counts down for every outstanding IO */
struct delayed_work tx_work;
struct ql_adapter *qdev;
struct qlge_adapter *qdev;
u64 tx_packets;
u64 tx_bytes;
u64 tx_errors;
@ -1469,7 +1469,7 @@ struct rx_ring {
dma_addr_t prod_idx_sh_reg_dma;
void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */
u32 cnsmr_idx; /* current sw idx */
struct ql_net_rsp_iocb *curr_entry; /* next entry on queue */
struct qlge_net_rsp_iocb *curr_entry; /* next entry on queue */
void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */
/* Large buffer queue elements. */
@ -1487,7 +1487,7 @@ struct rx_ring {
char name[IFNAMSIZ + 5];
struct napi_struct napi;
u8 reserved;
struct ql_adapter *qdev;
struct qlge_adapter *qdev;
u64 rx_packets;
u64 rx_multicast;
u64 rx_bytes;
@ -1752,14 +1752,14 @@ enum {
#define SHADOW_OFFSET 0xb0000000
#define SHADOW_REG_SHIFT 20
struct ql_nic_misc {
struct qlge_nic_misc {
u32 rx_ring_count;
u32 tx_ring_count;
u32 intr_count;
u32 function;
};
struct ql_reg_dump {
struct qlge_reg_dump {
/* segment 0 */
struct mpi_coredump_global_header mpi_global_header;
@ -1769,7 +1769,7 @@ struct ql_reg_dump {
/* segment 30 */
struct mpi_coredump_segment_header misc_nic_seg_hdr;
struct ql_nic_misc misc_nic_info;
struct qlge_nic_misc misc_nic_info;
/* segment 31 */
/* one interrupt state for each CQ */
@ -1792,7 +1792,7 @@ struct ql_reg_dump {
u32 ets[8 + 2];
};
struct ql_mpi_coredump {
struct qlge_mpi_coredump {
/* segment 0 */
struct mpi_coredump_global_header mpi_global_header;
@ -1914,7 +1914,7 @@ struct ql_mpi_coredump {
/* segment 30 */
struct mpi_coredump_segment_header misc_nic_seg_hdr;
struct ql_nic_misc misc_nic_info;
struct qlge_nic_misc misc_nic_info;
/* segment 31 */
/* one interrupt state for each CQ */
@ -1991,7 +1991,7 @@ struct ql_mpi_coredump {
* irq environment as a context to the ISR.
*/
struct intr_context {
struct ql_adapter *qdev;
struct qlge_adapter *qdev;
u32 intr;
u32 irq_mask; /* Mask of which rings the vector services. */
u32 hooked;
@ -2056,15 +2056,15 @@ enum {
};
struct nic_operations {
int (*get_flash)(struct ql_adapter *qdev);
int (*port_initialize)(struct ql_adapter *qdev);
int (*get_flash)(struct qlge_adapter *qdev);
int (*port_initialize)(struct qlge_adapter *qdev);
};
/*
* The main Adapter structure definition.
* This structure has all fields relevant to the hardware.
*/
struct ql_adapter {
struct qlge_adapter {
struct ricb ricb;
unsigned long flags;
u32 wol;
@ -2139,7 +2139,7 @@ struct ql_adapter {
u32 port_link_up;
u32 port_init;
u32 link_status;
struct ql_mpi_coredump *mpi_coredump;
struct qlge_mpi_coredump *mpi_coredump;
u32 core_is_dumped;
u32 link_config;
u32 led_config;
@ -2166,7 +2166,7 @@ struct ql_adapter {
/*
* Typical Register accessor for memory mapped device.
*/
static inline u32 ql_read32(const struct ql_adapter *qdev, int reg)
static inline u32 qlge_read32(const struct qlge_adapter *qdev, int reg)
{
return readl(qdev->reg_base + reg);
}
@ -2174,7 +2174,7 @@ static inline u32 ql_read32(const struct ql_adapter *qdev, int reg)
/*
* Typical Register accessor for memory mapped device.
*/
static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
static inline void qlge_write32(const struct qlge_adapter *qdev, int reg, u32 val)
{
writel(val, qdev->reg_base + reg);
}
@ -2189,7 +2189,7 @@ static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
* 1 4k chunk of memory. The lower half of the space is for outbound
* queues. The upper half is for inbound queues.
*/
static inline void ql_write_db_reg(u32 val, void __iomem *addr)
static inline void qlge_write_db_reg(u32 val, void __iomem *addr)
{
writel(val, addr);
}
@ -2205,7 +2205,7 @@ static inline void ql_write_db_reg(u32 val, void __iomem *addr)
* queues. The upper half is for inbound queues.
* Caller has to guarantee ordering.
*/
static inline void ql_write_db_reg_relaxed(u32 val, void __iomem *addr)
static inline void qlge_write_db_reg_relaxed(u32 val, void __iomem *addr)
{
writel_relaxed(val, addr);
}
@ -2220,7 +2220,7 @@ static inline void ql_write_db_reg_relaxed(u32 val, void __iomem *addr)
* update the relevant index register and then copy the value to the
* shadow register in host memory.
*/
static inline u32 ql_read_sh_reg(__le32 *addr)
static inline u32 qlge_read_sh_reg(__le32 *addr)
{
u32 reg;
@ -2233,51 +2233,51 @@ extern char qlge_driver_name[];
extern const char qlge_driver_version[];
extern const struct ethtool_ops qlge_ethtool_ops;
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
int qlge_sem_spinlock(struct qlge_adapter *qdev, u32 sem_mask);
void qlge_sem_unlock(struct qlge_adapter *qdev, u32 sem_mask);
int qlge_read_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 *data);
int qlge_get_mac_addr_reg(struct qlge_adapter *qdev, u32 type, u16 index,
u32 *value);
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
int qlge_get_routing_reg(struct qlge_adapter *qdev, u32 index, u32 *value);
int qlge_write_cfg(struct qlge_adapter *qdev, void *ptr, int size, u32 bit,
u16 q_id);
void ql_queue_fw_error(struct ql_adapter *qdev);
void ql_mpi_work(struct work_struct *work);
void ql_mpi_reset_work(struct work_struct *work);
void ql_mpi_core_to_log(struct work_struct *work);
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
void ql_queue_asic_error(struct ql_adapter *qdev);
void ql_set_ethtool_ops(struct net_device *ndev);
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
void ql_mpi_idc_work(struct work_struct *work);
void ql_mpi_port_cfg_work(struct work_struct *work);
int ql_mb_get_fw_state(struct ql_adapter *qdev);
int ql_cam_route_initialize(struct ql_adapter *qdev);
int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
int ql_unpause_mpi_risc(struct ql_adapter *qdev);
int ql_pause_mpi_risc(struct ql_adapter *qdev);
int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, u32 ram_addr,
void qlge_queue_fw_error(struct qlge_adapter *qdev);
void qlge_mpi_work(struct work_struct *work);
void qlge_mpi_reset_work(struct work_struct *work);
void qlge_mpi_core_to_log(struct work_struct *work);
int qlge_wait_reg_rdy(struct qlge_adapter *qdev, u32 reg, u32 bit, u32 ebit);
void qlge_queue_asic_error(struct qlge_adapter *qdev);
void qlge_set_ethtool_ops(struct net_device *ndev);
int qlge_read_xgmac_reg64(struct qlge_adapter *qdev, u32 reg, u64 *data);
void qlge_mpi_idc_work(struct work_struct *work);
void qlge_mpi_port_cfg_work(struct work_struct *work);
int qlge_mb_get_fw_state(struct qlge_adapter *qdev);
int qlge_cam_route_initialize(struct qlge_adapter *qdev);
int qlge_read_mpi_reg(struct qlge_adapter *qdev, u32 reg, u32 *data);
int qlge_write_mpi_reg(struct qlge_adapter *qdev, u32 reg, u32 data);
int qlge_unpause_mpi_risc(struct qlge_adapter *qdev);
int qlge_pause_mpi_risc(struct qlge_adapter *qdev);
int qlge_hard_reset_mpi_risc(struct qlge_adapter *qdev);
int qlge_soft_reset_mpi_risc(struct qlge_adapter *qdev);
int qlge_dump_risc_ram_area(struct qlge_adapter *qdev, void *buf, u32 ram_addr,
int word_count);
int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump);
int ql_mb_about_fw(struct ql_adapter *qdev);
int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
int ql_mb_get_led_cfg(struct ql_adapter *qdev);
void ql_link_on(struct ql_adapter *qdev);
void ql_link_off(struct ql_adapter *qdev);
int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
int ql_mb_get_port_cfg(struct ql_adapter *qdev);
int ql_mb_set_port_cfg(struct ql_adapter *qdev);
int ql_wait_fifo_empty(struct ql_adapter *qdev);
void ql_get_dump(struct ql_adapter *qdev, void *buff);
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
void ql_check_lb_frame(struct ql_adapter *qdev, struct sk_buff *skb);
int ql_own_firmware(struct ql_adapter *qdev);
int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
int qlge_core_dump(struct qlge_adapter *qdev, struct qlge_mpi_coredump *mpi_coredump);
int qlge_mb_about_fw(struct qlge_adapter *qdev);
int qlge_mb_wol_set_magic(struct qlge_adapter *qdev, u32 enable_wol);
int qlge_mb_wol_mode(struct qlge_adapter *qdev, u32 wol);
int qlge_mb_set_led_cfg(struct qlge_adapter *qdev, u32 led_config);
int qlge_mb_get_led_cfg(struct qlge_adapter *qdev);
void qlge_link_on(struct qlge_adapter *qdev);
void qlge_link_off(struct qlge_adapter *qdev);
int qlge_mb_set_mgmnt_traffic_ctl(struct qlge_adapter *qdev, u32 control);
int qlge_mb_get_port_cfg(struct qlge_adapter *qdev);
int qlge_mb_set_port_cfg(struct qlge_adapter *qdev);
int qlge_wait_fifo_empty(struct qlge_adapter *qdev);
void qlge_get_dump(struct qlge_adapter *qdev, void *buff);
netdev_tx_t qlge_lb_send(struct sk_buff *skb, struct net_device *ndev);
void qlge_check_lb_frame(struct qlge_adapter *qdev, struct sk_buff *skb);
int qlge_own_firmware(struct qlge_adapter *qdev);
int qlge_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
/* #define QL_ALL_DUMP */
/* #define QL_REG_DUMP */
@ -2287,12 +2287,12 @@ int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
/* #define QL_OB_DUMP */
#ifdef QL_REG_DUMP
void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
void ql_dump_routing_entries(struct ql_adapter *qdev);
void ql_dump_regs(struct ql_adapter *qdev);
#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
void qlge_dump_xgmac_control_regs(struct qlge_adapter *qdev);
void qlge_dump_routing_entries(struct qlge_adapter *qdev);
void qlge_dump_regs(struct qlge_adapter *qdev);
#define QL_DUMP_REGS(qdev) qlge_dump_regs(qdev)
#define QL_DUMP_ROUTE(qdev) qlge_dump_routing_entries(qdev)
#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) qlge_dump_xgmac_control_regs(qdev)
#else
#define QL_DUMP_REGS(qdev)
#define QL_DUMP_ROUTE(qdev)
@ -2300,33 +2300,33 @@ void ql_dump_regs(struct ql_adapter *qdev);
#endif
#ifdef QL_STAT_DUMP
void ql_dump_stat(struct ql_adapter *qdev);
#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
void qlge_dump_stat(struct qlge_adapter *qdev);
#define QL_DUMP_STAT(qdev) qlge_dump_stat(qdev)
#else
#define QL_DUMP_STAT(qdev)
#endif
#ifdef QL_DEV_DUMP
void ql_dump_qdev(struct ql_adapter *qdev);
#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
void qlge_dump_qdev(struct qlge_adapter *qdev);
#define QL_DUMP_QDEV(qdev) qlge_dump_qdev(qdev)
#else
#define QL_DUMP_QDEV(qdev)
#endif
#ifdef QL_CB_DUMP
void ql_dump_wqicb(struct wqicb *wqicb);
void ql_dump_tx_ring(struct tx_ring *tx_ring);
void ql_dump_ricb(struct ricb *ricb);
void ql_dump_cqicb(struct cqicb *cqicb);
void ql_dump_rx_ring(struct rx_ring *rx_ring);
void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb)
#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring)
void qlge_dump_wqicb(struct wqicb *wqicb);
void qlge_dump_tx_ring(struct tx_ring *tx_ring);
void qlge_dump_ricb(struct ricb *ricb);
void qlge_dump_cqicb(struct cqicb *cqicb);
void qlge_dump_rx_ring(struct rx_ring *rx_ring);
void qlge_dump_hw_cb(struct qlge_adapter *qdev, int size, u32 bit, u16 q_id);
#define QL_DUMP_RICB(ricb) qlge_dump_ricb(ricb)
#define QL_DUMP_WQICB(wqicb) qlge_dump_wqicb(wqicb)
#define QL_DUMP_TX_RING(tx_ring) qlge_dump_tx_ring(tx_ring)
#define QL_DUMP_CQICB(cqicb) qlge_dump_cqicb(cqicb)
#define QL_DUMP_RX_RING(rx_ring) qlge_dump_rx_ring(rx_ring)
#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \
ql_dump_hw_cb(qdev, size, bit, q_id)
qlge_dump_hw_cb(qdev, size, bit, q_id)
#else
#define QL_DUMP_RICB(ricb)
#define QL_DUMP_WQICB(wqicb)
@ -2337,26 +2337,26 @@ void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
#endif
#ifdef QL_OB_DUMP
void ql_dump_tx_desc(struct ql_adapter *qdev, struct tx_buf_desc *tbd);
void ql_dump_ob_mac_iocb(struct ql_adapter *qdev, struct ob_mac_iocb_req *ob_mac_iocb);
void ql_dump_ob_mac_rsp(struct ql_adapter *qdev, struct ob_mac_iocb_rsp *ob_mac_rsp);
#define QL_DUMP_OB_MAC_IOCB(qdev, ob_mac_iocb) ql_dump_ob_mac_iocb(qdev, ob_mac_iocb)
#define QL_DUMP_OB_MAC_RSP(qdev, ob_mac_rsp) ql_dump_ob_mac_rsp(qdev, ob_mac_rsp)
void qlge_dump_tx_desc(struct qlge_adapter *qdev, struct tx_buf_desc *tbd);
void qlge_dump_ob_mac_iocb(struct qlge_adapter *qdev, struct qlge_ob_mac_iocb_req *ob_mac_iocb);
void qlge_dump_ob_mac_rsp(struct qlge_adapter *qdev, struct qlge_ob_mac_iocb_rsp *ob_mac_rsp);
#define QL_DUMP_OB_MAC_IOCB(qdev, ob_mac_iocb) qlge_dump_ob_mac_iocb(qdev, ob_mac_iocb)
#define QL_DUMP_OB_MAC_RSP(qdev, ob_mac_rsp) qlge_dump_ob_mac_rsp(qdev, ob_mac_rsp)
#else
#define QL_DUMP_OB_MAC_IOCB(qdev, ob_mac_iocb)
#define QL_DUMP_OB_MAC_RSP(qdev, ob_mac_rsp)
#endif
#ifdef QL_IB_DUMP
void ql_dump_ib_mac_rsp(struct ql_adapter *qdev, struct ib_mac_iocb_rsp *ib_mac_rsp);
#define QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp) ql_dump_ib_mac_rsp(qdev, ib_mac_rsp)
void qlge_dump_ib_mac_rsp(struct qlge_adapter *qdev, struct qlge_ib_mac_iocb_rsp *ib_mac_rsp);
#define QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp) qlge_dump_ib_mac_rsp(qdev, ib_mac_rsp)
#else
#define QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp)
#endif
#ifdef QL_ALL_DUMP
void ql_dump_all(struct ql_adapter *qdev);
#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
void qlge_dump_all(struct qlge_adapter *qdev);
#define QL_DUMP_ALL(qdev) qlge_dump_all(qdev)
#else
#define QL_DUMP_ALL(qdev)
#endif

File diff suppressed because it is too large


@ -34,16 +34,16 @@
#include "qlge.h"
struct ql_stats {
struct qlge_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
int stat_offset;
};
#define QL_SIZEOF(m) sizeof_field(struct ql_adapter, m)
#define QL_OFF(m) offsetof(struct ql_adapter, m)
#define QL_SIZEOF(m) sizeof_field(struct qlge_adapter, m)
#define QL_OFF(m) offsetof(struct qlge_adapter, m)
static const struct ql_stats ql_gstrings_stats[] = {
static const struct qlge_stats qlge_gstrings_stats[] = {
{"tx_pkts", QL_SIZEOF(nic_stats.tx_pkts), QL_OFF(nic_stats.tx_pkts)},
{"tx_bytes", QL_SIZEOF(nic_stats.tx_bytes), QL_OFF(nic_stats.tx_bytes)},
{"tx_mcast_pkts", QL_SIZEOF(nic_stats.tx_mcast_pkts),
@ -175,15 +175,15 @@ static const struct ql_stats ql_gstrings_stats[] = {
QL_OFF(nic_stats.rx_nic_fifo_drop)},
};
static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
static const char qlge_gstrings_test[][ETH_GSTRING_LEN] = {
"Loopback test (offline)"
};
#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats)
#define QLGE_TEST_LEN (sizeof(qlge_gstrings_test) / ETH_GSTRING_LEN)
#define QLGE_STATS_LEN ARRAY_SIZE(qlge_gstrings_stats)
#define QLGE_RCV_MAC_ERR_STATS 7
static int ql_update_ring_coalescing(struct ql_adapter *qdev)
static int qlge_update_ring_coalescing(struct qlge_adapter *qdev)
{
int i, status = 0;
struct rx_ring *rx_ring;
@ -205,7 +205,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
cqicb->pkt_delay =
cpu_to_le16(qdev->tx_max_coalesced_frames);
cqicb->flags = FLAGS_LI;
status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
status = qlge_write_cfg(qdev, cqicb, sizeof(*cqicb),
CFG_LCQ, rx_ring->cq_id);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
@ -226,7 +226,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
cqicb->pkt_delay =
cpu_to_le16(qdev->rx_max_coalesced_frames);
cqicb->flags = FLAGS_LI;
status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
status = qlge_write_cfg(qdev, cqicb, sizeof(*cqicb),
CFG_LCQ, rx_ring->cq_id);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
@ -239,14 +239,14 @@ exit:
return status;
}
static void ql_update_stats(struct ql_adapter *qdev)
static void qlge_update_stats(struct qlge_adapter *qdev)
{
u32 i;
u64 data;
u64 *iter = &qdev->nic_stats.tx_pkts;
spin_lock(&qdev->stats_lock);
if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
if (qlge_sem_spinlock(qdev, qdev->xg_sem_mask)) {
netif_err(qdev, drv, qdev->ndev,
"Couldn't get xgmac sem.\n");
goto quit;
@ -255,7 +255,7 @@ static void ql_update_stats(struct ql_adapter *qdev)
* Get TX statistics.
*/
for (i = 0x200; i < 0x280; i += 8) {
if (ql_read_xgmac_reg64(qdev, i, &data)) {
if (qlge_read_xgmac_reg64(qdev, i, &data)) {
netif_err(qdev, drv, qdev->ndev,
"Error reading status register 0x%.04x.\n",
i);
@ -270,7 +270,7 @@ static void ql_update_stats(struct ql_adapter *qdev)
* Get RX statistics.
*/
for (i = 0x300; i < 0x3d0; i += 8) {
if (ql_read_xgmac_reg64(qdev, i, &data)) {
if (qlge_read_xgmac_reg64(qdev, i, &data)) {
netif_err(qdev, drv, qdev->ndev,
"Error reading status register 0x%.04x.\n",
i);
@ -288,7 +288,7 @@ static void ql_update_stats(struct ql_adapter *qdev)
* Get Per-priority TX pause frame counter statistics.
*/
for (i = 0x500; i < 0x540; i += 8) {
if (ql_read_xgmac_reg64(qdev, i, &data)) {
if (qlge_read_xgmac_reg64(qdev, i, &data)) {
netif_err(qdev, drv, qdev->ndev,
"Error reading status register 0x%.04x.\n",
i);
@ -303,7 +303,7 @@ static void ql_update_stats(struct ql_adapter *qdev)
* Get Per-priority RX pause frame counter statistics.
*/
for (i = 0x568; i < 0x5a8; i += 8) {
if (ql_read_xgmac_reg64(qdev, i, &data)) {
if (qlge_read_xgmac_reg64(qdev, i, &data)) {
netif_err(qdev, drv, qdev->ndev,
"Error reading status register 0x%.04x.\n",
i);
@ -317,7 +317,7 @@ static void ql_update_stats(struct ql_adapter *qdev)
/*
* Get RX NIC FIFO DROP statistics.
*/
if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
if (qlge_read_xgmac_reg64(qdev, 0x5b8, &data)) {
netif_err(qdev, drv, qdev->ndev,
"Error reading status register 0x%.04x.\n", i);
goto end;
@ -325,32 +325,32 @@ static void ql_update_stats(struct ql_adapter *qdev)
*iter = data;
}
end:
ql_sem_unlock(qdev, qdev->xg_sem_mask);
qlge_sem_unlock(qdev, qdev->xg_sem_mask);
quit:
spin_unlock(&qdev->stats_lock);
QL_DUMP_STAT(qdev);
}
static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
static void qlge_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
int index;
switch (stringset) {
case ETH_SS_TEST:
memcpy(buf, *ql_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN);
memcpy(buf, *qlge_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
for (index = 0; index < QLGE_STATS_LEN; index++) {
memcpy(buf + index * ETH_GSTRING_LEN,
ql_gstrings_stats[index].stat_string,
qlge_gstrings_stats[index].stat_string,
ETH_GSTRING_LEN);
}
break;
}
}
static int ql_get_sset_count(struct net_device *dev, int sset)
static int qlge_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_TEST:
@ -363,27 +363,27 @@ static int ql_get_sset_count(struct net_device *dev, int sset)
}
static void
ql_get_ethtool_stats(struct net_device *ndev,
qlge_get_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *stats, u64 *data)
{
struct ql_adapter *qdev = netdev_priv(ndev);
struct qlge_adapter *qdev = netdev_priv(ndev);
int index, length;
length = QLGE_STATS_LEN;
ql_update_stats(qdev);
qlge_update_stats(qdev);
for (index = 0; index < length; index++) {
char *p = (char *)qdev +
ql_gstrings_stats[index].stat_offset;
*data++ = (ql_gstrings_stats[index].sizeof_stat ==
qlge_gstrings_stats[index].stat_offset;
*data++ = (qlge_gstrings_stats[index].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : (*(u32 *)p);
}
}
static int ql_get_link_ksettings(struct net_device *ndev,
static int qlge_get_link_ksettings(struct net_device *ndev,
struct ethtool_link_ksettings *ecmd)
{
struct ql_adapter *qdev = netdev_priv(ndev);
struct qlge_adapter *qdev = netdev_priv(ndev);
u32 supported, advertising;
supported = SUPPORTED_10000baseT_Full;
@ -412,10 +412,10 @@ static int ql_get_link_ksettings(struct net_device *ndev,
return 0;
}
static void ql_get_drvinfo(struct net_device *ndev,
static void qlge_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *drvinfo)
{
struct ql_adapter *qdev = netdev_priv(ndev);
struct qlge_adapter *qdev = netdev_priv(ndev);
strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, qlge_driver_version,
@ -429,9 +429,9 @@ static void ql_get_drvinfo(struct net_device *ndev,
sizeof(drvinfo->bus_info));
}
static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
static void qlge_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct ql_adapter *qdev = netdev_priv(ndev);
struct qlge_adapter *qdev = netdev_priv(ndev);
unsigned short ssys_dev = qdev->pdev->subsystem_device;
/* WOL is only supported for mezz card. */
@ -442,9 +442,9 @@ static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
}
}
static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
static int qlge_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct ql_adapter *qdev = netdev_priv(ndev);
struct qlge_adapter *qdev = netdev_priv(ndev);
unsigned short ssys_dev = qdev->pdev->subsystem_device;
/* WOL is only supported for mezz card. */
@ -462,25 +462,25 @@ static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
return 0;
}
static int ql_set_phys_id(struct net_device *ndev,
static int qlge_set_phys_id(struct net_device *ndev,
enum ethtool_phys_id_state state)
{
struct ql_adapter *qdev = netdev_priv(ndev);
struct qlge_adapter *qdev = netdev_priv(ndev);
switch (state) {
case ETHTOOL_ID_ACTIVE:
/* Save the current LED settings */
if (ql_mb_get_led_cfg(qdev))
if (qlge_mb_get_led_cfg(qdev))
return -EIO;
/* Start blinking */
ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
qlge_mb_set_led_cfg(qdev, QL_LED_BLINK);
return 0;
case ETHTOOL_ID_INACTIVE:
/* Restore LED settings */
if (ql_mb_set_led_cfg(qdev, qdev->led_config))
if (qlge_mb_set_led_cfg(qdev, qdev->led_config))
return -EIO;
return 0;
@ -489,7 +489,7 @@ static int ql_set_phys_id(struct net_device *ndev,
}
}
static int ql_start_loopback(struct ql_adapter *qdev)
static int qlge_start_loopback(struct qlge_adapter *qdev)
{
if (netif_carrier_ok(qdev->ndev)) {
set_bit(QL_LB_LINK_UP, &qdev->flags);
@ -498,20 +498,20 @@ static int ql_start_loopback(struct ql_adapter *qdev)
clear_bit(QL_LB_LINK_UP, &qdev->flags);
}
qdev->link_config |= CFG_LOOPBACK_PCS;
return ql_mb_set_port_cfg(qdev);
return qlge_mb_set_port_cfg(qdev);
}
static void ql_stop_loopback(struct ql_adapter *qdev)
static void qlge_stop_loopback(struct qlge_adapter *qdev)
{
qdev->link_config &= ~CFG_LOOPBACK_PCS;
ql_mb_set_port_cfg(qdev);
qlge_mb_set_port_cfg(qdev);
if (test_bit(QL_LB_LINK_UP, &qdev->flags)) {
netif_carrier_on(qdev->ndev);
clear_bit(QL_LB_LINK_UP, &qdev->flags);
}
}
static void ql_create_lb_frame(struct sk_buff *skb,
static void qlge_create_lb_frame(struct sk_buff *skb,
unsigned int frame_size)
{
memset(skb->data, 0xFF, frame_size);
@ -521,7 +521,7 @@ static void ql_create_lb_frame(struct sk_buff *skb,
skb->data[frame_size / 2 + 12] = (unsigned char)0xAF;
}
void ql_check_lb_frame(struct ql_adapter *qdev,
void qlge_check_lb_frame(struct qlge_adapter *qdev,
struct sk_buff *skb)
{
unsigned int frame_size = skb->len;
@ -534,7 +534,7 @@ void ql_check_lb_frame(struct ql_adapter *qdev,
}
}
static int ql_run_loopback_test(struct ql_adapter *qdev)
static int qlge_run_loopback_test(struct qlge_adapter *qdev)
{
int i;
netdev_tx_t rc;
@ -548,33 +548,33 @@ static int ql_run_loopback_test(struct ql_adapter *qdev)
skb->queue_mapping = 0;
skb_put(skb, size);
ql_create_lb_frame(skb, size);
rc = ql_lb_send(skb, qdev->ndev);
qlge_create_lb_frame(skb, size);
rc = qlge_lb_send(skb, qdev->ndev);
if (rc != NETDEV_TX_OK)
return -EPIPE;
atomic_inc(&qdev->lb_count);
}
/* Give queue time to settle before testing results. */
msleep(2);
ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
qlge_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
return atomic_read(&qdev->lb_count) ? -EIO : 0;
}
static int ql_loopback_test(struct ql_adapter *qdev, u64 *data)
static int qlge_loopback_test(struct qlge_adapter *qdev, u64 *data)
{
*data = ql_start_loopback(qdev);
*data = qlge_start_loopback(qdev);
if (*data)
goto out;
*data = ql_run_loopback_test(qdev);
*data = qlge_run_loopback_test(qdev);
out:
ql_stop_loopback(qdev);
qlge_stop_loopback(qdev);
return *data;
}
static void ql_self_test(struct net_device *ndev,
static void qlge_self_test(struct net_device *ndev,
struct ethtool_test *eth_test, u64 *data)
{
struct ql_adapter *qdev = netdev_priv(ndev);
struct qlge_adapter *qdev = netdev_priv(ndev);
memset(data, 0, sizeof(u64) * QLGE_TEST_LEN);
@ -582,7 +582,7 @@ static void ql_self_test(struct net_device *ndev,
set_bit(QL_SELFTEST, &qdev->flags);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
if (ql_loopback_test(qdev, &data[0]))
if (qlge_loopback_test(qdev, &data[0]))
eth_test->flags |= ETH_TEST_FL_FAILED;
} else {
@ -601,32 +601,32 @@ static void ql_self_test(struct net_device *ndev,
}
}
static int ql_get_regs_len(struct net_device *ndev)
static int qlge_get_regs_len(struct net_device *ndev)
{
struct ql_adapter *qdev = netdev_priv(ndev);
struct qlge_adapter *qdev = netdev_priv(ndev);
if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
return sizeof(struct ql_mpi_coredump);
return sizeof(struct qlge_mpi_coredump);
else
return sizeof(struct ql_reg_dump);
return sizeof(struct qlge_reg_dump);
}
static void ql_get_regs(struct net_device *ndev,
static void qlge_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *p)
{
struct ql_adapter *qdev = netdev_priv(ndev);
struct qlge_adapter *qdev = netdev_priv(ndev);
ql_get_dump(qdev, p);
qlge_get_dump(qdev, p);
qdev->core_is_dumped = 0;
if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
regs->len = sizeof(struct ql_mpi_coredump);
regs->len = sizeof(struct qlge_mpi_coredump);
else
regs->len = sizeof(struct ql_reg_dump);
regs->len = sizeof(struct qlge_reg_dump);
}
static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
static int qlge_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
struct ql_adapter *qdev = netdev_priv(dev);
struct qlge_adapter *qdev = netdev_priv(dev);
c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;
@ -647,9 +647,9 @@ static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
return 0;
}
static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
static int qlge_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
{
struct ql_adapter *qdev = netdev_priv(ndev);
struct qlge_adapter *qdev = netdev_priv(ndev);
/* Validate user parameters. */
if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
@ -674,25 +674,25 @@ static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;
return ql_update_ring_coalescing(qdev);
return qlge_update_ring_coalescing(qdev);
}
static void ql_get_pauseparam(struct net_device *netdev,
static void qlge_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct ql_adapter *qdev = netdev_priv(netdev);
struct qlge_adapter *qdev = netdev_priv(netdev);
ql_mb_get_port_cfg(qdev);
qlge_mb_get_port_cfg(qdev);
if (qdev->link_config & CFG_PAUSE_STD) {
pause->rx_pause = 1;
pause->tx_pause = 1;
}
}
static int ql_set_pauseparam(struct net_device *netdev,
static int qlge_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct ql_adapter *qdev = netdev_priv(netdev);
struct qlge_adapter *qdev = netdev_priv(netdev);
if ((pause->rx_pause) && (pause->tx_pause))
qdev->link_config |= CFG_PAUSE_STD;
@ -701,19 +701,19 @@ static int ql_set_pauseparam(struct net_device *netdev,
else
return -EINVAL;
return ql_mb_set_port_cfg(qdev);
return qlge_mb_set_port_cfg(qdev);
}
static u32 ql_get_msglevel(struct net_device *ndev)
static u32 qlge_get_msglevel(struct net_device *ndev)
{
struct ql_adapter *qdev = netdev_priv(ndev);
struct qlge_adapter *qdev = netdev_priv(ndev);
return qdev->msg_enable;
}
static void ql_set_msglevel(struct net_device *ndev, u32 value)
static void qlge_set_msglevel(struct net_device *ndev, u32 value)
{
struct ql_adapter *qdev = netdev_priv(ndev);
struct qlge_adapter *qdev = netdev_priv(ndev);
qdev->msg_enable = value;
}
@ -721,23 +721,23 @@ static void ql_set_msglevel(struct net_device *ndev, u32 value)
const struct ethtool_ops qlge_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = ql_get_drvinfo,
.get_wol = ql_get_wol,
.set_wol = ql_set_wol,
.get_regs_len = ql_get_regs_len,
.get_regs = ql_get_regs,
.get_msglevel = ql_get_msglevel,
.set_msglevel = ql_set_msglevel,
.get_drvinfo = qlge_get_drvinfo,
.get_wol = qlge_get_wol,
.set_wol = qlge_set_wol,
.get_regs_len = qlge_get_regs_len,
.get_regs = qlge_get_regs,
.get_msglevel = qlge_get_msglevel,
.set_msglevel = qlge_set_msglevel,
.get_link = ethtool_op_get_link,
.set_phys_id = ql_set_phys_id,
.self_test = ql_self_test,
.get_pauseparam = ql_get_pauseparam,
.set_pauseparam = ql_set_pauseparam,
.get_coalesce = ql_get_coalesce,
.set_coalesce = ql_set_coalesce,
.get_sset_count = ql_get_sset_count,
.get_strings = ql_get_strings,
.get_ethtool_stats = ql_get_ethtool_stats,
.get_link_ksettings = ql_get_link_ksettings,
.set_phys_id = qlge_set_phys_id,
.self_test = qlge_self_test,
.get_pauseparam = qlge_get_pauseparam,
.set_pauseparam = qlge_set_pauseparam,
.get_coalesce = qlge_get_coalesce,
.set_coalesce = qlge_set_coalesce,
.get_sset_count = qlge_get_sset_count,
.get_strings = qlge_get_strings,
.get_ethtool_stats = qlge_get_ethtool_stats,
.get_link_ksettings = qlge_get_link_ksettings,
};

File diff suppressed because it is too large


@ -1,28 +1,28 @@
// SPDX-License-Identifier: GPL-2.0
#include "qlge.h"
int ql_unpause_mpi_risc(struct ql_adapter *qdev)
int qlge_unpause_mpi_risc(struct qlge_adapter *qdev)
{
u32 tmp;
/* Un-pause the RISC */
tmp = ql_read32(qdev, CSR);
tmp = qlge_read32(qdev, CSR);
if (!(tmp & CSR_RP))
return -EIO;
ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
qlge_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
return 0;
}
int ql_pause_mpi_risc(struct ql_adapter *qdev)
int qlge_pause_mpi_risc(struct qlge_adapter *qdev)
{
u32 tmp;
int count;
/* Pause the RISC */
ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
qlge_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
for (count = UDELAY_COUNT; count; count--) {
tmp = ql_read32(qdev, CSR);
tmp = qlge_read32(qdev, CSR);
if (tmp & CSR_RP)
break;
mdelay(UDELAY_DELAY);
@ -30,17 +30,17 @@ int ql_pause_mpi_risc(struct ql_adapter *qdev)
return (count == 0) ? -ETIMEDOUT : 0;
}
int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
int qlge_hard_reset_mpi_risc(struct qlge_adapter *qdev)
{
u32 tmp;
int count;
/* Reset the RISC */
ql_write32(qdev, CSR, CSR_CMD_SET_RST);
qlge_write32(qdev, CSR, CSR_CMD_SET_RST);
for (count = UDELAY_COUNT; count; count--) {
tmp = ql_read32(qdev, CSR);
tmp = qlge_read32(qdev, CSR);
if (tmp & CSR_RR) {
ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
qlge_write32(qdev, CSR, CSR_CMD_CLR_RST);
break;
}
mdelay(UDELAY_DELAY);
@ -48,47 +48,47 @@ int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
return (count == 0) ? -ETIMEDOUT : 0;
}
int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
int qlge_read_mpi_reg(struct qlge_adapter *qdev, u32 reg, u32 *data)
{
int status;
/* wait for reg to come ready */
status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
status = qlge_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
if (status)
goto exit;
/* set up for reg read */
ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
qlge_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
/* wait for reg to come ready */
status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
status = qlge_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
if (status)
goto exit;
/* get the data */
*data = ql_read32(qdev, PROC_DATA);
*data = qlge_read32(qdev, PROC_DATA);
exit:
return status;
}
int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data)
int qlge_write_mpi_reg(struct qlge_adapter *qdev, u32 reg, u32 data)
{
int status = 0;
/* wait for reg to come ready */
status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
status = qlge_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
if (status)
goto exit;
/* write the data to the data reg */
ql_write32(qdev, PROC_DATA, data);
qlge_write32(qdev, PROC_DATA, data);
/* trigger the write */
ql_write32(qdev, PROC_ADDR, reg);
qlge_write32(qdev, PROC_ADDR, reg);
/* wait for reg to come ready */
status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
status = qlge_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
if (status)
goto exit;
exit:
return status;
}
int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
int qlge_soft_reset_mpi_risc(struct qlge_adapter *qdev)
{
return ql_write_mpi_reg(qdev, 0x00001010, 1);
return qlge_write_mpi_reg(qdev, 0x00001010, 1);
}
/* Determine if we are in charge of the firmware. If
@ -96,7 +96,7 @@ int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
* we are the higher function and the lower function
* is not enabled.
*/
int ql_own_firmware(struct ql_adapter *qdev)
int qlge_own_firmware(struct qlge_adapter *qdev)
{
u32 temp;
@ -112,43 +112,43 @@ int ql_own_firmware(struct ql_adapter *qdev)
* enabled, then we are responsible for
* core dump and firmware reset after an error.
*/
temp = ql_read32(qdev, STS);
temp = qlge_read32(qdev, STS);
if (!(temp & (1 << (8 + qdev->alt_func))))
return 1;
return 0;
}
static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
static int qlge_get_mb_sts(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
int i, status;
status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
status = qlge_sem_spinlock(qdev, SEM_PROC_REG_MASK);
if (status)
return -EBUSY;
for (i = 0; i < mbcp->out_count; i++) {
status =
ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
qlge_read_mpi_reg(qdev, qdev->mailbox_out + i,
&mbcp->mbox_out[i]);
if (status) {
netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
break;
}
}
ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
qlge_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
return status;
}
/* Wait for a single mailbox command to complete.
* Returns zero on success.
*/
static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev)
static int qlge_wait_mbx_cmd_cmplt(struct qlge_adapter *qdev)
{
int count;
u32 value;
for (count = 100; count; count--) {
value = ql_read32(qdev, STS);
value = qlge_read32(qdev, STS);
if (value & STS_PI)
return 0;
mdelay(UDELAY_DELAY); /* 100ms */
@ -159,7 +159,7 @@ static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev)
/* Execute a single mailbox command.
* Caller must hold PROC_ADDR semaphore.
*/
static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
static int qlge_exec_mb_cmd(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
int i, status;
@ -167,10 +167,10 @@ static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
* Make sure there's nothing pending.
* This shouldn't happen.
*/
if (ql_read32(qdev, CSR) & CSR_HRI)
if (qlge_read32(qdev, CSR) & CSR_HRI)
return -EIO;
status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
status = qlge_sem_spinlock(qdev, SEM_PROC_REG_MASK);
if (status)
return status;
@ -178,7 +178,7 @@ static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
* Fill the outbound mailboxes.
*/
for (i = 0; i < mbcp->in_count; i++) {
status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i,
status = qlge_write_mpi_reg(qdev, qdev->mailbox_in + i,
mbcp->mbox_in[i]);
if (status)
goto end;
@ -186,9 +186,9 @@ static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
/*
* Wake up the MPI firmware.
*/
ql_write32(qdev, CSR, CSR_CMD_SET_H2R_INT);
qlge_write32(qdev, CSR, CSR_CMD_SET_H2R_INT);
end:
ql_sem_unlock(qdev, SEM_PROC_REG_MASK);
qlge_sem_unlock(qdev, SEM_PROC_REG_MASK);
return status;
}
@ -199,7 +199,7 @@ end:
* to handler processing this since a mailbox command
* will need to be sent to ACK the request.
*/
static int ql_idc_req_aen(struct ql_adapter *qdev)
static int qlge_idc_req_aen(struct qlge_adapter *qdev)
{
int status;
struct mbox_params *mbcp = &qdev->idc_mbc;
@ -209,17 +209,17 @@ static int ql_idc_req_aen(struct ql_adapter *qdev)
* handle the request.
*/
mbcp->out_count = 4;
status = ql_get_mb_sts(qdev, mbcp);
status = qlge_get_mb_sts(qdev, mbcp);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Could not read MPI, resetting ASIC!\n");
ql_queue_asic_error(qdev);
qlge_queue_asic_error(qdev);
} else {
/* Begin polled mode early so
* we don't get another interrupt
* when we leave mpi_worker.
*/
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0);
}
return status;
@ -228,17 +228,17 @@ static int ql_idc_req_aen(struct ql_adapter *qdev)
/* Process an inter-device event completion.
* If good, signal the caller's completion.
*/
static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
static int qlge_idc_cmplt_aen(struct qlge_adapter *qdev)
{
int status;
struct mbox_params *mbcp = &qdev->idc_mbc;
mbcp->out_count = 4;
status = ql_get_mb_sts(qdev, mbcp);
status = qlge_get_mb_sts(qdev, mbcp);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Could not read MPI, resetting RISC!\n");
ql_queue_fw_error(qdev);
qlge_queue_fw_error(qdev);
} else {
/* Wake up the sleeping mpi_idc_work thread that is
* waiting for this event.
@ -248,13 +248,13 @@ static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
return status;
}
static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
static void qlge_link_up(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
int status;
mbcp->out_count = 2;
status = ql_get_mb_sts(qdev, mbcp);
status = qlge_get_mb_sts(qdev, mbcp);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"%s: Could not get mailbox status.\n", __func__);
@ -268,7 +268,7 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
* then set up the CAM and frame routing.
*/
if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
status = ql_cam_route_initialize(qdev);
status = qlge_cam_route_initialize(qdev);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to init CAM/Routing tables.\n");
@ -288,34 +288,34 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
* we don't get another interrupt
* when we leave mpi_worker dpc.
*/
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
queue_delayed_work(qdev->workqueue,
&qdev->mpi_port_cfg_work, 0);
}
ql_link_on(qdev);
qlge_link_on(qdev);
}
static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
static void qlge_link_down(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
int status;
mbcp->out_count = 3;
status = ql_get_mb_sts(qdev, mbcp);
status = qlge_get_mb_sts(qdev, mbcp);
if (status)
netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n");
ql_link_off(qdev);
qlge_link_off(qdev);
}
static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
static int qlge_sfp_in(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
int status;
mbcp->out_count = 5;
status = ql_get_mb_sts(qdev, mbcp);
status = qlge_get_mb_sts(qdev, mbcp);
if (status)
netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n");
else
@ -324,13 +324,13 @@ static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
return status;
}
static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
static int qlge_sfp_out(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
int status;
mbcp->out_count = 1;
status = ql_get_mb_sts(qdev, mbcp);
status = qlge_get_mb_sts(qdev, mbcp);
if (status)
netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n");
else
@ -339,13 +339,13 @@ static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
return status;
}
static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
static int qlge_aen_lost(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
int status;
mbcp->out_count = 6;
status = ql_get_mb_sts(qdev, mbcp);
status = qlge_get_mb_sts(qdev, mbcp);
if (status) {
netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n");
} else {
@ -360,20 +360,20 @@ static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
return status;
}
static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
static void qlge_init_fw_done(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
int status;
mbcp->out_count = 2;
status = ql_get_mb_sts(qdev, mbcp);
status = qlge_get_mb_sts(qdev, mbcp);
if (status) {
netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n");
} else {
netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n",
mbcp->mbox_out[1]);
qdev->fw_rev_id = mbcp->mbox_out[1];
status = ql_cam_route_initialize(qdev);
status = qlge_cam_route_initialize(qdev);
if (status)
netif_err(qdev, ifup, qdev->ndev,
"Failed to init CAM/Routing tables.\n");
@ -387,18 +387,18 @@ static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
* It also gets called when a mailbox command is polling for
* it's completion.
*/
static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
static int qlge_mpi_handler(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
int status;
int orig_count = mbcp->out_count;
/* Just get mailbox zero for now. */
mbcp->out_count = 1;
status = ql_get_mb_sts(qdev, mbcp);
status = qlge_get_mb_sts(qdev, mbcp);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Could not read MPI, resetting ASIC!\n");
ql_queue_asic_error(qdev);
qlge_queue_asic_error(qdev);
goto end;
}
@ -421,7 +421,7 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
* command completion.
*/
mbcp->out_count = orig_count;
status = ql_get_mb_sts(qdev, mbcp);
status = qlge_get_mb_sts(qdev, mbcp);
return status;
/* We are being asked by firmware to accept
@ -430,7 +430,7 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
* parameters, or loopback mode.
*/
case AEN_IDC_REQ:
status = ql_idc_req_aen(qdev);
status = qlge_idc_req_aen(qdev);
break;
/* Process and inbound IDC event.
@ -440,15 +440,15 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
*/
case AEN_IDC_CMPLT:
case AEN_IDC_EXT:
status = ql_idc_cmplt_aen(qdev);
status = qlge_idc_cmplt_aen(qdev);
break;
case AEN_LINK_UP:
ql_link_up(qdev, mbcp);
qlge_link_up(qdev, mbcp);
break;
case AEN_LINK_DOWN:
ql_link_down(qdev, mbcp);
qlge_link_down(qdev, mbcp);
break;
case AEN_FW_INIT_DONE:
@ -457,19 +457,19 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
*/
if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
mbcp->out_count = orig_count;
status = ql_get_mb_sts(qdev, mbcp);
status = qlge_get_mb_sts(qdev, mbcp);
mbcp->mbox_out[0] = MB_CMD_STS_GOOD;
return status;
}
ql_init_fw_done(qdev, mbcp);
qlge_init_fw_done(qdev, mbcp);
break;
case AEN_AEN_SFP_IN:
ql_sfp_in(qdev, mbcp);
qlge_sfp_in(qdev, mbcp);
break;
case AEN_AEN_SFP_OUT:
ql_sfp_out(qdev, mbcp);
qlge_sfp_out(qdev, mbcp);
break;
/* This event can arrive at boot time or after an
@ -481,24 +481,24 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
*/
if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
mbcp->out_count = orig_count;
status = ql_get_mb_sts(qdev, mbcp);
status = qlge_get_mb_sts(qdev, mbcp);
mbcp->mbox_out[0] = MB_CMD_STS_ERR;
return status;
}
netif_err(qdev, drv, qdev->ndev,
"Firmware initialization failed.\n");
status = -EIO;
ql_queue_fw_error(qdev);
qlge_queue_fw_error(qdev);
break;
case AEN_SYS_ERR:
netif_err(qdev, drv, qdev->ndev, "System Error.\n");
ql_queue_fw_error(qdev);
qlge_queue_fw_error(qdev);
status = -EIO;
break;
case AEN_AEN_LOST:
ql_aen_lost(qdev, mbcp);
qlge_aen_lost(qdev, mbcp);
break;
case AEN_DCBX_CHG:
@ -510,7 +510,7 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
/* Clear the MPI firmware status. */
}
end:
ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
qlge_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
/* Restore the original mailbox count to
* what the caller asked for. This can get
* changed when a mailbox command is waiting
@ -526,7 +526,7 @@ end:
* element in the array contains the value for it's
* respective mailbox register.
*/
static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
static int qlge_mailbox_command(struct qlge_adapter *qdev, struct mbox_params *mbcp)
{
int status;
unsigned long count;
@ -534,10 +534,10 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
mutex_lock(&qdev->mpi_mutex);
/* Begin polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
/* Load the mailbox registers and wake up MPI RISC. */
status = ql_exec_mb_cmd(qdev, mbcp);
status = qlge_exec_mb_cmd(qdev, mbcp);
if (status)
goto end;
@ -556,7 +556,7 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
count = jiffies + HZ * MAILBOX_TIMEOUT;
do {
/* Wait for the interrupt to come in. */
status = ql_wait_mbx_cmd_cmplt(qdev);
status = qlge_wait_mbx_cmd_cmplt(qdev);
if (status)
continue;
@ -565,7 +565,7 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
* will be spawned. If it's our completion
* we will catch it below.
*/
status = ql_mpi_handler(qdev, mbcp);
status = qlge_mpi_handler(qdev, mbcp);
if (status)
goto end;
@ -590,7 +590,7 @@ done:
/* Now we can clear the interrupt condition
* and look at our status.
*/
ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
qlge_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
if (((mbcp->mbox_out[0] & 0x0000f000) !=
MB_CMD_STS_GOOD) &&
@ -600,7 +600,7 @@ done:
}
end:
/* End polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
mutex_unlock(&qdev->mpi_mutex);
return status;
}
@ -609,7 +609,7 @@ end:
* driver banner and for ethtool info.
* Returns zero on success.
*/
int ql_mb_about_fw(struct ql_adapter *qdev)
int qlge_mb_about_fw(struct qlge_adapter *qdev)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@ -622,7 +622,7 @@ int ql_mb_about_fw(struct ql_adapter *qdev)
mbcp->mbox_in[0] = MB_CMD_ABOUT_FW;
status = ql_mailbox_command(qdev, mbcp);
status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@ -641,7 +641,7 @@ int ql_mb_about_fw(struct ql_adapter *qdev)
/* Get functional state for MPI firmware.
* Returns zero on success.
*/
int ql_mb_get_fw_state(struct ql_adapter *qdev)
int qlge_mb_get_fw_state(struct qlge_adapter *qdev)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@ -654,7 +654,7 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
mbcp->mbox_in[0] = MB_CMD_GET_FW_STATE;
status = ql_mailbox_command(qdev, mbcp);
status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@ -680,7 +680,7 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
/* Send and ACK mailbox command to the firmware to
* let it continue with the change.
*/
static int ql_mb_idc_ack(struct ql_adapter *qdev)
static int qlge_mb_idc_ack(struct qlge_adapter *qdev)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@ -697,7 +697,7 @@ static int ql_mb_idc_ack(struct ql_adapter *qdev)
mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3];
mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4];
status = ql_mailbox_command(qdev, mbcp);
status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@ -712,7 +712,7 @@ static int ql_mb_idc_ack(struct ql_adapter *qdev)
* for the current port.
* Most likely will block.
*/
int ql_mb_set_port_cfg(struct ql_adapter *qdev)
int qlge_mb_set_port_cfg(struct qlge_adapter *qdev)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@ -727,7 +727,7 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
mbcp->mbox_in[1] = qdev->link_config;
mbcp->mbox_in[2] = qdev->max_frame_size;
status = ql_mailbox_command(qdev, mbcp);
status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@ -742,7 +742,7 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
return status;
}
static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
static int qlge_mb_dump_ram(struct qlge_adapter *qdev, u64 req_dma, u32 addr,
u32 size)
{
int status = 0;
@ -764,7 +764,7 @@ static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
mbcp->mbox_in[7] = LSW(MSD(req_dma));
mbcp->mbox_in[8] = MSW(addr);
status = ql_mailbox_command(qdev, mbcp);
status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@ -776,7 +776,7 @@ static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
}
/* Issue a mailbox command to dump RISC RAM. */
int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
int qlge_dump_risc_ram_area(struct qlge_adapter *qdev, void *buf,
u32 ram_addr, int word_count)
{
int status;
@ -789,7 +789,7 @@ int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
if (!my_buf)
return -EIO;
status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
status = qlge_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
if (!status)
memcpy(buf, my_buf, word_count * sizeof(u32));
@ -802,7 +802,7 @@ int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
* for the current port.
* Most likely will block.
*/
int ql_mb_get_port_cfg(struct ql_adapter *qdev)
int qlge_mb_get_port_cfg(struct qlge_adapter *qdev)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@ -815,7 +815,7 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev)
mbcp->mbox_in[0] = MB_CMD_GET_PORT_CFG;
status = ql_mailbox_command(qdev, mbcp);
status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@ -832,7 +832,7 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev)
return status;
}
int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
int qlge_mb_wol_mode(struct qlge_adapter *qdev, u32 wol)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@ -846,7 +846,7 @@ int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
mbcp->mbox_in[1] = wol;
status = ql_mailbox_command(qdev, mbcp);
status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@ -857,7 +857,7 @@ int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
return status;
}
int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
int qlge_mb_wol_set_magic(struct qlge_adapter *qdev, u32 enable_wol)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@ -888,7 +888,7 @@ int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
mbcp->mbox_in[7] = 0;
}
status = ql_mailbox_command(qdev, mbcp);
status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@ -906,7 +906,7 @@ int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
* The firmware will complete the request if the other
* function doesn't respond.
*/
static int ql_idc_wait(struct ql_adapter *qdev)
static int qlge_idc_wait(struct qlge_adapter *qdev)
{
int status = -ETIMEDOUT;
struct mbox_params *mbcp = &qdev->idc_mbc;
@ -947,7 +947,7 @@ static int ql_idc_wait(struct ql_adapter *qdev)
return status;
}
int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
int qlge_mb_set_led_cfg(struct qlge_adapter *qdev, u32 led_config)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@ -961,7 +961,7 @@ int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
mbcp->mbox_in[1] = led_config;
status = ql_mailbox_command(qdev, mbcp);
status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@ -974,7 +974,7 @@ int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
return status;
}
int ql_mb_get_led_cfg(struct ql_adapter *qdev)
int qlge_mb_get_led_cfg(struct qlge_adapter *qdev)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@ -987,7 +987,7 @@ int ql_mb_get_led_cfg(struct ql_adapter *qdev)
mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG;
status = ql_mailbox_command(qdev, mbcp);
status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@ -1001,7 +1001,7 @@ int ql_mb_get_led_cfg(struct ql_adapter *qdev)
return status;
}
int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
int qlge_mb_set_mgmnt_traffic_ctl(struct qlge_adapter *qdev, u32 control)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@ -1015,7 +1015,7 @@ int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL;
mbcp->mbox_in[1] = control;
status = ql_mailbox_command(qdev, mbcp);
status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@ -1038,7 +1038,7 @@ int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
}
/* Returns a negative error code or the mailbox command status. */
static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
static int qlge_mb_get_mgmnt_traffic_ctl(struct qlge_adapter *qdev, u32 *control)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@ -1052,7 +1052,7 @@ static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL;
status = ql_mailbox_command(qdev, mbcp);
status = qlge_mailbox_command(qdev, mbcp);
if (status)
return status;
@ -1073,15 +1073,15 @@ static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
return status;
}
int ql_wait_fifo_empty(struct ql_adapter *qdev)
int qlge_wait_fifo_empty(struct qlge_adapter *qdev)
{
int count;
u32 mgmnt_fifo_empty;
u32 nic_fifo_empty;
for (count = 6; count; count--) {
nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE;
ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty);
nic_fifo_empty = qlge_read32(qdev, STS) & STS_NFE;
qlge_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty);
mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY;
if (nic_fifo_empty && mgmnt_fifo_empty)
return 0;
@ -1093,14 +1093,14 @@ int ql_wait_fifo_empty(struct ql_adapter *qdev)
/* API called in work thread context to set new TX/RX
* maximum frame size values to match MTU.
*/
static int ql_set_port_cfg(struct ql_adapter *qdev)
static int qlge_set_port_cfg(struct qlge_adapter *qdev)
{
int status;
status = ql_mb_set_port_cfg(qdev);
status = qlge_mb_set_port_cfg(qdev);
if (status)
return status;
status = ql_idc_wait(qdev);
status = qlge_idc_wait(qdev);
return status;
}
@ -1112,13 +1112,13 @@ static int ql_set_port_cfg(struct ql_adapter *qdev)
* from the firmware and, if necessary, changes them to match
* the MTU setting.
*/
void ql_mpi_port_cfg_work(struct work_struct *work)
void qlge_mpi_port_cfg_work(struct work_struct *work)
{
struct ql_adapter *qdev =
container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
struct qlge_adapter *qdev =
container_of(work, struct qlge_adapter, mpi_port_cfg_work.work);
int status;
status = ql_mb_get_port_cfg(qdev);
status = qlge_mb_get_port_cfg(qdev);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Bug: Failed to get port config data.\n");
@ -1131,7 +1131,7 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
qdev->link_config |= CFG_JUMBO_FRAME_SIZE;
qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
status = ql_set_port_cfg(qdev);
status = qlge_set_port_cfg(qdev);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Bug: Failed to set port config data.\n");
@ -1141,7 +1141,7 @@ end:
clear_bit(QL_PORT_CFG, &qdev->flags);
return;
err:
ql_queue_fw_error(qdev);
qlge_queue_fw_error(qdev);
goto end;
}
@ -1151,10 +1151,10 @@ err:
* has been made and then send a mailbox command ACKing
* the change request.
*/
void ql_mpi_idc_work(struct work_struct *work)
void qlge_mpi_idc_work(struct work_struct *work)
{
struct ql_adapter *qdev =
container_of(work, struct ql_adapter, mpi_idc_work.work);
struct qlge_adapter *qdev =
container_of(work, struct qlge_adapter, mpi_idc_work.work);
int status;
struct mbox_params *mbcp = &qdev->idc_mbc;
u32 aen;
@ -1170,7 +1170,7 @@ void ql_mpi_idc_work(struct work_struct *work)
break;
case MB_CMD_PORT_RESET:
case MB_CMD_STOP_FW:
ql_link_off(qdev);
qlge_link_off(qdev);
fallthrough;
case MB_CMD_SET_PORT_CFG:
/* Signal the resulting link up AEN
@ -1180,7 +1180,7 @@ void ql_mpi_idc_work(struct work_struct *work)
set_bit(QL_CAM_RT_SET, &qdev->flags);
/* Do ACK if required */
if (timeout) {
status = ql_mb_idc_ack(qdev);
status = qlge_mb_idc_ack(qdev);
if (status)
netif_err(qdev, drv, qdev->ndev,
"Bug: No pending IDC!\n");
@ -1202,7 +1202,7 @@ void ql_mpi_idc_work(struct work_struct *work)
/* Drop the link, reload the routing
* table when link comes up.
*/
ql_link_off(qdev);
qlge_link_off(qdev);
set_bit(QL_CAM_RT_SET, &qdev->flags);
fallthrough;
case MB_CMD_IOP_DVR_START:
@ -1213,7 +1213,7 @@ void ql_mpi_idc_work(struct work_struct *work)
case MB_CMD_IOP_NONE: /* an IDC without params */
/* Do ACK if required */
if (timeout) {
status = ql_mb_idc_ack(qdev);
status = qlge_mb_idc_ack(qdev);
if (status)
netif_err(qdev, drv, qdev->ndev,
"Bug: No pending IDC!\n");
@ -1226,54 +1226,54 @@ void ql_mpi_idc_work(struct work_struct *work)
}
}
void ql_mpi_work(struct work_struct *work)
void qlge_mpi_work(struct work_struct *work)
{
struct ql_adapter *qdev =
container_of(work, struct ql_adapter, mpi_work.work);
struct qlge_adapter *qdev =
container_of(work, struct qlge_adapter, mpi_work.work);
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
int err = 0;
mutex_lock(&qdev->mpi_mutex);
/* Begin polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
while (ql_read32(qdev, STS) & STS_PI) {
while (qlge_read32(qdev, STS) & STS_PI) {
memset(mbcp, 0, sizeof(struct mbox_params));
mbcp->out_count = 1;
/* Don't continue if an async event
* did not complete properly.
*/
err = ql_mpi_handler(qdev, mbcp);
err = qlge_mpi_handler(qdev, mbcp);
if (err)
break;
}
/* End polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
mutex_unlock(&qdev->mpi_mutex);
}
void ql_mpi_reset_work(struct work_struct *work)
void qlge_mpi_reset_work(struct work_struct *work)
{
struct ql_adapter *qdev =
container_of(work, struct ql_adapter, mpi_reset_work.work);
struct qlge_adapter *qdev =
container_of(work, struct qlge_adapter, mpi_reset_work.work);
cancel_delayed_work_sync(&qdev->mpi_work);
cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
cancel_delayed_work_sync(&qdev->mpi_idc_work);
/* If we're not the dominant NIC function,
* then there is nothing to do.
*/
if (!ql_own_firmware(qdev)) {
if (!qlge_own_firmware(qdev)) {
netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
return;
}
if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) {
if (qdev->mpi_coredump && !qlge_core_dump(qdev, qdev->mpi_coredump)) {
netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
qdev->core_is_dumped = 1;
queue_delayed_work(qdev->workqueue,
&qdev->mpi_core_to_log, 5 * HZ);
}
ql_soft_reset_mpi_risc(qdev);
qlge_soft_reset_mpi_risc(qdev);
}