SCSI misc on 20120531

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2.0.18 (GNU/Linux)
 
 iQEcBAABAgAGBQJPx1M+AAoJEDeqqVYsXL0MNOMH/jSbgDAHQskBuZMCEoVUHykZ
 3aKiPFJQfnF1nQqN/xxECGFc7glrKSHv1fpAG9wDk0HLHNhP+QoOBVYdDGHpzktk
 eP1hB6rWE/auJz90rIrKomJoD+cVYDRHkhlbNr1DsYBuXI+BGX0aUp+uAaajoxAT
 8wp4/Z5007llQQXnep2Z0AvzIWBdCeR4PBXX5YvalJ8Qz3Rj8bYeY10oDpx6nO7v
 iGcyh+h0Eo+q9KEQ3PosoDnqaskq44yTY4MWeE1Kd64fQM1JYTJo0SxOGGVxHHwQ
 ZLfhX+fH3jCyBP0qRzCqBvSKTuiWeMBc8POdLbLMnq6ClCgQTr41iHH7UTuXXjE=
 =fZOy
 -----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull final round of SCSI updates from James Bottomley:
 "This is primarily another round of driver updates (bnx2fc, qla2xxx,
  qla4xxx) including the target mode driver for qla2xxx.  We've also got
  a couple of regression fixes (async scanning, broken this merge window
  and a fix to a long standing break in the scsi_wait_scan module)."

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (45 commits)
  [SCSI] fix scsi_wait_scan
  [SCSI] fix async probe regression
  [SCSI] be2iscsi: fix dma free size mismatch regression
  [SCSI] qla4xxx: Update driver version to 5.02.00-k17
  [SCSI] qla4xxx: Capture minidump for ISP82XX on firmware failure
  [SCSI] qla4xxx: Add change_queue_depth API support
  [SCSI] qla4xxx: Fix clear ddb mbx command failure issue.
  [SCSI] qla4xxx: Fix kernel panic during discovery logout.
  [SCSI] qla4xxx: Correct early completion of pending mbox.
  [SCSI] fcoe, bnx2fc, libfcoe: SW FCoE and bnx2fc use FCoE Syfs
  [SCSI] libfcoe: Add fcoe_sysfs
  [SCSI] bnx2fc: Allocate fcoe_ctlr with bnx2fc_interface, not as a member
  [SCSI] fcoe: Allocate fcoe_ctlr with fcoe_interface, not as a member
  [SCSI] Fix dm-multipath starvation when scsi host is busy
  [SCSI] ufs: fix potential NULL pointer dereferencing error in ufshcd_prove.
  [SCSI] qla2xxx: don't free pool that wasn't allocated
  [SCSI] mptfusion: unlock on error in mpt_config()
  [SCSI] tcm_qla2xxx: Add >= 24xx series fabric module for target-core
  [SCSI] qla2xxx: Add LLD target-mode infrastructure for >= 24xx series
  [SCSI] Revert "qla2xxx: During loopdown perform Diagnostic loopback."
  ...
Linus Torvalds 2012-05-31 12:02:41 -07:00
commit 054552272e
56 changed files with 12124 additions and 673 deletions

@ -0,0 +1,77 @@
What: /sys/bus/fcoe/ctlr_X
Date: March 2012
KernelVersion: TBD
Contact: Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
Description: 'FCoE Controller' instances on the fcoe bus
Attributes:
fcf_dev_loss_tmo: Device loss timeout period (see below). Changing
this value will change the dev_loss_tmo for all
FCFs discovered by this controller.
lesb_link_fail: Link Error Status Block (LESB) link failure count.
lesb_vlink_fail: Link Error Status Block (LESB) virtual link
failure count.
lesb_miss_fka: Link Error Status Block (LESB) missed FCoE
Initialization Protocol (FIP) Keep-Alives (FKA).
lesb_symb_err: Link Error Status Block (LESB) symbolic error count.
lesb_err_block: Link Error Status Block (LESB) block error count.
lesb_fcs_error: Link Error Status Block (LESB) Fibre Channel
Services error count.
Notes: ctlr_X (global increment starting at 0)
What: /sys/bus/fcoe/fcf_X
Date: March 2012
KernelVersion: TBD
Contact: Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
Description: 'FCoE FCF' instances on the fcoe bus. An FCF is a Fibre Channel
Forwarder, which is an FCoE switch that can accept FCoE
(Ethernet) packets, unpack them, and forward the embedded
Fibre Channel frames into a FC fabric. It can also take
outbound FC frames and pack them in Ethernet packets to
be sent to their destination on the Ethernet segment.
Attributes:
fabric_name: Identifies the fabric that the FCF services.
switch_name: Identifies the FCF.
priority: The switch's priority amongst other FCFs on the same
fabric.
selected: 1 indicates that the switch has been selected for use;
0 indicates that the switch will not be used.
fc_map: The Fibre Channel MAP
vfid: The Virtual Fabric ID
mac: The FCF's MAC address
fka_period: The FIP Keep-Alive period
fabric_state: The internal kernel state
"Unknown" - Initialization value
"Disconnected" - No link to the FCF/fabric
"Connected" - Host is connected to the FCF
"Deleted" - FCF is being removed from the system
dev_loss_tmo: The device loss timeout period for this FCF.
Notes: A device loss infrastructure similar to the FC Transport's
is present in fcoe_sysfs. It exists so that a
link-flapping adapter doesn't continually advance the count
used to identify the discovered FCF. FCFs will exist in a
"Disconnected" state until either the timer expires and the
FCF becomes "Deleted" or the FCF is rediscovered and becomes
"Connected."
Users: The first user of this interface will be the fcoeadm application,
which is commonly packaged in the fcoe-utils package.
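
The attributes above are ordinary sysfs text files, so they are readable by any
tool, not only fcoeadm. A minimal user-space sketch (the ctlr_0 instance name is
illustrative; real instances are enumerated under /sys/bus/fcoe/):

#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/bus/fcoe/ctlr_0/fcf_dev_loss_tmo", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("fcf_dev_loss_tmo: %s", buf);
	fclose(f);
	return 0;
}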

@ -6483,6 +6483,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
printk(MYIOC_s_INFO_FMT "%s: host reset in"
" progress mpt_config timed out.!!\n",
__func__, ioc->name);
mutex_unlock(&ioc->mptbase_cmds.mutex);
return -EFAULT;
}
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
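
The single added line above is the whole fix: the timeout path returned -EFAULT
while ioc->mptbase_cmds.mutex was still held, leaving it locked forever. A
user-space rendering of the bug class, with hypothetical names (a pthread mutex
standing in for the kernel mutex):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Every early return taken while the lock is held must drop it first. */
static int do_config(bool timed_out)
{
	pthread_mutex_lock(&lock);
	if (timed_out) {
		pthread_mutex_unlock(&lock);	/* the unlock the fix adds */
		return -EFAULT;
	}
	/* ... normal processing ... */
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	printf("ok path: %d, timeout path: %d\n",
	       do_config(false), do_config(true));
	return 0;
}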

@ -571,13 +571,12 @@ free_cmd:
static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
int iscsi_cmd, int size)
{
cmd->va = pci_alloc_consistent(phba->ctrl.pdev, sizeof(size),
&cmd->dma);
cmd->va = pci_alloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
if (!cmd->va) {
SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for if info\n");
return -ENOMEM;
}
memset(cmd->va, 0, sizeof(size));
memset(cmd->va, 0, size);
cmd->size = size;
be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size);
return 0;
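
The change above is small but significant: size is an int, so sizeof(size) is
the size of the integer type (typically 4 bytes), not the requested length; the
allocation and the memset() therefore covered only 4 bytes. A standalone
demonstration of the bug class:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[512];
	int size = (int)sizeof(buf);

	/* The bug: sizeof(size) is the size of the int variable itself
	 * (typically 4), not the 512 bytes the caller asked for. */
	memset(buf, 0xff, sizeof(buf));
	memset(buf, 0, sizeof(size));		/* zeroes only 4 bytes */
	printf("sizeof(size) = %zu, size = %d, buf[4] = %d\n",
	       sizeof(size), size, buf[4]);
	return 0;
}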

@ -426,6 +426,23 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
vshost = vport->drv_port.im_port->shost;
fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn);
fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn);
fc_host_supported_classes(vshost) = FC_COS_CLASS3;
memset(fc_host_supported_fc4s(vshost), 0,
sizeof(fc_host_supported_fc4s(vshost)));
/* For FCP type 0x08 */
if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
fc_host_supported_fc4s(vshost)[2] = 1;
/* For fibre channel services type 0x20 */
fc_host_supported_fc4s(vshost)[7] = 1;
fc_host_supported_speeds(vshost) =
bfad_im_supported_speeds(&bfad->bfa);
fc_host_maxframe_size(vshost) =
bfa_fcport_get_maxfrsize(&bfad->bfa);
fc_vport->dd_data = vport;
vport->drv_port.im_port->fc_vport = fc_vport;
} else if (rc == BFA_STATUS_INVALID_WWN)

@ -987,7 +987,7 @@ done:
return 0;
}
static u32
u32
bfad_im_supported_speeds(struct bfa_s *bfa)
{
struct bfa_ioc_attr_s *ioc_attr;

@ -37,6 +37,7 @@ int bfad_im_scsi_host_alloc(struct bfad_s *bfad,
struct bfad_im_port_s *im_port, struct device *dev);
void bfad_im_scsi_host_free(struct bfad_s *bfad,
struct bfad_im_port_s *im_port);
u32 bfad_im_supported_speeds(struct bfa_s *bfa);
#define MAX_FCP_TARGET 1024
#define MAX_FCP_LUN 16384

@ -62,7 +62,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
#define BNX2FC_VERSION "1.0.10"
#define BNX2FC_VERSION "1.0.11"
#define PFX "bnx2fc: "
@ -228,13 +228,16 @@ struct bnx2fc_interface {
struct packet_type fip_packet_type;
struct workqueue_struct *timer_work_queue;
struct kref kref;
struct fcoe_ctlr ctlr;
u8 vlan_enabled;
int vlan_id;
bool enabled;
};
#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr)
#define bnx2fc_from_ctlr(x) \
((struct bnx2fc_interface *)((x) + 1))
#define bnx2fc_to_ctlr(x) \
((struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1))
struct bnx2fc_lport {
struct list_head list;
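
The rewritten bnx2fc_from_ctlr()/bnx2fc_to_ctlr() macros above work because the
interface is now carved out of a single allocation in which a struct fcoe_ctlr
sits immediately in front of the struct bnx2fc_interface (see the
fcoe_ctlr_device_add() call later in this series), so each structure is
reachable from the other by pointer arithmetic rather than container_of(). A
self-contained sketch of the layout trick, with hypothetical type names:

#include <stdio.h>
#include <stdlib.h>

struct ctlr { int mode; };
struct iface { int vlan_id; };

/* The interface lives directly after the controller in one allocation. */
#define iface_from_ctlr(c) ((struct iface *)((c) + 1))
#define ctlr_from_iface(i) ((struct ctlr *)(i) - 1)

int main(void)
{
	struct ctlr *c = calloc(1, sizeof(struct ctlr) + sizeof(struct iface));
	struct iface *i = iface_from_ctlr(c);

	i->vlan_id = 100;
	printf("vlan_id via ctlr: %d\n", iface_from_ctlr(c)->vlan_id);
	printf("round trip ok: %d\n", ctlr_from_iface(i) == c);
	free(c);
	return 0;
}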

@ -854,7 +854,6 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
struct fc_exch *exch = fc_seq_exch(seq);
struct fc_lport *lport = exch->lp;
u8 *mac;
struct fc_frame_header *fh;
u8 op;
if (IS_ERR(fp))
@ -862,13 +861,6 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
mac = fr_cb(fp)->granted_mac;
if (is_zero_ether_addr(mac)) {
fh = fc_frame_header_get(fp);
if (fh->fh_type != FC_TYPE_ELS) {
printk(KERN_ERR PFX "bnx2fc_flogi_resp:"
"fh_type != FC_TYPE_ELS\n");
fc_frame_free(fp);
return;
}
op = fc_frame_payload_op(fp);
if (lport->vport) {
if (op == ELS_LS_RJT) {
@ -878,12 +870,10 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
return;
}
}
if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
fc_frame_free(fp);
return;
}
fcoe_ctlr_recv_flogi(fip, lport, fp);
}
fip->update_mac(lport, mac);
if (!is_zero_ether_addr(mac))
fip->update_mac(lport, mac);
done:
fc_lport_flogi_resp(seq, fp, lport);
}
@ -910,7 +900,7 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
{
struct fcoe_port *port = lport_priv(lport);
struct bnx2fc_interface *interface = port->priv;
struct fcoe_ctlr *fip = &interface->ctlr;
struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface);
struct fc_frame_header *fh = fc_frame_header_get(fp);
switch (op) {

@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
#define DRV_MODULE_NAME "bnx2fc"
#define DRV_MODULE_VERSION BNX2FC_VERSION
#define DRV_MODULE_RELDATE "Jan 22, 2011"
#define DRV_MODULE_RELDATE "Apr 24, 2012"
static char version[] __devinitdata =
@ -54,6 +54,7 @@ static struct cnic_ulp_ops bnx2fc_cnic_cb;
static struct libfc_function_template bnx2fc_libfc_fcn_templ;
static struct scsi_host_template bnx2fc_shost_template;
static struct fc_function_template bnx2fc_transport_function;
static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ;
static struct fc_function_template bnx2fc_vport_xport_function;
static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
static void __bnx2fc_destroy(struct bnx2fc_interface *interface);
@ -88,6 +89,7 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport);
static void bnx2fc_stop(struct bnx2fc_interface *interface);
static int __init bnx2fc_mod_init(void);
static void __exit bnx2fc_mod_exit(void);
static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev);
unsigned int bnx2fc_debug_level;
module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
@ -118,6 +120,41 @@ static void bnx2fc_get_lesb(struct fc_lport *lport,
__fcoe_get_lesb(lport, fc_lesb, netdev);
}
static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
{
struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
struct net_device *netdev = bnx2fc_netdev(fip->lp);
struct fcoe_fc_els_lesb *fcoe_lesb;
struct fc_els_lesb fc_lesb;
__fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
ctlr_dev->lesb.lesb_link_fail =
ntohl(fcoe_lesb->lesb_link_fail);
ctlr_dev->lesb.lesb_vlink_fail =
ntohl(fcoe_lesb->lesb_vlink_fail);
ctlr_dev->lesb.lesb_miss_fka =
ntohl(fcoe_lesb->lesb_miss_fka);
ctlr_dev->lesb.lesb_symb_err =
ntohl(fcoe_lesb->lesb_symb_err);
ctlr_dev->lesb.lesb_err_block =
ntohl(fcoe_lesb->lesb_err_block);
ctlr_dev->lesb.lesb_fcs_error =
ntohl(fcoe_lesb->lesb_fcs_error);
}
EXPORT_SYMBOL(bnx2fc_ctlr_get_lesb);
static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
{
struct fcoe_ctlr_device *ctlr_dev =
fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
struct bnx2fc_interface *fcoe = fcoe_ctlr_priv(ctlr);
fcf_dev->vlan_id = fcoe->vlan_id;
}
static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
{
struct fcoe_percpu_s *bg;
@ -244,6 +281,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
struct sk_buff *skb;
struct fc_frame_header *fh;
struct bnx2fc_interface *interface;
struct fcoe_ctlr *ctlr;
struct bnx2fc_hba *hba;
struct fcoe_port *port;
struct fcoe_hdr *hp;
@ -256,6 +294,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
port = (struct fcoe_port *)lport_priv(lport);
interface = port->priv;
ctlr = bnx2fc_to_ctlr(interface);
hba = interface->hba;
fh = fc_frame_header_get(fp);
@ -268,12 +307,12 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
}
if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
if (!interface->ctlr.sel_fcf) {
if (!ctlr->sel_fcf) {
BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
kfree_skb(skb);
return -EINVAL;
}
if (fcoe_ctlr_els_send(&interface->ctlr, lport, skb))
if (fcoe_ctlr_els_send(ctlr, lport, skb))
return 0;
}
@ -346,14 +385,14 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
/* fill up mac and fcoe headers */
eh = eth_hdr(skb);
eh->h_proto = htons(ETH_P_FCOE);
if (interface->ctlr.map_dest)
if (ctlr->map_dest)
fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
else
/* insert GW address */
memcpy(eh->h_dest, interface->ctlr.dest_addr, ETH_ALEN);
memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
if (unlikely(interface->ctlr.flogi_oxid != FC_XID_UNKNOWN))
memcpy(eh->h_source, interface->ctlr.ctl_src_addr, ETH_ALEN);
if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
else
memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
@ -403,6 +442,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
{
struct fc_lport *lport;
struct bnx2fc_interface *interface;
struct fcoe_ctlr *ctlr;
struct fc_frame_header *fh;
struct fcoe_rcv_info *fr;
struct fcoe_percpu_s *bg;
@ -410,7 +450,8 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
interface = container_of(ptype, struct bnx2fc_interface,
fcoe_packet_type);
lport = interface->ctlr.lp;
ctlr = bnx2fc_to_ctlr(interface);
lport = ctlr->lp;
if (unlikely(lport == NULL)) {
printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n");
@ -758,11 +799,13 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
{
struct bnx2fc_hba *hba;
struct bnx2fc_interface *interface;
struct fcoe_ctlr *ctlr;
struct fcoe_port *port;
u64 wwnn, wwpn;
port = lport_priv(lport);
interface = port->priv;
ctlr = bnx2fc_to_ctlr(interface);
hba = interface->hba;
/* require support for get_pauseparam ethtool op. */
@ -781,13 +824,13 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
if (!lport->vport) {
if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr,
wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
1, 0);
BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
fc_set_wwnn(lport, wwnn);
if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr,
wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
2, 0);
BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
@ -824,6 +867,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
struct fc_lport *lport;
struct fc_lport *vport;
struct bnx2fc_interface *interface, *tmp;
struct fcoe_ctlr *ctlr;
int wait_for_upload = 0;
u32 link_possible = 1;
@ -874,7 +918,8 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
if (interface->hba != hba)
continue;
lport = interface->ctlr.lp;
ctlr = bnx2fc_to_ctlr(interface);
lport = ctlr->lp;
BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
interface->netdev->name, event);
@ -889,8 +934,8 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
* on a stale vlan
*/
if (interface->enabled)
fcoe_ctlr_link_up(&interface->ctlr);
} else if (fcoe_ctlr_link_down(&interface->ctlr)) {
fcoe_ctlr_link_up(ctlr);
} else if (fcoe_ctlr_link_down(ctlr)) {
mutex_lock(&lport->lp_mutex);
list_for_each_entry(vport, &lport->vports, list)
fc_host_port_type(vport->host) =
@ -995,9 +1040,11 @@ static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
struct net_device *orig_dev)
{
struct bnx2fc_interface *interface;
struct fcoe_ctlr *ctlr;
interface = container_of(ptype, struct bnx2fc_interface,
fip_packet_type);
fcoe_ctlr_recv(&interface->ctlr, skb);
ctlr = bnx2fc_to_ctlr(interface);
fcoe_ctlr_recv(ctlr, skb);
return 0;
}
@ -1155,6 +1202,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
{
struct net_device *netdev = interface->netdev;
struct net_device *physdev = interface->hba->phys_dev;
struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct netdev_hw_addr *ha;
int sel_san_mac = 0;
@ -1169,7 +1217,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
(is_valid_ether_addr(ha->addr))) {
memcpy(interface->ctlr.ctl_src_addr, ha->addr,
memcpy(ctlr->ctl_src_addr, ha->addr,
ETH_ALEN);
sel_san_mac = 1;
BNX2FC_MISC_DBG("Found SAN MAC\n");
@ -1224,19 +1272,23 @@ static void bnx2fc_release_transport(void)
static void bnx2fc_interface_release(struct kref *kref)
{
struct fcoe_ctlr_device *ctlr_dev;
struct bnx2fc_interface *interface;
struct fcoe_ctlr *ctlr;
struct net_device *netdev;
interface = container_of(kref, struct bnx2fc_interface, kref);
BNX2FC_MISC_DBG("Interface is being released\n");
ctlr = bnx2fc_to_ctlr(interface);
ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
netdev = interface->netdev;
/* tear-down FIP controller */
if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags))
fcoe_ctlr_destroy(&interface->ctlr);
fcoe_ctlr_destroy(ctlr);
kfree(interface);
fcoe_ctlr_device_delete(ctlr_dev);
dev_put(netdev);
module_put(THIS_MODULE);
@ -1329,33 +1381,40 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
struct net_device *netdev,
enum fip_state fip_mode)
{
struct fcoe_ctlr_device *ctlr_dev;
struct bnx2fc_interface *interface;
struct fcoe_ctlr *ctlr;
int size;
int rc = 0;
interface = kzalloc(sizeof(*interface), GFP_KERNEL);
if (!interface) {
size = (sizeof(*interface) + sizeof(struct fcoe_ctlr));
ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &bnx2fc_fcoe_sysfs_templ,
size);
if (!ctlr_dev) {
printk(KERN_ERR PFX "Unable to allocate interface structure\n");
return NULL;
}
ctlr = fcoe_ctlr_device_priv(ctlr_dev);
interface = fcoe_ctlr_priv(ctlr);
dev_hold(netdev);
kref_init(&interface->kref);
interface->hba = hba;
interface->netdev = netdev;
/* Initialize FIP */
fcoe_ctlr_init(&interface->ctlr, fip_mode);
interface->ctlr.send = bnx2fc_fip_send;
interface->ctlr.update_mac = bnx2fc_update_src_mac;
interface->ctlr.get_src_addr = bnx2fc_get_src_mac;
fcoe_ctlr_init(ctlr, fip_mode);
ctlr->send = bnx2fc_fip_send;
ctlr->update_mac = bnx2fc_update_src_mac;
ctlr->get_src_addr = bnx2fc_get_src_mac;
set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags);
rc = bnx2fc_interface_setup(interface);
if (!rc)
return interface;
fcoe_ctlr_destroy(&interface->ctlr);
fcoe_ctlr_destroy(ctlr);
dev_put(netdev);
kfree(interface);
fcoe_ctlr_device_delete(ctlr_dev);
return NULL;
}
@ -1373,6 +1432,7 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
struct device *parent, int npiv)
{
struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct fc_lport *lport, *n_port;
struct fcoe_port *port;
struct Scsi_Host *shost;
@ -1383,7 +1443,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
if (!blport) {
BNX2FC_HBA_DBG(interface->ctlr.lp, "Unable to alloc blport\n");
BNX2FC_HBA_DBG(ctlr->lp, "Unable to alloc blport\n");
return NULL;
}
@ -1479,7 +1539,8 @@ static void bnx2fc_net_cleanup(struct bnx2fc_interface *interface)
static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface)
{
struct fc_lport *lport = interface->ctlr.lp;
struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct fc_lport *lport = ctlr->lp;
struct fcoe_port *port = lport_priv(lport);
struct bnx2fc_hba *hba = interface->hba;
@ -1519,7 +1580,8 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
{
struct fc_lport *lport = interface->ctlr.lp;
struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct fc_lport *lport = ctlr->lp;
struct fcoe_port *port = lport_priv(lport);
bnx2fc_interface_cleanup(interface);
@ -1543,13 +1605,15 @@ static int bnx2fc_destroy(struct net_device *netdev)
{
struct bnx2fc_interface *interface = NULL;
struct workqueue_struct *timer_work_queue;
struct fcoe_ctlr *ctlr;
int rc = 0;
rtnl_lock();
mutex_lock(&bnx2fc_dev_lock);
interface = bnx2fc_interface_lookup(netdev);
if (!interface || !interface->ctlr.lp) {
ctlr = bnx2fc_to_ctlr(interface);
if (!interface || !ctlr->lp) {
rc = -ENODEV;
printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n");
goto netdev_err;
@ -1646,6 +1710,7 @@ static void bnx2fc_ulp_start(void *handle)
{
struct bnx2fc_hba *hba = handle;
struct bnx2fc_interface *interface;
struct fcoe_ctlr *ctlr;
struct fc_lport *lport;
mutex_lock(&bnx2fc_dev_lock);
@ -1657,7 +1722,8 @@ static void bnx2fc_ulp_start(void *handle)
list_for_each_entry(interface, &if_list, list) {
if (interface->hba == hba) {
lport = interface->ctlr.lp;
ctlr = bnx2fc_to_ctlr(interface);
lport = ctlr->lp;
/* Kick off Fabric discovery*/
printk(KERN_ERR PFX "ulp_init: start discovery\n");
lport->tt.frame_send = bnx2fc_xmit;
@ -1677,13 +1743,14 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport)
static void bnx2fc_stop(struct bnx2fc_interface *interface)
{
struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct fc_lport *lport;
struct fc_lport *vport;
if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags))
return;
lport = interface->ctlr.lp;
lport = ctlr->lp;
bnx2fc_port_shutdown(lport);
mutex_lock(&lport->lp_mutex);
@ -1692,7 +1759,7 @@ static void bnx2fc_stop(struct bnx2fc_interface *interface)
FC_PORTTYPE_UNKNOWN;
mutex_unlock(&lport->lp_mutex);
fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
fcoe_ctlr_link_down(&interface->ctlr);
fcoe_ctlr_link_down(ctlr);
fcoe_clean_pending_queue(lport);
}
@ -1804,6 +1871,7 @@ exit:
static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
{
struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct fc_lport *lport;
int wait_cnt = 0;
@ -1814,18 +1882,18 @@ static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
return;
}
lport = interface->ctlr.lp;
lport = ctlr->lp;
BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
if (!bnx2fc_link_ok(lport) && interface->enabled) {
BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
fcoe_ctlr_link_up(&interface->ctlr);
fcoe_ctlr_link_up(ctlr);
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
}
/* wait for the FCF to be selected before issuing FLOGI */
while (!interface->ctlr.sel_fcf) {
while (!ctlr->sel_fcf) {
msleep(250);
/* give up after 3 secs */
if (++wait_cnt > 12)
@ -1889,19 +1957,21 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
static int bnx2fc_disable(struct net_device *netdev)
{
struct bnx2fc_interface *interface;
struct fcoe_ctlr *ctlr;
int rc = 0;
rtnl_lock();
mutex_lock(&bnx2fc_dev_lock);
interface = bnx2fc_interface_lookup(netdev);
if (!interface || !interface->ctlr.lp) {
ctlr = bnx2fc_to_ctlr(interface);
if (!interface || !ctlr->lp) {
rc = -ENODEV;
printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
} else {
interface->enabled = false;
fcoe_ctlr_link_down(&interface->ctlr);
fcoe_clean_pending_queue(interface->ctlr.lp);
fcoe_ctlr_link_down(ctlr);
fcoe_clean_pending_queue(ctlr->lp);
}
mutex_unlock(&bnx2fc_dev_lock);
@ -1913,17 +1983,19 @@ static int bnx2fc_disable(struct net_device *netdev)
static int bnx2fc_enable(struct net_device *netdev)
{
struct bnx2fc_interface *interface;
struct fcoe_ctlr *ctlr;
int rc = 0;
rtnl_lock();
mutex_lock(&bnx2fc_dev_lock);
interface = bnx2fc_interface_lookup(netdev);
if (!interface || !interface->ctlr.lp) {
ctlr = bnx2fc_to_ctlr(interface);
if (!interface || !ctlr->lp) {
rc = -ENODEV;
printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
} else if (!bnx2fc_link_ok(interface->ctlr.lp)) {
fcoe_ctlr_link_up(&interface->ctlr);
} else if (!bnx2fc_link_ok(ctlr->lp)) {
fcoe_ctlr_link_up(ctlr);
interface->enabled = true;
}
@ -1944,6 +2016,7 @@ static int bnx2fc_enable(struct net_device *netdev)
*/
static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
{
struct fcoe_ctlr *ctlr;
struct bnx2fc_interface *interface;
struct bnx2fc_hba *hba;
struct net_device *phys_dev;
@ -2010,6 +2083,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
goto ifput_err;
}
ctlr = bnx2fc_to_ctlr(interface);
interface->vlan_id = vlan_id;
interface->vlan_enabled = 1;
@ -2035,10 +2109,10 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
lport->boot_time = jiffies;
/* Make this master N_port */
interface->ctlr.lp = lport;
ctlr->lp = lport;
if (!bnx2fc_link_ok(lport)) {
fcoe_ctlr_link_up(&interface->ctlr);
fcoe_ctlr_link_up(ctlr);
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
}
@ -2439,6 +2513,19 @@ static void __exit bnx2fc_mod_exit(void)
module_init(bnx2fc_mod_init);
module_exit(bnx2fc_mod_exit);
static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = {
.get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
.get_fcoe_ctlr_link_fail = bnx2fc_ctlr_get_lesb,
.get_fcoe_ctlr_vlink_fail = bnx2fc_ctlr_get_lesb,
.get_fcoe_ctlr_miss_fka = bnx2fc_ctlr_get_lesb,
.get_fcoe_ctlr_symb_err = bnx2fc_ctlr_get_lesb,
.get_fcoe_ctlr_err_block = bnx2fc_ctlr_get_lesb,
.get_fcoe_ctlr_fcs_error = bnx2fc_ctlr_get_lesb,
.get_fcoe_fcf_selected = fcoe_fcf_get_selected,
.get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id,
};
static struct fc_function_template bnx2fc_transport_function = {
.show_host_node_name = 1,
.show_host_port_name = 1,

@ -167,6 +167,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
{
struct fc_lport *lport = port->lport;
struct bnx2fc_interface *interface = port->priv;
struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct bnx2fc_hba *hba = interface->hba;
struct kwqe *kwqe_arr[4];
struct fcoe_kwqe_conn_offload1 ofld_req1;
@ -314,13 +315,13 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
ofld_req4.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
ofld_req4.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
/* fcf mac */
ofld_req4.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
ofld_req4.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
ofld_req4.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
ofld_req4.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
ofld_req4.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
@ -351,6 +352,7 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
{
struct kwqe *kwqe_arr[2];
struct bnx2fc_interface *interface = port->priv;
struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct bnx2fc_hba *hba = interface->hba;
struct fcoe_kwqe_conn_enable_disable enbl_req;
struct fc_lport *lport = port->lport;
@ -374,12 +376,12 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
enbl_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
enbl_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
enbl_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
enbl_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
enbl_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
enbl_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
enbl_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
enbl_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
port_id = fc_host_port_id(lport->host);
if (port_id != tgt->sid) {
@ -419,6 +421,7 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
struct bnx2fc_rport *tgt)
{
struct bnx2fc_interface *interface = port->priv;
struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct bnx2fc_hba *hba = interface->hba;
struct fcoe_kwqe_conn_enable_disable disable_req;
struct kwqe *kwqe_arr[2];
@ -440,12 +443,12 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];
disable_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
disable_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
disable_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
disable_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
disable_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
disable_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
disable_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
disable_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
port_id = tgt->sid;
disable_req.s_id[0] = (port_id & 0x000000FF);

@ -810,8 +810,22 @@ retry_tmf:
spin_lock_bh(&tgt->tgt_lock);
io_req->wait_for_comp = 0;
if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags)))
if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
if (io_req->on_tmf_queue) {
list_del_init(&io_req->link);
io_req->on_tmf_queue = 0;
}
io_req->wait_for_comp = 1;
bnx2fc_initiate_cleanup(io_req);
spin_unlock_bh(&tgt->tgt_lock);
rc = wait_for_completion_timeout(&io_req->tm_done,
BNX2FC_FW_TIMEOUT);
spin_lock_bh(&tgt->tgt_lock);
io_req->wait_for_comp = 0;
if (!rc)
kref_put(&io_req->refcount, bnx2fc_cmd_release);
}
spin_unlock_bh(&tgt->tgt_lock);
@ -1089,6 +1103,48 @@ int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}
int bnx2fc_expl_logo(struct fc_lport *lport, struct bnx2fc_cmd *io_req)
{
struct bnx2fc_rport *tgt = io_req->tgt;
struct fc_rport_priv *rdata = tgt->rdata;
int logo_issued;
int rc = SUCCESS;
int wait_cnt = 0;
BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
tgt->flags);
logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
&tgt->flags);
io_req->wait_for_comp = 1;
bnx2fc_initiate_cleanup(io_req);
spin_unlock_bh(&tgt->tgt_lock);
wait_for_completion(&io_req->tm_done);
io_req->wait_for_comp = 0;
/*
* release the reference taken in eh_abort to allow the
* target to re-login after flushing IOs
*/
kref_put(&io_req->refcount, bnx2fc_cmd_release);
if (!logo_issued) {
clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
mutex_lock(&lport->disc.disc_mutex);
lport->tt.rport_logoff(rdata);
mutex_unlock(&lport->disc.disc_mutex);
do {
msleep(BNX2FC_RELOGIN_WAIT_TIME);
if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT) {
rc = FAILED;
break;
}
} while (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags));
}
spin_lock_bh(&tgt->tgt_lock);
return rc;
}
/**
* bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
* SCSI command
@ -1103,10 +1159,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
struct fc_rport_libfc_priv *rp = rport->dd_data;
struct bnx2fc_cmd *io_req;
struct fc_lport *lport;
struct fc_rport_priv *rdata;
struct bnx2fc_rport *tgt;
int logo_issued;
int wait_cnt = 0;
int rc = FAILED;
@ -1183,58 +1236,31 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
list_add_tail(&io_req->link, &tgt->io_retire_queue);
init_completion(&io_req->tm_done);
io_req->wait_for_comp = 1;
if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
/* Cancel the current timer running on this io_req */
if (cancel_delayed_work(&io_req->timeout_work))
kref_put(&io_req->refcount,
bnx2fc_cmd_release); /* drop timer hold */
set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
rc = bnx2fc_initiate_abts(io_req);
} else {
if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
"already in abts processing\n", io_req->xid);
if (cancel_delayed_work(&io_req->timeout_work))
kref_put(&io_req->refcount,
bnx2fc_cmd_release); /* drop timer hold */
rc = bnx2fc_expl_logo(lport, io_req);
goto out;
}
/* Cancel the current timer running on this io_req */
if (cancel_delayed_work(&io_req->timeout_work))
kref_put(&io_req->refcount,
bnx2fc_cmd_release); /* drop timer hold */
set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
io_req->wait_for_comp = 1;
rc = bnx2fc_initiate_abts(io_req);
if (rc == FAILED) {
bnx2fc_initiate_cleanup(io_req);
spin_unlock_bh(&tgt->tgt_lock);
wait_for_completion(&io_req->tm_done);
spin_lock_bh(&tgt->tgt_lock);
io_req->wait_for_comp = 0;
rdata = io_req->tgt->rdata;
logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
&tgt->flags);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
if (!logo_issued) {
BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
tgt->flags);
mutex_lock(&lport->disc.disc_mutex);
lport->tt.rport_logoff(rdata);
mutex_unlock(&lport->disc.disc_mutex);
do {
msleep(BNX2FC_RELOGIN_WAIT_TIME);
/*
* If session not recovered, let SCSI-ml
* escalate error recovery.
*/
if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT)
return FAILED;
} while (!test_bit(BNX2FC_FLAG_SESSION_READY,
&tgt->flags));
}
return SUCCESS;
}
if (rc == FAILED) {
kref_put(&io_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
return rc;
goto done;
}
spin_unlock_bh(&tgt->tgt_lock);
@ -1247,7 +1273,8 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
/* Let the scsi-ml try to recover this command */
printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
io_req->xid);
rc = FAILED;
rc = bnx2fc_expl_logo(lport, io_req);
goto out;
} else {
/*
* We come here even when there was a race condition
@ -1259,9 +1286,10 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
bnx2fc_scsi_done(io_req, DID_ABORT);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
}
done:
/* release the reference taken in eh_abort */
kref_put(&io_req->refcount, bnx2fc_cmd_release);
out:
spin_unlock_bh(&tgt->tgt_lock);
return rc;
}

@ -185,6 +185,16 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
BUG_ON(rc);
}
list_for_each_safe(list, tmp, &tgt->active_tm_queue) {
i++;
io_req = (struct bnx2fc_cmd *)list;
list_del_init(&io_req->link);
io_req->on_tmf_queue = 0;
BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n");
if (io_req->wait_for_comp)
complete(&io_req->tm_done);
}
list_for_each_safe(list, tmp, &tgt->els_queue) {
i++;
io_req = (struct bnx2fc_cmd *)list;
@ -213,8 +223,17 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
BNX2FC_IO_DBG(io_req, "retire_queue flush\n");
if (cancel_delayed_work(&io_req->timeout_work))
if (cancel_delayed_work(&io_req->timeout_work)) {
if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
&io_req->req_flags)) {
/* Handle eh_abort timeout */
BNX2FC_IO_DBG(io_req, "eh_abort for IO "
"in retire_q\n");
if (io_req->wait_for_comp)
complete(&io_req->tm_done);
}
kref_put(&io_req->refcount, bnx2fc_cmd_release);
}
clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
}

@ -1,4 +1,4 @@
obj-$(CONFIG_FCOE) += fcoe.o
obj-$(CONFIG_LIBFCOE) += libfcoe.o
libfcoe-objs := fcoe_ctlr.o fcoe_transport.o
libfcoe-objs := fcoe_ctlr.o fcoe_transport.o fcoe_sysfs.o

@ -41,6 +41,7 @@
#include <scsi/fc/fc_encaps.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
@ -150,6 +151,21 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
static int fcoe_vport_disable(struct fc_vport *, bool disable);
static void fcoe_set_vport_symbolic_name(struct fc_vport *);
static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *);
static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
.get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
.get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
.get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
.get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
.get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb,
.get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb,
.get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb,
.get_fcoe_fcf_selected = fcoe_fcf_get_selected,
.get_fcoe_fcf_vlan_id = fcoe_fcf_get_vlan_id,
};
static struct libfc_function_template fcoe_libfc_fcn_templ = {
.frame_send = fcoe_xmit,
@ -282,7 +298,7 @@ static struct scsi_host_template fcoe_shost_template = {
static int fcoe_interface_setup(struct fcoe_interface *fcoe,
struct net_device *netdev)
{
struct fcoe_ctlr *fip = &fcoe->ctlr;
struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
struct netdev_hw_addr *ha;
struct net_device *real_dev;
u8 flogi_maddr[ETH_ALEN];
@ -366,7 +382,10 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
enum fip_state fip_mode)
{
struct fcoe_ctlr_device *ctlr_dev;
struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
int size;
int err;
if (!try_module_get(THIS_MODULE)) {
@ -376,27 +395,32 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
goto out;
}
fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
if (!fcoe) {
FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
size = sizeof(struct fcoe_ctlr) + sizeof(struct fcoe_interface);
ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &fcoe_sysfs_templ,
size);
if (!ctlr_dev) {
FCOE_DBG("Failed to add fcoe_ctlr_device\n");
fcoe = ERR_PTR(-ENOMEM);
goto out_putmod;
}
ctlr = fcoe_ctlr_device_priv(ctlr_dev);
fcoe = fcoe_ctlr_priv(ctlr);
dev_hold(netdev);
/*
* Initialize FIP.
*/
fcoe_ctlr_init(&fcoe->ctlr, fip_mode);
fcoe->ctlr.send = fcoe_fip_send;
fcoe->ctlr.update_mac = fcoe_update_src_mac;
fcoe->ctlr.get_src_addr = fcoe_get_src_mac;
fcoe_ctlr_init(ctlr, fip_mode);
ctlr->send = fcoe_fip_send;
ctlr->update_mac = fcoe_update_src_mac;
ctlr->get_src_addr = fcoe_get_src_mac;
err = fcoe_interface_setup(fcoe, netdev);
if (err) {
fcoe_ctlr_destroy(&fcoe->ctlr);
kfree(fcoe);
fcoe_ctlr_destroy(ctlr);
fcoe_ctlr_device_delete(ctlr_dev);
dev_put(netdev);
fcoe = ERR_PTR(err);
goto out_putmod;
@ -419,7 +443,7 @@ out:
static void fcoe_interface_remove(struct fcoe_interface *fcoe)
{
struct net_device *netdev = fcoe->netdev;
struct fcoe_ctlr *fip = &fcoe->ctlr;
struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
u8 flogi_maddr[ETH_ALEN];
const struct net_device_ops *ops;
@ -462,7 +486,8 @@ static void fcoe_interface_remove(struct fcoe_interface *fcoe)
static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
{
struct net_device *netdev = fcoe->netdev;
struct fcoe_ctlr *fip = &fcoe->ctlr;
struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
rtnl_lock();
if (!fcoe->removed)
@ -472,8 +497,8 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
/* Release the self-reference taken during fcoe_interface_create() */
/* tear-down the FCoE controller */
fcoe_ctlr_destroy(fip);
scsi_host_put(fcoe->ctlr.lp->host);
kfree(fcoe);
scsi_host_put(fip->lp->host);
fcoe_ctlr_device_delete(ctlr_dev);
dev_put(netdev);
module_put(THIS_MODULE);
}
@ -493,9 +518,11 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
struct net_device *orig_dev)
{
struct fcoe_interface *fcoe;
struct fcoe_ctlr *ctlr;
fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
fcoe_ctlr_recv(&fcoe->ctlr, skb);
ctlr = fcoe_to_ctlr(fcoe);
fcoe_ctlr_recv(ctlr, skb);
return 0;
}
@ -645,11 +672,13 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
u32 mfs;
u64 wwnn, wwpn;
struct fcoe_interface *fcoe;
struct fcoe_ctlr *ctlr;
struct fcoe_port *port;
/* Setup lport private data to point to fcoe softc */
port = lport_priv(lport);
fcoe = port->priv;
ctlr = fcoe_to_ctlr(fcoe);
/*
* Determine max frame size based on underlying device and optional
@ -676,10 +705,10 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
if (!lport->vport) {
if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, 1, 0);
fc_set_wwnn(lport, wwnn);
if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr,
wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
2, 0);
fc_set_wwpn(lport, wwpn);
}
@ -1056,6 +1085,7 @@ static int fcoe_ddp_done(struct fc_lport *lport, u16 xid)
static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
struct device *parent, int npiv)
{
struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
struct net_device *netdev = fcoe->netdev;
struct fc_lport *lport, *n_port;
struct fcoe_port *port;
@ -1119,7 +1149,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
}
/* Initialize the library */
rc = fcoe_libfc_config(lport, &fcoe->ctlr, &fcoe_libfc_fcn_templ, 1);
rc = fcoe_libfc_config(lport, ctlr, &fcoe_libfc_fcn_templ, 1);
if (rc) {
FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
"interface\n");
@ -1386,6 +1416,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
{
struct fc_lport *lport;
struct fcoe_rcv_info *fr;
struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fc_frame_header *fh;
struct fcoe_percpu_s *fps;
@ -1393,7 +1424,8 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
unsigned int cpu;
fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
lport = fcoe->ctlr.lp;
ctlr = fcoe_to_ctlr(fcoe);
lport = ctlr->lp;
if (unlikely(!lport)) {
FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
goto err2;
@ -1409,8 +1441,8 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
eh = eth_hdr(skb);
if (is_fip_mode(&fcoe->ctlr) &&
compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) {
if (is_fip_mode(ctlr) &&
compare_ether_addr(eh->h_source, ctlr->dest_addr)) {
FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
eh->h_source);
goto err;
@ -1544,6 +1576,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
unsigned int elen; /* eth header, may include vlan */
struct fcoe_port *port = lport_priv(lport);
struct fcoe_interface *fcoe = port->priv;
struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
u8 sof, eof;
struct fcoe_hdr *hp;
@ -1559,7 +1592,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
}
if (unlikely(fh->fh_type == FC_TYPE_ELS) &&
fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb))
fcoe_ctlr_els_send(ctlr, lport, skb))
return 0;
sof = fr_sof(fp);
@ -1623,12 +1656,12 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
/* fill up mac and fcoe headers */
eh = eth_hdr(skb);
eh->h_proto = htons(ETH_P_FCOE);
memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN);
if (fcoe->ctlr.map_dest)
memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
if (ctlr->map_dest)
memcpy(eh->h_dest + 3, fh->fh_d_id, 3);
if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
else
memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
@ -1677,6 +1710,7 @@ static void fcoe_percpu_flush_done(struct sk_buff *skb)
static inline int fcoe_filter_frames(struct fc_lport *lport,
struct fc_frame *fp)
{
struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fc_frame_header *fh;
struct sk_buff *skb = (struct sk_buff *)fp;
@ -1698,7 +1732,8 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
return 0;
fcoe = ((struct fcoe_port *)lport_priv(lport))->priv;
if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
ctlr = fcoe_to_ctlr(fcoe);
if (is_fip_mode(ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n");
return -EINVAL;
@ -1877,6 +1912,7 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
ulong event, void *ptr)
{
struct dcb_app_type *entry = ptr;
struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct net_device *netdev;
struct fcoe_port *port;
@ -1894,6 +1930,8 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
if (!fcoe)
return NOTIFY_OK;
ctlr = fcoe_to_ctlr(fcoe);
if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
prio = ffs(entry->app.priority) - 1;
else
@ -1904,10 +1942,10 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
if (entry->app.protocol == ETH_P_FIP ||
entry->app.protocol == ETH_P_FCOE)
fcoe->ctlr.priority = prio;
ctlr->priority = prio;
if (entry->app.protocol == ETH_P_FCOE) {
port = lport_priv(fcoe->ctlr.lp);
port = lport_priv(ctlr->lp);
port->priority = prio;
}
@ -1929,6 +1967,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
{
struct fc_lport *lport = NULL;
struct net_device *netdev = ptr;
struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fcoe_port *port;
struct fcoe_dev_stats *stats;
@ -1938,7 +1977,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
list_for_each_entry(fcoe, &fcoe_hostlist, list) {
if (fcoe->netdev == netdev) {
lport = fcoe->ctlr.lp;
ctlr = fcoe_to_ctlr(fcoe);
lport = ctlr->lp;
break;
}
}
@ -1967,7 +2007,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
break;
case NETDEV_UNREGISTER:
list_del(&fcoe->list);
port = lport_priv(fcoe->ctlr.lp);
port = lport_priv(ctlr->lp);
queue_work(fcoe_wq, &port->destroy_work);
goto out;
break;
@ -1982,8 +2022,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
fcoe_link_speed_update(lport);
if (link_possible && !fcoe_link_ok(lport))
fcoe_ctlr_link_up(&fcoe->ctlr);
else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
fcoe_ctlr_link_up(ctlr);
else if (fcoe_ctlr_link_down(ctlr)) {
stats = per_cpu_ptr(lport->dev_stats, get_cpu());
stats->LinkFailureCount++;
put_cpu();
@ -2003,6 +2043,7 @@ out:
*/
static int fcoe_disable(struct net_device *netdev)
{
struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
int rc = 0;
@ -2013,8 +2054,9 @@ static int fcoe_disable(struct net_device *netdev)
rtnl_unlock();
if (fcoe) {
fcoe_ctlr_link_down(&fcoe->ctlr);
fcoe_clean_pending_queue(fcoe->ctlr.lp);
ctlr = fcoe_to_ctlr(fcoe);
fcoe_ctlr_link_down(ctlr);
fcoe_clean_pending_queue(ctlr->lp);
} else
rc = -ENODEV;
@ -2032,6 +2074,7 @@ static int fcoe_disable(struct net_device *netdev)
*/
static int fcoe_enable(struct net_device *netdev)
{
struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
int rc = 0;
@ -2040,11 +2083,17 @@ static int fcoe_enable(struct net_device *netdev)
fcoe = fcoe_hostlist_lookup_port(netdev);
rtnl_unlock();
if (!fcoe)
if (!fcoe) {
rc = -ENODEV;
else if (!fcoe_link_ok(fcoe->ctlr.lp))
fcoe_ctlr_link_up(&fcoe->ctlr);
goto out;
}
ctlr = fcoe_to_ctlr(fcoe);
if (!fcoe_link_ok(ctlr->lp))
fcoe_ctlr_link_up(ctlr);
out:
mutex_unlock(&fcoe_config_mutex);
return rc;
}
@ -2059,6 +2108,7 @@ static int fcoe_enable(struct net_device *netdev)
*/
static int fcoe_destroy(struct net_device *netdev)
{
struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fc_lport *lport;
struct fcoe_port *port;
@ -2071,7 +2121,8 @@ static int fcoe_destroy(struct net_device *netdev)
rc = -ENODEV;
goto out_nodev;
}
lport = fcoe->ctlr.lp;
ctlr = fcoe_to_ctlr(fcoe);
lport = ctlr->lp;
port = lport_priv(lport);
list_del(&fcoe->list);
queue_work(fcoe_wq, &port->destroy_work);
@ -2126,7 +2177,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
int dcbx;
u8 fup, up;
struct net_device *netdev = fcoe->realdev;
struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
struct fcoe_port *port = lport_priv(ctlr->lp);
struct dcb_app app = {
.priority = 0,
.protocol = ETH_P_FCOE
@ -2149,7 +2201,7 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
}
port->priority = ffs(up) ? ffs(up) - 1 : 0;
fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
ctlr->priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
}
#endif
}
@ -2166,6 +2218,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
{
int rc = 0;
struct fcoe_ctlr_device *ctlr_dev;
struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fc_lport *lport;
@ -2184,7 +2238,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
goto out_nodev;
}
lport = fcoe_if_create(fcoe, &netdev->dev, 0);
ctlr = fcoe_to_ctlr(fcoe);
ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
lport = fcoe_if_create(fcoe, &ctlr_dev->dev, 0);
if (IS_ERR(lport)) {
printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
netdev->name);
@ -2195,7 +2251,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
}
/* Make this the "master" N_Port */
fcoe->ctlr.lp = lport;
ctlr->lp = lport;
/* setup DCB priority attributes. */
fcoe_dcb_create(fcoe);
@ -2208,7 +2264,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
fc_fabric_login(lport);
if (!fcoe_link_ok(lport)) {
rtnl_unlock();
fcoe_ctlr_link_up(&fcoe->ctlr);
fcoe_ctlr_link_up(ctlr);
mutex_unlock(&fcoe_config_mutex);
return rc;
}
@ -2320,11 +2376,12 @@ static int fcoe_reset(struct Scsi_Host *shost)
struct fc_lport *lport = shost_priv(shost);
struct fcoe_port *port = lport_priv(lport);
struct fcoe_interface *fcoe = port->priv;
struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
fcoe_ctlr_link_down(&fcoe->ctlr);
fcoe_clean_pending_queue(fcoe->ctlr.lp);
if (!fcoe_link_ok(fcoe->ctlr.lp))
fcoe_ctlr_link_up(&fcoe->ctlr);
fcoe_ctlr_link_down(ctlr);
fcoe_clean_pending_queue(ctlr->lp);
if (!fcoe_link_ok(ctlr->lp))
fcoe_ctlr_link_up(ctlr);
return 0;
}
@ -2359,10 +2416,12 @@ fcoe_hostlist_lookup_port(const struct net_device *netdev)
*/
static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
{
struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
fcoe = fcoe_hostlist_lookup_port(netdev);
return (fcoe) ? fcoe->ctlr.lp : NULL;
ctlr = fcoe_to_ctlr(fcoe);
return (fcoe) ? ctlr->lp : NULL;
}
/**
@ -2466,6 +2525,7 @@ module_init(fcoe_init);
static void __exit fcoe_exit(void)
{
struct fcoe_interface *fcoe, *tmp;
struct fcoe_ctlr *ctlr;
struct fcoe_port *port;
unsigned int cpu;
@ -2477,7 +2537,8 @@ static void __exit fcoe_exit(void)
rtnl_lock();
list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
list_del(&fcoe->list);
port = lport_priv(fcoe->ctlr.lp);
ctlr = fcoe_to_ctlr(fcoe);
port = lport_priv(ctlr->lp);
queue_work(fcoe_wq, &port->destroy_work);
}
rtnl_unlock();
@ -2573,7 +2634,7 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
{
struct fcoe_port *port = lport_priv(lport);
struct fcoe_interface *fcoe = port->priv;
struct fcoe_ctlr *fip = &fcoe->ctlr;
struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
struct fc_frame_header *fh = fc_frame_header_get(fp);
switch (op) {
@ -2730,6 +2791,40 @@ static void fcoe_get_lesb(struct fc_lport *lport,
__fcoe_get_lesb(lport, fc_lesb, netdev);
}
static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
{
struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
struct net_device *netdev = fcoe_netdev(fip->lp);
struct fcoe_fc_els_lesb *fcoe_lesb;
struct fc_els_lesb fc_lesb;
__fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
ctlr_dev->lesb.lesb_link_fail =
ntohl(fcoe_lesb->lesb_link_fail);
ctlr_dev->lesb.lesb_vlink_fail =
ntohl(fcoe_lesb->lesb_vlink_fail);
ctlr_dev->lesb.lesb_miss_fka =
ntohl(fcoe_lesb->lesb_miss_fka);
ctlr_dev->lesb.lesb_symb_err =
ntohl(fcoe_lesb->lesb_symb_err);
ctlr_dev->lesb.lesb_err_block =
ntohl(fcoe_lesb->lesb_err_block);
ctlr_dev->lesb.lesb_fcs_error =
ntohl(fcoe_lesb->lesb_fcs_error);
}
static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
{
struct fcoe_ctlr_device *ctlr_dev =
fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr);
fcf_dev->vlan_id = vlan_dev_vlan_id(fcoe->netdev);
}
/**
* fcoe_set_port_id() - Callback from libfc when Port_ID is set.
* @lport: the local port
@ -2747,7 +2842,8 @@ static void fcoe_set_port_id(struct fc_lport *lport,
{
struct fcoe_port *port = lport_priv(lport);
struct fcoe_interface *fcoe = port->priv;
struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
fcoe_ctlr_recv_flogi(ctlr, lport, fp);
}

@ -68,7 +68,6 @@ do { \
* @netdev: The associated net device
* @fcoe_packet_type: FCoE packet type
* @fip_packet_type: FIP packet type
* @ctlr: The FCoE controller (for FIP)
* @oem: The offload exchange manager for all local port
* instances associated with this port
* @removed: Indicates fcoe interface removed from net device
@ -80,12 +79,15 @@ struct fcoe_interface {
struct net_device *realdev;
struct packet_type fcoe_packet_type;
struct packet_type fip_packet_type;
struct fcoe_ctlr ctlr;
struct fc_exch_mgr *oem;
u8 removed;
};
#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
#define fcoe_to_ctlr(x) \
(struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1)
#define fcoe_from_ctlr(x) \
((struct fcoe_interface *)((x) + 1))
/**
* fcoe_netdev() - Return the net device associated with a local port

@ -160,6 +160,76 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
}
EXPORT_SYMBOL(fcoe_ctlr_init);
static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new)
{
struct fcoe_ctlr *fip = new->fip;
struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
struct fcoe_fcf_device temp, *fcf_dev;
int rc = 0;
LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",
new->fabric_name, new->fcf_mac);
mutex_lock(&ctlr_dev->lock);
temp.fabric_name = new->fabric_name;
temp.switch_name = new->switch_name;
temp.fc_map = new->fc_map;
temp.vfid = new->vfid;
memcpy(temp.mac, new->fcf_mac, ETH_ALEN);
temp.priority = new->pri;
temp.fka_period = new->fka_period;
temp.selected = 0; /* default to unselected */
fcf_dev = fcoe_fcf_device_add(ctlr_dev, &temp);
if (unlikely(!fcf_dev)) {
rc = -ENOMEM;
goto out;
}
/*
* The fcoe_sysfs layer can return a CONNECTED fcf that
* has a priv (fcf was never deleted) or a CONNECTED fcf
* that doesn't have a priv (fcf was deleted). However,
* libfcoe will always delete FCFs before trying to add
* them. This is ensured because both recv_adv and
* age_fcfs are protected by the fcoe_ctlr's mutex.
* This means that we should never get an FCF with a
* non-NULL priv pointer.
*/
BUG_ON(fcf_dev->priv);
fcf_dev->priv = new;
new->fcf_dev = fcf_dev;
list_add(&new->list, &fip->fcfs);
fip->fcf_count++;
out:
mutex_unlock(&ctlr_dev->lock);
return rc;
}
static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
{
struct fcoe_ctlr *fip = new->fip;
struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
struct fcoe_fcf_device *fcf_dev;
list_del(&new->list);
fip->fcf_count--;
mutex_lock(&ctlr_dev->lock);
fcf_dev = fcoe_fcf_to_fcf_dev(new);
WARN_ON(!fcf_dev);
new->fcf_dev = NULL;
fcoe_fcf_device_delete(fcf_dev);
kfree(new);
mutex_unlock(&ctlr_dev->lock);
}
/**
* fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller
* @fip: The FCoE controller whose FCFs are to be reset
@ -173,10 +243,10 @@ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip)
fip->sel_fcf = NULL;
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
list_del(&fcf->list);
kfree(fcf);
fcoe_sysfs_fcf_del(fcf);
}
fip->fcf_count = 0;
WARN_ON(fip->fcf_count);
fip->sel_time = 0;
}
@ -717,8 +787,11 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
unsigned long deadline;
unsigned long sel_time = 0;
struct list_head del_list;
struct fcoe_dev_stats *stats;
INIT_LIST_HEAD(&del_list);
stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu());
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
@ -739,10 +812,13 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
if (time_after_eq(jiffies, deadline)) {
if (fip->sel_fcf == fcf)
fip->sel_fcf = NULL;
/*
* Move to delete list so we can call
* fcoe_sysfs_fcf_del (which can sleep)
* after the put_cpu().
*/
list_del(&fcf->list);
WARN_ON(!fip->fcf_count);
fip->fcf_count--;
kfree(fcf);
list_add(&fcf->list, &del_list);
stats->VLinkFailureCount++;
} else {
if (time_after(next_timer, deadline))
@ -753,6 +829,12 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
}
}
put_cpu();
list_for_each_entry_safe(fcf, next, &del_list, list) {
/* Removes fcf from current list */
fcoe_sysfs_fcf_del(fcf);
}
if (sel_time && !fip->sel_fcf && !fip->sel_time) {
sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY);
fip->sel_time = sel_time;
@ -903,23 +985,23 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
struct fcoe_fcf *fcf;
struct fcoe_fcf new;
struct fcoe_fcf *found;
unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV);
int first = 0;
int mtu_valid;
int found = 0;
int rc = 0;
if (fcoe_ctlr_parse_adv(fip, skb, &new))
return;
mutex_lock(&fip->ctlr_mutex);
first = list_empty(&fip->fcfs);
found = NULL;
list_for_each_entry(fcf, &fip->fcfs, list) {
if (fcf->switch_name == new.switch_name &&
fcf->fabric_name == new.fabric_name &&
fcf->fc_map == new.fc_map &&
compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) {
found = fcf;
found = 1;
break;
}
}
@ -931,9 +1013,16 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
if (!fcf)
goto out;
fip->fcf_count++;
memcpy(fcf, &new, sizeof(new));
list_add(&fcf->list, &fip->fcfs);
fcf->fip = fip;
rc = fcoe_sysfs_fcf_add(fcf);
if (rc) {
printk(KERN_ERR "Failed to allocate sysfs instance "
"for FCF, fab %16.16llx mac %pM\n",
new.fabric_name, new.fcf_mac);
kfree(fcf);
goto out;
}
} else {
/*
* Update the FCF's keep-alive descriptor flags.
@ -954,6 +1043,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
fcf->fka_period = new.fka_period;
memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN);
}
mtu_valid = fcoe_ctlr_mtu_valid(fcf);
fcf->time = jiffies;
if (!found)
@ -996,6 +1086,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
time_before(fip->sel_time, fip->timer.expires))
mod_timer(&fip->timer, fip->sel_time);
}
out:
mutex_unlock(&fip->ctlr_mutex);
}
@ -2718,9 +2809,9 @@ unlock:
/**
* fcoe_libfc_config() - Sets up libfc related properties for local port
* @lp: The local port to configure libfc for
* @fip: The FCoE controller in use by the local port
* @tt: The libfc function template
* @lport: The local port to configure libfc for
* @fip: The FCoE controller in use by the local port
* @tt: The libfc function template
* @init_fcp: If non-zero, the FCP portion of libfc should be initialized
*
* Returns : 0 for success
@ -2753,3 +2844,43 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,
return 0;
}
EXPORT_SYMBOL_GPL(fcoe_libfc_config);
void fcoe_fcf_get_selected(struct fcoe_fcf_device *fcf_dev)
{
struct fcoe_ctlr_device *ctlr_dev = fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
struct fcoe_fcf *fcf;
mutex_lock(&fip->ctlr_mutex);
mutex_lock(&ctlr_dev->lock);
fcf = fcoe_fcf_device_priv(fcf_dev);
if (fcf)
fcf_dev->selected = (fcf == fip->sel_fcf) ? 1 : 0;
else
fcf_dev->selected = 0;
mutex_unlock(&ctlr_dev->lock);
mutex_unlock(&fip->ctlr_mutex);
}
EXPORT_SYMBOL(fcoe_fcf_get_selected);
void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
{
struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
mutex_lock(&ctlr->ctlr_mutex);
switch (ctlr->mode) {
case FIP_MODE_FABRIC:
ctlr_dev->mode = FIP_CONN_TYPE_FABRIC;
break;
case FIP_MODE_VN2VN:
ctlr_dev->mode = FIP_CONN_TYPE_VN2VN;
break;
default:
ctlr_dev->mode = FIP_CONN_TYPE_UNKNOWN;
break;
}
mutex_unlock(&ctlr->ctlr_mutex);
}
EXPORT_SYMBOL(fcoe_ctlr_get_fip_mode);
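These two exported getters are meant to be plugged into an LLD's fcoe_sysfs_function_template so the sysfs show routines can refresh state on demand. A minimal sketch of the wiring, assuming the get_fcoe_ctlr_*/get_fcoe_fcf_* callback names implied by the attribute macros in fcoe_sysfs.c:
static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
	/* refresh ctlr_dev->mode before the 'mode' attribute is shown */
	.get_fcoe_ctlr_mode	= fcoe_ctlr_get_fip_mode,
	/* refresh fcf_dev->selected before the 'selected' attribute is shown */
	.get_fcoe_fcf_selected	= fcoe_fcf_get_selected,
};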

View File

@ -0,0 +1,832 @@
/*
* Copyright(c) 2011 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* Maintained at www.Open-FCoE.org
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <scsi/fcoe_sysfs.h>
static atomic_t ctlr_num;
static atomic_t fcf_num;
/*
* fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
* should insulate the loss of a fcf.
*/
static unsigned int fcoe_fcf_dev_loss_tmo = 1800; /* seconds */
module_param_named(fcf_dev_loss_tmo, fcoe_fcf_dev_loss_tmo,
uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fcf_dev_loss_tmo,
"Maximum number of seconds that libfcoe should"
" insulate the loss of a fcf. Once this value is"
" exceeded, the fcf is removed.");
/*
* These are used by the fcoe_*_show_function routines, they
* are intentionally placed in the .c file as they're not intended
* for use throughout the code.
*/
#define fcoe_ctlr_id(x) \
((x)->id)
#define fcoe_ctlr_work_q_name(x) \
((x)->work_q_name)
#define fcoe_ctlr_work_q(x) \
((x)->work_q)
#define fcoe_ctlr_devloss_work_q_name(x) \
((x)->devloss_work_q_name)
#define fcoe_ctlr_devloss_work_q(x) \
((x)->devloss_work_q)
#define fcoe_ctlr_mode(x) \
((x)->mode)
#define fcoe_ctlr_fcf_dev_loss_tmo(x) \
((x)->fcf_dev_loss_tmo)
#define fcoe_ctlr_link_fail(x) \
((x)->lesb.lesb_link_fail)
#define fcoe_ctlr_vlink_fail(x) \
((x)->lesb.lesb_vlink_fail)
#define fcoe_ctlr_miss_fka(x) \
((x)->lesb.lesb_miss_fka)
#define fcoe_ctlr_symb_err(x) \
((x)->lesb.lesb_symb_err)
#define fcoe_ctlr_err_block(x) \
((x)->lesb.lesb_err_block)
#define fcoe_ctlr_fcs_error(x) \
((x)->lesb.lesb_fcs_error)
#define fcoe_fcf_state(x) \
((x)->state)
#define fcoe_fcf_fabric_name(x) \
((x)->fabric_name)
#define fcoe_fcf_switch_name(x) \
((x)->switch_name)
#define fcoe_fcf_fc_map(x) \
((x)->fc_map)
#define fcoe_fcf_vfid(x) \
((x)->vfid)
#define fcoe_fcf_mac(x) \
((x)->mac)
#define fcoe_fcf_priority(x) \
((x)->priority)
#define fcoe_fcf_fka_period(x) \
((x)->fka_period)
#define fcoe_fcf_dev_loss_tmo(x) \
((x)->dev_loss_tmo)
#define fcoe_fcf_selected(x) \
((x)->selected)
#define fcoe_fcf_vlan_id(x) \
((x)->vlan_id)
/*
* dev_loss_tmo attribute
*/
static int fcoe_str_to_dev_loss(const char *buf, unsigned long *val)
{
int ret;
ret = kstrtoul(buf, 0, val);
if (ret || *val < 0)
return -EINVAL;
/*
* Check for overflow; dev_loss_tmo is u32
*/
if (*val > UINT_MAX)
return -EINVAL;
return 0;
}
static int fcoe_fcf_set_dev_loss_tmo(struct fcoe_fcf_device *fcf,
unsigned long val)
{
if ((fcf->state == FCOE_FCF_STATE_UNKNOWN) ||
(fcf->state == FCOE_FCF_STATE_DISCONNECTED) ||
(fcf->state == FCOE_FCF_STATE_DELETED))
return -EBUSY;
/*
* Check for overflow; dev_loss_tmo is u32
*/
if (val > UINT_MAX)
return -EINVAL;
fcoe_fcf_dev_loss_tmo(fcf) = val;
return 0;
}
#define FCOE_DEVICE_ATTR(_prefix, _name, _mode, _show, _store) \
struct device_attribute device_attr_fcoe_##_prefix##_##_name = \
__ATTR(_name, _mode, _show, _store)
#define fcoe_ctlr_show_function(field, format_string, sz, cast) \
static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \
if (ctlr->f->get_fcoe_ctlr_##field) \
ctlr->f->get_fcoe_ctlr_##field(ctlr); \
return snprintf(buf, sz, format_string, \
cast fcoe_ctlr_##field(ctlr)); \
}
#define fcoe_fcf_show_function(field, format_string, sz, cast) \
static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct fcoe_fcf_device *fcf = dev_to_fcf(dev); \
struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); \
if (ctlr->f->get_fcoe_fcf_##field) \
ctlr->f->get_fcoe_fcf_##field(fcf); \
return snprintf(buf, sz, format_string, \
cast fcoe_fcf_##field(fcf)); \
}
#define fcoe_ctlr_private_show_function(field, format_string, sz, cast) \
static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \
return snprintf(buf, sz, format_string, cast fcoe_ctlr_##field(ctlr)); \
}
#define fcoe_fcf_private_show_function(field, format_string, sz, cast) \
static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct fcoe_fcf_device *fcf = dev_to_fcf(dev); \
return snprintf(buf, sz, format_string, cast fcoe_fcf_##field(fcf)); \
}
#define fcoe_ctlr_private_rd_attr(field, format_string, sz) \
fcoe_ctlr_private_show_function(field, format_string, sz, ) \
static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
show_fcoe_ctlr_device_##field, NULL)
#define fcoe_ctlr_rd_attr(field, format_string, sz) \
fcoe_ctlr_show_function(field, format_string, sz, ) \
static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
show_fcoe_ctlr_device_##field, NULL)
#define fcoe_fcf_rd_attr(field, format_string, sz) \
fcoe_fcf_show_function(field, format_string, sz, ) \
static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
show_fcoe_fcf_device_##field, NULL)
#define fcoe_fcf_private_rd_attr(field, format_string, sz) \
fcoe_fcf_private_show_function(field, format_string, sz, ) \
static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
show_fcoe_fcf_device_##field, NULL)
#define fcoe_ctlr_private_rd_attr_cast(field, format_string, sz, cast) \
fcoe_ctlr_private_show_function(field, format_string, sz, (cast)) \
static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
show_fcoe_ctlr_device_##field, NULL)
#define fcoe_fcf_private_rd_attr_cast(field, format_string, sz, cast) \
fcoe_fcf_private_show_function(field, format_string, sz, (cast)) \
static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
show_fcoe_fcf_device_##field, NULL)
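For reference, here is roughly what one of these generators produces; this is a hand expansion of fcoe_ctlr_rd_attr(link_fail, "%u\n", 20), not code that appears in the file:
static ssize_t show_fcoe_ctlr_device_link_fail(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
	/* ask the LLD to refresh the LESB counter, if it provides a hook */
	if (ctlr->f->get_fcoe_ctlr_link_fail)
		ctlr->f->get_fcoe_ctlr_link_fail(ctlr);
	return snprintf(buf, 20, "%u\n", ctlr->lesb.lesb_link_fail);
}
static struct device_attribute device_attr_fcoe_ctlr_link_fail =
	__ATTR(link_fail, S_IRUGO, show_fcoe_ctlr_device_link_fail, NULL);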
#define fcoe_enum_name_search(title, table_type, table) \
static const char *get_fcoe_##title##_name(enum table_type table_key) \
{ \
int i; \
char *name = NULL; \
\
for (i = 0; i < ARRAY_SIZE(table); i++) { \
if (table[i].value == table_key) { \
name = table[i].name; \
break; \
} \
} \
return name; \
}
static struct {
enum fcf_state value;
char *name;
} fcf_state_names[] = {
{ FCOE_FCF_STATE_UNKNOWN, "Unknown" },
{ FCOE_FCF_STATE_DISCONNECTED, "Disconnected" },
{ FCOE_FCF_STATE_CONNECTED, "Connected" },
};
fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names)
#define FCOE_FCF_STATE_MAX_NAMELEN 50
static ssize_t show_fcf_state(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
const char *name;
name = get_fcoe_fcf_state_name(fcf->state);
if (!name)
return -EINVAL;
return snprintf(buf, FCOE_FCF_STATE_MAX_NAMELEN, "%s\n", name);
}
static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL);
static struct {
enum fip_conn_type value;
char *name;
} fip_conn_type_names[] = {
{ FIP_CONN_TYPE_UNKNOWN, "Unknown" },
{ FIP_CONN_TYPE_FABRIC, "Fabric" },
{ FIP_CONN_TYPE_VN2VN, "VN2VN" },
};
fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
#define FCOE_CTLR_MODE_MAX_NAMELEN 50
static ssize_t show_ctlr_mode(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
const char *name;
if (ctlr->f->get_fcoe_ctlr_mode)
ctlr->f->get_fcoe_ctlr_mode(ctlr);
name = get_fcoe_ctlr_mode_name(ctlr->mode);
if (!name)
return -EINVAL;
return snprintf(buf, FCOE_CTLR_MODE_MAX_NAMELEN,
"%s\n", name);
}
static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO,
show_ctlr_mode, NULL);
static ssize_t
store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
struct fcoe_fcf_device *fcf;
unsigned long val;
int rc;
rc = fcoe_str_to_dev_loss(buf, &val);
if (rc)
return rc;
fcoe_ctlr_fcf_dev_loss_tmo(ctlr) = val;
mutex_lock(&ctlr->lock);
list_for_each_entry(fcf, &ctlr->fcfs, peers)
fcoe_fcf_set_dev_loss_tmo(fcf, val);
mutex_unlock(&ctlr->lock);
return count;
}
fcoe_ctlr_private_show_function(fcf_dev_loss_tmo, "%d\n", 20, );
static FCOE_DEVICE_ATTR(ctlr, fcf_dev_loss_tmo, S_IRUGO | S_IWUSR,
show_fcoe_ctlr_device_fcf_dev_loss_tmo,
store_private_fcoe_ctlr_fcf_dev_loss_tmo);
/* Link Error Status Block (LESB) */
fcoe_ctlr_rd_attr(link_fail, "%u\n", 20);
fcoe_ctlr_rd_attr(vlink_fail, "%u\n", 20);
fcoe_ctlr_rd_attr(miss_fka, "%u\n", 20);
fcoe_ctlr_rd_attr(symb_err, "%u\n", 20);
fcoe_ctlr_rd_attr(err_block, "%u\n", 20);
fcoe_ctlr_rd_attr(fcs_error, "%u\n", 20);
fcoe_fcf_private_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
fcoe_fcf_private_rd_attr_cast(switch_name, "0x%llx\n", 20, unsigned long long);
fcoe_fcf_private_rd_attr(priority, "%u\n", 20);
fcoe_fcf_private_rd_attr(fc_map, "0x%x\n", 20);
fcoe_fcf_private_rd_attr(vfid, "%u\n", 20);
fcoe_fcf_private_rd_attr(mac, "%pM\n", 20);
fcoe_fcf_private_rd_attr(fka_period, "%u\n", 20);
fcoe_fcf_rd_attr(selected, "%u\n", 20);
fcoe_fcf_rd_attr(vlan_id, "%u\n", 20);
fcoe_fcf_private_show_function(dev_loss_tmo, "%d\n", 20, )
static ssize_t
store_fcoe_fcf_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
unsigned long val;
int rc;
rc = fcoe_str_to_dev_loss(buf, &val);
if (rc)
return rc;
rc = fcoe_fcf_set_dev_loss_tmo(fcf, val);
if (rc)
return rc;
return count;
}
static FCOE_DEVICE_ATTR(fcf, dev_loss_tmo, S_IRUGO | S_IWUSR,
show_fcoe_fcf_device_dev_loss_tmo,
store_fcoe_fcf_dev_loss_tmo);
static struct attribute *fcoe_ctlr_lesb_attrs[] = {
&device_attr_fcoe_ctlr_link_fail.attr,
&device_attr_fcoe_ctlr_vlink_fail.attr,
&device_attr_fcoe_ctlr_miss_fka.attr,
&device_attr_fcoe_ctlr_symb_err.attr,
&device_attr_fcoe_ctlr_err_block.attr,
&device_attr_fcoe_ctlr_fcs_error.attr,
NULL,
};
static struct attribute_group fcoe_ctlr_lesb_attr_group = {
.name = "lesb",
.attrs = fcoe_ctlr_lesb_attrs,
};
static struct attribute *fcoe_ctlr_attrs[] = {
&device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
&device_attr_fcoe_ctlr_mode.attr,
NULL,
};
static struct attribute_group fcoe_ctlr_attr_group = {
.attrs = fcoe_ctlr_attrs,
};
static const struct attribute_group *fcoe_ctlr_attr_groups[] = {
&fcoe_ctlr_attr_group,
&fcoe_ctlr_lesb_attr_group,
NULL,
};
static struct attribute *fcoe_fcf_attrs[] = {
&device_attr_fcoe_fcf_fabric_name.attr,
&device_attr_fcoe_fcf_switch_name.attr,
&device_attr_fcoe_fcf_dev_loss_tmo.attr,
&device_attr_fcoe_fcf_fc_map.attr,
&device_attr_fcoe_fcf_vfid.attr,
&device_attr_fcoe_fcf_mac.attr,
&device_attr_fcoe_fcf_priority.attr,
&device_attr_fcoe_fcf_fka_period.attr,
&device_attr_fcoe_fcf_state.attr,
&device_attr_fcoe_fcf_selected.attr,
&device_attr_fcoe_fcf_vlan_id.attr,
NULL
};
static struct attribute_group fcoe_fcf_attr_group = {
.attrs = fcoe_fcf_attrs,
};
static const struct attribute_group *fcoe_fcf_attr_groups[] = {
&fcoe_fcf_attr_group,
NULL,
};
struct bus_type fcoe_bus_type;
static int fcoe_bus_match(struct device *dev,
struct device_driver *drv)
{
if (dev->bus == &fcoe_bus_type)
return 1;
return 0;
}
/**
* fcoe_ctlr_device_release() - Release the FIP ctlr memory
* @dev: Pointer to the FIP ctlr's embedded device
*
* Called when the last FIP ctlr reference is released.
*/
static void fcoe_ctlr_device_release(struct device *dev)
{
struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
kfree(ctlr);
}
/**
* fcoe_fcf_device_release() - Release the FIP fcf memory
* @dev: Pointer to the fcf's embedded device
*
* Called when the last FIP fcf reference is released.
*/
static void fcoe_fcf_device_release(struct device *dev)
{
struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
kfree(fcf);
}
struct device_type fcoe_ctlr_device_type = {
.name = "fcoe_ctlr",
.groups = fcoe_ctlr_attr_groups,
.release = fcoe_ctlr_device_release,
};
struct device_type fcoe_fcf_device_type = {
.name = "fcoe_fcf",
.groups = fcoe_fcf_attr_groups,
.release = fcoe_fcf_device_release,
};
struct bus_type fcoe_bus_type = {
.name = "fcoe",
.match = &fcoe_bus_match,
};
/**
* fcoe_ctlr_device_flush_work() - Flush a FIP ctlr's workqueue
* @ctlr: Pointer to the FIP ctlr whose workqueue is to be flushed
*/
void fcoe_ctlr_device_flush_work(struct fcoe_ctlr_device *ctlr)
{
if (!fcoe_ctlr_work_q(ctlr)) {
printk(KERN_ERR
"ERROR: FIP Ctlr '%d' attempted to flush work, "
"when no workqueue created.\n", ctlr->id);
dump_stack();
return;
}
flush_workqueue(fcoe_ctlr_work_q(ctlr));
}
/**
* fcoe_ctlr_device_queue_work() - Schedule work for a FIP ctlr's workqueue
* @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue
* @work: Work to queue for execution
*
* Return value:
* 1 on success / 0 already queued / < 0 for error
*/
int fcoe_ctlr_device_queue_work(struct fcoe_ctlr_device *ctlr,
struct work_struct *work)
{
if (unlikely(!fcoe_ctlr_work_q(ctlr))) {
printk(KERN_ERR
"ERROR: FIP Ctlr '%d' attempted to queue work, "
"when no workqueue created.\n", ctlr->id);
dump_stack();
return -EINVAL;
}
return queue_work(fcoe_ctlr_work_q(ctlr), work);
}
/**
* fcoe_ctlr_device_flush_devloss() - Flush a FIP ctlr's devloss workqueue
* @ctlr: Pointer to FIP ctlr whose workqueue is to be flushed
*/
void fcoe_ctlr_device_flush_devloss(struct fcoe_ctlr_device *ctlr)
{
if (!fcoe_ctlr_devloss_work_q(ctlr)) {
printk(KERN_ERR
"ERROR: FIP Ctlr '%d' attempted to flush work, "
"when no workqueue created.\n", ctlr->id);
dump_stack();
return;
}
flush_workqueue(fcoe_ctlr_devloss_work_q(ctlr));
}
/**
* fcoe_ctlr_device_queue_devloss_work() - Schedule work for a FIP ctlr's devloss workqueue
* @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue
* @work: Work to queue for execution
* @delay: jiffies to delay the work queuing
*
* Return value:
* 1 on success / 0 already queued / < 0 for error
*/
int fcoe_ctlr_device_queue_devloss_work(struct fcoe_ctlr_device *ctlr,
struct delayed_work *work,
unsigned long delay)
{
if (unlikely(!fcoe_ctlr_devloss_work_q(ctlr))) {
printk(KERN_ERR
"ERROR: FIP Ctlr '%d' attempted to queue work, "
"when no workqueue created.\n", ctlr->id);
dump_stack();
return -EINVAL;
}
return queue_delayed_work(fcoe_ctlr_devloss_work_q(ctlr), work, delay);
}
static int fcoe_fcf_device_match(struct fcoe_fcf_device *new,
struct fcoe_fcf_device *old)
{
if (new->switch_name == old->switch_name &&
new->fabric_name == old->fabric_name &&
new->fc_map == old->fc_map &&
compare_ether_addr(new->mac, old->mac) == 0)
return 1;
return 0;
}
/**
* fcoe_ctlr_device_add() - Add a FIP ctlr to sysfs
* @parent: The parent device to which the fcoe_ctlr instance
* should be attached
* @f: The LLD's FCoE sysfs function template pointer
* @priv_size: Size to be allocated with the fcoe_ctlr_device for the LLD
*
* This routine allocates a FIP ctlr object with some additional memory
* for the LLD. The FIP ctlr is initialized, added to sysfs and then
* attributes are added to it.
*/
struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
struct fcoe_sysfs_function_template *f,
int priv_size)
{
struct fcoe_ctlr_device *ctlr;
int error = 0;
ctlr = kzalloc(sizeof(struct fcoe_ctlr_device) + priv_size,
GFP_KERNEL);
if (!ctlr)
goto out;
ctlr->id = atomic_inc_return(&ctlr_num) - 1;
ctlr->f = f;
INIT_LIST_HEAD(&ctlr->fcfs);
mutex_init(&ctlr->lock);
ctlr->dev.parent = parent;
ctlr->dev.bus = &fcoe_bus_type;
ctlr->dev.type = &fcoe_ctlr_device_type;
ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo;
snprintf(ctlr->work_q_name, sizeof(ctlr->work_q_name),
"ctlr_wq_%d", ctlr->id);
ctlr->work_q = create_singlethread_workqueue(
ctlr->work_q_name);
if (!ctlr->work_q)
goto out_del;
snprintf(ctlr->devloss_work_q_name,
sizeof(ctlr->devloss_work_q_name),
"ctlr_dl_wq_%d", ctlr->id);
ctlr->devloss_work_q = create_singlethread_workqueue(
ctlr->devloss_work_q_name);
if (!ctlr->devloss_work_q)
goto out_del_q;
dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id);
error = device_register(&ctlr->dev);
if (error)
goto out_del_q2;
return ctlr;
out_del_q2:
destroy_workqueue(ctlr->devloss_work_q);
ctlr->devloss_work_q = NULL;
out_del_q:
destroy_workqueue(ctlr->work_q);
ctlr->work_q = NULL;
out_del:
kfree(ctlr);
out:
return NULL;
}
EXPORT_SYMBOL_GPL(fcoe_ctlr_device_add);
/**
* fcoe_ctlr_device_delete() - Delete a FIP ctlr and its subtree from sysfs
* @ctlr: A pointer to the ctlr to be deleted
*
* Deletes a FIP ctlr and any fcfs attached
* to it. Deleting fcfs will cause their children
* to be deleted as well.
*
* The ctlr is detached from sysfs and its resources
* are freed (work q), but the memory is not freed
* until its last reference is released.
*
* This routine expects no locks to be held before
* calling.
*
* TODO: Currently there are no callbacks to clean up LLD data
* for a fcoe_fcf_device. LLDs must keep this in mind: they must
* clean up their LLD data for each fcoe_fcf_device before
* calling fcoe_ctlr_device_delete.
*/
void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *ctlr)
{
struct fcoe_fcf_device *fcf, *next;
/* Remove any attached fcfs */
mutex_lock(&ctlr->lock);
list_for_each_entry_safe(fcf, next,
&ctlr->fcfs, peers) {
list_del(&fcf->peers);
fcf->state = FCOE_FCF_STATE_DELETED;
fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
}
mutex_unlock(&ctlr->lock);
fcoe_ctlr_device_flush_work(ctlr);
destroy_workqueue(ctlr->devloss_work_q);
ctlr->devloss_work_q = NULL;
destroy_workqueue(ctlr->work_q);
ctlr->work_q = NULL;
device_unregister(&ctlr->dev);
}
EXPORT_SYMBOL_GPL(fcoe_ctlr_device_delete);
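Given the TODO above, a hypothetical LLD teardown would release its per-FCF private data before deleting the ctlr; release_lld_data() below is a stand-in for whatever cleanup the LLD actually requires:
	mutex_lock(&ctlr->lock);
	list_for_each_entry(fcf, &ctlr->fcfs, peers)
		release_lld_data(fcf->priv);	/* hypothetical LLD hook */
	mutex_unlock(&ctlr->lock);
	fcoe_ctlr_device_delete(ctlr);		/* no locks held here */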
/**
* fcoe_fcf_device_final_delete() - Final delete routine
* @work: The FIP fcf's embedded work struct
*
* It is expected that the fcf has been removed from
* the FIP ctlr's list before calling this routine.
*/
static void fcoe_fcf_device_final_delete(struct work_struct *work)
{
struct fcoe_fcf_device *fcf =
container_of(work, struct fcoe_fcf_device, delete_work);
struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
/*
* Cancel any outstanding timers. These should really exist
* only when rmmod'ing the LLDD and we're asking for
* immediate termination of the rports
*/
if (!cancel_delayed_work(&fcf->dev_loss_work))
fcoe_ctlr_device_flush_devloss(ctlr);
device_unregister(&fcf->dev);
}
/**
* fip_timeout_deleted_fcf() - Delete a fcf when the devloss timer fires
* @work: The FIP fcf's embedded work struct
*
* Removes the fcf from the FIP ctlr's list of fcfs and
* queues the final deletion.
*/
static void fip_timeout_deleted_fcf(struct work_struct *work)
{
struct fcoe_fcf_device *fcf =
container_of(work, struct fcoe_fcf_device, dev_loss_work.work);
struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
mutex_lock(&ctlr->lock);
/*
* If the fcf is deleted or reconnected before the timer
* fires, the devloss queue will be flushed, but the state will
* either be CONNECTED or DELETED. If that is the case we
* cancel deleting the fcf.
*/
if (fcf->state != FCOE_FCF_STATE_DISCONNECTED)
goto out;
dev_printk(KERN_ERR, &fcf->dev,
"FIP fcf connection time out: removing fcf\n");
list_del(&fcf->peers);
fcf->state = FCOE_FCF_STATE_DELETED;
fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
out:
mutex_unlock(&ctlr->lock);
}
/**
* fcoe_fcf_device_delete() - Delete a FIP fcf
* @fcf: Pointer to the fcf which is to be deleted
*
* Queues the FIP fcf on the devloss workqueue
*
* Expects the ctlr_attrs mutex to be held for fcf
* state change.
*/
void fcoe_fcf_device_delete(struct fcoe_fcf_device *fcf)
{
struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
int timeout = fcf->dev_loss_tmo;
if (fcf->state != FCOE_FCF_STATE_CONNECTED)
return;
fcf->state = FCOE_FCF_STATE_DISCONNECTED;
/*
* FCF will only be re-connected by the LLD calling
* fcoe_fcf_device_add, and it should be setting up
* priv then.
*/
fcf->priv = NULL;
fcoe_ctlr_device_queue_devloss_work(ctlr, &fcf->dev_loss_work,
timeout * HZ);
}
EXPORT_SYMBOL_GPL(fcoe_fcf_device_delete);
/**
* fcoe_fcf_device_add() - Add a FCoE sysfs fcoe_fcf_device to the system
* @ctlr: The fcoe_ctlr_device that will be the fcoe_fcf_device parent
* @new_fcf: A temporary FCF used for lookups on the current list of fcfs
*
* Expects to be called with the ctlr->lock held
*/
struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
struct fcoe_fcf_device *new_fcf)
{
struct fcoe_fcf_device *fcf;
int error = 0;
list_for_each_entry(fcf, &ctlr->fcfs, peers) {
if (fcoe_fcf_device_match(new_fcf, fcf)) {
if (fcf->state == FCOE_FCF_STATE_CONNECTED)
return fcf;
fcf->state = FCOE_FCF_STATE_CONNECTED;
if (!cancel_delayed_work(&fcf->dev_loss_work))
fcoe_ctlr_device_flush_devloss(ctlr);
return fcf;
}
}
fcf = kzalloc(sizeof(struct fcoe_fcf_device), GFP_ATOMIC);
if (unlikely(!fcf))
goto out;
INIT_WORK(&fcf->delete_work, fcoe_fcf_device_final_delete);
INIT_DELAYED_WORK(&fcf->dev_loss_work, fip_timeout_deleted_fcf);
fcf->dev.parent = &ctlr->dev;
fcf->dev.bus = &fcoe_bus_type;
fcf->dev.type = &fcoe_fcf_device_type;
fcf->id = atomic_inc_return(&fcf_num) - 1;
fcf->state = FCOE_FCF_STATE_UNKNOWN;
fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
dev_set_name(&fcf->dev, "fcf_%d", fcf->id);
fcf->fabric_name = new_fcf->fabric_name;
fcf->switch_name = new_fcf->switch_name;
fcf->fc_map = new_fcf->fc_map;
fcf->vfid = new_fcf->vfid;
memcpy(fcf->mac, new_fcf->mac, ETH_ALEN);
fcf->priority = new_fcf->priority;
fcf->fka_period = new_fcf->fka_period;
fcf->selected = new_fcf->selected;
error = device_register(&fcf->dev);
if (error)
goto out_del;
fcf->state = FCOE_FCF_STATE_CONNECTED;
list_add_tail(&fcf->peers, &ctlr->fcfs);
return fcf;
out_del:
kfree(fcf);
out:
return NULL;
}
EXPORT_SYMBOL_GPL(fcoe_fcf_device_add);
int __init fcoe_sysfs_setup(void)
{
int error;
atomic_set(&ctlr_num, 0);
atomic_set(&fcf_num, 0);
error = bus_register(&fcoe_bus_type);
if (error)
return error;
return 0;
}
void __exit fcoe_sysfs_teardown(void)
{
bus_unregister(&fcoe_bus_type);
}
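Taken together, the expected LLD usage is: register the bus once at module init (fcoe_sysfs_setup() is called from libfcoe_init() below), then create one fcoe_ctlr_device per interface and let the devloss machinery manage the FCFs. A condensed sketch, assuming a template named fcoe_sysfs_templ, the netdev's device as parent, and the ctlr's trailing private area holding the embedded fcoe_ctlr:
	struct fcoe_ctlr_device *ctlr_dev;
	struct fcoe_ctlr *fip;

	ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &fcoe_sysfs_templ,
					sizeof(struct fcoe_ctlr));
	if (!ctlr_dev)
		return -ENOMEM;
	fip = fcoe_ctlr_device_priv(ctlr_dev);	/* LLD-private area */
	fcoe_ctlr_init(fip, FIP_MODE_FABRIC);
	/* ... bring the interface up ... */
	fcoe_ctlr_device_delete(ctlr_dev);	/* removes attached FCFs too */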

View File

@ -815,9 +815,17 @@ out_nodev:
*/
static int __init libfcoe_init(void)
{
fcoe_transport_init();
int rc = 0;
return 0;
rc = fcoe_transport_init();
if (rc)
return rc;
rc = fcoe_sysfs_setup();
if (rc)
fcoe_transport_exit();
return rc;
}
module_init(libfcoe_init);
@ -826,6 +834,7 @@ module_init(libfcoe_init);
*/
static void __exit libfcoe_exit(void)
{
fcoe_sysfs_teardown();
fcoe_transport_exit();
}
module_exit(libfcoe_exit);

View File

@ -25,3 +25,12 @@ config SCSI_QLA_FC
Firmware images can be retrieved from:
ftp://ftp.qlogic.com/outgoing/linux/firmware/
config TCM_QLA2XXX
tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
depends on SCSI_QLA_FC && TARGET_CORE
select LIBFC
select BTREE
default n
---help---
Say Y here to enable the TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs

View File

@ -1,5 +1,6 @@
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
qla_nx.o
qla_nx.o qla_target.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o

View File

@ -5,6 +5,7 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include "qla_target.h"
#include <linux/kthread.h>
#include <linux/vmalloc.h>
@ -576,6 +577,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
scsi_block_requests(vha->host);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
if (IS_QLA82XX(ha)) {
ha->flags.isp82xx_no_md_cap = 1;
qla82xx_idc_lock(ha);
qla82xx_set_reset_owner(vha);
qla82xx_idc_unlock(ha);
@ -585,7 +587,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
scsi_unblock_requests(vha->host);
break;
case 0x2025d:
if (!IS_QLA81XX(ha))
if (!IS_QLA81XX(ha) || !IS_QLA8031(ha))
return -EPERM;
ql_log(ql_log_info, vha, 0x706f,
@ -1105,9 +1107,8 @@ qla2x00_total_isp_aborts_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
return snprintf(buf, PAGE_SIZE, "%d\n",
ha->qla_stats.total_isp_aborts);
vha->qla_stats.total_isp_aborts);
}
static ssize_t
@ -1154,7 +1155,7 @@ qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
@ -1537,7 +1538,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
dma_addr_t stats_dma;
struct fc_host_statistics *pfc_host_stat;
pfc_host_stat = &ha->fc_host_stat;
pfc_host_stat = &vha->fc_host_stat;
memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
if (test_bit(UNLOADING, &vha->dpc_flags))
@ -1580,8 +1581,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
pfc_host_stat->dumped_frames = stats->dumped_frames;
pfc_host_stat->nos_count = stats->nos_rcvd;
}
pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
done_free:
dma_pool_free(ha->s_dma_pool, stats, stats_dma);
@ -1737,6 +1738,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
fc_host_supported_speeds(vha->host) =
fc_host_supported_speeds(base_vha->host);
qlt_vport_create(vha, ha);
qla24xx_vport_disable(fc_vport, disable);
if (ha->flags.cpu_affinity_enabled) {
@ -1951,12 +1953,16 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
fc_host_supported_classes(vha->host) = ha->tgt.enable_class_2 ?
(FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
if (IS_CNA_CAPABLE(ha))
speed = FC_PORTSPEED_10GBIT;
else if (IS_QLA2031(ha))
speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
FC_PORTSPEED_4GBIT;
else if (IS_QLA25XX(ha))
speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;

View File

@ -297,7 +297,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
/* Initialize all required fields of fcport */
fcport->vha = vha;
fcport->vp_idx = vha->vp_idx;
fcport->d_id.b.al_pa =
bsg_job->request->rqst_data.h_els.port_id[0];
fcport->d_id.b.area =
@ -483,7 +482,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
/* Initialize all required fields of fcport */
fcport->vha = vha;
fcport->vp_idx = vha->vp_idx;
fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
@ -544,7 +542,7 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
int rval = 0;
struct qla_hw_data *ha = vha->hw;
if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
goto done_set_internal;
new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
@ -586,7 +584,7 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
uint16_t new_config[4];
struct qla_hw_data *ha = vha->hw;
if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
goto done_reset_internal;
memset(new_config, 0 , sizeof(new_config));
@ -710,8 +708,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
if ((ha->current_topology == ISP_CFG_F ||
(atomic_read(&vha->loop_state) == LOOP_DOWN) ||
((IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
&& req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
elreq.options == EXTERNAL_LOOPBACK) {
@ -1402,6 +1399,9 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
if (rval)
return rval;
/* Set the isp82xx_no_md_cap not to capture minidump */
ha->flags.isp82xx_no_md_cap = 1;
sg_copy_to_buffer(bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
ha->optrom_region_size);

View File

@ -11,27 +11,31 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
* | Module Init and Probe | 0x0120 | 0x4b,0xba,0xfa |
* | Mailbox commands | 0x113e | 0x112c-0x112e |
* | Module Init and Probe | 0x0122 | 0x4b,0xba,0xfa |
* | Mailbox commands | 0x1140 | 0x111a-0x111b |
* | | | 0x112c-0x112e |
* | | | 0x113a |
* | Device Discovery | 0x2086 | 0x2020-0x2022 |
* | Queue Command and IO tracing | 0x3030 | 0x3006,0x3008 |
* | | | 0x302d-0x302e |
* | DPC Thread | 0x401c | |
* | Async Events | 0x505d | 0x502b-0x502f |
* | DPC Thread | 0x401c | 0x4002,0x4013 |
* | Async Events | 0x505f | 0x502b-0x502f |
* | | | 0x5047,0x5052 |
* | Timer Routines | 0x6011 | 0x600e-0x600f |
* | Timer Routines | 0x6011 | |
* | User Space Interactions | 0x709f | 0x7018,0x702e, |
* | | | 0x7039,0x7045, |
* | | | 0x7073-0x7075, |
* | | | 0x708c |
* | Task Management | 0x803c | 0x8025-0x8026 |
* | | | 0x800b,0x8039 |
* | AER/EEH | 0x900f | |
* | AER/EEH | 0x9011 | |
* | Virtual Port | 0xa007 | |
* | ISP82XX Specific | 0xb054 | 0xb053 |
* | ISP82XX Specific | 0xb054 | 0xb024 |
* | MultiQ | 0xc00c | |
* | Misc | 0xd010 | |
* | Target Mode | 0xe06f | |
* | Target Mode Management | 0xf071 | |
* | Target Mode Task Management | 0x1000b | |
* ----------------------------------------------------------------------
*/
@ -378,6 +382,54 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
return (char *)iter_reg + ntohl(fcec->size);
}
static inline void *
qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
uint32_t **last_chain)
{
struct qla2xxx_mqueue_chain *q;
struct qla2xxx_mqueue_header *qh;
uint32_t num_queues;
int que;
struct {
int length;
void *ring;
} aq, *aqp;
if (!ha->tgt.atio_q_length)
return ptr;
num_queues = 1;
aqp = &aq;
aqp->length = ha->tgt.atio_q_length;
aqp->ring = ha->tgt.atio_ring;
for (que = 0; que < num_queues; que++) {
/* aqp = ha->atio_q_map[que]; */
q = ptr;
*last_chain = &q->type;
q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
q->chain_size = htonl(
sizeof(struct qla2xxx_mqueue_chain) +
sizeof(struct qla2xxx_mqueue_header) +
(aqp->length * sizeof(request_t)));
ptr += sizeof(struct qla2xxx_mqueue_chain);
/* Add header. */
qh = ptr;
qh->queue = __constant_htonl(TYPE_ATIO_QUEUE);
qh->number = htonl(que);
qh->size = htonl(aqp->length * sizeof(request_t));
ptr += sizeof(struct qla2xxx_mqueue_header);
/* Add data. */
memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
ptr += aqp->length * sizeof(request_t);
}
return ptr;
}
static inline void *
qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
{
@ -873,6 +925,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
struct qla24xx_fw_dump *fw;
uint32_t ext_mem_cnt;
void *nxt;
void *nxt_chain;
uint32_t *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
if (IS_QLA82XX(ha))
@ -1091,6 +1145,16 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla24xx_copy_eft(ha, nxt);
nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
}
/* Adjust valid length. */
ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
qla24xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
@ -1399,6 +1463,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Chain entries -- started with MQ. */
nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
@ -1717,6 +1782,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Chain entries -- started with MQ. */
nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
@ -2218,6 +2284,7 @@ copy_queue:
/* Chain entries -- started with MQ. */
nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);

View File

@ -244,6 +244,7 @@ struct qla2xxx_mqueue_header {
uint32_t queue;
#define TYPE_REQUEST_QUEUE 0x1
#define TYPE_RESPONSE_QUEUE 0x2
#define TYPE_ATIO_QUEUE 0x3
uint32_t number;
uint32_t size;
};
@ -339,3 +340,11 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
#define ql_dbg_misc 0x00010000 /* For dumping everything that is
* not covered by upper categories
*/
#define ql_dbg_verbose 0x00008000 /* More verbosity for each level
* This is to be used with other levels where
* more verbosity is required. It might not
* be applicable to all the levels.
*/
#define ql_dbg_tgt 0x00004000 /* Target mode */
#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */

View File

@ -186,6 +186,7 @@
#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */
struct req_que;
@ -1234,11 +1235,27 @@ typedef struct {
* ISP queue - response queue entry definition.
*/
typedef struct {
uint8_t data[60];
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System defined handle */
uint8_t data[52];
uint32_t signature;
#define RESPONSE_PROCESSED 0xDEADDEAD /* Signature */
} response_t;
/*
* ISP queue - ATIO queue entry definition.
*/
struct atio {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
uint8_t data[58];
uint32_t signature;
#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
};
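Both ring definitions use the same trick: the final dword of a consumed entry is overwritten with a 0xDEADDEAD signature so the consumer can distinguish fresh firmware-written entries from already-processed ones after the ring wraps. A simplified sketch of the ATIO consumer loop (handle_atio() stands in for the per-entry dispatch inside qlt_24xx_process_atio_queue()):
	while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
		struct atio *pkt = ha->tgt.atio_ring_ptr;

		handle_atio(vha, pkt);			/* hypothetical handler */
		pkt->signature = ATIO_PROCESSED;	/* retire the entry */

		/* advance, wrapping at the end of the ring */
		if (++ha->tgt.atio_ring_index == ATIO_ENTRY_CNT_24XX) {
			ha->tgt.atio_ring_index = 0;
			ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
		} else {
			ha->tgt.atio_ring_ptr++;
		}
	}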
typedef union {
uint16_t extended;
struct {
@ -1719,11 +1736,13 @@ typedef struct fc_port {
struct fc_rport *rport, *drport;
u32 supported_classes;
uint16_t vp_idx;
uint8_t fc4_type;
uint8_t scan_state;
} fc_port_t;
#define QLA_FCPORT_SCAN_NONE 0
#define QLA_FCPORT_SCAN_FOUND 1
/*
* Fibre channel port/lun states.
*/
@ -1747,6 +1766,7 @@ static const char * const port_state_str[] = {
#define FCF_LOGIN_NEEDED BIT_1
#define FCF_FCP2_DEVICE BIT_2
#define FCF_ASYNC_SENT BIT_3
#define FCF_CONF_COMP_SUPPORTED BIT_4
/* No loop ID flag. */
#define FC_NO_LOOP_ID 0x1000
@ -2419,6 +2439,40 @@ struct qlfc_fw {
uint32_t len;
};
struct qlt_hw_data {
/* Protected by hw lock */
uint32_t enable_class_2:1;
uint32_t enable_explicit_conf:1;
uint32_t ini_mode_force_reverse:1;
uint32_t node_name_set:1;
dma_addr_t atio_dma; /* Physical address. */
struct atio *atio_ring; /* Base virtual address */
struct atio *atio_ring_ptr; /* Current address. */
uint16_t atio_ring_index; /* Current index. */
uint16_t atio_q_length;
void *target_lport_ptr;
struct qla_tgt_func_tmpl *tgt_ops;
struct qla_tgt *qla_tgt;
struct qla_tgt_cmd *cmds[MAX_OUTSTANDING_COMMANDS];
uint16_t current_handle;
struct qla_tgt_vp_map *tgt_vp_map;
struct mutex tgt_mutex;
struct mutex tgt_host_action_mutex;
int saved_set;
uint16_t saved_exchange_count;
uint32_t saved_firmware_options_1;
uint32_t saved_firmware_options_2;
uint32_t saved_firmware_options_3;
uint8_t saved_firmware_options[2];
uint8_t saved_add_firmware_options[2];
uint8_t tgt_node_name[WWN_SIZE];
};
/*
* Qlogic host adapter specific data structure.
*/
@ -2460,7 +2514,9 @@ struct qla_hw_data {
uint32_t thermal_supported:1;
uint32_t isp82xx_reset_hdlr_active:1;
uint32_t isp82xx_reset_owner:1;
/* 28 bits */
uint32_t isp82xx_no_md_cap:1;
uint32_t host_shutting_down:1;
/* 30 bits */
} flags;
/* This spinlock is used to protect "io transactions", you must
@ -2804,7 +2860,6 @@ struct qla_hw_data {
/* ISP2322: red, green, amber. */
uint16_t zio_mode;
uint16_t zio_timer;
struct fc_host_statistics fc_host_stat;
struct qla_msix_entry *msix_entries;
@ -2817,7 +2872,6 @@ struct qla_hw_data {
int cur_vport_count;
struct qla_chip_state_84xx *cs84xx;
struct qla_statistics qla_stats;
struct isp_operations *isp_ops;
struct workqueue_struct *wq;
struct qlfc_fw fw_buf;
@ -2863,6 +2917,8 @@ struct qla_hw_data {
dma_addr_t md_tmplt_hdr_dma;
void *md_dump;
uint32_t md_dump_size;
struct qlt_hw_data tgt;
};
/*
@ -2920,6 +2976,7 @@ typedef struct scsi_qla_host {
#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */
#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */
#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */
#define SCR_PENDING 21 /* SCR in target mode */
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
@ -2979,10 +3036,21 @@ typedef struct scsi_qla_host {
struct req_que *req;
int fw_heartbeat_counter;
int seconds_since_last_heartbeat;
struct fc_host_statistics fc_host_stat;
struct qla_statistics qla_stats;
atomic_t vref_count;
} scsi_qla_host_t;
#define SET_VP_IDX 1
#define SET_AL_PA 2
#define RESET_VP_IDX 3
#define RESET_AL_PA 4
struct qla_tgt_vp_map {
uint8_t idx;
scsi_qla_host_t *vha;
};
/*
* Macros to help code, maintain, etc.
*/

View File

@ -175,6 +175,7 @@ extern int qla2x00_vp_abort_isp(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_iocb.c source file.
*/
extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
@ -188,6 +189,8 @@ extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
extern int qla24xx_dif_start_scsi(srb_t *);
extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
/*
* Global Function Prototypes in qla_mbx.c source file.
@ -238,6 +241,9 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *, uint8_t *, uint8_t *, uint16_t *);
extern int
qla2x00_init_firmware(scsi_qla_host_t *, uint16_t);
extern int
qla2x00_get_node_name_list(scsi_qla_host_t *, void **, int *);
extern int
qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t);
@ -383,6 +389,8 @@ extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
extern void qla2x00_free_irqs(scsi_qla_host_t *);
extern int qla2x00_get_data_rate(scsi_qla_host_t *);
extern char *qla2x00_get_link_speed_str(struct qla_hw_data *);
/*
* Global Function Prototypes in qla_sup.c source file.
*/
@ -546,6 +554,7 @@ extern void qla2x00_sp_free(void *, void *);
extern void qla2x00_sp_timeout(unsigned long);
extern void qla2x00_bsg_job_done(void *, void *, int);
extern void qla2x00_bsg_sp_free(void *, void *);
extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
/* Interrupt related */
extern irqreturn_t qla82xx_intr_handler(int, void *);

View File

@ -5,6 +5,7 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include "qla_target.h"
static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
@ -556,7 +557,8 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
ct_req->req.rff_id.port_id[1] = vha->d_id.b.area;
ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
ct_req->req.rff_id.fc4_feature = BIT_1;
qlt_rff_id(vha, ct_req);
ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */
/* Execute MS IOCB */

View File

@ -17,6 +17,9 @@
#include <asm/prom.h>
#endif
#include <target/target_core_base.h>
#include "qla_target.h"
/*
* QLogic ISP2x00 Hardware Support Function Prototypes.
*/
@ -518,7 +521,10 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
return QLA_FUNCTION_FAILED;
}
}
rval = qla2x00_init_rings(vha);
if (qla_ini_mode_enabled(vha))
rval = qla2x00_init_rings(vha);
ha->flags.chip_reset_done = 1;
if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
@ -1233,6 +1239,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
mq_size += ha->max_rsp_queues *
(rsp->length * sizeof(response_t));
}
if (ha->tgt.atio_q_length)
mq_size += ha->tgt.atio_q_length * sizeof(request_t);
/* Allocate memory for Fibre Channel Event Buffer. */
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
goto try_eft;
@ -1696,6 +1704,12 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
/* Setup ATIO queue dma pointers for target mode */
icb->atio_q_inpointer = __constant_cpu_to_le16(0);
icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
if (ha->mqenable || IS_QLA83XX(ha)) {
icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
icb->rid = __constant_cpu_to_le16(rid);
@ -1739,6 +1753,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
}
qlt_24xx_config_rings(vha, reg);
/* PCI posting */
RD_REG_DWORD(&ioreg->hccr);
}
@ -1794,6 +1810,11 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
spin_unlock(&ha->vport_slock);
ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
ha->tgt.atio_ring_index = 0;
/* Initialize ATIO queue entries */
qlt_init_atio_q_entries(vha);
ha->isp_ops->config_rings(vha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@ -2051,6 +2072,10 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
vha->d_id.b.area = area;
vha->d_id.b.al_pa = al_pa;
spin_lock(&ha->vport_slock);
qlt_update_vp_map(vha, SET_AL_PA);
spin_unlock(&ha->vport_slock);
if (!vha->flags.init_done)
ql_log(ql_log_info, vha, 0x2010,
"Topology - %s, Host Loop address 0x%x.\n",
@ -2185,7 +2210,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
/* Reset NVRAM data. */
ql_log(ql_log_warn, vha, 0x0064,
"Inconisistent NVRAM "
"Inconsistent NVRAM "
"detected: checksum=0x%x id=%c version=0x%x.\n",
chksum, nv->id[0], nv->nvram_version);
ql_log(ql_log_warn, vha, 0x0065,
@ -2270,7 +2295,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
if (IS_QLA23XX(ha)) {
nv->firmware_options[0] |= BIT_2;
nv->firmware_options[0] &= ~BIT_3;
nv->firmware_options[0] &= ~BIT_6;
nv->special_options[0] &= ~BIT_6;
nv->add_firmware_options[1] |= BIT_5 | BIT_4;
if (IS_QLA2300(ha)) {
@ -2467,14 +2492,21 @@ qla2x00_rport_del(void *data)
{
fc_port_t *fcport = data;
struct fc_rport *rport;
scsi_qla_host_t *vha = fcport->vha;
unsigned long flags;
spin_lock_irqsave(fcport->vha->host->host_lock, flags);
rport = fcport->drport ? fcport->drport: fcport->rport;
fcport->drport = NULL;
spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
if (rport)
if (rport) {
fc_remote_port_delete(rport);
/*
* Release the target mode FC NEXUS in qla_target.c code
* if target mode is enabled.
*/
qlt_fc_port_deleted(vha, fcport);
}
}
/**
@ -2495,11 +2527,11 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
/* Setup fcport template structure. */
fcport->vha = vha;
fcport->vp_idx = vha->vp_idx;
fcport->port_type = FCT_UNKNOWN;
fcport->loop_id = FC_NO_LOOP_ID;
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
fcport->supported_classes = FC_COS_UNSPECIFIED;
fcport->scan_state = QLA_FCPORT_SCAN_NONE;
return fcport;
}
@ -2726,7 +2758,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
new_fcport->d_id.b.area = area;
new_fcport->d_id.b.al_pa = al_pa;
new_fcport->loop_id = loop_id;
new_fcport->vp_idx = vha->vp_idx;
rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
if (rval2 != QLA_SUCCESS) {
ql_dbg(ql_dbg_disc, vha, 0x201a,
@ -2760,10 +2791,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
if (!found) {
/* New device, add to fcports list. */
if (vha->vp_idx) {
new_fcport->vha = vha;
new_fcport->vp_idx = vha->vp_idx;
}
list_add_tail(&new_fcport->list, &vha->vp_fcports);
/* Allocate a new replacement fcport. */
@ -2800,8 +2827,6 @@ cleanup_allocation:
static void
qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
#define LS_UNKNOWN 2
static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
char *link_speed;
int rval;
uint16_t mb[4];
@ -2829,11 +2854,7 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->port_name[6], fcport->port_name[7], rval,
fcport->fp_speed, mb[0], mb[1]);
} else {
link_speed = link_speeds[LS_UNKNOWN];
if (fcport->fp_speed < 5)
link_speed = link_speeds[fcport->fp_speed];
else if (fcport->fp_speed == 0x13)
link_speed = link_speeds[5];
link_speed = qla2x00_get_link_speed_str(ha);
ql_dbg(ql_dbg_disc, vha, 0x2005,
"iIDMA adjusted to %s GB/s "
"on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed,
@ -2864,6 +2885,12 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
"Unable to allocate fc remote port.\n");
return;
}
/*
* Create target mode FC NEXUS in qla_target.c if target mode is
* enabled.
*/
qlt_fc_port_added(vha, fcport);
spin_lock_irqsave(fcport->vha->host->host_lock, flags);
*((fc_port_t **)rport->dd_data) = fcport;
spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
@ -2921,7 +2948,7 @@ static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
int rval;
fc_port_t *fcport, *fcptemp;
fc_port_t *fcport;
uint16_t next_loopid;
uint16_t mb[MAILBOX_REGISTER_COUNT];
uint16_t loop_id;
@ -2959,7 +2986,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
0xfc, mb, BIT_1|BIT_0);
if (rval != QLA_SUCCESS) {
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
return rval;
break;
}
if (mb[0] != MBS_COMMAND_COMPLETE) {
ql_dbg(ql_dbg_disc, vha, 0x2042,
@ -2991,21 +3018,16 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
}
#define QLA_FCPORT_SCAN 1
#define QLA_FCPORT_FOUND 2
list_for_each_entry(fcport, &vha->vp_fcports, list) {
fcport->scan_state = QLA_FCPORT_SCAN;
}
rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
if (rval != QLA_SUCCESS)
break;
/*
* Logout all previous fabric devices marked lost, except
* FCP2 devices.
*/
/* Add new ports to existing port list */
list_splice_tail_init(&new_fcports, &vha->vp_fcports);
/* Starting free loop ID. */
next_loopid = ha->min_external_loopid;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
@ -3013,7 +3035,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
continue;
if (fcport->scan_state == QLA_FCPORT_SCAN &&
/* Logout lost/gone fabric devices (non-FCP2) */
if (fcport->scan_state != QLA_FCPORT_SCAN_FOUND &&
atomic_read(&fcport->state) == FCS_ONLINE) {
qla2x00_mark_device_lost(vha, fcport,
ql2xplogiabsentdevice, 0);
@ -3026,78 +3049,30 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
fcport->d_id.b.domain,
fcport->d_id.b.area,
fcport->d_id.b.al_pa);
fcport->loop_id = FC_NO_LOOP_ID;
}
}
}
/* Starting free loop ID. */
next_loopid = ha->min_external_loopid;
/*
* Scan through our port list and login entries that need to be
* logged in.
*/
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (atomic_read(&vha->loop_down_timer) ||
test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
(fcport->flags & FCF_LOGIN_NEEDED) == 0)
continue;
}
fcport->scan_state = QLA_FCPORT_SCAN_NONE;
if (fcport->loop_id == FC_NO_LOOP_ID) {
fcport->loop_id = next_loopid;
rval = qla2x00_find_new_loop_id(
base_vha, fcport);
if (rval != QLA_SUCCESS) {
/* Ran out of IDs to use */
break;
/* Login fabric devices that need a login */
if ((fcport->flags & FCF_LOGIN_NEEDED) != 0 &&
atomic_read(&vha->loop_down_timer) == 0) {
if (fcport->loop_id == FC_NO_LOOP_ID) {
fcport->loop_id = next_loopid;
rval = qla2x00_find_new_loop_id(
base_vha, fcport);
if (rval != QLA_SUCCESS) {
/* Ran out of IDs to use */
continue;
}
}
}
/* Login and update database */
qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
}
/* Exit if out of loop IDs. */
if (rval != QLA_SUCCESS) {
break;
}
/*
* Login and add the new devices to our port list.
*/
list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
if (atomic_read(&vha->loop_down_timer) ||
test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
/* Find a new loop ID to use. */
fcport->loop_id = next_loopid;
rval = qla2x00_find_new_loop_id(base_vha, fcport);
if (rval != QLA_SUCCESS) {
/* Ran out of IDs to use */
break;
}
/* Login and update database */
qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
if (vha->vp_idx) {
fcport->vha = vha;
fcport->vp_idx = vha->vp_idx;
}
list_move_tail(&fcport->list, &vha->vp_fcports);
}
} while (0);
/* Free all new device structures not processed. */
list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
list_del(&fcport->list);
kfree(fcport);
}
if (rval) {
ql_dbg(ql_dbg_disc, vha, 0x2068,
"Configure fabric error exit rval=%d.\n", rval);
@ -3287,7 +3262,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
WWN_SIZE))
continue;
fcport->scan_state = QLA_FCPORT_FOUND;
fcport->scan_state = QLA_FCPORT_SCAN_FOUND;
found++;
@ -3595,6 +3570,12 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
if (mb[10] & BIT_1)
fcport->supported_classes |= FC_COS_CLASS3;
if (IS_FWI2_CAPABLE(ha)) {
if (mb[10] & BIT_7)
fcport->flags |=
FCF_CONF_COMP_SUPPORTED;
}
rval = QLA_SUCCESS;
break;
} else if (mb[0] == MBS_LOOP_ID_USED) {
@ -3841,7 +3822,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
vha->flags.online = 0;
ha->flags.chip_reset_done = 0;
clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
ha->qla_stats.total_isp_aborts++;
vha->qla_stats.total_isp_aborts++;
ql_log(ql_log_info, vha, 0x00af,
"Performing ISP error recovery - ha=%p.\n", ha);
@ -4066,6 +4047,7 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct rsp_que *rsp = ha->rsp_q_map[0];
unsigned long flags;
/* If firmware needs to be loaded */
if (qla2x00_isp_firmware(vha)) {
@ -4090,6 +4072,16 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
vha->flags.online = 1;
/*
* Process any ATIO queue entries that came in
* while we weren't online.
*/
spin_lock_irqsave(&ha->hardware_lock, flags);
if (qla_tgt_mode_enabled(vha))
qlt_24xx_process_atio_queue(vha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Wait at most MAX_TARGET RSCNs for a stable link. */
wait_time = 256;
do {
@ -4279,7 +4271,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
/* Reset NVRAM data. */
ql_log(ql_log_warn, vha, 0x006b,
"Inconisistent NVRAM detected: checksum=0x%x id=%c "
"Inconsistent NVRAM detected: checksum=0x%x id=%c "
"version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
ql_log(ql_log_warn, vha, 0x006c,
"Falling back to functioning (yet invalid -- WWPN) "
@ -4330,6 +4322,15 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
rval = 1;
}
if (!qla_ini_mode_enabled(vha)) {
/* Don't enable full login after initial LIP */
nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
/* Don't enable LIP full login for initiator */
nv->host_p &= __constant_cpu_to_le32(~BIT_10);
}
qlt_24xx_config_nvram_stage1(vha, nv);
/* Reset Initialization control block */
memset(icb, 0, ha->init_cb_size);
@ -4357,8 +4358,10 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
"QLA2462");
/* Use alternate WWN? */
qlt_24xx_config_nvram_stage2(vha, icb);
if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
/* Use alternate WWN? */
memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
}
@ -5029,7 +5032,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
/* Reset NVRAM data. */
ql_log(ql_log_info, vha, 0x0073,
"Inconisistent NVRAM detected: checksum=0x%x id=%c "
"Inconsistent NVRAM detected: checksum=0x%x id=%c "
"version=0x%x.\n", chksum, nv->id[0],
le16_to_cpu(nv->nvram_version));
ql_log(ql_log_info, vha, 0x0074,

View File

@ -5,6 +5,7 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include "qla_target.h"
#include <linux/blkdev.h>
#include <linux/delay.h>
@ -23,18 +24,17 @@ qla2x00_get_cmd_direction(srb_t *sp)
{
uint16_t cflags;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct scsi_qla_host *vha = sp->fcport->vha;
cflags = 0;
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cflags = CF_WRITE;
sp->fcport->vha->hw->qla_stats.output_bytes +=
scsi_bufflen(cmd);
vha->qla_stats.output_bytes += scsi_bufflen(cmd);
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cflags = CF_READ;
sp->fcport->vha->hw->qla_stats.input_bytes +=
scsi_bufflen(cmd);
vha->qla_stats.input_bytes += scsi_bufflen(cmd);
}
return (cflags);
}
@ -385,9 +385,10 @@ qla2x00_start_scsi(srb_t *sp)
else
req->cnt = req->length -
(req->ring_index - cnt);
/* If still no head room then bail out */
if (req->cnt < (req_cnt + 2))
goto queuing_error;
}
if (req->cnt < (req_cnt + 2))
goto queuing_error;
/* Build command packet */
req->current_outstanding_cmd = handle;
@ -470,7 +471,7 @@ queuing_error:
/**
* qla2x00_start_iocbs() - Execute the IOCB command
*/
static void
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
struct qla_hw_data *ha = vha->hw;
@ -571,6 +572,29 @@ qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
return (ret);
}
/*
* qla2x00_issue_marker
*
* Issue marker
* Caller CAN have hardware lock held as specified by ha_locked parameter.
* Might release it, then reacquire.
*/
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
if (ha_locked) {
if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
MK_SYNC_ALL) != QLA_SUCCESS)
return QLA_FUNCTION_FAILED;
} else {
if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
MK_SYNC_ALL) != QLA_SUCCESS)
return QLA_FUNCTION_FAILED;
}
vha->marker_needed = 0;
return QLA_SUCCESS;
}
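For illustration, the two calling conventions the comment above describes look like this from a caller's side (a hedged sketch; the wrapper function is invented):

static int example_send_marker(scsi_qla_host_t *vha, int have_hw_lock)
{
	/* have_hw_lock mirrors the ha_locked parameter: 1 means the
	 * caller already holds ha->hardware_lock, so the lock-free
	 * __qla2x00_marker() path is taken; 0 routes through
	 * qla2x00_marker(), which takes and drops the lock itself. */
	return qla2x00_issue_marker(vha, have_hw_lock);
}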
/**
* qla24xx_calc_iocbs() - Determine number of Command Type 3 and
* Continuation Type 1 IOCBs to allocate.
@ -629,11 +653,11 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->control_flags =
__constant_cpu_to_le16(CF_WRITE_DATA);
ha->qla_stats.output_bytes += scsi_bufflen(cmd);
vha->qla_stats.output_bytes += scsi_bufflen(cmd);
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->control_flags =
__constant_cpu_to_le16(CF_READ_DATA);
ha->qla_stats.input_bytes += scsi_bufflen(cmd);
vha->qla_stats.input_bytes += scsi_bufflen(cmd);
}
cur_seg = scsi_sglist(cmd);
@ -745,13 +769,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_WRITE_DATA);
sp->fcport->vha->hw->qla_stats.output_bytes +=
scsi_bufflen(cmd);
vha->qla_stats.output_bytes += scsi_bufflen(cmd);
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_READ_DATA);
sp->fcport->vha->hw->qla_stats.input_bytes +=
scsi_bufflen(cmd);
vha->qla_stats.input_bytes += scsi_bufflen(cmd);
}
/* One DSD is available in the Command Type 3 IOCB */
@ -1245,7 +1267,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
return QLA_SUCCESS;
}
cmd_pkt->vp_index = sp->fcport->vp_idx;
cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
@ -1502,9 +1524,9 @@ qla24xx_start_scsi(srb_t *sp)
else
req->cnt = req->length -
(req->ring_index - cnt);
if (req->cnt < (req_cnt + 2))
goto queuing_error;
}
if (req->cnt < (req_cnt + 2))
goto queuing_error;
/* Build command packet. */
req->current_outstanding_cmd = handle;
@ -1527,7 +1549,7 @@ qla24xx_start_scsi(srb_t *sp)
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
cmd_pkt->vp_index = sp->fcport->vp_idx;
cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
@ -1717,11 +1739,10 @@ qla24xx_dif_start_scsi(srb_t *sp)
else
req->cnt = req->length -
(req->ring_index - cnt);
if (req->cnt < (req_cnt + 2))
goto queuing_error;
}
if (req->cnt < (req_cnt + 2))
goto queuing_error;
status |= QDSS_GOT_Q_SPACE;
/* Build header part of command packet (excluding the OPCODE). */
@ -1898,7 +1919,7 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
logio->port_id[2] = sp->fcport->d_id.b.domain;
logio->vp_index = sp->fcport->vp_idx;
logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
@ -1922,7 +1943,7 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
sp->fcport->d_id.b.al_pa);
mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}
static void
@ -1935,7 +1956,7 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
logio->port_id[2] = sp->fcport->d_id.b.domain;
logio->vp_index = sp->fcport->vp_idx;
logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
@ -1952,7 +1973,7 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
sp->fcport->d_id.b.al_pa);
mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
/* Implicit: mbx->mbx10 = 0. */
}
@ -1962,7 +1983,7 @@ qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->vp_index = sp->fcport->vp_idx;
logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
@ -1983,7 +2004,7 @@ qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}
static void
@ -2009,7 +2030,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
tsk->port_id[0] = fcport->d_id.b.al_pa;
tsk->port_id[1] = fcport->d_id.b.area;
tsk->port_id[2] = fcport->d_id.b.domain;
tsk->vp_index = fcport->vp_idx;
tsk->vp_index = fcport->vha->vp_idx;
if (flags == TCF_LUN_RESET) {
int_to_scsilun(lun, &tsk->lun);
@ -2030,7 +2051,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->handle = sp->handle;
els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
els_iocb->vp_index = sp->fcport->vp_idx;
els_iocb->vp_index = sp->fcport->vha->vp_idx;
els_iocb->sof_type = EST_SOFI3;
els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
@ -2160,7 +2181,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
ct_iocb->handle = sp->handle;
ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
ct_iocb->vp_index = sp->fcport->vp_idx;
ct_iocb->vp_index = sp->fcport->vha->vp_idx;
ct_iocb->comp_status = __constant_cpu_to_le16(0);
ct_iocb->cmd_dsd_count =
@ -2343,11 +2364,10 @@ sufficient_dsds:
else
req->cnt = req->length -
(req->ring_index - cnt);
if (req->cnt < (req_cnt + 2))
goto queuing_error;
}
if (req->cnt < (req_cnt + 2))
goto queuing_error;
ctx = sp->u.scmd.ctx =
mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
if (!ctx) {
@ -2362,7 +2382,7 @@ sufficient_dsds:
if (!ctx->fcp_cmnd) {
ql_log(ql_log_fatal, vha, 0x3011,
"Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
goto queuing_error_fcp_cmnd;
goto queuing_error;
}
/* Initialize the DSD list and dma handle */
@ -2400,7 +2420,7 @@ sufficient_dsds:
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
cmd_pkt->vp_index = sp->fcport->vp_idx;
cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
/* Build IOCB segments */
if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
@ -2489,7 +2509,7 @@ sufficient_dsds:
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
cmd_pkt->vp_index = sp->fcport->vp_idx;
cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,


@ -5,6 +5,7 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include "qla_target.h"
#include <linux/delay.h>
#include <linux/slab.h>
@ -309,6 +310,28 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
"IDC failed to post ACK.\n");
}
#define LS_UNKNOWN 2
char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha)
{
static char *link_speeds[] = {"1", "2", "?", "4", "8", "16", "10"};
char *link_speed;
int fw_speed = ha->link_data_rate;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
link_speed = link_speeds[0];
else if (fw_speed == 0x13)
link_speed = link_speeds[6];
else {
link_speed = link_speeds[LS_UNKNOWN];
if (fw_speed < 6)
link_speed =
link_speeds[fw_speed];
}
return link_speed;
}
/**
* qla2x00_async_event() - Process aynchronous events.
* @ha: SCSI driver HA context
@ -317,9 +340,6 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
#define LS_UNKNOWN 2
static char *link_speeds[] = { "1", "2", "?", "4", "8", "16", "10" };
char *link_speed;
uint16_t handle_cnt;
uint16_t cnt, mbx;
uint32_t handles[5];
@ -454,8 +474,8 @@ skip_rio:
case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
ql_dbg(ql_dbg_async, vha, 0x5008,
"Asynchronous WAKEUP_THRES.\n");
break;
break;
case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
ql_dbg(ql_dbg_async, vha, 0x5009,
"LIP occurred (%x).\n", mb[1]);
@ -479,20 +499,14 @@ skip_rio:
break;
case MBA_LOOP_UP: /* Loop Up Event */
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
link_speed = link_speeds[0];
if (IS_QLA2100(ha) || IS_QLA2200(ha))
ha->link_data_rate = PORT_SPEED_1GB;
} else {
link_speed = link_speeds[LS_UNKNOWN];
if (mb[1] < 6)
link_speed = link_speeds[mb[1]];
else if (mb[1] == 0x13)
link_speed = link_speeds[6];
else
ha->link_data_rate = mb[1];
}
ql_dbg(ql_dbg_async, vha, 0x500a,
"LOOP UP detected (%s Gbps).\n", link_speed);
"LOOP UP detected (%s Gbps).\n",
qla2x00_get_link_speed_str(ha));
vha->flags.management_server_logged_in = 0;
qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
@ -638,6 +652,8 @@ skip_rio:
ql_dbg(ql_dbg_async, vha, 0x5010,
"Port unavailable %04x %04x %04x.\n",
mb[1], mb[2], mb[3]);
ql_log(ql_log_warn, vha, 0x505e,
"Link is offline.\n");
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
@ -670,12 +686,17 @@ skip_rio:
ql_dbg(ql_dbg_async, vha, 0x5011,
"Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
mb[1], mb[2], mb[3]);
qlt_async_event(mb[0], vha, mb);
break;
}
ql_dbg(ql_dbg_async, vha, 0x5012,
"Port database changed %04x %04x %04x.\n",
mb[1], mb[2], mb[3]);
ql_log(ql_log_warn, vha, 0x505f,
"Link is operational (%s Gbps).\n",
qla2x00_get_link_speed_str(ha));
/*
* Mark all devices as missing so we will login again.
@ -684,8 +705,13 @@ skip_rio:
qla2x00_mark_all_devices_lost(vha, 1);
if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
set_bit(SCR_PENDING, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
qlt_async_event(mb[0], vha, mb);
break;
case MBA_RSCN_UPDATE: /* State Change Registration */
@ -807,6 +833,8 @@ skip_rio:
mb[0], mb[1], mb[2], mb[3]);
}
qlt_async_event(mb[0], vha, mb);
if (!vha->vp_idx && ha->num_vhosts)
qla2x00_alert_all_vps(rsp, mb);
}
@ -1172,6 +1200,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
} else if (iop[0] & BIT_5)
fcport->port_type = FCT_INITIATOR;
if (iop[0] & BIT_7)
fcport->flags |= FCF_CONF_COMP_SUPPORTED;
if (logio->io_parameter[7] || logio->io_parameter[8])
fcport->supported_classes |= FC_COS_CLASS2;
if (logio->io_parameter[9] || logio->io_parameter[10])
@ -1986,6 +2017,9 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
if (pkt->entry_status != 0) {
qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
(void)qlt_24xx_process_response_error(vha, pkt);
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
wmb();
continue;
@ -2016,6 +2050,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
case ELS_IOCB_TYPE:
qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
break;
case ABTS_RECV_24XX:
/* ensure that the ATIO queue is empty */
qlt_24xx_process_atio_queue(vha);
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case NOTIFY_ACK_TYPE:
qlt_response_pkt_all_vps(vha, (response_t *)pkt);
break;
case MARKER_TYPE:
/* Do nothing in this case, this check is to prevent it
* from falling into default case
@ -2168,6 +2210,13 @@ qla24xx_intr_handler(int irq, void *dev_id)
case 0x14:
qla24xx_process_response_queue(vha, rsp);
break;
case 0x1C: /* ATIO queue updated */
qlt_24xx_process_atio_queue(vha);
break;
case 0x1D: /* ATIO and response queues updated */
qlt_24xx_process_atio_queue(vha);
qla24xx_process_response_queue(vha, rsp);
break;
default:
ql_dbg(ql_dbg_async, vha, 0x504f,
"Unrecognized interrupt type (%d).\n", stat * 0xff);
@ -2312,6 +2361,13 @@ qla24xx_msix_default(int irq, void *dev_id)
case 0x14:
qla24xx_process_response_queue(vha, rsp);
break;
case 0x1C: /* ATIO queue updated */
qlt_24xx_process_atio_queue(vha);
break;
case 0x1D: /* ATIO and response queues updated */
qlt_24xx_process_atio_queue(vha);
qla24xx_process_response_queue(vha, rsp);
break;
default:
ql_dbg(ql_dbg_async, vha, 0x5051,
"Unrecognized interrupt type (%d).\n", stat & 0xff);
@ -2564,7 +2620,15 @@ void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
struct rsp_que *rsp = ha->rsp_q_map[0];
struct rsp_que *rsp;
/*
* We need to check that ha->rsp_q_map is valid in case we are called
* from a probe failure context.
*/
if (!ha->rsp_q_map || !ha->rsp_q_map[0])
return;
rsp = ha->rsp_q_map[0];
if (ha->flags.msix_enabled)
qla24xx_disable_msix(ha);

File diff suppressed because it is too large.


@ -6,6 +6,7 @@
*/
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
@ -49,6 +50,9 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->vport_slock, flags);
list_add_tail(&vha->list, &ha->vp_list);
qlt_update_vp_map(vha, SET_VP_IDX);
spin_unlock_irqrestore(&ha->vport_slock, flags);
mutex_unlock(&ha->vport_lock);
@ -79,6 +83,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->vport_slock, flags);
}
list_del(&vha->list);
qlt_update_vp_map(vha, RESET_VP_IDX);
spin_unlock_irqrestore(&ha->vport_slock, flags);
vp_id = vha->vp_idx;
@ -134,7 +139,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
list_for_each_entry(fcport, &vha->vp_fcports, list) {
ql_dbg(ql_dbg_vport, vha, 0xa001,
"Marking port dead, loop_id=0x%04x : %x.\n",
fcport->loop_id, fcport->vp_idx);
fcport->loop_id, fcport->vha->vp_idx);
qla2x00_mark_device_lost(vha, fcport, 0, 0);
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
@ -150,6 +155,9 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
/* Remove port id from vp target map */
qlt_update_vp_map(vha, RESET_AL_PA);
qla2x00_mark_vp_devices_dead(vha);
atomic_set(&vha->vp_state, VP_FAILED);
vha->flags.management_server_logged_in = 0;
@ -295,10 +303,8 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
ql_dbg(ql_dbg_dpc, vha, 0x4012,
"Entering %s.\n", __func__);
ql_dbg(ql_dbg_dpc, vha, 0x4013,
"vp_flags: 0x%lx.\n", vha->vp_flags);
ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
"Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
qla2x00_do_work(vha);
@ -348,7 +354,7 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
}
}
ql_dbg(ql_dbg_dpc, vha, 0x401c,
ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
"Exiting %s.\n", __func__);
return 0;
}

View File

@ -1190,12 +1190,12 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
}
/* Offset in flash = lower 16 bits
* Number of enteries = upper 16 bits
* Number of entries = upper 16 bits
*/
offset = n & 0xffffU;
n = (n >> 16) & 0xffffU;
/* number of addr/value pair should not exceed 1024 enteries */
/* number of addr/value pair should not exceed 1024 entries */
if (n >= 1024) {
ql_log(ql_log_fatal, vha, 0x0071,
"Card flash not initialized:n=0x%x.\n", n);
@ -2050,7 +2050,7 @@ qla82xx_intr_handler(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
ql_log(ql_log_info, NULL, 0xb054,
ql_log(ql_log_info, NULL, 0xb053,
"%s: NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
@ -2446,7 +2446,7 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
ql_log(ql_log_info, vha, 0x00a1,
"Firmware loaded successully from flash.\n");
"Firmware loaded successfully from flash.\n");
return QLA_SUCCESS;
} else {
ql_log(ql_log_warn, vha, 0x0108,
@ -2461,7 +2461,7 @@ try_blob_fw:
blob = ha->hablob = qla2x00_request_firmware(vha);
if (!blob) {
ql_log(ql_log_fatal, vha, 0x00a3,
"Firmware image not preset.\n");
"Firmware image not present.\n");
goto fw_load_failed;
}
@ -2689,7 +2689,7 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
if (!optrom) {
ql_log(ql_log_warn, vha, 0xb01b,
"Unable to allocate memory "
"for optron burst write (%x KB).\n",
"for optrom burst write (%x KB).\n",
OPTROM_BURST_SIZE / 1024);
}
}
@ -2960,9 +2960,8 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
* changing the state to DEV_READY
*/
ql_log(ql_log_info, vha, 0xb023,
"%s : QUIESCENT TIMEOUT.\n", QLA2XXX_DRIVER_NAME);
ql_log(ql_log_info, vha, 0xb024,
"DRV_ACTIVE:%d DRV_STATE:%d.\n",
"%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d "
"DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME,
drv_active, drv_state);
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_READY);
@ -3129,7 +3128,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
if (ql2xmdenable) {
if (qla82xx_md_collect(vha))
ql_log(ql_log_warn, vha, 0xb02c,
"Not able to collect minidump.\n");
"Minidump not collected.\n");
} else
ql_log(ql_log_warn, vha, 0xb04f,
"Minidump disabled.\n");
@ -3160,11 +3159,11 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
"Firmware version differs "
"Previous version: %d:%d:%d - "
"New version: %d:%d:%d\n",
fw_major_version, fw_minor_version,
fw_subminor_version,
ha->fw_major_version,
ha->fw_minor_version,
ha->fw_subminor_version,
fw_major_version, fw_minor_version,
fw_subminor_version);
ha->fw_subminor_version);
/* Release MiniDump resources */
qla82xx_md_free(vha);
/* ALlocate MiniDump resources */
@ -3325,6 +3324,30 @@ exit:
return rval;
}
static int qla82xx_check_temp(scsi_qla_host_t *vha)
{
uint32_t temp, temp_state, temp_val;
struct qla_hw_data *ha = vha->hw;
temp = qla82xx_rd_32(ha, CRB_TEMP_STATE);
temp_state = qla82xx_get_temp_state(temp);
temp_val = qla82xx_get_temp_val(temp);
if (temp_state == QLA82XX_TEMP_PANIC) {
ql_log(ql_log_warn, vha, 0x600e,
"Device temperature %d degrees C exceeds "
" maximum allowed. Hardware has been shut down.\n",
temp_val);
return 1;
} else if (temp_state == QLA82XX_TEMP_WARN) {
ql_log(ql_log_warn, vha, 0x600f,
"Device temperature %d degrees C exceeds "
"operating range. Immediate action needed.\n",
temp_val);
}
return 0;
}
void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
@ -3347,18 +3370,20 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
/* don't poll if reset is going on */
if (!ha->flags.isp82xx_reset_hdlr_active) {
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
if (dev_state == QLA82XX_DEV_NEED_RESET &&
if (qla82xx_check_temp(vha)) {
set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
ha->flags.isp82xx_fw_hung = 1;
qla82xx_clear_pending_mbx(vha);
} else if (dev_state == QLA82XX_DEV_NEED_RESET &&
!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
ql_log(ql_log_warn, vha, 0x6001,
"Adapter reset needed.\n");
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
!test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
ql_log(ql_log_warn, vha, 0x6002,
"Quiescent needed.\n");
set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
} else {
if (qla82xx_check_fw_alive(vha)) {
ql_dbg(ql_dbg_timer, vha, 0x6011,
@ -3398,7 +3423,6 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
set_bit(ISP_ABORT_NEEDED,
&vha->dpc_flags);
}
qla2xxx_wake_dpc(vha);
ha->flags.isp82xx_fw_hung = 1;
ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
qla82xx_clear_pending_mbx(vha);
@ -4113,6 +4137,14 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
goto md_failed;
}
if (ha->flags.isp82xx_no_md_cap) {
ql_log(ql_log_warn, vha, 0xb054,
"Forced reset from application, "
"ignore minidump capture\n");
ha->flags.isp82xx_no_md_cap = 0;
goto md_failed;
}
if (qla82xx_validate_template_chksum(vha)) {
ql_log(ql_log_info, vha, 0xb039,
"Template checksum validation error\n");


@ -26,6 +26,7 @@
#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54)
#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
#define CRB_TEMP_STATE QLA82XX_REG(0x1b4)
#define QLA82XX_DMA_SHIFT_VALUE 0x55555555
#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
@ -561,7 +562,6 @@
#define QLA82XX_FW_VERSION_SUB (QLA82XX_CAM_RAM(0x158))
#define QLA82XX_PCIE_REG(reg) (QLA82XX_CRB_PCIE + (reg))
#define PCIE_CHICKEN3 (0x120c8)
#define PCIE_SETUP_FUNCTION (0x12040)
#define PCIE_SETUP_FUNCTION2 (0x12048)
@ -1178,4 +1178,16 @@ static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
#define qla82xx_get_temp_val(x) ((x) >> 16)
#define qla82xx_get_temp_state(x) ((x) & 0xffff)
#define qla82xx_encode_temp(val, state) (((val) << 16) | (state))
/*
* Temperature control.
*/
enum {
QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */
QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */
QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */
};
#endif
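As a quick illustration of the packed temperature word these macros decode, with the Celsius value in the high 16 bits and the state code in the low 16 (a standalone sketch; the numbers are invented):

#include <stdint.h>
#include <stdio.h>

#define qla82xx_get_temp_val(x)		((x) >> 16)
#define qla82xx_get_temp_state(x)	((x) & 0xffff)
#define qla82xx_encode_temp(val, state)	(((val) << 16) | (state))

int main(void)
{
	/* Pretend the firmware reported 95 degrees C in the WARN state. */
	uint32_t temp = qla82xx_encode_temp(95, 0x2 /* QLA82XX_TEMP_WARN */);

	printf("val=%u state=0x%x\n",
	       (unsigned)qla82xx_get_temp_val(temp),
	       (unsigned)qla82xx_get_temp_state(temp));
	return 0;
}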


@ -13,12 +13,13 @@
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include "qla_target.h"
/*
* Driver version
*/
@ -40,6 +41,12 @@ static struct kmem_cache *ctx_cachep;
*/
int ql_errlev = ql_log_all;
int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
"Specify if Class 2 operations are supported from the very "
"beginning. Default is 0 - class 2 not supported.");
int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout,
@ -255,6 +262,8 @@ struct scsi_host_template qla2xxx_driver_template = {
.max_sectors = 0xFFFF,
.shost_attrs = qla2x00_host_attrs,
.supported_mode = MODE_INITIATOR,
};
static struct scsi_transport_template *qla2xxx_transport_template = NULL;
@ -306,7 +315,8 @@ static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
/* -------------------------------------------------------------------------- */
static int qla2x00_alloc_queues(struct qla_hw_data *ha)
static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
struct rsp_que *rsp)
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
@ -324,6 +334,12 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha)
"Unable to allocate memory for response queue ptrs.\n");
goto fail_rsp_map;
}
/*
* Make sure we record at least the request and response queue zero in
* case we need to free them if part of the probe fails.
*/
ha->rsp_q_map[0] = rsp;
ha->req_q_map[0] = req;
set_bit(0, ha->rsp_qid_map);
set_bit(0, ha->req_qid_map);
return 1;
@ -642,12 +658,12 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (ha->flags.eeh_busy) {
if (ha->flags.pci_channel_io_perm_failure) {
ql_dbg(ql_dbg_io, vha, 0x3001,
ql_dbg(ql_dbg_aer, vha, 0x9010,
"PCI Channel IO permanent failure, exiting "
"cmd=%p.\n", cmd);
cmd->result = DID_NO_CONNECT << 16;
} else {
ql_dbg(ql_dbg_io, vha, 0x3002,
ql_dbg(ql_dbg_aer, vha, 0x9011,
"EEH_Busy, Requeuing the cmd=%p.\n", cmd);
cmd->result = DID_REQUEUE << 16;
}
@ -657,7 +673,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
rval = fc_remote_port_chkready(rport);
if (rval) {
cmd->result = rval;
ql_dbg(ql_dbg_io, vha, 0x3003,
ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
"fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
cmd, rval);
goto qc24_fail_command;
@ -1136,7 +1152,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
ret = FAILED;
ql_log(ql_log_info, vha, 0x8012,
"BUS RESET ISSUED nexus=%ld:%d%d.\n", vha->host_no, id, lun);
"BUS RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
ql_log(ql_log_fatal, vha, 0x8013,
@ -2180,6 +2196,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
"Memory allocated for ha=%p.\n", ha);
ha->pdev = pdev;
ha->tgt.enable_class_2 = ql2xenableclass2;
/* Clear our data area */
ha->bars = bars;
@ -2243,6 +2260,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
ha->gid_list_info_size = 8;
@ -2258,6 +2276,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
ha->gid_list_info_size = 8;
@ -2417,6 +2436,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_cmd_len, host->max_channel, host->max_lun,
host->transportt, sht->vendor_id);
que_init:
/* Alloc arrays of request and response ring ptrs */
if (!qla2x00_alloc_queues(ha, req, rsp)) {
ql_log(ql_log_fatal, base_vha, 0x003d,
"Failed to allocate memory for queue pointers..."
"aborting.\n");
goto probe_init_failed;
}
qlt_probe_one_stage1(base_vha, ha);
/* Set up the irqs */
ret = qla2x00_request_irqs(ha, rsp);
if (ret)
@ -2424,20 +2454,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_save_state(pdev);
/* Alloc arrays of request and response ring ptrs */
que_init:
if (!qla2x00_alloc_queues(ha)) {
ql_log(ql_log_fatal, base_vha, 0x003d,
"Failed to allocate memory for queue pointers.. aborting.\n");
goto probe_init_failed;
}
ha->rsp_q_map[0] = rsp;
ha->req_q_map[0] = req;
/* Assign back pointers */
rsp->req = req;
req->rsp = rsp;
set_bit(0, ha->req_qid_map);
set_bit(0, ha->rsp_qid_map);
/* FWI2-capable only. */
req->req_q_in = &ha->iobase->isp24.req_q_in;
req->req_q_out = &ha->iobase->isp24.req_q_out;
@ -2514,6 +2534,14 @@ que_init:
ql_dbg(ql_dbg_init, base_vha, 0x00ee,
"DPC thread started successfully.\n");
/*
* If we're not coming up in initiator mode, we might sit for
* a while without waking up the dpc thread, which leads to a
* stuck process warning. So just kick the dpc once here and
* let the kthread start (and go back to sleep in qla2x00_do_dpc).
*/
qla2xxx_wake_dpc(base_vha);
skip_dpc:
list_add_tail(&base_vha->list, &ha->vp_list);
base_vha->host->irq = ha->pdev->irq;
@ -2559,7 +2587,11 @@ skip_dpc:
ql_dbg(ql_dbg_init, base_vha, 0x00f2,
"Init done and hba is online.\n");
scsi_scan_host(host);
if (qla_ini_mode_enabled(base_vha))
scsi_scan_host(host);
else
ql_dbg(ql_dbg_init, base_vha, 0x0122,
"skipping scsi_scan_host() for non-initiator port\n");
qla2x00_alloc_sysfs_attr(base_vha);
@ -2577,11 +2609,17 @@ skip_dpc:
base_vha->host_no,
ha->isp_ops->fw_version_str(base_vha, fw_str));
qlt_add_target(ha, base_vha);
return 0;
probe_init_failed:
qla2x00_free_req_que(ha, req);
ha->req_q_map[0] = NULL;
clear_bit(0, ha->req_qid_map);
qla2x00_free_rsp_que(ha, rsp);
ha->rsp_q_map[0] = NULL;
clear_bit(0, ha->rsp_qid_map);
ha->max_req_queues = ha->max_rsp_queues = 0;
probe_failed:
@ -2620,6 +2658,22 @@ probe_out:
return ret;
}
static void
qla2x00_stop_dpc_thread(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
struct task_struct *t = ha->dpc_thread;
if (ha->dpc_thread == NULL)
return;
/*
* qla2xxx_wake_dpc checks for ->dpc_thread
* so we need to zero it out.
*/
ha->dpc_thread = NULL;
kthread_stop(t);
}
static void
qla2x00_shutdown(struct pci_dev *pdev)
{
@ -2663,9 +2717,18 @@ qla2x00_remove_one(struct pci_dev *pdev)
struct qla_hw_data *ha;
unsigned long flags;
/*
* If the PCI device is disabled that means that probe failed and any
* resources should have been cleaned up on probe exit.
*/
if (!atomic_read(&pdev->enable_cnt))
return;
base_vha = pci_get_drvdata(pdev);
ha = base_vha->hw;
ha->flags.host_shutting_down = 1;
mutex_lock(&ha->vport_lock);
while (ha->cur_vport_count) {
struct Scsi_Host *scsi_host;
@ -2719,6 +2782,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
ha->dpc_thread = NULL;
kthread_stop(t);
}
qlt_remove_target(ha, base_vha);
qla2x00_free_sysfs_attr(base_vha);
@ -2770,17 +2834,7 @@ qla2x00_free_device(scsi_qla_host_t *vha)
if (vha->timer_active)
qla2x00_stop_timer(vha);
/* Kill the kernel thread for this host */
if (ha->dpc_thread) {
struct task_struct *t = ha->dpc_thread;
/*
* qla2xxx_wake_dpc checks for ->dpc_thread
* so we need to zero it out.
*/
ha->dpc_thread = NULL;
kthread_stop(t);
}
qla2x00_stop_dpc_thread(vha);
qla25xx_delete_queues(vha);
@ -2842,8 +2896,10 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
spin_unlock_irqrestore(vha->host->host_lock, flags);
set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
qla2xxx_wake_dpc(base_vha);
} else
} else {
fc_remote_port_delete(rport);
qlt_fc_port_deleted(vha, fcport);
}
}
/*
@ -2859,7 +2915,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
int do_login, int defer)
{
if (atomic_read(&fcport->state) == FCS_ONLINE &&
vha->vp_idx == fcport->vp_idx) {
vha->vp_idx == fcport->vha->vp_idx) {
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
qla2x00_schedule_rport_del(vha, fcport, defer);
}
@ -2908,7 +2964,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
fc_port_t *fcport;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx)
if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
continue;
/*
@ -2921,7 +2977,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
if (defer)
qla2x00_schedule_rport_del(vha, fcport, defer);
else if (vha->vp_idx == fcport->vp_idx)
else if (vha->vp_idx == fcport->vha->vp_idx)
qla2x00_schedule_rport_del(vha, fcport, defer);
}
}
@ -2946,10 +3002,13 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->init_cb)
goto fail;
if (qlt_mem_alloc(ha) < 0)
goto fail_free_init_cb;
ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
if (!ha->gid_list)
goto fail_free_init_cb;
goto fail_free_tgt_mem;
ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
if (!ha->srb_mempool)
@ -3167,6 +3226,8 @@ fail_free_gid_list:
ha->gid_list_dma);
ha->gid_list = NULL;
ha->gid_list_dma = 0;
fail_free_tgt_mem:
qlt_mem_free(ha);
fail_free_init_cb:
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
ha->init_cb_dma);
@ -3282,6 +3343,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
if (ha->ctx_mempool)
mempool_destroy(ha->ctx_mempool);
qlt_mem_free(ha);
if (ha->init_cb)
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
ha->init_cb, ha->init_cb_dma);
@ -3311,6 +3374,10 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->gid_list = NULL;
ha->gid_list_dma = 0;
ha->tgt.atio_ring = NULL;
ha->tgt.atio_dma = 0;
ha->tgt.tgt_vp_map = NULL;
}
struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
@ -3671,10 +3738,9 @@ qla2x00_do_dpc(void *data)
ha->dpc_active = 1;
ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
"DPC handler waking up.\n");
ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
"dpc_flags=0x%lx.\n", base_vha->dpc_flags);
ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
"DPC handler waking up, dpc_flags=0x%lx.\n",
base_vha->dpc_flags);
qla2x00_do_work(base_vha);
@ -3740,6 +3806,16 @@ qla2x00_do_dpc(void *data)
clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
}
if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) {
int ret;
ret = qla2x00_send_change_request(base_vha, 0x3, 0);
if (ret != QLA_SUCCESS)
ql_log(ql_log_warn, base_vha, 0x121,
"Failed to enable receiving of RSCN "
"requests: 0x%x.\n", ret);
clear_bit(SCR_PENDING, &base_vha->dpc_flags);
}
if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
"Quiescence mode scheduled.\n");
@ -4457,6 +4533,21 @@ qla2x00_module_init(void)
return -ENOMEM;
}
/* Initialize target kmem_cache and mem_pools */
ret = qlt_init();
if (ret < 0) {
kmem_cache_destroy(srb_cachep);
return ret;
} else if (ret > 0) {
/*
* If initiator mode is explicitly disabled by qlt_init(),
* prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
* performing scsi_scan_target() during LOOP UP event.
*/
qla2xxx_transport_functions.disable_target_scan = 1;
qla2xxx_transport_vport_functions.disable_target_scan = 1;
}
/* Derive version string. */
strcpy(qla2x00_version_str, QLA2XXX_VERSION);
if (ql2xextended_error_logging)
@ -4468,6 +4559,7 @@ qla2x00_module_init(void)
kmem_cache_destroy(srb_cachep);
ql_log(ql_log_fatal, NULL, 0x0002,
"fc_attach_transport failed...Failing load!.\n");
qlt_exit();
return -ENODEV;
}
@ -4481,6 +4573,7 @@ qla2x00_module_init(void)
fc_attach_transport(&qla2xxx_transport_vport_functions);
if (!qla2xxx_transport_vport_template) {
kmem_cache_destroy(srb_cachep);
qlt_exit();
fc_release_transport(qla2xxx_transport_template);
ql_log(ql_log_fatal, NULL, 0x0004,
"fc_attach_transport vport failed...Failing load!.\n");
@ -4492,6 +4585,7 @@ qla2x00_module_init(void)
ret = pci_register_driver(&qla2xxx_pci_driver);
if (ret) {
kmem_cache_destroy(srb_cachep);
qlt_exit();
fc_release_transport(qla2xxx_transport_template);
fc_release_transport(qla2xxx_transport_vport_template);
ql_log(ql_log_fatal, NULL, 0x0006,
@ -4511,6 +4605,7 @@ qla2x00_module_exit(void)
pci_unregister_driver(&qla2xxx_pci_driver);
qla2x00_release_firmware();
kmem_cache_destroy(srb_cachep);
qlt_exit();
if (ctx_cachep)
kmem_cache_destroy(ctx_cachep);
fc_release_transport(qla2xxx_transport_template);

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -0,0 +1,82 @@
#include <target/target_core_base.h>
#include <linux/btree.h>
#define TCM_QLA2XXX_VERSION "v0.1"
/* length of ASCII WWPNs including pad */
#define TCM_QLA2XXX_NAMELEN 32
/* length of ASCII NPIV 'WWPN+WWNN' including pad */
#define TCM_QLA2XXX_NPIV_NAMELEN 66
#include "qla_target.h"
struct tcm_qla2xxx_nacl {
/* From libfc struct fc_rport->port_id */
u32 nport_id;
/* Binary World Wide unique Node Name for remote FC Initiator Nport */
u64 nport_wwnn;
/* ASCII formatted WWPN for FC Initiator Nport */
char nport_name[TCM_QLA2XXX_NAMELEN];
/* Pointer to qla_tgt_sess */
struct qla_tgt_sess *qla_tgt_sess;
/* Pointer to TCM FC nexus */
struct se_session *nport_nexus;
/* Returned by tcm_qla2xxx_make_nodeacl() */
struct se_node_acl se_node_acl;
};
struct tcm_qla2xxx_tpg_attrib {
int generate_node_acls;
int cache_dynamic_acls;
int demo_mode_write_protect;
int prod_mode_write_protect;
};
struct tcm_qla2xxx_tpg {
/* FC lport target portal group tag for TCM */
u16 lport_tpgt;
/* Atomic bit to determine TPG active status */
atomic_t lport_tpg_enabled;
/* Pointer back to tcm_qla2xxx_lport */
struct tcm_qla2xxx_lport *lport;
/* Used by tcm_qla2xxx_tpg_attrib_cit */
struct tcm_qla2xxx_tpg_attrib tpg_attrib;
/* Returned by tcm_qla2xxx_make_tpg() */
struct se_portal_group se_tpg;
};
#define QLA_TPG_ATTRIB(tpg) (&(tpg)->tpg_attrib)
struct tcm_qla2xxx_fc_loopid {
struct se_node_acl *se_nacl;
};
struct tcm_qla2xxx_lport {
/* SCSI protocol the lport is providing */
u8 lport_proto_id;
/* Binary World Wide unique Port Name for FC Target Lport */
u64 lport_wwpn;
/* Binary World Wide unique Port Name for FC NPIV Target Lport */
u64 lport_npiv_wwpn;
/* Binary World Wide unique Node Name for FC NPIV Target Lport */
u64 lport_npiv_wwnn;
/* ASCII formatted WWPN for FC Target Lport */
char lport_name[TCM_QLA2XXX_NAMELEN];
/* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */
char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN];
/* map for fc_port pointers in 24-bit FC Port ID space */
struct btree_head32 lport_fcport_map;
/* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */
struct tcm_qla2xxx_fc_loopid *lport_loopid_map;
/* Pointer to struct scsi_qla_host from qla2xxx LLD */
struct scsi_qla_host *qla_vha;
/* Pointer to struct scsi_qla_host for NPIV VP from qla2xxx LLD */
struct scsi_qla_host *qla_npiv_vp;
/* Pointer to struct qla_tgt pointer */
struct qla_tgt lport_qla_tgt;
/* Pointer to struct fc_vport for NPIV vport from libfc */
struct fc_vport *npiv_vport;
/* Pointer to TPG=1 for non NPIV mode */
struct tcm_qla2xxx_tpg *tpg_1;
/* Returned by tcm_qla2xxx_make_lport() */
struct se_wwn lport_wwn;
};
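Since lport_fcport_map keys the full 24-bit FC port-ID space with the generic kernel btree, a lookup against it can be sketched roughly as below (the helper name is hypothetical; btree_lookup32() is the <linux/btree.h> accessor, and entries are assumed to have been added with btree_insert32()):

#include <linux/btree.h>

/* Hypothetical helper: resolve a 24-bit FC port ID to the pointer the
 * lport stored for it. Returns NULL when the ID was never inserted. */
static void *tcm_qla2xxx_lookup_fcport(struct tcm_qla2xxx_lport *lport,
				       u32 port_id)
{
	return btree_lookup32(&lport->lport_fcport_map, port_id & 0xffffff);
}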


@ -9,6 +9,140 @@
#include "ql4_glbl.h"
#include "ql4_dbg.h"
static ssize_t
qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
struct bin_attribute *ba, char *buf, loff_t off,
size_t count)
{
struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
struct device, kobj)));
if (!is_qla8022(ha))
return -EINVAL;
if (!test_bit(AF_82XX_DUMP_READING, &ha->flags))
return 0;
return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
ha->fw_dump_size);
}
static ssize_t
qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
struct bin_attribute *ba, char *buf, loff_t off,
size_t count)
{
struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
struct device, kobj)));
uint32_t dev_state;
long reading;
int ret = 0;
if (!is_qla8022(ha))
return -EINVAL;
if (off != 0)
return ret;
buf[1] = 0;
ret = kstrtol(buf, 10, &reading);
if (ret) {
ql4_printk(KERN_ERR, ha, "%s: Invalid input. Return err %d\n",
__func__, ret);
return ret;
}
switch (reading) {
case 0:
/* clear dump collection flags */
if (test_and_clear_bit(AF_82XX_DUMP_READING, &ha->flags)) {
clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
/* Reload minidump template */
qla4xxx_alloc_fw_dump(ha);
DEBUG2(ql4_printk(KERN_INFO, ha,
"Firmware template reloaded\n"));
}
break;
case 1:
/* Set flag to read dump */
if (test_bit(AF_82XX_FW_DUMPED, &ha->flags) &&
!test_bit(AF_82XX_DUMP_READING, &ha->flags)) {
set_bit(AF_82XX_DUMP_READING, &ha->flags);
DEBUG2(ql4_printk(KERN_INFO, ha,
"Raw firmware dump ready for read on (%ld).\n",
ha->host_no));
}
break;
case 2:
/* Reset HBA */
qla4_8xxx_idc_lock(ha);
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
if (dev_state == QLA82XX_DEV_READY) {
ql4_printk(KERN_INFO, ha,
"%s: Setting Need reset, reset_owner is 0x%x.\n",
__func__, ha->func_num);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_NEED_RESET);
set_bit(AF_82XX_RST_OWNER, &ha->flags);
} else
ql4_printk(KERN_INFO, ha,
"%s: Reset not performed as device state is 0x%x\n",
__func__, dev_state);
qla4_8xxx_idc_unlock(ha);
break;
default:
/* do nothing */
break;
}
return count;
}
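The write handler above implements a small control protocol: writing 0 clears the dump-reading state and reloads the template, 1 arms a captured dump for reading, and 2 forces a NEED_RESET transition when the device is READY. A hedged user-space sketch (the sysfs path is an assumption; the attribute actually lands wherever the host's shost_gendev sits in sysfs):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path assumed for illustration only. */
	int fd = open("/sys/class/scsi_host/host6/device/fw_dump", O_WRONLY);

	if (fd < 0) {
		perror("open fw_dump");
		return 1;
	}
	/* "1" arms a previously captured dump for reading;
	 * "0" would clear the state, "2" would request a reset. */
	if (write(fd, "1", 1) != 1)
		perror("write fw_dump");
	close(fd);
	return 0;
}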
static struct bin_attribute sysfs_fw_dump_attr = {
.attr = {
.name = "fw_dump",
.mode = S_IRUSR | S_IWUSR,
},
.size = 0,
.read = qla4_8xxx_sysfs_read_fw_dump,
.write = qla4_8xxx_sysfs_write_fw_dump,
};
static struct sysfs_entry {
char *name;
struct bin_attribute *attr;
} bin_file_entries[] = {
{ "fw_dump", &sysfs_fw_dump_attr },
{ NULL },
};
void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha)
{
struct Scsi_Host *host = ha->host;
struct sysfs_entry *iter;
int ret;
for (iter = bin_file_entries; iter->name; iter++) {
ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
iter->attr);
if (ret)
ql4_printk(KERN_ERR, ha,
"Unable to create sysfs %s binary attribute (%d).\n",
iter->name, ret);
}
}
void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha)
{
struct Scsi_Host *host = ha->host;
struct sysfs_entry *iter;
for (iter = bin_file_entries; iter->name; iter++)
sysfs_remove_bin_file(&host->shost_gendev.kobj,
iter->attr);
}
/* Scsi_Host attributes. */
static ssize_t
qla4xxx_fw_version_show(struct device *dev,


@ -398,6 +398,16 @@ struct isp_operations {
int (*get_sys_info) (struct scsi_qla_host *);
};
struct ql4_mdump_size_table {
uint32_t size;
uint32_t size_cmask_02;
uint32_t size_cmask_04;
uint32_t size_cmask_08;
uint32_t size_cmask_10;
uint32_t size_cmask_FF;
uint32_t version;
};
/*qla4xxx ipaddress configuration details */
struct ipaddress_config {
uint16_t ipv4_options;
@ -485,6 +495,10 @@ struct scsi_qla_host {
#define AF_EEH_BUSY 20 /* 0x00100000 */
#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */
#define AF_82XX_FW_DUMPED 24 /* 0x01000000 */
#define AF_82XX_RST_OWNER 25 /* 0x02000000 */
#define AF_82XX_DUMP_READING 26 /* 0x04000000 */
unsigned long dpc_flags;
#define DPC_RESET_HA 1 /* 0x00000002 */
@ -662,6 +676,11 @@ struct scsi_qla_host {
uint32_t nx_dev_init_timeout;
uint32_t nx_reset_timeout;
void *fw_dump;
uint32_t fw_dump_size;
uint32_t fw_dump_capture_mask;
void *fw_dump_tmplt_hdr;
uint32_t fw_dump_tmplt_size;
struct completion mbx_intr_comp;
@ -936,4 +955,7 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
#define PROCESS_ALL_AENS 0
#define FLUSH_DDB_CHANGED_AENS 1
/* Defines for udev events */
#define QL4_UEVENT_CODE_FW_DUMP 0
#endif /*_QLA4XXX_H */


@ -385,6 +385,11 @@ struct qla_flt_region {
#define MBOX_CMD_GET_IP_ADDR_STATE 0x0091
#define MBOX_CMD_SEND_IPV6_ROUTER_SOL 0x0092
#define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR 0x0093
#define MBOX_CMD_MINIDUMP 0x0129
/* Minidump subcommand */
#define MINIDUMP_GET_SIZE_SUBCOMMAND 0x00
#define MINIDUMP_GET_TMPLT_SUBCOMMAND 0x01
/* Mailbox 1 */
#define FW_STATE_READY 0x0000
@ -1190,4 +1195,27 @@ struct ql_iscsi_stats {
uint8_t reserved2[264]; /* 0x0308 - 0x040F */
};
#define QLA82XX_DBG_STATE_ARRAY_LEN 16
#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN 8
#define QLA82XX_DBG_RSVD_ARRAY_LEN 8
struct qla4_8xxx_minidump_template_hdr {
uint32_t entry_type;
uint32_t first_entry_offset;
uint32_t size_of_template;
uint32_t capture_debug_level;
uint32_t num_of_entries;
uint32_t version;
uint32_t driver_timestamp;
uint32_t checksum;
uint32_t driver_capture_mask;
uint32_t driver_info_word2;
uint32_t driver_info_word3;
uint32_t driver_info_word4;
uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN];
uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN];
};
#endif /* _QLA4X_FW_H */


@ -196,10 +196,18 @@ int qla4xxx_bsg_request(struct bsg_job *bsg_job);
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
dma_addr_t phys_addr);
int qla4xxx_req_template_size(struct scsi_qla_host *ha);
void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha);
void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha);
void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha);
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
extern int ql4xenablemsix;
extern int ql4xmdcapmask;
extern int ql4xenablemd;
extern struct device_attribute *qla4xxx_host_attrs[];
#endif /* _QLA4x_GBL_H */


@ -277,6 +277,94 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
return ipv4_wait|ipv6_wait;
}
/**
* qla4xxx_alloc_fw_dump - Allocate memory for minidump data.
* @ha: pointer to host adapter structure.
**/
void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)
{
int status;
uint32_t capture_debug_level;
int hdr_entry_bit, k;
void *md_tmp;
dma_addr_t md_tmp_dma;
struct qla4_8xxx_minidump_template_hdr *md_hdr;
if (ha->fw_dump) {
ql4_printk(KERN_WARNING, ha,
"Firmware dump previously allocated.\n");
return;
}
status = qla4xxx_req_template_size(ha);
if (status != QLA_SUCCESS) {
ql4_printk(KERN_INFO, ha,
"scsi%ld: Failed to get template size\n",
ha->host_no);
return;
}
clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
/* Allocate memory for saving the template */
md_tmp = dma_alloc_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
&md_tmp_dma, GFP_KERNEL);
/* Request template */
status = qla4xxx_get_minidump_template(ha, md_tmp_dma);
if (status != QLA_SUCCESS) {
ql4_printk(KERN_INFO, ha,
"scsi%ld: Failed to get minidump template\n",
ha->host_no);
goto alloc_cleanup;
}
md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp;
capture_debug_level = md_hdr->capture_debug_level;
/* Get capture mask based on module loadtime setting. */
if (ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F)
ha->fw_dump_capture_mask = ql4xmdcapmask;
else
ha->fw_dump_capture_mask = capture_debug_level;
md_hdr->driver_capture_mask = ha->fw_dump_capture_mask;
DEBUG2(ql4_printk(KERN_INFO, ha, "Minimum num of entries = %d\n",
md_hdr->num_of_entries));
DEBUG2(ql4_printk(KERN_INFO, ha, "Dump template size = %d\n",
ha->fw_dump_tmplt_size));
DEBUG2(ql4_printk(KERN_INFO, ha, "Selected Capture mask =0x%x\n",
ha->fw_dump_capture_mask));
/* Calculate fw_dump_size */
for (hdr_entry_bit = 0x2, k = 1; (hdr_entry_bit & 0xFF);
hdr_entry_bit <<= 1, k++) {
if (hdr_entry_bit & ha->fw_dump_capture_mask)
ha->fw_dump_size += md_hdr->capture_size_array[k];
}
/* Total firmware dump size including command header */
ha->fw_dump_size += ha->fw_dump_tmplt_size;
ha->fw_dump = vmalloc(ha->fw_dump_size);
if (!ha->fw_dump)
goto alloc_cleanup;
DEBUG2(ql4_printk(KERN_INFO, ha,
"Minidump Tempalate Size = 0x%x KB\n",
ha->fw_dump_tmplt_size));
DEBUG2(ql4_printk(KERN_INFO, ha,
"Total Minidump size = 0x%x KB\n", ha->fw_dump_size));
memcpy(ha->fw_dump, md_tmp, ha->fw_dump_tmplt_size);
ha->fw_dump_tmplt_hdr = ha->fw_dump;
alloc_cleanup:
dma_free_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
md_tmp, md_tmp_dma);
}
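The dump-size computation above walks capture-mask bits 0x02 through 0x80; each selected bit k adds capture_size_array[k], and the template itself is counted once on top. A standalone sketch of the same arithmetic with invented sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Invented capture sizes for illustration; index 0 is unused
	 * because the driver's loop starts at bit 0x2 / k = 1. */
	uint32_t capture_size_array[8] = { 0, 0x1000, 0x2000, 0x800,
					   0x400, 0x200, 0x100, 0x80 };
	uint32_t capture_mask = 0x1F;	/* e.g. from ql4xmdcapmask */
	uint32_t tmplt_size = 0x400, dump_size = 0;
	int bit, k;

	for (bit = 0x2, k = 1; bit & 0xFF; bit <<= 1, k++)
		if (bit & capture_mask)
			dump_size += capture_size_array[k];

	dump_size += tmplt_size;	/* template header counted once */
	printf("fw_dump_size = 0x%x\n", dump_size);
	return 0;
}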
static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
{
uint32_t timeout_count;
@ -445,9 +533,13 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
"control block\n", ha->host_no, __func__));
return status;
}
if (!qla4xxx_fw_ready(ha))
return status;
if (is_qla8022(ha) && !test_bit(AF_INIT_DONE, &ha->flags))
qla4xxx_alloc_fw_dump(ha);
return qla4xxx_get_firmware_status(ha);
}
@ -884,8 +976,8 @@ int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
switch (state) {
case DDB_DS_SESSION_ACTIVE:
case DDB_DS_DISCOVERY:
ddb_entry->unblock_sess(ddb_entry->sess);
qla4xxx_update_session_conn_param(ha, ddb_entry);
ddb_entry->unblock_sess(ddb_entry->sess);
status = QLA_SUCCESS;
break;
case DDB_DS_SESSION_FAILED:
@ -897,6 +989,7 @@ int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
}
break;
case DDB_DS_SESSION_ACTIVE:
case DDB_DS_DISCOVERY:
switch (state) {
case DDB_DS_SESSION_FAILED:
/*


@ -51,25 +51,6 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
}
}
if (is_qla8022(ha)) {
if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
"prematurely completing mbx cmd as firmware "
"recovery detected\n", ha->host_no, __func__));
return status;
}
/* Do not send any mbx cmd if h/w is in failed state*/
qla4_8xxx_idc_lock(ha);
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
qla4_8xxx_idc_unlock(ha);
if (dev_state == QLA82XX_DEV_FAILED) {
ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: H/W is in "
"failed state, do not send any mailbox commands\n",
ha->host_no, __func__);
return status;
}
}
if ((is_aer_supported(ha)) &&
(test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) {
DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, "
@ -96,6 +77,25 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
msleep(10);
}
if (is_qla8022(ha)) {
if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
DEBUG2(ql4_printk(KERN_WARNING, ha,
"scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n",
ha->host_no, __func__));
goto mbox_exit;
}
/* Do not send any mbx cmd if h/w is in failed state*/
qla4_8xxx_idc_lock(ha);
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
qla4_8xxx_idc_unlock(ha);
if (dev_state == QLA82XX_DEV_FAILED) {
ql4_printk(KERN_WARNING, ha,
"scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n",
ha->host_no, __func__);
goto mbox_exit;
}
}
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->mbox_status_count = outCount;
@ -270,6 +270,79 @@ mbox_exit:
return status;
}
/**
* qla4xxx_get_minidump_template - Get the firmware template
* @ha: Pointer to host adapter structure.
* @phys_addr: dma address for template
*
* Obtain the minidump template from firmware during initialization
* as it may not be available when minidump is desired.
**/
int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
dma_addr_t phys_addr)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
int status;
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_MINIDUMP;
mbox_cmd[1] = MINIDUMP_GET_TMPLT_SUBCOMMAND;
mbox_cmd[2] = LSDW(phys_addr);
mbox_cmd[3] = MSDW(phys_addr);
mbox_cmd[4] = ha->fw_dump_tmplt_size;
mbox_cmd[5] = 0;
status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
&mbox_sts[0]);
if (status != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: %s: Cmd = %08X, mbx[0] = 0x%04x, mbx[1] = 0x%04x\n",
ha->host_no, __func__, mbox_cmd[0],
mbox_sts[0], mbox_sts[1]));
}
return status;
}
/**
* qla4xxx_req_template_size - Get minidump template size from firmware.
* @ha: Pointer to host adapter structure.
**/
int qla4xxx_req_template_size(struct scsi_qla_host *ha)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
int status;
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_MINIDUMP;
mbox_cmd[1] = MINIDUMP_GET_SIZE_SUBCOMMAND;
status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
&mbox_sts[0]);
if (status == QLA_SUCCESS) {
ha->fw_dump_tmplt_size = mbox_sts[1];
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s: sts[0]=0x%04x, template size=0x%04x, size_cm_02=0x%04x, size_cm_04=0x%04x, size_cm_08=0x%04x, size_cm_10=0x%04x, size_cm_FF=0x%04x, version=0x%04x\n",
__func__, mbox_sts[0], mbox_sts[1],
mbox_sts[2], mbox_sts[3], mbox_sts[4],
mbox_sts[5], mbox_sts[6], mbox_sts[7]));
if (ha->fw_dump_tmplt_size == 0)
status = QLA_ERROR;
} else {
ql4_printk(KERN_WARNING, ha,
"%s: Error sts[0]=0x%04x, mbx[1]=0x%04x\n",
__func__, mbox_sts[0], mbox_sts[1]);
status = QLA_ERROR;
}
return status;
}
void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha)
{
set_bit(AF_FW_RECOVERY, &ha->flags);


@ -7,6 +7,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include "ql4_def.h"
#include "ql4_glbl.h"
@ -420,6 +421,38 @@ qla4_8xxx_rd_32(struct scsi_qla_host *ha, ulong off)
return data;
}
/* Minidump related functions */
static int qla4_8xxx_md_rw_32(struct scsi_qla_host *ha, uint32_t off,
u32 data, uint8_t flag)
{
uint32_t win_read, off_value, rval = QLA_SUCCESS;
off_value = off & 0xFFFF0000;
writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
/* Read back value to make sure write has gone through before trying
* to use it.
*/
win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
if (win_read != off_value) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
__func__, off_value, win_read, off));
return QLA_ERROR;
}
off_value = off & 0x0000FFFF;
if (flag)
writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M +
ha->nx_pcibase));
else
rval = readl((void __iomem *)(off_value + CRB_INDIRECT_2M +
ha->nx_pcibase));
return rval;
}
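For reference, the flag argument of the helper above selects direction: 0 reads through the 2M CRB window and returns the value, 1 writes data through it. A hedged sketch of both directions (the wrapper name is invented):

/* Hypothetical wrapper demonstrating both directions of the window. */
static uint32_t qla4_8xxx_md_echo_reg(struct scsi_qla_host *ha, uint32_t off)
{
	uint32_t val;

	/* flag = 0: read the register through the window. */
	val = qla4_8xxx_md_rw_32(ha, off, 0, 0);

	/* flag = 1: write the same value back through the window. */
	qla4_8xxx_md_rw_32(ha, off, val, 1);

	return val;
}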
#define CRB_WIN_LOCK_TIMEOUT 100000000
int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha)
@ -1252,9 +1285,9 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
}
if (j >= MAX_CTL_CHECK) {
if (printk_ratelimit())
ql4_printk(KERN_ERR, ha,
"failed to read through agent\n");
printk_ratelimited(KERN_ERR
"%s: failed to read through agent\n",
__func__);
break;
}
@ -1390,7 +1423,8 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
if (j >= MAX_CTL_CHECK) {
if (printk_ratelimit())
ql4_printk(KERN_ERR, ha,
"failed to write through agent\n");
"%s: failed to read through agent\n",
__func__);
ret = -1;
break;
}
@ -1462,6 +1496,8 @@ qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
drv_active |= (1 << (ha->func_num * 4));
ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
__func__, ha->host_no, drv_active);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
}
@ -1472,6 +1508,8 @@ qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
drv_active &= ~(1 << (ha->func_num * 4));
ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
__func__, ha->host_no, drv_active);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
}
@ -1497,6 +1535,8 @@ qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
drv_state |= (1 << (ha->func_num * 4));
ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
__func__, ha->host_no, drv_state);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
}
@ -1507,6 +1547,8 @@ qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
drv_state &= ~(1 << (ha->func_num * 4));
ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
__func__, ha->host_no, drv_state);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
}
@ -1601,6 +1643,629 @@ static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha)
qla4_8xxx_rom_unlock(ha);
}
static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
struct qla82xx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
{
uint32_t r_addr, r_stride, loop_cnt, i, r_value;
struct qla82xx_minidump_entry_crb *crb_hdr;
uint32_t *data_ptr = *d_ptr;
DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
crb_hdr = (struct qla82xx_minidump_entry_crb *)entry_hdr;
r_addr = crb_hdr->addr;
r_stride = crb_hdr->crb_strd.addr_stride;
loop_cnt = crb_hdr->op_count;
for (i = 0; i < loop_cnt; i++) {
r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
*data_ptr++ = cpu_to_le32(r_addr);
*data_ptr++ = cpu_to_le32(r_value);
r_addr += r_stride;
}
*d_ptr = data_ptr;
}
static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
struct qla82xx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
{
uint32_t addr, r_addr, c_addr, t_r_addr;
uint32_t i, k, loop_count, t_value, r_cnt, r_value;
unsigned long p_wait, w_time, p_mask;
uint32_t c_value_w, c_value_r;
struct qla82xx_minidump_entry_cache *cache_hdr;
int rval = QLA_ERROR;
uint32_t *data_ptr = *d_ptr;
DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
loop_count = cache_hdr->op_count;
r_addr = cache_hdr->read_addr;
c_addr = cache_hdr->control_addr;
c_value_w = cache_hdr->cache_ctrl.write_value;
t_r_addr = cache_hdr->tag_reg_addr;
t_value = cache_hdr->addr_ctrl.init_tag_value;
r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
p_wait = cache_hdr->cache_ctrl.poll_wait;
p_mask = cache_hdr->cache_ctrl.poll_mask;
for (i = 0; i < loop_count; i++) {
qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
if (c_value_w)
qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
if (p_mask) {
w_time = jiffies + p_wait;
do {
c_value_r = qla4_8xxx_md_rw_32(ha, c_addr,
0, 0);
if ((c_value_r & p_mask) == 0) {
break;
} else if (time_after_eq(jiffies, w_time)) {
/* capturing dump failed */
return rval;
}
} while (1);
}
addr = r_addr;
for (k = 0; k < r_cnt; k++) {
r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
*data_ptr++ = cpu_to_le32(r_value);
addr += cache_hdr->read_ctrl.read_addr_stride;
}
t_value += cache_hdr->addr_ctrl.tag_value_stride;
}
*d_ptr = data_ptr;
return QLA_SUCCESS;
}
static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
struct qla82xx_minidump_entry_hdr *entry_hdr)
{
struct qla82xx_minidump_entry_crb *crb_entry;
uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS;
uint32_t crb_addr;
unsigned long wtime;
struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
int i;
DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
ha->fw_dump_tmplt_hdr;
crb_entry = (struct qla82xx_minidump_entry_crb *)entry_hdr;
crb_addr = crb_entry->addr;
for (i = 0; i < crb_entry->op_count; i++) {
opcode = crb_entry->crb_ctrl.opcode;
if (opcode & QLA82XX_DBG_OPCODE_WR) {
qla4_8xxx_md_rw_32(ha, crb_addr,
crb_entry->value_1, 1);
opcode &= ~QLA82XX_DBG_OPCODE_WR;
}
if (opcode & QLA82XX_DBG_OPCODE_RW) {
read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
opcode &= ~QLA82XX_DBG_OPCODE_RW;
}
if (opcode & QLA82XX_DBG_OPCODE_AND) {
read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
read_value &= crb_entry->value_2;
opcode &= ~QLA82XX_DBG_OPCODE_AND;
if (opcode & QLA82XX_DBG_OPCODE_OR) {
read_value |= crb_entry->value_3;
opcode &= ~QLA82XX_DBG_OPCODE_OR;
}
qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
}
if (opcode & QLA82XX_DBG_OPCODE_OR) {
read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
read_value |= crb_entry->value_3;
qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
opcode &= ~QLA82XX_DBG_OPCODE_OR;
}
if (opcode & QLA82XX_DBG_OPCODE_POLL) {
poll_time = crb_entry->crb_strd.poll_timeout;
wtime = jiffies + poll_time;
read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
do {
if ((read_value & crb_entry->value_2) ==
crb_entry->value_1)
break;
else if (time_after_eq(jiffies, wtime)) {
/* capturing dump failed */
rval = QLA_ERROR;
break;
} else
read_value = qla4_8xxx_md_rw_32(ha,
crb_addr, 0, 0);
} while (1);
opcode &= ~QLA82XX_DBG_OPCODE_POLL;
}
if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
if (crb_entry->crb_strd.state_index_a) {
index = crb_entry->crb_strd.state_index_a;
addr = tmplt_hdr->saved_state_array[index];
} else {
addr = crb_addr;
}
read_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
index = crb_entry->crb_ctrl.state_index_v;
tmplt_hdr->saved_state_array[index] = read_value;
opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
}
if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
if (crb_entry->crb_strd.state_index_a) {
index = crb_entry->crb_strd.state_index_a;
addr = tmplt_hdr->saved_state_array[index];
} else {
addr = crb_addr;
}
if (crb_entry->crb_ctrl.state_index_v) {
index = crb_entry->crb_ctrl.state_index_v;
read_value =
tmplt_hdr->saved_state_array[index];
} else {
read_value = crb_entry->value_1;
}
qla4_8xxx_md_rw_32(ha, addr, read_value, 1);
opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
}
if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
index = crb_entry->crb_ctrl.state_index_v;
read_value = tmplt_hdr->saved_state_array[index];
read_value <<= crb_entry->crb_ctrl.shl;
read_value >>= crb_entry->crb_ctrl.shr;
if (crb_entry->value_2)
read_value &= crb_entry->value_2;
read_value |= crb_entry->value_3;
read_value += crb_entry->value_1;
tmplt_hdr->saved_state_array[index] = read_value;
opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
}
crb_addr += crb_entry->crb_strd.addr_stride;
}
DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__));
return rval;
}
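/* Capture on-chip memory (OCM) by reading directly through the PCI BAR. */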
static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha,
struct qla82xx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
{
uint32_t r_addr, r_stride, loop_cnt, i, r_value;
struct qla82xx_minidump_entry_rdocm *ocm_hdr;
uint32_t *data_ptr = *d_ptr;
DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
ocm_hdr = (struct qla82xx_minidump_entry_rdocm *)entry_hdr;
r_addr = ocm_hdr->read_addr;
r_stride = ocm_hdr->read_addr_stride;
loop_cnt = ocm_hdr->op_count;
DEBUG2(ql4_printk(KERN_INFO, ha,
"[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
__func__, r_addr, r_stride, loop_cnt));
for (i = 0; i < loop_cnt; i++) {
r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
*data_ptr++ = cpu_to_le32(r_value);
r_addr += r_stride;
}
DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n",
__func__, (loop_cnt * sizeof(uint32_t))));
*d_ptr = data_ptr;
}
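/*
 * Capture muxed registers: program the select register, then record the
 * select value together with the value read back, for each stride step.
 */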
static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
struct qla82xx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
{
uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
struct qla82xx_minidump_entry_mux *mux_hdr;
uint32_t *data_ptr = *d_ptr;
DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
mux_hdr = (struct qla82xx_minidump_entry_mux *)entry_hdr;
r_addr = mux_hdr->read_addr;
s_addr = mux_hdr->select_addr;
s_stride = mux_hdr->select_value_stride;
s_value = mux_hdr->select_value;
loop_cnt = mux_hdr->op_count;
for (i = 0; i < loop_cnt; i++) {
qla4_8xxx_md_rw_32(ha, s_addr, s_value, 1);
r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
*data_ptr++ = cpu_to_le32(s_value);
*data_ptr++ = cpu_to_le32(r_value);
s_value += s_stride;
}
*d_ptr = data_ptr;
}
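/*
 * Capture L1 cache data: the same tag/control/read sequence as the L2
 * handler, but with an unconditional control write and no completion
 * poll.
 */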
static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
struct qla82xx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
{
uint32_t addr, r_addr, c_addr, t_r_addr;
uint32_t i, k, loop_count, t_value, r_cnt, r_value;
uint32_t c_value_w;
struct qla82xx_minidump_entry_cache *cache_hdr;
uint32_t *data_ptr = *d_ptr;
cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
loop_count = cache_hdr->op_count;
r_addr = cache_hdr->read_addr;
c_addr = cache_hdr->control_addr;
c_value_w = cache_hdr->cache_ctrl.write_value;
t_r_addr = cache_hdr->tag_reg_addr;
t_value = cache_hdr->addr_ctrl.init_tag_value;
r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
for (i = 0; i < loop_count; i++) {
qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
addr = r_addr;
for (k = 0; k < r_cnt; k++) {
r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
*data_ptr++ = cpu_to_le32(r_value);
addr += cache_hdr->read_ctrl.read_addr_stride;
}
t_value += cache_hdr->addr_ctrl.tag_value_stride;
}
*d_ptr = data_ptr;
}
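/*
 * Capture queue state: select each queue ID in turn and read r_cnt
 * words per queue.
 */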
static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha,
struct qla82xx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
{
uint32_t s_addr, r_addr;
uint32_t r_stride, r_value, r_cnt, qid = 0;
uint32_t i, k, loop_cnt;
struct qla82xx_minidump_entry_queue *q_hdr;
uint32_t *data_ptr = *d_ptr;
DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
q_hdr = (struct qla82xx_minidump_entry_queue *)entry_hdr;
s_addr = q_hdr->select_addr;
r_cnt = q_hdr->rd_strd.read_addr_cnt;
r_stride = q_hdr->rd_strd.read_addr_stride;
loop_cnt = q_hdr->op_count;
for (i = 0; i < loop_cnt; i++) {
qla4_8xxx_md_rw_32(ha, s_addr, qid, 1);
r_addr = q_hdr->read_addr;
for (k = 0; k < r_cnt; k++) {
r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
*data_ptr++ = cpu_to_le32(r_value);
r_addr += r_stride;
}
qid += q_hdr->q_strd.queue_id_stride;
}
*d_ptr = data_ptr;
}
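/*
 * Capture flash contents: program the 64K ROM window for each address,
 * then read 4 bytes at a time through the direct read base.
 */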
#define MD_DIRECT_ROM_WINDOW 0x42110030
#define MD_DIRECT_ROM_READ_BASE 0x42150000
static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha,
struct qla82xx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
{
uint32_t r_addr, r_value;
uint32_t i, loop_cnt;
struct qla82xx_minidump_entry_rdrom *rom_hdr;
uint32_t *data_ptr = *d_ptr;
DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
rom_hdr = (struct qla82xx_minidump_entry_rdrom *)entry_hdr;
r_addr = rom_hdr->read_addr;
loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
DEBUG2(ql4_printk(KERN_INFO, ha,
"[%s]: flash_addr: 0x%x, read_data_size: 0x%x\n",
__func__, r_addr, loop_cnt));
for (i = 0; i < loop_cnt; i++) {
qla4_8xxx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW,
(r_addr & 0xFFFF0000), 1);
r_value = qla4_8xxx_md_rw_32(ha,
MD_DIRECT_ROM_READ_BASE +
(r_addr & 0x0000FFFF), 0, 0);
*data_ptr++ = cpu_to_le32(r_value);
r_addr += sizeof(uint32_t);
}
*d_ptr = data_ptr;
}
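/*
 * Capture main memory through the MIU test agent: each transaction
 * fetches 16 bytes (four 32-bit words), so the start address and size
 * must be 16-byte aligned.
 */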
#define MD_MIU_TEST_AGT_CTRL 0x41000090
#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
struct qla82xx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
{
uint32_t r_addr, r_value, r_data;
uint32_t i, j, loop_cnt;
struct qla82xx_minidump_entry_rdmem *m_hdr;
unsigned long flags;
uint32_t *data_ptr = *d_ptr;
DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
m_hdr = (struct qla82xx_minidump_entry_rdmem *)entry_hdr;
r_addr = m_hdr->read_addr;
loop_cnt = m_hdr->read_data_size/16;
DEBUG2(ql4_printk(KERN_INFO, ha,
"[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
__func__, r_addr, m_hdr->read_data_size));
if (r_addr & 0xf) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"[%s]: Read addr 0x%x not 16 bytes alligned\n",
__func__, r_addr));
return QLA_ERROR;
}
if (m_hdr->read_data_size % 16) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"[%s]: Read data[0x%x] not multiple of 16 bytes\n",
__func__, m_hdr->read_data_size));
return QLA_ERROR;
}
DEBUG2(ql4_printk(KERN_INFO, ha,
"[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
__func__, r_addr, m_hdr->read_data_size, loop_cnt));
write_lock_irqsave(&ha->hw_lock, flags);
for (i = 0; i < loop_cnt; i++) {
qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1);
r_value = 0;
qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1);
r_value = MIU_TA_CTL_ENABLE;
qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
for (j = 0; j < MAX_CTL_CHECK; j++) {
r_value = qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL,
0, 0);
if ((r_value & MIU_TA_CTL_BUSY) == 0)
break;
}
if (j >= MAX_CTL_CHECK) {
printk_ratelimited(KERN_ERR
"%s: failed to read through agent\n",
__func__);
write_unlock_irqrestore(&ha->hw_lock, flags);
return QLA_SUCCESS;
}
for (j = 0; j < 4; j++) {
r_data = qla4_8xxx_md_rw_32(ha,
MD_MIU_TEST_AGT_RDDATA[j],
0, 0);
*data_ptr++ = cpu_to_le32(r_data);
}
r_addr += 16;
}
write_unlock_irqrestore(&ha->hw_lock, flags);
DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%x\n",
__func__, (loop_cnt * 16)));
*d_ptr = data_ptr;
return QLA_SUCCESS;
}
static void ql4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
struct qla82xx_minidump_entry_hdr *entry_hdr,
int index)
{
entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
ha->host_no, index, entry_hdr->entry_type,
entry_hdr->d_ctrl.entry_capture_mask));
}
/**
* qla4_8xxx_collect_md_data - Retrieve firmware minidump data.
* @ha: pointer to adapter structure
**/
static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
{
int num_entry_hdr = 0;
struct qla82xx_minidump_entry_hdr *entry_hdr;
struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
uint32_t *data_ptr;
uint32_t data_collected = 0;
int i, rval = QLA_ERROR;
uint64_t now;
uint32_t timestamp;
if (!ha->fw_dump) {
ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n",
__func__, ha->host_no);
return rval;
}
tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
ha->fw_dump_tmplt_hdr;
data_ptr = (uint32_t *)((uint8_t *)ha->fw_dump +
ha->fw_dump_tmplt_size);
data_collected += ha->fw_dump_tmplt_size;
num_entry_hdr = tmplt_hdr->num_of_entries;
ql4_printk(KERN_INFO, ha, "[%s]: starting data ptr: %p\n",
__func__, data_ptr);
ql4_printk(KERN_INFO, ha,
"[%s]: no of entry headers in Template: 0x%x\n",
__func__, num_entry_hdr);
ql4_printk(KERN_INFO, ha, "[%s]: Capture Mask obtained: 0x%x\n",
__func__, ha->fw_dump_capture_mask);
ql4_printk(KERN_INFO, ha, "[%s]: Total_data_size 0x%x, %d obtained\n",
__func__, ha->fw_dump_size, ha->fw_dump_size);
/* Update current timestamp before taking dump */
now = get_jiffies_64();
timestamp = (u32)(jiffies_to_msecs(now) / 1000);
tmplt_hdr->driver_timestamp = timestamp;
entry_hdr = (struct qla82xx_minidump_entry_hdr *)
(((uint8_t *)ha->fw_dump_tmplt_hdr) +
tmplt_hdr->first_entry_offset);
/* Walk through the entry headers - validate/perform required action */
for (i = 0; i < num_entry_hdr; i++) {
if (data_collected >= ha->fw_dump_size) {
ql4_printk(KERN_INFO, ha,
"Data collected: [0x%x], Total Dump size: [0x%x]\n",
data_collected, ha->fw_dump_size);
return rval;
}
if (!(entry_hdr->d_ctrl.entry_capture_mask &
ha->fw_dump_capture_mask)) {
entry_hdr->d_ctrl.driver_flags |=
QLA82XX_DBG_SKIPPED_FLAG;
goto skip_nxt_entry;
}
DEBUG2(ql4_printk(KERN_INFO, ha,
"Data collected: [0x%x], Dump size left:[0x%x]\n",
data_collected,
(ha->fw_dump_size - data_collected)));
/* Decode the entry type and take required action to capture
* debug data
*/
switch (entry_hdr->entry_type) {
case QLA82XX_RDEND:
ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
break;
case QLA82XX_CNTRL:
rval = qla4_8xxx_minidump_process_control(ha,
entry_hdr);
if (rval != QLA_SUCCESS) {
ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
goto md_failed;
}
break;
case QLA82XX_RDCRB:
qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr,
&data_ptr);
break;
case QLA82XX_RDMEM:
rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
&data_ptr);
if (rval != QLA_SUCCESS) {
ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
goto md_failed;
}
break;
case QLA82XX_BOARD:
case QLA82XX_RDROM:
qla4_8xxx_minidump_process_rdrom(ha, entry_hdr,
&data_ptr);
break;
case QLA82XX_L2DTG:
case QLA82XX_L2ITG:
case QLA82XX_L2DAT:
case QLA82XX_L2INS:
rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr,
&data_ptr);
if (rval != QLA_SUCCESS) {
ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
goto md_failed;
}
break;
case QLA82XX_L1DAT:
case QLA82XX_L1INS:
qla4_8xxx_minidump_process_l1cache(ha, entry_hdr,
&data_ptr);
break;
case QLA82XX_RDOCM:
qla4_8xxx_minidump_process_rdocm(ha, entry_hdr,
&data_ptr);
break;
case QLA82XX_RDMUX:
qla4_8xxx_minidump_process_rdmux(ha, entry_hdr,
&data_ptr);
break;
case QLA82XX_QUEUE:
qla4_8xxx_minidump_process_queue(ha, entry_hdr,
&data_ptr);
break;
case QLA82XX_RDNOP:
default:
ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
break;
}
data_collected = (uint8_t *)data_ptr -
((uint8_t *)((uint8_t *)ha->fw_dump +
ha->fw_dump_tmplt_size));
skip_nxt_entry:
/* next entry in the template */
entry_hdr = (struct qla82xx_minidump_entry_hdr *)
(((uint8_t *)entry_hdr) +
entry_hdr->entry_size);
}
if ((data_collected + ha->fw_dump_tmplt_size) != ha->fw_dump_size) {
ql4_printk(KERN_INFO, ha,
"Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n",
data_collected, ha->fw_dump_size);
goto md_failed;
}
DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s Last entry: 0x%x\n",
__func__, i));
md_failed:
return rval;
}
/**
* qla4_8xxx_uevent_emit - Send uevent when the firmware dump is ready.
* @ha: pointer to adapter structure
* @code: uevent code identifying the event (e.g. QL4_UEVENT_CODE_FW_DUMP)
**/
static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code)
{
char event_string[40];
char *envp[] = { event_string, NULL };
switch (code) {
case QL4_UEVENT_CODE_FW_DUMP:
snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
ha->host_no);
break;
default:
/*do nothing*/
break;
}
kobject_uevent_env(&ha->pdev->dev.kobj, KOBJ_CHANGE, envp);
}
/**
* qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
* @ha: pointer to adapter structure
@@ -1659,6 +2324,15 @@ dev_initialize:
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
qla4_8xxx_idc_unlock(ha);
if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) &&
!test_and_set_bit(AF_82XX_FW_DUMPED, &ha->flags)) {
if (!qla4_8xxx_collect_md_data(ha)) {
qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP);
} else {
ql4_printk(KERN_INFO, ha, "Unable to collect minidump\n");
clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
}
}
rval = qla4_8xxx_try_start_fw(ha);
qla4_8xxx_idc_lock(ha);
@@ -1686,6 +2360,7 @@ static void
qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
{
uint32_t dev_state, drv_state, drv_active;
uint32_t active_mask = 0xFFFFFFFF;
unsigned long reset_timeout;
ql4_printk(KERN_INFO, ha,
@@ -1697,7 +2372,14 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
qla4_8xxx_idc_lock(ha);
}
qla4_8xxx_set_rst_ready(ha);
if (!test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s(%ld): reset acknowledged\n",
__func__, ha->host_no));
qla4_8xxx_set_rst_ready(ha);
} else {
active_mask = (~(1 << (ha->func_num * 4)));
}
/* wait for 10 seconds for reset ack from all functions */
reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
@@ -1709,12 +2391,24 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
"%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
__func__, ha->host_no, drv_state, drv_active);
while (drv_state != drv_active) {
while (drv_state != (drv_active & active_mask)) {
if (time_after_eq(jiffies, reset_timeout)) {
printk("%s: RESET TIMEOUT!\n", DRIVER_NAME);
ql4_printk(KERN_INFO, ha,
"%s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
DRIVER_NAME, drv_state, drv_active);
break;
}
/*
* When reset_owner times out, check which functions
* acked/did not ack
*/
if (test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
ql4_printk(KERN_INFO, ha,
"%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
__func__, ha->host_no, drv_state,
drv_active);
}
qla4_8xxx_idc_unlock(ha);
msleep(1000);
qla4_8xxx_idc_lock(ha);
@@ -1723,14 +2417,18 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
}
/* Clear RESET OWNER as we are not going to use it any further */
clear_bit(AF_82XX_RST_OWNER, &ha->flags);
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
ql4_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state,
dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state,
dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
/* Force to DEV_COLD unless someone else is starting a reset */
if (dev_state != QLA82XX_DEV_INITIALIZING) {
ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
qla4_8xxx_set_rst_ready(ha);
}
}
@@ -1765,8 +2463,9 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
}
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
ql4_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
dev_state, dev_state < MAX_STATES ?
qdev_state[dev_state] : "Unknown"));
/* wait for 30 seconds for device to go ready */
dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
@@ -1775,15 +2474,19 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
while (1) {
if (time_after_eq(jiffies, dev_init_timeout)) {
ql4_printk(KERN_WARNING, ha, "Device init failed!\n");
ql4_printk(KERN_WARNING, ha,
"%s: Device Init Failed 0x%x = %s\n",
DRIVER_NAME,
dev_state, dev_state < MAX_STATES ?
qdev_state[dev_state] : "Unknown");
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_FAILED);
}
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
ql4_printk(KERN_INFO, ha,
"2:Device state is 0x%x = %s\n", dev_state,
dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
dev_state, dev_state < MAX_STATES ?
qdev_state[dev_state] : "Unknown");
/* NOTE: Make sure idc unlocked upon exit of switch statement */
switch (dev_state) {
@@ -2184,6 +2887,7 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_NEED_RESET);
set_bit(AF_82XX_RST_OWNER, &ha->flags);
} else
ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n");
@@ -2195,8 +2899,10 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
qla4_8xxx_clear_rst_ready(ha);
qla4_8xxx_idc_unlock(ha);
if (rval == QLA_SUCCESS)
if (rval == QLA_SUCCESS) {
ql4_printk(KERN_INFO, ha, "Clearing AF_RECOVERY in qla4_8xxx_isp_reset\n");
clear_bit(AF_FW_RECOVERY, &ha->flags);
}
return rval;
}

@@ -792,4 +792,196 @@ struct crb_addr_pair {
#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0)
#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4)
/* Minidump related */
/* Entry Type Defines */
#define QLA82XX_RDNOP 0
#define QLA82XX_RDCRB 1
#define QLA82XX_RDMUX 2
#define QLA82XX_QUEUE 3
#define QLA82XX_BOARD 4
#define QLA82XX_RDOCM 6
#define QLA82XX_PREGS 7
#define QLA82XX_L1DTG 8
#define QLA82XX_L1ITG 9
#define QLA82XX_L1DAT 11
#define QLA82XX_L1INS 12
#define QLA82XX_L2DTG 21
#define QLA82XX_L2ITG 22
#define QLA82XX_L2DAT 23
#define QLA82XX_L2INS 24
#define QLA82XX_RDROM 71
#define QLA82XX_RDMEM 72
#define QLA82XX_CNTRL 98
#define QLA82XX_RDEND 255
/* Opcodes for control entries.
 * These flags are bit fields and may be combined within one entry.
 */
#define QLA82XX_DBG_OPCODE_WR 0x01
#define QLA82XX_DBG_OPCODE_RW 0x02
#define QLA82XX_DBG_OPCODE_AND 0x04
#define QLA82XX_DBG_OPCODE_OR 0x08
#define QLA82XX_DBG_OPCODE_POLL 0x10
#define QLA82XX_DBG_OPCODE_RDSTATE 0x20
#define QLA82XX_DBG_OPCODE_WRSTATE 0x40
#define QLA82XX_DBG_OPCODE_MDSTATE 0x80
/* Driver Flags */
#define QLA82XX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */
#define QLA82XX_DBG_SIZE_ERR_FLAG 0x40 /* Entry vs Capture size
* mismatch */
/* driver_code is for the driver to record information about the entry;
 * it is currently unused.
 */
struct qla82xx_minidump_entry_hdr {
uint32_t entry_type;
uint32_t entry_size;
uint32_t entry_capture_size;
struct {
uint8_t entry_capture_mask;
uint8_t entry_code;
uint8_t driver_code;
uint8_t driver_flags;
} d_ctrl;
};
/* Read CRB entry header */
struct qla82xx_minidump_entry_crb {
struct qla82xx_minidump_entry_hdr h;
uint32_t addr;
struct {
uint8_t addr_stride;
uint8_t state_index_a;
uint16_t poll_timeout;
} crb_strd;
uint32_t data_size;
uint32_t op_count;
struct {
uint8_t opcode;
uint8_t state_index_v;
uint8_t shl;
uint8_t shr;
} crb_ctrl;
uint32_t value_1;
uint32_t value_2;
uint32_t value_3;
};
struct qla82xx_minidump_entry_cache {
struct qla82xx_minidump_entry_hdr h;
uint32_t tag_reg_addr;
struct {
uint16_t tag_value_stride;
uint16_t init_tag_value;
} addr_ctrl;
uint32_t data_size;
uint32_t op_count;
uint32_t control_addr;
struct {
uint16_t write_value;
uint8_t poll_mask;
uint8_t poll_wait;
} cache_ctrl;
uint32_t read_addr;
struct {
uint8_t read_addr_stride;
uint8_t read_addr_cnt;
uint16_t rsvd_1;
} read_ctrl;
};
/* Read OCM */
struct qla82xx_minidump_entry_rdocm {
struct qla82xx_minidump_entry_hdr h;
uint32_t rsvd_0;
uint32_t rsvd_1;
uint32_t data_size;
uint32_t op_count;
uint32_t rsvd_2;
uint32_t rsvd_3;
uint32_t read_addr;
uint32_t read_addr_stride;
};
/* Read Memory */
struct qla82xx_minidump_entry_rdmem {
struct qla82xx_minidump_entry_hdr h;
uint32_t rsvd[6];
uint32_t read_addr;
uint32_t read_data_size;
};
/* Read ROM */
struct qla82xx_minidump_entry_rdrom {
struct qla82xx_minidump_entry_hdr h;
uint32_t rsvd[6];
uint32_t read_addr;
uint32_t read_data_size;
};
/* Mux entry */
struct qla82xx_minidump_entry_mux {
struct qla82xx_minidump_entry_hdr h;
uint32_t select_addr;
uint32_t rsvd_0;
uint32_t data_size;
uint32_t op_count;
uint32_t select_value;
uint32_t select_value_stride;
uint32_t read_addr;
uint32_t rsvd_1;
};
/* Queue entry */
struct qla82xx_minidump_entry_queue {
struct qla82xx_minidump_entry_hdr h;
uint32_t select_addr;
struct {
uint16_t queue_id_stride;
uint16_t rsvd_0;
} q_strd;
uint32_t data_size;
uint32_t op_count;
uint32_t rsvd_1;
uint32_t rsvd_2;
uint32_t read_addr;
struct {
uint8_t read_addr_stride;
uint8_t read_addr_cnt;
uint16_t rsvd_3;
} rd_strd;
};
#define QLA82XX_MINIDUMP_OCM0_SIZE (256 * 1024)
#define QLA82XX_MINIDUMP_L1C_SIZE (256 * 1024)
#define QLA82XX_MINIDUMP_L2C_SIZE (1536 * 1024)
#define QLA82XX_MINIDUMP_COMMON_STR_SIZE 0
#define QLA82XX_MINIDUMP_FCOE_STR_SIZE 0
#define QLA82XX_MINIDUMP_MEM_SIZE 0
#define QLA82XX_MAX_ENTRY_HDR 4
struct qla82xx_minidump {
uint32_t md_ocm0_data[QLA82XX_MINIDUMP_OCM0_SIZE];
uint32_t md_l1c_data[QLA82XX_MINIDUMP_L1C_SIZE];
uint32_t md_l2c_data[QLA82XX_MINIDUMP_L2C_SIZE];
uint32_t md_cs_data[QLA82XX_MINIDUMP_COMMON_STR_SIZE];
uint32_t md_fcoes_data[QLA82XX_MINIDUMP_FCOE_STR_SIZE];
uint32_t md_mem_data[QLA82XX_MINIDUMP_MEM_SIZE];
};
#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129
#define RQST_TMPLT_SIZE 0x0
#define RQST_TMPLT 0x1
#define MD_DIRECT_ROM_WINDOW 0x42110030
#define MD_DIRECT_ROM_READ_BASE 0x42150000
#define MD_MIU_TEST_AGT_CTRL 0x41000090
#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8,
0x410000AC, 0x410000B8, 0x410000BC };
#endif

@@ -68,12 +68,34 @@ MODULE_PARM_DESC(ql4xmaxqdepth,
" Maximum queue depth to report for target devices.\n"
"\t\t Default: 32.");
static int ql4xqfulltracking = 1;
module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xqfulltracking,
" Enable or disable dynamic tracking and adjustment of\n"
"\t\t scsi device queue depth.\n"
"\t\t 0 - Disable.\n"
"\t\t 1 - Enable. (Default)");
static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
MODULE_PARM_DESC(ql4xsess_recovery_tmo,
" Target Session Recovery Timeout.\n"
"\t\t Default: 120 sec.");
int ql4xmdcapmask = 0x1F;
module_param(ql4xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql4xmdcapmask,
" Set the Minidump driver capture mask level.\n"
"\t\t Default is 0x1F.\n"
"\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");
int ql4xenablemd = 1;
module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xenablemd,
" Set to enable minidump.\n"
"\t\t 0 - disable minidump\n"
"\t\t 1 - enable minidump (Default)");
static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
/*
* SCSI host template entry points
@@ -140,6 +162,8 @@ static int qla4xxx_slave_configure(struct scsi_device *device);
static void qla4xxx_slave_destroy(struct scsi_device *sdev);
static umode_t ql4_attr_is_visible(int param_type, int param);
static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
int reason);
static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
QLA82XX_LEGACY_INTR_CONFIG;
@@ -159,6 +183,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
.slave_configure = qla4xxx_slave_configure,
.slave_alloc = qla4xxx_slave_alloc,
.slave_destroy = qla4xxx_slave_destroy,
.change_queue_depth = qla4xxx_change_queue_depth,
.this_id = -1,
.cmd_per_lun = 3,
@@ -1555,19 +1580,53 @@ static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
unsigned long flags;
unsigned long flags, wtime;
struct dev_db_entry *fw_ddb_entry = NULL;
dma_addr_t fw_ddb_entry_dma;
uint32_t ddb_state;
int ret;
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
&fw_ddb_entry_dma, GFP_KERNEL);
if (!fw_ddb_entry) {
ql4_printk(KERN_ERR, ha,
"%s: Unable to allocate dma buffer\n", __func__);
goto destroy_session;
}
wtime = jiffies + (HZ * LOGOUT_TOV);
do {
ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
fw_ddb_entry, fw_ddb_entry_dma,
NULL, NULL, &ddb_state, NULL,
NULL, NULL);
if (ret == QLA_ERROR)
goto destroy_session;
if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
(ddb_state == DDB_DS_SESSION_FAILED))
goto destroy_session;
schedule_timeout_uninterruptible(HZ);
} while (time_after(wtime, jiffies));
destroy_session:
qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
spin_lock_irqsave(&ha->hardware_lock, flags);
qla4xxx_free_ddb(ha, ddb_entry);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
iscsi_session_teardown(cls_sess);
if (fw_ddb_entry)
dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
fw_ddb_entry, fw_ddb_entry_dma);
}
static struct iscsi_cls_conn *
@@ -2220,6 +2279,9 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
ha->queues_dma);
if (ha->fw_dump)
vfree(ha->fw_dump);
ha->queues_len = 0;
ha->queues = NULL;
ha->queues_dma = 0;
@@ -2229,6 +2291,8 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
ha->response_dma = 0;
ha->shadow_regs = NULL;
ha->shadow_regs_dma = 0;
ha->fw_dump = NULL;
ha->fw_dump_size = 0;
/* Free srb pool. */
if (ha->srb_mempool)
@@ -5023,6 +5087,8 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
set_bit(AF_INIT_DONE, &ha->flags);
qla4_8xxx_alloc_sysfs_attr(ha);
printk(KERN_INFO
" QLogic iSCSI HBA Driver version: %s\n"
" QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
@@ -5149,6 +5215,7 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
iscsi_boot_destroy_kset(ha->boot_kset);
qla4xxx_destroy_fw_ddb_session(ha);
qla4_8xxx_free_sysfs_attr(ha);
scsi_remove_host(ha->host);
@@ -5217,6 +5284,15 @@ static void qla4xxx_slave_destroy(struct scsi_device *sdev)
scsi_deactivate_tcq(sdev, 1);
}
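/*
 * Honour the ql4xqfulltracking module parameter: when dynamic queue
 * depth tracking is disabled, refuse queue-depth change requests.
 */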
static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
int reason)
{
if (!ql4xqfulltracking)
return -EOPNOTSUPP;
return iscsi_change_queue_depth(sdev, qdepth, reason);
}
/**
* qla4xxx_del_from_active_array - returns an active srb
* @ha: Pointer to host adapter structure.

@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#define QLA4XXX_DRIVER_VERSION "5.02.00-k16"
#define QLA4XXX_DRIVER_VERSION "5.02.00-k17"

View File

@@ -1378,16 +1378,19 @@ static int scsi_lld_busy(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
struct Scsi_Host *shost;
struct scsi_target *starget;
if (!sdev)
return 0;
shost = sdev->host;
starget = scsi_target(sdev);
if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
/*
* Ignore the host/starget busy state.
* Since the block layer has no concept of fairness across multiple
* queues, host/starget congestion needs to be handled at the SCSI
* layer.
*/
if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
return 1;
return 0;

@@ -24,8 +24,11 @@ static int scsi_dev_type_suspend(struct device *dev, pm_message_t msg)
err = scsi_device_quiesce(to_scsi_device(dev));
if (err == 0) {
drv = dev->driver;
if (drv && drv->suspend)
if (drv && drv->suspend) {
err = drv->suspend(dev, msg);
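/* if the driver's suspend callback failed, undo the quiesce */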
if (err)
scsi_device_resume(to_scsi_device(dev));
}
}
dev_dbg(dev, "scsi suspend: %d\n", err);
return err;

@@ -147,7 +147,7 @@ int scsi_complete_async_scans(void)
do {
if (list_empty(&scanning_hosts))
return 0;
goto out;
/* If we can't get memory immediately, that's OK. Just
* sleep a little. Even if we never get memory, the async
* scans will finish eventually.
@@ -179,8 +179,11 @@ }
}
done:
spin_unlock(&async_scan_lock);
kfree(data);
out:
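/* also flush async probes (the sd probe domain) before returning */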
async_synchronize_full_domain(&scsi_sd_probe_domain);
return 0;
}

@@ -12,7 +12,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <scsi/scsi_scan.h>
#include "scsi_priv.h"
static int __init wait_scan_init(void)
{

@@ -1836,7 +1836,7 @@ ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = pci_request_regions(pdev, UFSHCD);
if (err < 0) {
dev_err(&pdev->dev, "request regions failed\n");
goto out_disable;
goto out_host_put;
}
hba->mmio_base = pci_ioremap_bar(pdev, 0);
@@ -1925,8 +1925,9 @@ out_iounmap:
iounmap(hba->mmio_base);
out_release_regions:
pci_release_regions(pdev);
out_disable:
out_host_put:
scsi_host_put(host);
out_disable:
pci_clear_master(pdev);
pci_disable_device(pdev);
out_error:

include/scsi/fcoe_sysfs.h (new file)

@@ -0,0 +1,124 @@
/*
* Copyright (c) 2011-2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* Maintained at www.Open-FCoE.org
*/
#ifndef FCOE_SYSFS
#define FCOE_SYSFS
#include <linux/if_ether.h>
#include <linux/device.h>
#include <scsi/fc/fc_fcoe.h>
struct fcoe_ctlr_device;
struct fcoe_fcf_device;
struct fcoe_sysfs_function_template {
void (*get_fcoe_ctlr_link_fail)(struct fcoe_ctlr_device *);
void (*get_fcoe_ctlr_vlink_fail)(struct fcoe_ctlr_device *);
void (*get_fcoe_ctlr_miss_fka)(struct fcoe_ctlr_device *);
void (*get_fcoe_ctlr_symb_err)(struct fcoe_ctlr_device *);
void (*get_fcoe_ctlr_err_block)(struct fcoe_ctlr_device *);
void (*get_fcoe_ctlr_fcs_error)(struct fcoe_ctlr_device *);
void (*get_fcoe_ctlr_mode)(struct fcoe_ctlr_device *);
void (*get_fcoe_fcf_selected)(struct fcoe_fcf_device *);
void (*get_fcoe_fcf_vlan_id)(struct fcoe_fcf_device *);
};
#define dev_to_ctlr(d) \
container_of((d), struct fcoe_ctlr_device, dev)
enum fip_conn_type {
FIP_CONN_TYPE_UNKNOWN,
FIP_CONN_TYPE_FABRIC,
FIP_CONN_TYPE_VN2VN,
};
struct fcoe_ctlr_device {
u32 id;
struct device dev;
struct fcoe_sysfs_function_template *f;
struct list_head fcfs;
char work_q_name[20];
struct workqueue_struct *work_q;
char devloss_work_q_name[20];
struct workqueue_struct *devloss_work_q;
struct mutex lock;
int fcf_dev_loss_tmo;
enum fip_conn_type mode;
/* expected in host order for displaying */
struct fcoe_fc_els_lesb lesb;
};
static inline void *fcoe_ctlr_device_priv(const struct fcoe_ctlr_device *ctlr)
{
return (void *)(ctlr + 1);
}
/* fcf states */
enum fcf_state {
FCOE_FCF_STATE_UNKNOWN,
FCOE_FCF_STATE_DISCONNECTED,
FCOE_FCF_STATE_CONNECTED,
FCOE_FCF_STATE_DELETED,
};
struct fcoe_fcf_device {
u32 id;
struct device dev;
struct list_head peers;
struct work_struct delete_work;
struct delayed_work dev_loss_work;
u32 dev_loss_tmo;
void *priv;
enum fcf_state state;
u64 fabric_name;
u64 switch_name;
u32 fc_map;
u16 vfid;
u8 mac[ETH_ALEN];
u8 priority;
u32 fka_period;
u8 selected;
u16 vlan_id;
};
#define dev_to_fcf(d) \
container_of((d), struct fcoe_fcf_device, dev)
/* parentage should never be missing */
#define fcoe_fcf_dev_to_ctlr_dev(x) \
dev_to_ctlr((x)->dev.parent)
#define fcoe_fcf_device_priv(x) \
((x)->priv)
struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
struct fcoe_sysfs_function_template *f,
int priv_size);
void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *);
struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *,
struct fcoe_fcf_device *);
void fcoe_fcf_device_delete(struct fcoe_fcf_device *);
int __init fcoe_sysfs_setup(void);
void __exit fcoe_sysfs_teardown(void);
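/*
 * Typical LLD usage (sketch only; the 'foo' names are illustrative and
 * not part of this API):
 *
 *	ctlr = fcoe_ctlr_device_add(parent_dev, &foo_fcoe_sysfs_templ,
 *				    sizeof(struct foo_ctlr_priv));
 *	fcf = fcoe_fcf_device_add(ctlr, &tmp_fcf);
 *	...
 *	fcoe_fcf_device_delete(fcf);
 *	fcoe_ctlr_device_delete(ctlr);
 */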
#endif /* FCOE_SYSFS */

@@ -29,6 +29,7 @@
#include <linux/random.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fcoe_sysfs.h>
#define FCOE_MAX_CMD_LEN 16 /* Supported CDB length */
@@ -158,9 +159,25 @@ struct fcoe_ctlr {
spinlock_t ctlr_lock;
};
/**
* fcoe_ctlr_priv() - Return the private data from a fcoe_ctlr
* @ctlr: The fcoe_ctlr whose private data will be returned
*/
static inline void *fcoe_ctlr_priv(const struct fcoe_ctlr *ctlr)
{
return (void *)(ctlr + 1);
}
#define fcoe_ctlr_to_ctlr_dev(x) \
(struct fcoe_ctlr_device *)(((struct fcoe_ctlr_device *)(x)) - 1)
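/*
 * fcoe_ctlr_to_ctlr_dev() relies on the fcoe_ctlr being allocated
 * immediately after its fcoe_ctlr_device, so stepping back one
 * fcoe_ctlr_device recovers the containing device.
 */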
/**
* struct fcoe_fcf - Fibre-Channel Forwarder
* @list: list linkage
* @event_work: Work for FC Transport actions queue
* @event: The event to be processed
* @fip: The controller that the FCF was discovered on
* @fcf_dev: The associated fcoe_fcf_device instance
* @time: system time (jiffies) when an advertisement was last received
* @switch_name: WWN of switch from advertisement
* @fabric_name: WWN of fabric from advertisement
@@ -182,6 +199,9 @@ struct fcoe_ctlr {
*/
struct fcoe_fcf {
struct list_head list;
struct work_struct event_work;
struct fcoe_ctlr *fip;
struct fcoe_fcf_device *fcf_dev;
unsigned long time;
u64 switch_name;
@@ -198,6 +218,9 @@ struct fcoe_fcf {
u8 fd_flags:1;
};
#define fcoe_fcf_to_fcf_dev(x) \
((x)->fcf_dev)
/**
* struct fcoe_rport - VN2VN remote port
* @time: time of create or last beacon packet received from node
@@ -333,6 +356,10 @@ void fcoe_queue_timer(ulong lport);
int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
struct fcoe_percpu_s *fps);
/* FCoE Sysfs helpers */
void fcoe_fcf_get_selected(struct fcoe_fcf_device *);
void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *);
/**
* struct netdev_list
* A mapping from netdevice to fcoe_transport