SCSI misc on 20240718

Updates to the usual drivers (ufs, lpfc, qla2xxx, mpi3mr) plus some
 misc small fixes. The only core changes are to both bsg and scsi to
 pass in the device instead of setting it afterwards as q->queuedata,
 so no functional change.
 
 Signed-off-by: James E.J. Bottomley <James.Bottomley@HansenPartnership.com>
 -----BEGIN PGP SIGNATURE-----
 
 iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCZpl6BiYcamFtZXMuYm90
 dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pishVkAAQCOLA0V
 TFI4RfRjk7TW/6ZgKVS5A4NNLG8p8r9F7Y/QswEAlT4NrYnHiHQwBYEiTw6w02J8
 SqiHtHKv/SQ7LIwEJlQ=
 =WhCT
 -----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "Updates to the usual drivers (ufs, lpfc, qla2xxx, mpi3mr) plus some
  misc small fixes.

  The only core changes are to both bsg and scsi to pass in the device
  instead of setting it afterwards as q->queuedata, so no functional
  change"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (69 commits)
  scsi: aha152x: Use DECLARE_COMPLETION_ONSTACK for non-constant completion
  scsi: qla2xxx: Convert comma to semicolon
  scsi: qla2xxx: Update version to 10.02.09.300-k
  scsi: qla2xxx: Use QP lock to search for bsg
  scsi: qla2xxx: Reduce fabric scan duplicate code
  scsi: qla2xxx: Fix optrom version displayed in FDMI
  scsi: qla2xxx: During vport delete send async logout explicitly
  scsi: qla2xxx: Complete command early within lock
  scsi: qla2xxx: Fix flash read failure
  scsi: qla2xxx: Return ENOBUFS if sg_cnt is more than one for ELS cmds
  scsi: qla2xxx: Fix for possible memory corruption
  scsi: qla2xxx: validate nvme_local_port correctly
  scsi: qla2xxx: Unable to act on RSCN for port online
  scsi: ufs: exynos: Add support for Flash Memory Protector (FMP)
  scsi: ufs: core: Add UFSHCD_QUIRK_KEYS_IN_PRDT
  scsi: ufs: core: Add fill_crypto_prdt variant op
  scsi: ufs: core: Add UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE
  scsi: ufs: core: fold ufshcd_clear_keyslot() into its caller
  scsi: ufs: core: Add UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE
  scsi: ufs: mcq: Make .get_hba_mac() optional
  ...
Merged by: Linus Torvalds, 2024-07-19 10:56:58 -07:00, commit 4305ca0087
65 changed files with 3144 additions and 618 deletions


@@ -920,14 +920,16 @@ Description: This file shows whether the configuration descriptor is locked.
 What: /sys/bus/platform/drivers/ufshcd/*/attributes/max_number_of_rtt
 What: /sys/bus/platform/devices/*.ufs/attributes/max_number_of_rtt
-Date: February 2018
-Contact: Stanislav Nijnikov <stanislav.nijnikov@wdc.com>
+Date: May 2024
+Contact: Avri Altman <avri.altman@wdc.com>
 Description: This file provides the maximum current number of
-	outstanding RTTs in device that is allowed. The full
-	information about the attribute could be found at
-	UFS specifications 2.1.
-	The file is read only.
+	outstanding RTTs in device that is allowed. bMaxNumOfRTT is a
+	read-write persistent attribute and is equal to two after device
+	manufacturing. It shall not be set to a value greater than
+	bDeviceRTTCap value, and it may be set only when the hw queues are
+	empty.
+	The file is read write.
 What: /sys/bus/platform/drivers/ufshcd/*/attributes/exception_event_control
 What: /sys/bus/platform/devices/*.ufs/attributes/exception_event_control
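
Since bMaxNumOfRTT is now writable, user space can query (and, when the
hardware queues are idle, tune) it through sysfs. A hypothetical C sketch of
a read; the device path is illustrative and must be replaced with the real
*.ufs platform device on the target, and the hex format is an assumption
matching how ufshcd typically prints attribute values:

        #include <stdio.h>

        int main(void)
        {
                /* Illustrative path: substitute the actual *.ufs device. */
                const char *path = "/sys/bus/platform/devices/"
                        "1d84000.ufs/attributes/max_number_of_rtt";
                FILE *f = fopen(path, "r");
                unsigned int rtt;

                if (!f)
                        return 1;
                if (fscanf(f, "%x", &rtt) == 1) /* values print as 0x... */
                        printf("bMaxNumOfRTT: %u\n", rtt);
                fclose(f);
                return 0;
        }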


@ -385,13 +385,12 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
if (blk_mq_alloc_tag_set(set))
goto out_tag_set;
q = blk_mq_alloc_queue(set, lim, NULL);
q = blk_mq_alloc_queue(set, lim, dev);
if (IS_ERR(q)) {
ret = PTR_ERR(q);
goto out_queue;
}
q->queuedata = dev;
blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
bset->bd = bsg_register_queue(q, dev, name, bsg_transport_sg_io_fn);


@ -78,6 +78,7 @@ static struct blogic_drvr_options blogic_drvr_options[BLOGIC_MAX_ADAPTERS];
BusLogic can be assigned a string by insmod.
*/
MODULE_DESCRIPTION("BusLogic MultiMaster and FlashPoint SCSI Host Adapter driver");
MODULE_LICENSE("GPL");
#ifdef MODULE
static char *BusLogic;


@ -11545,6 +11545,7 @@ static void __exit advansys_exit(void)
module_init(advansys_init);
module_exit(advansys_exit);
MODULE_DESCRIPTION("AdvanSys SCSI Adapter driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("advansys/mcode.bin");
MODULE_FIRMWARE("advansys/3550.bin");


@ -1072,7 +1072,7 @@ static int aha152x_abort(struct scsi_cmnd *SCpnt)
static int aha152x_device_reset(struct scsi_cmnd * SCpnt)
{
struct Scsi_Host *shpnt = SCpnt->device->host;
DECLARE_COMPLETION(done);
DECLARE_COMPLETION_ONSTACK(done);
int ret, issued, disconnected;
unsigned char old_cmd_len = SCpnt->cmd_len;
unsigned long flags;
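
For context, DECLARE_COMPLETION() is intended for file-scope objects; used on
the stack it can confuse lockdep, which wants each on-stack instance
initialised via DECLARE_COMPLETION_ONSTACK() so it gets its own lock class.
A minimal sketch of the corrected pattern (function name illustrative):

        #include <linux/completion.h>

        static void example_wait_path(void)
        {
                DECLARE_COMPLETION_ONSTACK(done); /* per-call, on this frame */

                /* ... hand &done to the completing context and start the
                 * operation; that context later calls complete(&done) ... */
                wait_for_completion(&done);
        }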


@ -1009,6 +1009,8 @@ static int aha1542_biosparam(struct scsi_device *sdev,
return 0;
}
MODULE_DESCRIPTION("Adaptec AHA-1542 SCSI host adapter driver");
MODULE_LICENSE("GPL");
static int aha1542_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)


@ -681,4 +681,5 @@ static __exit void aha1740_exit (void)
module_init (aha1740_init);
module_exit (aha1740_exit);
MODULE_DESCRIPTION("Adaptec AHA1740 SCSI host adapter driver");
MODULE_LICENSE("GPL");


@ -2450,7 +2450,7 @@ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt)
return 0;
}
DEF_SCSI_QCMD(acornscsi_queuecmd)
static DEF_SCSI_QCMD(acornscsi_queuecmd)
enum res_abort { res_not_running, res_success, res_success_clear, res_snooze };
@ -2552,7 +2552,7 @@ static enum res_abort acornscsi_do_abort(AS_Host *host, struct scsi_cmnd *SCpnt)
* Params : SCpnt - command to abort
* Returns : one of SCSI_ABORT_ macros
*/
int acornscsi_abort(struct scsi_cmnd *SCpnt)
static int acornscsi_abort(struct scsi_cmnd *SCpnt)
{
AS_Host *host = (AS_Host *) SCpnt->device->host->hostdata;
int result;
@ -2634,7 +2634,7 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
* Params : SCpnt - command causing reset
* Returns : one of SCSI_RESET_ macros
*/
int acornscsi_host_reset(struct scsi_cmnd *SCpnt)
static int acornscsi_host_reset(struct scsi_cmnd *SCpnt)
{
AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;
struct scsi_cmnd *SCptr;
@ -2679,8 +2679,7 @@ int acornscsi_host_reset(struct scsi_cmnd *SCpnt)
* Params : host - host to give information on
* Returns : a constant string
*/
const
char *acornscsi_info(struct Scsi_Host *host)
static const char *acornscsi_info(struct Scsi_Host *host)
{
static char string[100], *p;


@ -296,7 +296,7 @@ cumanascsi_2_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp)
* Params : host - driver host structure to return info for.
* Returns : pointer to a static buffer containing null terminated string.
*/
const char *cumanascsi_2_info(struct Scsi_Host *host)
static const char *cumanascsi_2_info(struct Scsi_Host *host)
{
struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata;
static char string[150];


@ -381,7 +381,7 @@ eesoxscsi_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp)
* Params : host - driver host structure to return info for.
* Returns : pointer to a static buffer containing null terminated string.
*/
const char *eesoxscsi_info(struct Scsi_Host *host)
static const char *eesoxscsi_info(struct Scsi_Host *host)
{
struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata;
static char string[150];


@ -184,7 +184,7 @@ powertecscsi_dma_stop(struct Scsi_Host *host, struct scsi_pointer *SCp)
* Params : host - driver host structure to return info for.
* Returns : pointer to a static buffer containing null terminated string.
*/
const char *powertecscsi_info(struct Scsi_Host *host)
static const char *powertecscsi_info(struct Scsi_Host *host)
{
struct powertec_info *info = (struct powertec_info *)host->hostdata;
static char string[150];


@ -894,4 +894,5 @@ static struct platform_driver atari_scsi_driver __refdata = {
module_platform_driver_probe(atari_scsi_driver, atari_scsi_probe);
MODULE_ALIAS("platform:" DRV_MODULE_NAME);
MODULE_DESCRIPTION("Atari TT/Falcon NCR5380 SCSI driver");
MODULE_LICENSE("GPL");


@ -1724,6 +1724,8 @@ static void atp870u_remove (struct pci_dev *pdev)
atp870u_free_tables(pshost);
scsi_host_put(pshost);
}
MODULE_DESCRIPTION("ACARD SCSI host adapter driver");
MODULE_LICENSE("GPL");
static const struct scsi_host_template atp870u_template = {


@ -778,5 +778,6 @@ static void __exit efct_exit(void)
module_init(efct_init);
module_exit(efct_exit);
MODULE_VERSION(EFCT_DRIVER_VERSION);
MODULE_DESCRIPTION("Emulex Fibre Channel Target driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Broadcom");


@ -110,6 +110,7 @@ module_param_array(card, int, NULL, 0);
MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC3181E, 4=HP C2502)");
MODULE_ALIAS("g_NCR5380_mmio");
MODULE_DESCRIPTION("Generic NCR5380/NCR53C400 SCSI driver");
MODULE_LICENSE("GPL");
static void g_NCR5380_trigger_irq(struct Scsi_Host *instance)


@ -1279,4 +1279,5 @@ static struct parport_driver imm_driver = {
};
module_parport_driver(imm_driver);
MODULE_DESCRIPTION("IOMEGA MatchMaker parallel port SCSI host adapter driver");
MODULE_LICENSE("GPL");


@ -758,6 +758,7 @@ static __exit void isci_exit(void)
sas_release_transport(isci_transport_template);
}
MODULE_DESCRIPTION("Intel(R) C600 Series Chipset SAS Controller driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(ISCI_FW_NAME);
module_init(isci_init);


@ -1831,6 +1831,7 @@ static int
lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
{
LPFC_MBOXQ_t *mbox = NULL;
u32 payload_len;
unsigned long val = 0;
char *pval = NULL;
int rc = 0;
@ -1869,9 +1870,11 @@ lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
if (!mbox)
return -ENOMEM;
payload_len = sizeof(struct lpfc_mbx_set_trunk_mode) -
sizeof(struct lpfc_sli4_cfg_mhdr);
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE,
12, LPFC_SLI4_MBX_EMBED);
payload_len, LPFC_SLI4_MBX_EMBED);
bf_set(lpfc_mbx_set_trunk_mode,
&mbox->u.mqe.un.set_trunk_mode,
@ -1907,6 +1910,11 @@ lpfc_xcvr_data_show(struct device *dev, struct device_attribute *attr,
/* Get transceiver information */
rdp_context = kmalloc(sizeof(*rdp_context), GFP_KERNEL);
if (!rdp_context) {
len = scnprintf(buf, PAGE_SIZE - len,
"SPF info NA: alloc failure\n");
return len;
}
rc = lpfc_get_sfp_info_wait(phba, rdp_context);
if (rc) {


@ -1553,22 +1553,14 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE &&
ndlp->nlp_fc4_type) {
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
/* This is a fabric topology so if discovery
* started with an unsolicited PLOGI, don't
* send a PRLI. Targets don't issue PLOGI or
* PRLI when acting as a target. Likely this is
* an initiator function.
*/
if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
lpfc_nlp_set_state(vport, ndlp,
NLP_STE_PRLI_ISSUE);
lpfc_issue_els_prli(vport, ndlp, 0);
}
lpfc_nlp_set_state(vport, ndlp,
NLP_STE_PRLI_ISSUE);
lpfc_issue_els_prli(vport, ndlp, 0);
} else if (!ndlp->nlp_fc4_type) {
/* If fc4 type is still unknown, then LOGO */
lpfc_printf_vlog(vport, KERN_INFO,
LOG_DISCOVERY | LOG_NODE,
"6443 Sending LOGO ndlp x%px,"
"6443 Sending LOGO ndlp x%px, "
"DID x%06x with fc4_type: "
"x%08x, state: %d\n",
ndlp, did, ndlp->nlp_fc4_type,


@ -7302,13 +7302,13 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
}
mbox->vport = phba->pport;
rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30);
rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_SLI4_CONFIG_TMO);
if (rc == MBX_NOT_FINISHED) {
rc = 1;
goto error;
}
if (rc == MBX_TIMEOUT)
goto error;
if (phba->sli_rev == LPFC_SLI_REV4)
mp = mbox->ctx_buf;
else
@ -7361,7 +7361,10 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
}
rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30);
rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_SLI4_CONFIG_TMO);
if (rc == MBX_TIMEOUT)
goto error;
if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) {
rc = 1;
goto error;
@ -7372,8 +7375,10 @@ int lpfc_get_sfp_info_wait(struct lpfc_hba *phba,
DMP_SFF_PAGE_A2_SIZE);
error:
mbox->ctx_buf = mpsave;
lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
if (mbox->mbox_flag & LPFC_MBX_WAKE) {
mbox->ctx_buf = mpsave;
lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
}
return rc;
@ -9665,7 +9670,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
spin_lock_irqsave(&phba->hbalock, iflags);
list_del_init(&piocb->dlist);
if (mbx_tmo_err)
if (mbx_tmo_err || !(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
list_move_tail(&piocb->list, &cancel_list);
else
lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);


@ -214,6 +214,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
return;
/* check for recovered fabric node */
if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
ndlp->nlp_DID == Fabric_DID)
return;
if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6789 rport name %llx != node port name %llx",
@ -546,6 +551,9 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, kref_read(&ndlp->kref),
ndlp, ndlp->nlp_flag,
vport->port_state);
spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
spin_unlock_irqrestore(&ndlp->lock, iflags);
return fcf_inuse;
}
@ -5725,7 +5733,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
return ndlp;
if (ndlp->nlp_state > NLP_STE_UNUSED_NODE &&
ndlp->nlp_state < NLP_STE_PRLI_ISSUE) {
ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) {
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
}


@ -10579,10 +10579,11 @@ lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
struct sli4_sge *sgl;
struct sli4_sge_le *sgl;
u32 type_size;
/* 128 byte wqe support here */
sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
sgl = (struct sli4_sge_le *)lpfc_cmd->dma_sgl;
if (phba->fcp_embed_io) {
struct fcp_cmnd *fcp_cmnd;
@ -10591,9 +10592,9 @@ lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
fcp_cmnd = lpfc_cmd->fcp_cmnd;
/* Word 0-2 - FCP_CMND */
wqe->generic.bde.tus.f.bdeFlags =
BUFF_TYPE_BDE_IMMED;
wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
type_size = le32_to_cpu(sgl->sge_len);
type_size |= ULP_BDE64_TYPE_BDE_IMMED;
wqe->generic.bde.tus.w = type_size;
wqe->generic.bde.addrHigh = 0;
wqe->generic.bde.addrLow = 72; /* Word 18 */
@ -10602,13 +10603,13 @@ lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
/* Word 18-29 FCP CMND Payload */
ptr = &wqe->words[18];
memcpy(ptr, fcp_cmnd, sgl->sge_len);
lpfc_sli_pcimem_bcopy(fcp_cmnd, ptr, le32_to_cpu(sgl->sge_len));
} else {
/* Word 0-2 - Inline BDE */
wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
wqe->generic.bde.addrHigh = sgl->addr_hi;
wqe->generic.bde.addrLow = sgl->addr_lo;
wqe->generic.bde.tus.f.bdeSize = le32_to_cpu(sgl->sge_len);
wqe->generic.bde.addrHigh = le32_to_cpu(sgl->addr_hi);
wqe->generic.bde.addrLow = le32_to_cpu(sgl->addr_lo);
/* Word 10 */
bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
@ -12301,18 +12302,16 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto release_iocb;
}
}
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
"0327 Cannot abort els iocb x%px "
"with io cmd xri %x abort tag : x%x, "
"abort status %x abort code %x\n",
cmdiocb, get_job_abtsiotag(phba, cmdiocb),
(phba->sli_rev == LPFC_SLI_REV4) ?
get_wqe_reqtag(cmdiocb) :
cmdiocb->iocb.un.acxri.abortContextTag,
ulp_status, ulp_word4);
}
lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
"0327 Abort els iocb complete x%px with io cmd xri %x "
"abort tag x%x abort status %x abort code %x\n",
cmdiocb, get_job_abtsiotag(phba, cmdiocb),
(phba->sli_rev == LPFC_SLI_REV4) ?
get_wqe_reqtag(cmdiocb) :
cmdiocb->iocb.ulpIoTag,
ulp_status, ulp_word4);
release_iocb:
lpfc_sli_release_iocbq(phba, cmdiocb);
return;
@ -12509,10 +12508,10 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
"0339 Abort IO XRI x%x, Original iotag x%x, "
"abort tag x%x Cmdjob : x%px Abortjob : x%px "
"retval x%x : IA %d\n",
"retval x%x : IA %d cmd_cmpl %ps\n",
ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ?
cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp,
retval, ia);
retval, ia, abtsiocbp->cmd_cmpl);
if (retval) {
cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
__lpfc_sli_release_iocbq(phba, abtsiocbp);


@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "14.4.0.2"
#define LPFC_DRIVER_VERSION "14.4.0.3"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */


@ -550,4 +550,5 @@ static struct platform_driver mac_scsi_driver __refdata = {
module_platform_driver_probe(mac_scsi_driver, mac_scsi_probe);
MODULE_ALIAS("platform:" DRV_MODULE_NAME);
MODULE_DESCRIPTION("Macintosh NCR5380 SCSI driver");
MODULE_LICENSE("GPL");


@ -0,0 +1,44 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2016-2024 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_TOOL_H
#define MPI30_TOOL_H 1
#define MPI3_DIAG_BUFFER_TYPE_TRACE (0x01)
#define MPI3_DIAG_BUFFER_TYPE_FW (0x02)
#define MPI3_DIAG_BUFFER_ACTION_RELEASE (0x01)
struct mpi3_diag_buffer_post_request {
__le16 host_tag;
u8 ioc_use_only02;
u8 function;
__le16 ioc_use_only04;
u8 ioc_use_only06;
u8 msg_flags;
__le16 change_count;
__le16 reserved0a;
u8 type;
u8 reserved0d;
__le16 reserved0e;
__le64 address;
__le32 length;
__le32 reserved1c;
};
struct mpi3_diag_buffer_manage_request {
__le16 host_tag;
u8 ioc_use_only02;
u8 function;
__le16 ioc_use_only04;
u8 ioc_use_only06;
u8 msg_flags;
__le16 change_count;
__le16 reserved0a;
u8 type;
u8 action;
__le16 reserved0e;
};
#endif
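
A sketch of how a post request built from this header might be filled in.
MPI3_FUNCTION_DIAG_BUFFER_POST is assumed to be the matching function opcode
from the MPI 3.0 transport definitions, and the helper name is illustrative;
the real driver path is mpi3mr_issue_diag_buf_post(), declared in mpi3mr.h
below:

        static void example_fill_diag_post(struct mpi3_diag_buffer_post_request *req,
                                           u16 host_tag, u8 buf_type,
                                           dma_addr_t addr, u32 length)
        {
                memset(req, 0, sizeof(*req)); /* zeroes reserved fields too */
                req->host_tag = cpu_to_le16(host_tag);
                req->function = MPI3_FUNCTION_DIAG_BUFFER_POST; /* assumed name */
                req->type = buf_type;   /* MPI3_DIAG_BUFFER_TYPE_TRACE or _FW */
                req->address = cpu_to_le64(addr);
                req->length = cpu_to_le32(length);
        }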


@ -23,6 +23,7 @@
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
@ -47,6 +48,7 @@
#include "mpi/mpi30_ioc.h"
#include "mpi/mpi30_sas.h"
#include "mpi/mpi30_pci.h"
#include "mpi/mpi30_tool.h"
#include "mpi3mr_debug.h"
/* Global list and lock for storing multiple adapters managed by the driver */
@ -55,8 +57,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
#define MPI3MR_DRIVER_VERSION "8.8.1.0.50"
#define MPI3MR_DRIVER_RELDATE "5-March-2024"
#define MPI3MR_DRIVER_VERSION "8.9.1.0.51"
#define MPI3MR_DRIVER_RELDATE "29-May-2024"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@ -128,6 +130,7 @@ extern atomic64_t event_counter;
#define MPI3MR_PREPARE_FOR_RESET_TIMEOUT 180
#define MPI3MR_RESET_ACK_TIMEOUT 30
#define MPI3MR_MUR_TIMEOUT 120
#define MPI3MR_RESET_TIMEOUT 510
#define MPI3MR_WATCHDOG_INTERVAL 1000 /* in milli seconds */
@ -187,6 +190,30 @@ extern atomic64_t event_counter;
#define MPI3MR_HARD_SECURE_DEVICE 0x08
#define MPI3MR_TAMPERED_DEVICE 0x0C
#define MPI3MR_DEFAULT_HDB_MAX_SZ (4 * 1024 * 1024)
#define MPI3MR_DEFAULT_HDB_DEC_SZ (1 * 1024 * 1024)
#define MPI3MR_DEFAULT_HDB_MIN_SZ (2 * 1024 * 1024)
#define MPI3MR_MAX_NUM_HDB 2
#define MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN 0
#define MPI3MR_HDB_TRIGGER_TYPE_FAULT 1
#define MPI3MR_HDB_TRIGGER_TYPE_ELEMENT 2
#define MPI3MR_HDB_TRIGGER_TYPE_GLOBAL 3
#define MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET 4
#define MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED 5
#define MPI3MR_HDB_REFRESH_TYPE_RESERVED 0
#define MPI3MR_HDB_REFRESH_TYPE_CURRENT 1
#define MPI3MR_HDB_REFRESH_TYPE_DEFAULT 2
#define MPI3MR_HDB_HDB_REFRESH_TYPE_PERSISTENT 3
#define MPI3MR_DEFAULT_HDB_SZ (4 * 1024 * 1024)
#define MPI3MR_MAX_NUM_HDB 2
#define MPI3MR_HDB_QUERY_ELEMENT_TRIGGER_FORMAT_INDEX 0
#define MPI3MR_HDB_QUERY_ELEMENT_TRIGGER_FORMAT_DATA 1
/* SGE Flag definition */
#define MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST \
(MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | MPI3_SGE_FLAGS_DLAS_SYSTEM | \
@ -210,6 +237,8 @@ extern atomic64_t event_counter;
#define MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS 256
#define MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS 2048
#define MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER (0xFFFD)
/**
* struct mpi3mr_nvme_pt_sge - Structure to store SGEs for NVMe
* Encapsulated commands.
@ -289,9 +318,12 @@ enum mpi3mr_reset_reason {
MPI3MR_RESET_FROM_PELABORT_TIMEOUT = 22,
MPI3MR_RESET_FROM_SYSFS = 23,
MPI3MR_RESET_FROM_SYSFS_TIMEOUT = 24,
MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT = 25,
MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT = 26,
MPI3MR_RESET_FROM_FIRMWARE = 27,
MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT = 29,
MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT = 30,
MPI3MR_RESET_FROM_TRIGGER = 31,
};
#define MPI3MR_RESET_REASON_OSTYPE_LINUX 1
@ -327,6 +359,9 @@ struct mpi3mr_ioc_facts {
u32 ioc_capabilities;
struct mpi3mr_compimg_ver fw_ver;
u32 mpi_version;
u32 diag_trace_sz;
u32 diag_fw_sz;
u32 diag_drvr_sz;
u16 max_reqs;
u16 product_id;
u16 op_req_sz;
@ -484,6 +519,7 @@ struct mpi3mr_throttle_group_info {
/* HBA port flags */
#define MPI3MR_HBA_PORT_FLAG_DIRTY 0x01
#define MPI3MR_HBA_PORT_FLAG_NEW 0x02
/* IOCTL data transfer sge*/
#define MPI3MR_NUM_IOCTL_SGE 256
@ -852,6 +888,59 @@ struct mpi3mr_drv_cmd {
struct mpi3mr_drv_cmd *drv_cmd);
};
/**
* union mpi3mr_trigger_data - Trigger data information
* @fault: Fault code
* @global: Global trigger data
* @element: element trigger data
*/
union mpi3mr_trigger_data {
u16 fault;
u64 global;
union mpi3_driver2_trigger_element element;
};
/**
* struct trigger_event_data - store trigger related
* information.
*
* @trace_hdb: Trace diag buffer descriptor reference
* @fw_hdb: FW diag buffer descriptor reference
* @trigger_type: Trigger type
* @trigger_specific_data: Trigger specific data
* @snapdump: Snapdump enable or disable flag
*/
struct trigger_event_data {
struct diag_buffer_desc *trace_hdb;
struct diag_buffer_desc *fw_hdb;
u8 trigger_type;
union mpi3mr_trigger_data trigger_specific_data;
bool snapdump;
};
/**
* struct diag_buffer_desc - memory descriptor structure to
* store virtual, dma addresses, size, buffer status for host
* diagnostic buffers.
*
* @type: Buffer type
* @trigger_data: Trigger data
* @trigger_type: Trigger type
* @status: Buffer status
* @size: Buffer size
* @addr: Virtual address
* @dma_addr: Buffer DMA address
*/
struct diag_buffer_desc {
u8 type;
union mpi3mr_trigger_data trigger_data;
u8 trigger_type;
u8 status;
u32 size;
void *addr;
dma_addr_t dma_addr;
};
/**
* struct dma_memory_desc - memory descriptor structure to store
* virtual address, dma address and size for any generic dma
@ -1054,11 +1143,21 @@ struct scmd_priv {
* @sas_node_lock: Lock to protect SAS node list
* @hba_port_table_list: List of HBA Ports
* @enclosure_list: List of Enclosure objects
* @diag_buffers: Host diagnostic buffers
* @driver_pg2: Driver page 2 pointer
* @reply_trigger_present: Reply trigger present flag
* @event_trigger_present: Event trigger present flag
* @scsisense_trigger_present: Scsi sense trigger present flag
* @ioctl_dma_pool: DMA pool for IOCTL data buffers
* @ioctl_sge: DMA buffer descriptors for IOCTL data
* @ioctl_chain_sge: DMA buffer descriptor for IOCTL chain
* @ioctl_resp_sge: DMA buffer descriptor for Mgmt cmd response
* @ioctl_sges_allocated: Flag for IOCTL SGEs allocated or not
* @trace_release_trigger_active: Trace trigger active flag
* @fw_release_trigger_active: Fw release trigger active flag
* @snapdump_trigger_active: Snapdump trigger active flag
* @pci_err_recovery: PCI error recovery in progress
* @block_on_pci_err: Block IO during PCI error recovery
*/
struct mpi3mr_ioc {
struct list_head list;
@ -1250,6 +1349,17 @@ struct mpi3mr_ioc {
struct dma_memory_desc ioctl_chain_sge;
struct dma_memory_desc ioctl_resp_sge;
bool ioctl_sges_allocated;
bool reply_trigger_present;
bool event_trigger_present;
bool scsisense_trigger_present;
struct diag_buffer_desc diag_buffers[MPI3MR_MAX_NUM_HDB];
struct mpi3_driver_page2 *driver_pg2;
spinlock_t trigger_lock;
bool snapdump_trigger_active;
bool trace_release_trigger_active;
bool fw_release_trigger_active;
bool pci_err_recovery;
bool block_on_pci_err;
};
/**
@ -1406,6 +1516,8 @@ int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz);
int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
struct mpi3_driver_page1 *driver_pg1, u16 pg_sz);
int mpi3mr_cfg_get_driver_pg2(struct mpi3mr_ioc *mrioc,
struct mpi3_driver_page2 *driver_pg2, u16 pg_sz, u8 page_type);
u8 mpi3mr_is_expander_device(u16 device_info);
int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle);
@ -1439,4 +1551,28 @@ void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc);
int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc);
void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
struct mpi3mr_sas_node *sas_expander);
void mpi3mr_alloc_diag_bufs(struct mpi3mr_ioc *mrioc);
int mpi3mr_post_diag_bufs(struct mpi3mr_ioc *mrioc);
int mpi3mr_issue_diag_buf_release(struct mpi3mr_ioc *mrioc,
struct diag_buffer_desc *diag_buffer);
void mpi3mr_release_diag_bufs(struct mpi3mr_ioc *mrioc, u8 skip_rel_action);
void mpi3mr_set_trigger_data_in_hdb(struct diag_buffer_desc *hdb,
u8 type, union mpi3mr_trigger_data *trigger_data, bool force);
int mpi3mr_refresh_trigger(struct mpi3mr_ioc *mrioc, u8 page_type);
struct diag_buffer_desc *mpi3mr_diag_buffer_for_type(struct mpi3mr_ioc *mrioc,
u8 buf_type);
int mpi3mr_issue_diag_buf_post(struct mpi3mr_ioc *mrioc,
struct diag_buffer_desc *diag_buffer);
void mpi3mr_set_trigger_data_in_all_hdb(struct mpi3mr_ioc *mrioc,
u8 type, union mpi3mr_trigger_data *trigger_data, bool force);
void mpi3mr_reply_trigger(struct mpi3mr_ioc *mrioc, u16 iocstatus,
u32 iocloginfo);
void mpi3mr_hdb_trigger_data_event(struct mpi3mr_ioc *mrioc,
struct trigger_event_data *event_data);
void mpi3mr_scsisense_trigger(struct mpi3mr_ioc *mrioc, u8 senseky, u8 asc,
u8 ascq);
void mpi3mr_event_trigger(struct mpi3mr_ioc *mrioc, u8 event);
void mpi3mr_global_trigger(struct mpi3mr_ioc *mrioc, u64 trigger_data);
void mpi3mr_hdbstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
struct mpi3_event_notification_reply *event_reply);
#endif /*MPI3MR_H_INCLUDED*/

File diff suppressed because it is too large.


@ -274,6 +274,9 @@ static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
case MPI3_EVENT_PREPARE_FOR_RESET:
desc = "Prepare For Reset";
break;
case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE:
desc = "Diagnostic Buffer Status Change";
break;
}
if (!desc)
@ -342,13 +345,14 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
{
u16 reply_desc_type, host_tag = 0;
u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
u32 ioc_loginfo = 0;
u32 ioc_loginfo = 0, sense_count = 0;
struct mpi3_status_reply_descriptor *status_desc;
struct mpi3_address_reply_descriptor *addr_desc;
struct mpi3_success_reply_descriptor *success_desc;
struct mpi3_default_reply *def_reply = NULL;
struct mpi3mr_drv_cmd *cmdptr = NULL;
struct mpi3_scsi_io_reply *scsi_reply;
struct scsi_sense_hdr sshdr;
u8 *sense_buf = NULL;
*reply_dma = 0;
@ -363,6 +367,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo);
break;
case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
@ -380,7 +385,15 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
le64_to_cpu(scsi_reply->sense_data_buffer_address));
sense_count = le32_to_cpu(scsi_reply->sense_count);
if (sense_buf) {
scsi_normalize_sense(sense_buf, sense_count,
&sshdr);
mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key,
sshdr.asc, sshdr.ascq);
}
}
mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo);
break;
case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
@ -595,7 +608,7 @@ int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
mrioc = (struct mpi3mr_ioc *)shost->hostdata;
if ((mrioc->reset_in_progress || mrioc->prepare_for_reset ||
mrioc->unrecoverable))
mrioc->unrecoverable || mrioc->pci_err_recovery))
return 0;
num_entries = mpi3mr_process_op_reply_q(mrioc,
@ -938,6 +951,14 @@ static const struct {
},
{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
{
MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT,
"diagnostic buffer post timeout"
},
{
MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT,
"diagnostic buffer release timeout"
},
{ MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
{ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"},
{ MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" },
@ -1672,6 +1693,12 @@ int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
retval = -EAGAIN;
goto out;
}
if (mrioc->pci_err_recovery) {
ioc_err(mrioc, "admin request queue submission failed due to pci error recovery in progress\n");
retval = -EAGAIN;
goto out;
}
areq_entry = (u8 *)mrioc->admin_req_base +
(areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ);
memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
@ -2342,6 +2369,11 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
retval = -EAGAIN;
goto out;
}
if (mrioc->pci_err_recovery) {
ioc_err(mrioc, "operational request queue submission failed due to pci error recovery in progress\n");
retval = -EAGAIN;
goto out;
}
segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
req_entry = (u8 *)segment_base_addr +
@ -2387,6 +2419,7 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
{
u32 ioc_status, host_diagnostic, timeout;
union mpi3mr_trigger_data trigger_data;
if (mrioc->unrecoverable) {
ioc_err(mrioc, "controller is unrecoverable\n");
@ -2398,16 +2431,30 @@ void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
ioc_err(mrioc, "controller is not present\n");
return;
}
memset(&trigger_data, 0, sizeof(trigger_data));
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
mpi3mr_set_trigger_data_in_all_hdb(mrioc,
MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
return;
} else if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
MPI3_SYSIF_FAULT_CODE_MASK);
mpi3mr_set_trigger_data_in_all_hdb(mrioc,
MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
mpi3mr_print_fault_info(mrioc);
return;
}
mpi3mr_set_diagsave(mrioc);
mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
reason_code);
trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
MPI3_SYSIF_FAULT_CODE_MASK);
mpi3mr_set_trigger_data_in_all_hdb(mrioc, MPI3MR_HDB_TRIGGER_TYPE_FAULT,
&trigger_data, 0);
timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
do {
host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
@ -2587,10 +2634,11 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
container_of(work, struct mpi3mr_ioc, watchdog_work.work);
unsigned long flags;
enum mpi3mr_iocstate ioc_state;
u32 fault, host_diagnostic, ioc_status;
u32 host_diagnostic, ioc_status;
union mpi3mr_trigger_data trigger_data;
u16 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;
if (mrioc->reset_in_progress)
if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
return;
if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) {
@ -2618,8 +2666,11 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
return;
}
memset(&trigger_data, 0, sizeof(trigger_data));
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
mpi3mr_set_trigger_data_in_all_hdb(mrioc,
MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0);
return;
}
@ -2629,7 +2680,9 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
if (ioc_state != MRIOC_STATE_FAULT)
goto schedule_work;
fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
trigger_data.fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
mpi3mr_set_trigger_data_in_all_hdb(mrioc,
MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
if (!mrioc->diagsave_timeout) {
@ -2643,7 +2696,7 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
mpi3mr_print_fault_info(mrioc);
mrioc->diagsave_timeout = 0;
switch (fault) {
switch (trigger_data.fault) {
case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
ioc_warn(mrioc,
@ -3003,7 +3056,11 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
mrioc->facts.shutdown_timeout =
le16_to_cpu(facts_data->shutdown_timeout);
mrioc->facts.diag_trace_sz =
le32_to_cpu(facts_data->diag_trace_size);
mrioc->facts.diag_fw_sz =
le32_to_cpu(facts_data->diag_fw_size);
mrioc->facts.diag_drvr_sz = le32_to_cpu(facts_data->diag_driver_size);
mrioc->facts.max_dev_per_tg =
facts_data->max_devices_per_throttle_group;
mrioc->facts.io_throttle_data_length =
@ -3681,6 +3738,94 @@ static const struct {
{ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED, "MultiPath" },
};
/**
* mpi3mr_repost_diag_bufs - repost host diag buffers
* @mrioc: Adapter instance reference
*
* repost firmware and trace diag buffers based on global
* trigger flag from driver page 2
*
* Return: 0 on success, non-zero on failures.
*/
static int mpi3mr_repost_diag_bufs(struct mpi3mr_ioc *mrioc)
{
u64 global_trigger;
union mpi3mr_trigger_data prev_trigger_data;
struct diag_buffer_desc *trace_hdb = NULL;
struct diag_buffer_desc *fw_hdb = NULL;
int retval = 0;
bool trace_repost_needed = false;
bool fw_repost_needed = false;
u8 prev_trigger_type;
retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT);
if (retval)
return -1;
trace_hdb = mpi3mr_diag_buffer_for_type(mrioc,
MPI3_DIAG_BUFFER_TYPE_TRACE);
if (trace_hdb &&
trace_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED &&
trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL &&
trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
trace_repost_needed = true;
fw_hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW);
if (fw_hdb && fw_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED &&
fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL &&
fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
fw_repost_needed = true;
if (trace_repost_needed || fw_repost_needed) {
global_trigger = le64_to_cpu(mrioc->driver_pg2->global_trigger);
if (global_trigger &
MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_TRACE_DISABLED)
trace_repost_needed = false;
if (global_trigger &
MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_FW_DISABLED)
fw_repost_needed = false;
}
if (trace_repost_needed) {
prev_trigger_type = trace_hdb->trigger_type;
memcpy(&prev_trigger_data, &trace_hdb->trigger_data,
sizeof(trace_hdb->trigger_data));
retval = mpi3mr_issue_diag_buf_post(mrioc, trace_hdb);
if (!retval) {
dprint_init(mrioc, "trace diag buffer reposted");
mpi3mr_set_trigger_data_in_hdb(trace_hdb,
MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
} else {
trace_hdb->trigger_type = prev_trigger_type;
memcpy(&trace_hdb->trigger_data, &prev_trigger_data,
sizeof(prev_trigger_data));
ioc_err(mrioc, "trace diag buffer repost failed");
return -1;
}
}
if (fw_repost_needed) {
prev_trigger_type = fw_hdb->trigger_type;
memcpy(&prev_trigger_data, &fw_hdb->trigger_data,
sizeof(fw_hdb->trigger_data));
retval = mpi3mr_issue_diag_buf_post(mrioc, fw_hdb);
if (!retval) {
dprint_init(mrioc, "firmware diag buffer reposted");
mpi3mr_set_trigger_data_in_hdb(fw_hdb,
MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
} else {
fw_hdb->trigger_type = prev_trigger_type;
memcpy(&fw_hdb->trigger_data, &prev_trigger_data,
sizeof(prev_trigger_data));
ioc_err(mrioc, "firmware diag buffer repost failed");
return -1;
}
}
return retval;
}
/**
* mpi3mr_print_ioc_info - Display controller information
* @mrioc: Adapter instance reference
@ -3898,6 +4043,7 @@ static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
mpi3mr_unmask_events(mrioc, MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE);
retval = mpi3mr_issue_event_notification(mrioc);
if (retval)
@ -3989,9 +4135,18 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
}
}
dprint_init(mrioc, "allocating host diag buffers\n");
mpi3mr_alloc_diag_bufs(mrioc);
dprint_init(mrioc, "allocating ioctl dma buffers\n");
mpi3mr_alloc_ioctl_dma_memory(mrioc);
dprint_init(mrioc, "posting host diag buffers\n");
retval = mpi3mr_post_diag_bufs(mrioc);
if (retval)
ioc_warn(mrioc, "failed to post host diag buffers\n");
if (!mrioc->init_cmds.reply) {
retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
if (retval) {
@ -4067,6 +4222,12 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
goto out_failed;
}
retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT);
if (retval) {
ioc_err(mrioc, "failed to refresh triggers\n");
goto out_failed;
}
ioc_info(mrioc, "controller initialization completed successfully\n");
return retval;
out_failed:
@ -4118,7 +4279,7 @@ int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
goto out_failed_noretry;
}
if (is_resume) {
if (is_resume || mrioc->block_on_pci_err) {
dprint_reset(mrioc, "setting up single ISR\n");
retval = mpi3mr_setup_isr(mrioc, 1);
if (retval) {
@ -4144,6 +4305,17 @@ int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
mpi3mr_print_ioc_info(mrioc);
if (is_resume) {
dprint_reset(mrioc, "posting host diag buffers\n");
retval = mpi3mr_post_diag_bufs(mrioc);
if (retval)
ioc_warn(mrioc, "failed to post host diag buffers\n");
} else {
retval = mpi3mr_repost_diag_bufs(mrioc);
if (retval)
ioc_warn(mrioc, "failed to re post host diag buffers\n");
}
dprint_reset(mrioc, "sending ioc_init\n");
retval = mpi3mr_issue_iocinit(mrioc);
if (retval) {
@ -4158,7 +4330,7 @@ int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
goto out_failed;
}
if (is_resume) {
if (is_resume || mrioc->block_on_pci_err) {
dprint_reset(mrioc, "setting up multiple ISR\n");
retval = mpi3mr_setup_isr(mrioc, 0);
if (retval) {
@ -4409,6 +4581,7 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
{
u16 i;
struct mpi3mr_intr_info *intr_info;
struct diag_buffer_desc *diag_buffer;
mpi3mr_free_enclosure_list(mrioc);
mpi3mr_free_ioctl_dma_memory(mrioc);
@ -4543,6 +4716,19 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
mrioc->pel_seqnum_virt = NULL;
}
for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
diag_buffer = &mrioc->diag_buffers[i];
if (diag_buffer->addr) {
dma_free_coherent(&mrioc->pdev->dev,
diag_buffer->size, diag_buffer->addr,
diag_buffer->dma_addr);
diag_buffer->addr = NULL;
diag_buffer->size = 0;
diag_buffer->type = 0;
diag_buffer->status = 0;
}
}
kfree(mrioc->throttle_groups);
mrioc->throttle_groups = NULL;
@ -4632,7 +4818,8 @@ void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
ioc_state = mpi3mr_get_iocstate(mrioc);
if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) &&
if (!mrioc->unrecoverable && !mrioc->reset_in_progress &&
!mrioc->pci_err_recovery &&
(ioc_state == MRIOC_STATE_READY)) {
if (mpi3mr_issue_and_process_mur(mrioc,
MPI3MR_RESET_FROM_CTLR_CLEANUP))
@ -4980,6 +5167,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
int retval = 0, i;
unsigned long flags;
u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
union mpi3mr_trigger_data trigger_data;
/* Block the reset handler until diag save in progress*/
dprint_reset(mrioc,
@ -5012,10 +5200,16 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
mrioc->reset_in_progress = 1;
mrioc->stop_bsgs = 1;
mrioc->prev_reset_result = -1;
memset(&trigger_data, 0, sizeof(trigger_data));
if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
(reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
(reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
mpi3mr_set_trigger_data_in_all_hdb(mrioc,
MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0);
dprint_reset(mrioc,
"soft_reset_handler: releasing host diagnostic buffers\n");
mpi3mr_release_diag_bufs(mrioc, 0);
for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
mrioc->event_masks[i] = -1;
@ -5032,6 +5226,8 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
retval = mpi3mr_issue_reset(mrioc,
MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
if (!retval) {
trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
MPI3_SYSIF_FAULT_CODE_MASK);
do {
host_diagnostic =
readl(&mrioc->sysif_regs->host_diagnostic);
@ -5040,6 +5236,8 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
break;
msleep(100);
} while (--timeout);
mpi3mr_set_trigger_data_in_all_hdb(mrioc,
MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
}
}
@ -5075,6 +5273,15 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
mrioc->prepare_for_reset_timeout_counter = 0;
}
mpi3mr_memset_buffers(mrioc);
mpi3mr_release_diag_bufs(mrioc, 1);
mrioc->fw_release_trigger_active = false;
mrioc->trace_release_trigger_active = false;
mrioc->snapdump_trigger_active = false;
mpi3mr_set_trigger_data_in_all_hdb(mrioc,
MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0);
dprint_reset(mrioc,
"soft_reset_handler: reinitializing the controller\n");
retval = mpi3mr_reinit_ioc(mrioc, 0);
if (retval) {
pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
@ -5954,3 +6161,64 @@ int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
out_failed:
return -1;
}
/**
* mpi3mr_cfg_get_driver_pg2 - Read current driver page2
* @mrioc: Adapter instance reference
* @driver_pg2: Pointer to return driver page 2
* @pg_sz: Size of the memory allocated to the page pointer
* @page_action: Page action
*
* This is handler for config page read for the driver page2.
* This routine checks ioc_status to decide whether the page
* read is success or not.
*
* Return: 0 on success, non-zero on failure.
*/
int mpi3mr_cfg_get_driver_pg2(struct mpi3mr_ioc *mrioc,
struct mpi3_driver_page2 *driver_pg2, u16 pg_sz, u8 page_action)
{
struct mpi3_config_page_header cfg_hdr;
struct mpi3_config_request cfg_req;
u16 ioc_status = 0;
memset(driver_pg2, 0, pg_sz);
memset(&cfg_hdr, 0, sizeof(cfg_hdr));
memset(&cfg_req, 0, sizeof(cfg_req));
cfg_req.function = MPI3_FUNCTION_CONFIG;
cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
cfg_req.page_number = 2;
cfg_req.page_address = 0;
cfg_req.page_version = MPI3_DRIVER2_PAGEVERSION;
if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
ioc_err(mrioc, "driver page2 header read failed\n");
goto out_failed;
}
if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
ioc_err(mrioc, "driver page2 header read failed with\n"
"ioc_status(0x%04x)\n",
ioc_status);
goto out_failed;
}
cfg_req.action = page_action;
if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg2, pg_sz)) {
ioc_err(mrioc, "driver page2 read failed\n");
goto out_failed;
}
if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
ioc_err(mrioc, "driver page2 read failed with\n"
"ioc_status(0x%04x)\n",
ioc_status);
goto out_failed;
}
return 0;
out_failed:
return -1;
}
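
A hypothetical caller of the new page-2 reader (allocation sizing and names
are illustrative; MPI3_CONFIG_ACTION_READ_CURRENT is the same page action
used by mpi3mr_refresh_trigger() earlier in this series):

        struct mpi3_driver_page2 *pg2;
        u16 pg_sz = sizeof(*pg2);       /* extend for trigger elements */

        pg2 = kzalloc(pg_sz, GFP_KERNEL);
        if (pg2 && !mpi3mr_cfg_get_driver_pg2(mrioc, pg2, pg_sz,
                                              MPI3_CONFIG_ACTION_READ_CURRENT)) {
                /* pg2->global_trigger and friends are now valid */
        }
        kfree(pg2);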


@ -241,6 +241,40 @@ static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}
/**
* mpi3mr_hdb_trigger_data_event - Add hdb trigger data event to
* the list
* @mrioc: Adapter instance reference
* @event_data: Event data
*
* Add the given hdb trigger data event to the firmware event
* list.
*
* Return: Nothing.
*/
void mpi3mr_hdb_trigger_data_event(struct mpi3mr_ioc *mrioc,
struct trigger_event_data *event_data)
{
struct mpi3mr_fwevt *fwevt;
u16 sz = sizeof(*event_data);
fwevt = mpi3mr_alloc_fwevt(sz);
if (!fwevt) {
ioc_warn(mrioc, "failed to queue hdb trigger data event\n");
return;
}
fwevt->mrioc = mrioc;
fwevt->event_id = MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER;
fwevt->send_ack = 0;
fwevt->process_evt = 1;
fwevt->evt_ctx = 0;
fwevt->event_data_size = sz;
memcpy(fwevt->event_data, event_data, sz);
mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}
/**
* mpi3mr_fwevt_del_from_list - Delete firmware event from list
* @mrioc: Adapter instance reference
@ -898,6 +932,8 @@ void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
}
} else
mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev);
mpi3mr_global_trigger(mrioc,
MPI3_DRIVER2_GLOBALTRIGGER_DEVICE_REMOVAL_ENABLED);
ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
__func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
@ -920,7 +956,7 @@ static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
int retval = 0;
struct mpi3mr_tgt_dev *tgtdev;
if (mrioc->reset_in_progress)
if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
return -1;
tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
@ -1433,6 +1469,62 @@ struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
return r;
}
/**
* mpi3mr_process_trigger_data_event_bh - Process trigger event
* data
* @mrioc: Adapter instance reference
* @event_data: Event data
*
* This function releases diag buffers or issues diag fault
* based on trigger conditions
*
* Return: Nothing
*/
static void mpi3mr_process_trigger_data_event_bh(struct mpi3mr_ioc *mrioc,
struct trigger_event_data *event_data)
{
struct diag_buffer_desc *trace_hdb = event_data->trace_hdb;
struct diag_buffer_desc *fw_hdb = event_data->fw_hdb;
unsigned long flags;
int retval = 0;
u8 trigger_type = event_data->trigger_type;
union mpi3mr_trigger_data *trigger_data =
&event_data->trigger_specific_data;
if (event_data->snapdump) {
if (trace_hdb)
mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type,
trigger_data, 1);
if (fw_hdb)
mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type,
trigger_data, 1);
mpi3mr_soft_reset_handler(mrioc,
MPI3MR_RESET_FROM_TRIGGER, 1);
return;
}
if (trace_hdb) {
retval = mpi3mr_issue_diag_buf_release(mrioc, trace_hdb);
if (!retval) {
mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type,
trigger_data, 1);
}
spin_lock_irqsave(&mrioc->trigger_lock, flags);
mrioc->trace_release_trigger_active = false;
spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
}
if (fw_hdb) {
retval = mpi3mr_issue_diag_buf_release(mrioc, fw_hdb);
if (!retval) {
mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type,
trigger_data, 1);
}
spin_lock_irqsave(&mrioc->trigger_lock, flags);
mrioc->fw_release_trigger_active = false;
spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
}
}
/**
* mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event
* @mrioc: Adapter instance reference
@ -1915,6 +2007,7 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
struct mpi3_device_page0 *dev_pg0 = NULL;
u16 perst_id, handle, dev_info;
struct mpi3_device0_sas_sata_format *sasinf = NULL;
unsigned int timeout;
mpi3mr_fwevt_del_from_list(mrioc, fwevt);
mrioc->current_event = fwevt;
@ -2005,8 +2098,18 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
}
case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH:
{
while (mrioc->device_refresh_on)
timeout = MPI3MR_RESET_TIMEOUT * 2;
while ((mrioc->device_refresh_on || mrioc->block_on_pci_err) &&
!mrioc->unrecoverable && !mrioc->pci_err_recovery) {
msleep(500);
if (!timeout--) {
mrioc->unrecoverable = 1;
break;
}
}
if (mrioc->unrecoverable || mrioc->pci_err_recovery)
break;
dprint_event_bh(mrioc,
"scan for non responding and newly added devices after soft reset started\n");
@ -2019,6 +2122,12 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
"scan for non responding and newly added devices after soft reset completed\n");
break;
}
case MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER:
{
mpi3mr_process_trigger_data_event_bh(mrioc,
(struct trigger_event_data *)fwevt->event_data);
break;
}
default:
break;
}
@ -2857,6 +2966,7 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
ack_req = 1;
evt_type = event_reply->event;
mpi3mr_event_trigger(mrioc, event_reply->event);
switch (evt_type) {
case MPI3_EVENT_DEVICE_ADDED:
@ -2895,6 +3005,11 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
ack_req = 0;
break;
}
case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE:
{
mpi3mr_hdbstatuschg_evt_th(mrioc, event_reply);
break;
}
case MPI3_EVENT_DEVICE_INFO_CHANGED:
case MPI3_EVENT_LOG_DATA:
case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
@ -3158,6 +3273,7 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo);
break;
case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
@ -3186,6 +3302,12 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
panic("%s: Ran out of sense buffers\n", mrioc->name);
if (sense_buf) {
scsi_normalize_sense(sense_buf, sense_count, &sshdr);
mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key,
sshdr.asc, sshdr.ascq);
}
mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo);
break;
case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
@ -3685,6 +3807,13 @@ int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
mutex_unlock(&drv_cmd->mutex);
goto out;
}
if (mrioc->block_on_pci_err) {
retval = -1;
dprint_tm(mrioc, "sending task management failed due to\n"
"pci error recovery in progress\n");
mutex_unlock(&drv_cmd->mutex);
goto out;
}
drv_cmd->state = MPI3MR_CMD_PENDING;
drv_cmd->is_waiting = 1;
@ -3811,6 +3940,8 @@ int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
default:
break;
}
mpi3mr_global_trigger(mrioc,
MPI3_DRIVER2_GLOBALTRIGGER_TASK_MANAGEMENT_ENABLED);
out_unlock:
drv_cmd->state = MPI3MR_CMD_NOTUSED;
@ -4068,6 +4199,7 @@ static int mpi3mr_eh_bus_reset(struct scsi_cmnd *scmd)
struct mpi3mr_sdev_priv_data *sdev_priv_data;
u8 dev_type = MPI3_DEVICE_DEVFORM_VD;
int retval = FAILED;
unsigned int timeout = MPI3MR_RESET_TIMEOUT;
sdev_priv_data = scmd->device->hostdata;
if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
@ -4078,12 +4210,24 @@ static int mpi3mr_eh_bus_reset(struct scsi_cmnd *scmd)
if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
mpi3mr_wait_for_host_io(mrioc,
MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
if (!mpi3mr_get_fw_pending_ios(mrioc))
if (!mpi3mr_get_fw_pending_ios(mrioc)) {
while (mrioc->reset_in_progress ||
mrioc->prepare_for_reset ||
mrioc->block_on_pci_err) {
ssleep(1);
if (!timeout--) {
retval = FAILED;
goto out;
}
}
retval = SUCCESS;
goto out;
}
}
if (retval == FAILED)
mpi3mr_print_pending_host_io(mrioc);
out:
sdev_printk(KERN_INFO, scmd->device,
"Bus reset is %s for scmd(%p)\n",
((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
@ -4766,7 +4910,8 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
goto out;
}
if (mrioc->reset_in_progress) {
if (mrioc->reset_in_progress || mrioc->prepare_for_reset
|| mrioc->block_on_pci_err) {
retval = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
@ -5249,7 +5394,14 @@ static void mpi3mr_remove(struct pci_dev *pdev)
while (mrioc->reset_in_progress || mrioc->is_driver_loading)
ssleep(1);
if (!pci_device_is_present(mrioc->pdev)) {
if (mrioc->block_on_pci_err) {
mrioc->block_on_pci_err = false;
scsi_unblock_requests(shost);
mrioc->unrecoverable = 1;
}
if (!pci_device_is_present(mrioc->pdev) ||
mrioc->pci_err_recovery) {
mrioc->unrecoverable = 1;
mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
}
@ -5433,6 +5585,197 @@ mpi3mr_resume(struct device *dev)
return 0;
}
/**
* mpi3mr_pcierr_error_detected - PCI error detected callback
* @pdev: PCI device instance
* @state: channel state
*
* This function is called by the PCI error recovery driver and
* based on the state passed the driver decides what actions to
* be recommended back to PCI driver.
*
* For all of the states if there is no valid mrioc or scsi host
* references in the PCI device then this function will return
* the result as disconnect.
*
* For normal state, this function will return the result as can
* recover.
*
* For frozen state, this function will block for any pending
* controller initialization or re-initialization to complete,
* stop any new interactions with the controller and return
* status as reset required.
*
* For permanent failure state, this function will mark the
* controller as unrecoverable and return status as disconnect.
*
* Returns: PCI_ERS_RESULT_NEED_RESET or CAN_RECOVER or
* DISCONNECT based on the controller state.
*/
static pci_ers_result_t
mpi3mr_pcierr_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
struct Scsi_Host *shost;
struct mpi3mr_ioc *mrioc;
unsigned int timeout = MPI3MR_RESET_TIMEOUT;
dev_info(&pdev->dev, "%s: callback invoked state(%d)\n", __func__,
state);
shost = pci_get_drvdata(pdev);
mrioc = shost_priv(shost);
switch (state) {
case pci_channel_io_normal:
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
mrioc->pci_err_recovery = true;
mrioc->block_on_pci_err = true;
do {
if (mrioc->reset_in_progress || mrioc->is_driver_loading)
ssleep(1);
else
break;
} while (--timeout);
if (!timeout) {
mrioc->pci_err_recovery = true;
mrioc->block_on_pci_err = true;
mrioc->unrecoverable = 1;
mpi3mr_stop_watchdog(mrioc);
mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
return PCI_ERS_RESULT_DISCONNECT;
}
scsi_block_requests(mrioc->shost);
mpi3mr_stop_watchdog(mrioc);
mpi3mr_cleanup_resources(mrioc);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
mrioc->pci_err_recovery = true;
mrioc->block_on_pci_err = true;
mrioc->unrecoverable = 1;
mpi3mr_stop_watchdog(mrioc);
mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
return PCI_ERS_RESULT_DISCONNECT;
default:
return PCI_ERS_RESULT_DISCONNECT;
}
}
/**
* mpi3mr_pcierr_slot_reset - Post slot reset callback
* @pdev: PCI device instance
*
* This function is called by the PCI error recovery driver
* after a slot or link reset issued by it for the recovery, the
* driver is expected to bring back the controller and
* initialize it.
*
* This function restores PCI state and reinitializes controller
* resources and the controller, this blocks for any pending
* reset to complete.
*
* Returns: PCI_ERS_RESULT_DISCONNECT on failure or
* PCI_ERS_RESULT_RECOVERED
*/
static pci_ers_result_t mpi3mr_pcierr_slot_reset(struct pci_dev *pdev)
{
struct Scsi_Host *shost;
struct mpi3mr_ioc *mrioc;
unsigned int timeout = MPI3MR_RESET_TIMEOUT;
dev_info(&pdev->dev, "%s: callback invoked\n", __func__);
shost = pci_get_drvdata(pdev);
mrioc = shost_priv(shost);
do {
if (mrioc->reset_in_progress)
ssleep(1);
else
break;
} while (--timeout);
if (!timeout)
goto out_failed;
pci_restore_state(pdev);
if (mpi3mr_setup_resources(mrioc)) {
ioc_err(mrioc, "setup resources failed\n");
goto out_failed;
}
mrioc->unrecoverable = 0;
mrioc->pci_err_recovery = false;
if (mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0))
goto out_failed;
return PCI_ERS_RESULT_RECOVERED;
out_failed:
mrioc->unrecoverable = 1;
mrioc->block_on_pci_err = false;
scsi_unblock_requests(shost);
mpi3mr_start_watchdog(mrioc);
return PCI_ERS_RESULT_DISCONNECT;
}
/**
* mpi3mr_pcierr_resume - PCI error recovery resume
* callback
* @pdev: PCI device instance
*
* This function enables all I/O and IOCTLs post reset issued as
* part of the PCI error recovery
*
* Return: Nothing.
*/
static void mpi3mr_pcierr_resume(struct pci_dev *pdev)
{
struct Scsi_Host *shost;
struct mpi3mr_ioc *mrioc;
dev_info(&pdev->dev, "%s: callback invoked\n", __func__);
shost = pci_get_drvdata(pdev);
mrioc = shost_priv(shost);
if (mrioc->block_on_pci_err) {
mrioc->block_on_pci_err = false;
scsi_unblock_requests(shost);
mpi3mr_start_watchdog(mrioc);
}
}
/**
* mpi3mr_pcierr_mmio_enabled - PCI error recovery callback
* @pdev: PCI device instance
*
* This is called only if mpi3mr_pcierr_error_detected returns
* PCI_ERS_RESULT_CAN_RECOVER.
*
* Return: PCI_ERS_RESULT_DISCONNECT when the controller is
* unrecoverable or when the shost/mrioc reference cannot be
* found, else return PCI_ERS_RESULT_RECOVERED
*/
static pci_ers_result_t mpi3mr_pcierr_mmio_enabled(struct pci_dev *pdev)
{
struct Scsi_Host *shost;
struct mpi3mr_ioc *mrioc;
dev_info(&pdev->dev, "%s: callback invoked\n", __func__);
shost = pci_get_drvdata(pdev);
mrioc = shost_priv(shost);
if (mrioc->unrecoverable)
return PCI_ERS_RESULT_DISCONNECT;
return PCI_ERS_RESULT_RECOVERED;
}
static const struct pci_device_id mpi3mr_pci_id_table[] = {
{
PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
@ -5450,6 +5793,13 @@ static const struct pci_device_id mpi3mr_pci_id_table[] = {
};
MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);
static struct pci_error_handlers mpi3mr_err_handler = {
.error_detected = mpi3mr_pcierr_error_detected,
.mmio_enabled = mpi3mr_pcierr_mmio_enabled,
.slot_reset = mpi3mr_pcierr_slot_reset,
.resume = mpi3mr_pcierr_resume,
};
static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume);
static struct pci_driver mpi3mr_pci_driver = {
@ -5458,6 +5808,7 @@ static struct pci_driver mpi3mr_pci_driver = {
.probe = mpi3mr_probe,
.remove = mpi3mr_remove,
.shutdown = mpi3mr_shutdown,
.err_handler = &mpi3mr_err_handler,
.driver.pm = &mpi3mr_pm_ops,
};
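
For readers following the new hooks: the PCI core drives them in a fixed order. It calls .error_detected first, then .mmio_enabled (only after PCI_ERS_RESULT_CAN_RECOVER) or .slot_reset (after PCI_ERS_RESULT_NEED_RESET), and finally .resume once recovery succeeds. A minimal sketch of the same wiring, using hypothetical demo_* names and placeholder bodies rather than mpi3mr's logic:

#include <linux/pci.h>

/* Placeholder handlers for illustration only; not mpi3mr code. */
static pci_ers_result_t demo_error_detected(struct pci_dev *pdev,
		pci_channel_state_t state)
{
	/* Map the channel state to a recovery action. */
	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		return PCI_ERS_RESULT_NEED_RESET;
	default: /* pci_channel_io_perm_failure */
		return PCI_ERS_RESULT_DISCONNECT;
	}
}

static pci_ers_result_t demo_slot_reset(struct pci_dev *pdev)
{
	/* Re-apply the saved config space after the slot/link reset. */
	pci_restore_state(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void demo_resume(struct pci_dev *pdev)
{
	/* Unblock I/O that was held off during recovery. */
}

static const struct pci_error_handlers demo_err_handler = {
	.error_detected = demo_error_detected,
	.slot_reset = demo_slot_reset,
	.resume = demo_resume,
};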


@ -151,6 +151,11 @@ static int mpi3mr_report_manufacture(struct mpi3mr_ioc *mrioc,
return -EFAULT;
}
if (mrioc->pci_err_recovery) {
ioc_err(mrioc, "%s: pci error recovery in progress!\n", __func__);
return -EFAULT;
}
data_out_sz = sizeof(struct rep_manu_request);
data_in_sz = sizeof(struct rep_manu_reply);
data_out = dma_alloc_coherent(&mrioc->pdev->dev,
@ -790,6 +795,12 @@ static int mpi3mr_set_identify(struct mpi3mr_ioc *mrioc, u16 handle,
return -EFAULT;
}
if (mrioc->pci_err_recovery) {
ioc_err(mrioc, "%s: pci error recovery in progress!\n",
__func__);
return -EFAULT;
}
if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &device_pg0,
sizeof(device_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE, handle))) {
ioc_err(mrioc, "%s: device page0 read failed\n", __func__);
@ -1007,6 +1018,9 @@ mpi3mr_alloc_hba_port(struct mpi3mr_ioc *mrioc, u16 port_id)
hba_port->port_id = port_id;
ioc_info(mrioc, "hba_port entry: %p, port: %d is added to hba_port list\n",
hba_port, hba_port->port_id);
if (mrioc->reset_in_progress ||
mrioc->pci_err_recovery)
hba_port->flags = MPI3MR_HBA_PORT_FLAG_NEW;
list_add_tail(&hba_port->list, &mrioc->hba_port_table_list);
return hba_port;
}
@ -1055,7 +1069,7 @@ void mpi3mr_update_links(struct mpi3mr_ioc *mrioc,
struct mpi3mr_sas_node *mr_sas_node;
struct mpi3mr_sas_phy *mr_sas_phy;
if (mrioc->reset_in_progress)
if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
return;
spin_lock_irqsave(&mrioc->sas_node_lock, flags);
@ -1353,7 +1367,7 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc,
mpi3mr_sas_port_sanity_check(mrioc, mr_sas_node,
mr_sas_port->remote_identify.sas_address, hba_port);
if (mr_sas_node->num_phys > sizeof(mr_sas_port->phy_mask) * 8)
if (mr_sas_node->num_phys >= sizeof(mr_sas_port->phy_mask) * 8)
ioc_info(mrioc, "max port count %u could be too high\n",
mr_sas_node->num_phys);
@ -1363,7 +1377,7 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc,
(mr_sas_node->phy[i].hba_port != hba_port))
continue;
if (i > sizeof(mr_sas_port->phy_mask) * 8) {
if (i >= sizeof(mr_sas_port->phy_mask) * 8) {
ioc_warn(mrioc, "skipping port %u, max allowed value is %zu\n",
i, sizeof(mr_sas_port->phy_mask) * 8);
goto out_fail;
@ -1978,7 +1992,7 @@ int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle)
if (!handle)
return -1;
if (mrioc->reset_in_progress)
if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
return -1;
if ((mpi3mr_cfg_get_sas_exp_pg0(mrioc, &ioc_status, &expander_pg0,
@ -2184,7 +2198,7 @@ void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
/* remove sibling ports attached to this expander */
list_for_each_entry_safe(mr_sas_port, next,
&sas_expander->sas_port_list, port_list) {
if (mrioc->reset_in_progress)
if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
return;
if (mr_sas_port->remote_identify.device_type ==
SAS_END_DEVICE)
@ -2234,7 +2248,7 @@ void mpi3mr_expander_remove(struct mpi3mr_ioc *mrioc, u64 sas_address,
struct mpi3mr_sas_node *sas_expander;
unsigned long flags;
if (mrioc->reset_in_progress)
if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
return;
if (!hba_port)
@ -2545,6 +2559,11 @@ static int mpi3mr_get_expander_phy_error_log(struct mpi3mr_ioc *mrioc,
return -EFAULT;
}
if (mrioc->pci_err_recovery) {
ioc_err(mrioc, "%s: pci error recovery in progress!\n", __func__);
return -EFAULT;
}
data_out_sz = sizeof(struct phy_error_log_request);
data_in_sz = sizeof(struct phy_error_log_reply);
sz = data_out_sz + data_in_sz;
@ -2804,6 +2823,12 @@ mpi3mr_expander_phy_control(struct mpi3mr_ioc *mrioc,
return -EFAULT;
}
if (mrioc->pci_err_recovery) {
ioc_err(mrioc, "%s: pci error recovery in progress!\n",
__func__);
return -EFAULT;
}
data_out_sz = sizeof(struct phy_control_request);
data_in_sz = sizeof(struct phy_control_reply);
sz = data_out_sz + data_in_sz;
@ -3227,6 +3252,12 @@ mpi3mr_transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
goto out;
}
if (mrioc->pci_err_recovery) {
ioc_err(mrioc, "%s: pci error recovery in progress!\n", __func__);
rc = -EFAULT;
goto out;
}
rc = mpi3mr_map_smp_buffer(&mrioc->pdev->dev, &job->request_payload,
&dma_addr_out, &dma_len_out, &addr_out);
if (rc)


@ -75,6 +75,7 @@ module_param(synchronous, int, 0);
module_param(reset_delay, int, 0);
module_param(ext_trans, int, 0);
MODULE_DESCRIPTION("Adaptec AHA152X-compatible PCMCIA SCSI card driver");
MODULE_LICENSE("Dual MPL/GPL");
/*====================================================================*/


@ -166,7 +166,6 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
unsigned long flags;
pm8001_ha = sas_phy->ha->lldd_ha;
phy = &pm8001_ha->phy[phy_id];
pm8001_ha->phy[phy_id].enable_completion = &completion;
if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
/*
@ -190,6 +189,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
rates->maximum_linkrate;
}
if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
pm8001_ha->phy[phy_id].enable_completion = &completion;
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
wait_for_completion(&completion);
}
@ -198,6 +198,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
break;
case PHY_FUNC_HARD_RESET:
if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
pm8001_ha->phy[phy_id].enable_completion = &completion;
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
wait_for_completion(&completion);
}
@ -206,6 +207,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
break;
case PHY_FUNC_LINK_RESET:
if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
pm8001_ha->phy[phy_id].enable_completion = &completion;
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
wait_for_completion(&completion);
}


@ -568,13 +568,13 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version =
pm8001_mr32(address, MAIN_MPI_INACTIVE_FW_VERSION);
pm8001_dbg(pm8001_ha, DEV,
pm8001_dbg(pm8001_ha, INIT,
"Main cfg table: sign:%x interface rev:%x fw_rev:%x\n",
pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev);
pm8001_dbg(pm8001_ha, DEV,
pm8001_dbg(pm8001_ha, INIT,
"table offset: gst:%x iq:%x oq:%x int vec:%x phy attr:%x\n",
pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset,
@ -582,7 +582,7 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset);
pm8001_dbg(pm8001_ha, DEV,
pm8001_dbg(pm8001_ha, INIT,
"Main cfg table; ila rev:%x Inactive fw rev:%x\n",
pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version);


@ -1155,4 +1155,5 @@ static struct parport_driver ppa_driver = {
};
module_parport_driver(ppa_driver);
MODULE_DESCRIPTION("IOMEGA PPA3 parallel port SCSI host adapter driver");
MODULE_LICENSE("GPL");


@ -324,7 +324,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
"request_sg_cnt=%x reply_sg_cnt=%x.\n",
bsg_job->request_payload.sg_cnt,
bsg_job->reply_payload.sg_cnt);
rval = -EPERM;
rval = -ENOBUFS;
goto done;
}
@ -3059,16 +3059,60 @@ qla24xx_bsg_request(struct bsg_job *bsg_job)
return ret;
}
static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job)
{
bool found = false;
struct fc_bsg_reply *bsg_reply = bsg_job->reply;
scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
struct qla_hw_data *ha = vha->hw;
srb_t *sp = NULL;
int cnt;
unsigned long flags;
struct req_que *req;
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
req = qpair->req;
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp &&
(sp->type == SRB_CT_CMD ||
sp->type == SRB_ELS_CMD_HST ||
sp->type == SRB_ELS_CMD_HST_NOLOGIN) &&
sp->u.bsg_job == bsg_job) {
req->outstanding_cmds[cnt] = NULL;
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) {
ql_log(ql_log_warn, vha, 0x7089,
"mbx abort_command failed.\n");
bsg_reply->result = -EIO;
} else {
ql_dbg(ql_dbg_user, vha, 0x708a,
"mbx abort_command success.\n");
bsg_reply->result = 0;
}
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
found = true;
goto done;
}
}
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
done:
return found;
}
int
qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
struct fc_bsg_reply *bsg_reply = bsg_job->reply;
scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
struct qla_hw_data *ha = vha->hw;
srb_t *sp;
int cnt, que;
unsigned long flags;
struct req_que *req;
int i;
struct qla_qpair *qpair;
ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n",
__func__, bsg_job);
@ -3079,48 +3123,22 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
qla_pci_set_eeh_busy(vha);
}
if (qla_bsg_found(ha->base_qpair, bsg_job))
goto done;
/* find the bsg job from the active list of commands */
spin_lock_irqsave(&ha->hardware_lock, flags);
for (que = 0; que < ha->max_req_queues; que++) {
req = ha->req_q_map[que];
if (!req)
for (i = 0; i < ha->max_qpairs; i++) {
qpair = vha->hw->queue_pair_map[i];
if (!qpair)
continue;
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp &&
(sp->type == SRB_CT_CMD ||
sp->type == SRB_ELS_CMD_HST ||
sp->type == SRB_ELS_CMD_HST_NOLOGIN ||
sp->type == SRB_FXIOCB_BCMD) &&
sp->u.bsg_job == bsg_job) {
req->outstanding_cmds[cnt] = NULL;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) {
ql_log(ql_log_warn, vha, 0x7089,
"mbx abort_command failed.\n");
bsg_reply->result = -EIO;
} else {
ql_dbg(ql_dbg_user, vha, 0x708a,
"mbx abort_command success.\n");
bsg_reply->result = 0;
}
spin_lock_irqsave(&ha->hardware_lock, flags);
goto done;
}
}
if (qla_bsg_found(qpair, bsg_job))
goto done;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
bsg_reply->result = -ENXIO;
return 0;
done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
return 0;
}
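
Design note on the hunk above: the old implementation scanned every request queue under the global ha->hardware_lock; the rework delegates the search to qla_bsg_found(), which takes only the owning queue pair's qp_lock, and the timeout handler now checks the base qpair first and then walks hw->queue_pair_map. An abort therefore serializes only against the queue pair that owns the command.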


@ -3309,9 +3309,20 @@ struct fab_scan_rp {
u8 node_name[8];
};
enum scan_step {
FAB_SCAN_START,
FAB_SCAN_GPNFT_FCP,
FAB_SCAN_GNNFT_FCP,
FAB_SCAN_GPNFT_NVME,
FAB_SCAN_GNNFT_NVME,
};
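
The steps above form a linear pipeline: each completed CT command posts a QLA_EVT_SCAN_CMD work item to advance to the next step, or QLA_EVT_SCAN_FINISH at the end. Roughly:

FAB_SCAN_START
-> FAB_SCAN_GPNFT_FCP (GPN_FT for FC4_TYPE_FCP_SCSI)
-> FAB_SCAN_GNNFT_FCP (GNN_FT for FC4_TYPE_FCP_SCSI)
-> FAB_SCAN_GPNFT_NVME (GPN_FT for FC4_TYPE_NVME, skipped if NVMe is disabled)
-> FAB_SCAN_GNNFT_NVME (GNN_FT for FC4_TYPE_NVME)
-> qla_fab_scan_finish()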
struct fab_scan {
struct fab_scan_rp *l;
u32 size;
u32 rscn_gen_start;
u32 rscn_gen_end;
enum scan_step step;
u16 scan_retry;
#define MAX_SCAN_RETRIES 5
enum scan_flags_t scan_flags;
@ -3537,9 +3548,8 @@ enum qla_work_type {
QLA_EVT_RELOGIN,
QLA_EVT_ASYNC_PRLO,
QLA_EVT_ASYNC_PRLO_DONE,
QLA_EVT_GPNFT,
QLA_EVT_GPNFT_DONE,
QLA_EVT_GNNFT_DONE,
QLA_EVT_SCAN_CMD,
QLA_EVT_SCAN_FINISH,
QLA_EVT_GFPNID,
QLA_EVT_SP_RETRY,
QLA_EVT_IIDMA,
@ -5030,6 +5040,7 @@ typedef struct scsi_qla_host {
/* Counter to detect races between ELS and RSCN events */
atomic_t generation_tick;
atomic_t rscn_gen;
/* Time when global fcport update has been scheduled */
int total_fcport_update_gen;
/* List of pending LOGOs, protected by tgt_mutex */


@ -728,9 +728,9 @@ int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *);
int qla2x00_mgmt_svr_login(scsi_qla_host_t *);
int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool);
int qla24xx_async_gpnft(scsi_qla_host_t *, u8, srb_t *);
void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *);
void qla24xx_async_gnnft_done(scsi_qla_host_t *, srb_t *);
int qla_fab_async_scan(scsi_qla_host_t *, srb_t *);
void qla_fab_scan_start(struct scsi_qla_host *);
void qla_fab_scan_finish(scsi_qla_host_t *, srb_t *);
int qla24xx_post_gfpnid_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_gfpnid(scsi_qla_host_t *, fc_port_t *);
void qla24xx_handle_gfpnid_event(scsi_qla_host_t *, struct event_arg *);


@ -1710,7 +1710,7 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
alen = scnprintf(
eiter->a.orom_version, sizeof(eiter->a.orom_version),
"%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
"%d.%02d", ha->efi_revision[1], ha->efi_revision[0]);
alen += FDMI_ATTR_ALIGNMENT(alen);
alen += FDMI_ATTR_TYPELEN(eiter);
eiter->len = cpu_to_be16(alen);
@ -3168,7 +3168,30 @@ static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
return rc;
}
void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
static bool qla_ok_to_clear_rscn(scsi_qla_host_t *vha, fc_port_t *fcport)
{
u32 rscn_gen;
rscn_gen = atomic_read(&vha->rscn_gen);
ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2017,
"%s %d %8phC rscn_gen %x start %x end %x current %x\n",
__func__, __LINE__, fcport->port_name, fcport->rscn_gen,
vha->scan.rscn_gen_start, vha->scan.rscn_gen_end, rscn_gen);
if (val_is_in_range(fcport->rscn_gen, vha->scan.rscn_gen_start,
vha->scan.rscn_gen_end))
/* rscn came in before fabric scan */
return true;
if (val_is_in_range(fcport->rscn_gen, vha->scan.rscn_gen_end, rscn_gen))
/* rscn came in after fabric scan */
return false;
/* rare: fcport's scan_needed + rscn_gen must be stale */
return true;
}
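
A worked example of the window check, with illustrative counter values: suppose the scan snapshot recorded rscn_gen_start = 10 and rscn_gen_end = 12, and the counter has since advanced to 15. An fcport stamped with rscn_gen = 11 falls inside [10, 12], so the RSCN predates the scan, its effect is already reflected in the scan results, and scan_needed can be cleared. An fcport stamped with rscn_gen = 14 falls inside [12, 15], so the RSCN arrived after the scan window closed and scan_needed must stay set for the next scan. val_is_in_range() is the inclusive-range helper added to the inline header later in this series.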
void qla_fab_scan_finish(scsi_qla_host_t *vha, srb_t *sp)
{
fc_port_t *fcport;
u32 i, rc;
@ -3281,10 +3304,10 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
(fcport->scan_needed &&
fcport->port_type != FCT_INITIATOR &&
fcport->port_type != FCT_NVME_INITIATOR)) {
fcport->scan_needed = 0;
qlt_schedule_sess_for_deletion(fcport);
}
fcport->d_id.b24 = rp->id.b24;
fcport->scan_needed = 0;
break;
}
@ -3325,7 +3348,9 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
do_delete = true;
}
fcport->scan_needed = 0;
if (qla_ok_to_clear_rscn(vha, fcport))
fcport->scan_needed = 0;
if (((qla_dual_mode_enabled(vha) ||
qla_ini_mode_enabled(vha)) &&
atomic_read(&fcport->state) == FCS_ONLINE) ||
@ -3355,7 +3380,9 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
fcport->port_name, fcport->loop_id,
fcport->login_retry);
}
fcport->scan_needed = 0;
if (qla_ok_to_clear_rscn(vha, fcport))
fcport->scan_needed = 0;
qla24xx_fcport_handle_login(vha, fcport);
}
}
@ -3379,14 +3406,11 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
}
}
static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha,
static int qla2x00_post_next_scan_work(struct scsi_qla_host *vha,
srb_t *sp, int cmd)
{
struct qla_work_evt *e;
if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE)
return QLA_PARAMETER_ERROR;
e = qla2x00_alloc_work(vha, cmd);
if (!e)
return QLA_FUNCTION_FAILED;
@ -3396,37 +3420,15 @@ static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha,
return qla2x00_post_work(vha, e);
}
static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha,
srb_t *sp, int cmd)
{
struct qla_work_evt *e;
if (cmd != QLA_EVT_GPNFT)
return QLA_PARAMETER_ERROR;
e = qla2x00_alloc_work(vha, cmd);
if (!e)
return QLA_FUNCTION_FAILED;
e->u.gpnft.fc4_type = FC4_TYPE_NVME;
e->u.gpnft.sp = sp;
return qla2x00_post_work(vha, e);
}
static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
struct srb *sp)
{
struct qla_hw_data *ha = vha->hw;
int num_fibre_dev = ha->max_fibre_devices;
struct ct_sns_req *ct_req =
(struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
struct ct_sns_gpnft_rsp *ct_rsp =
(struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
struct ct_sns_gpn_ft_data *d;
struct fab_scan_rp *rp;
u16 cmd = be16_to_cpu(ct_req->command);
u8 fc4_type = sp->gen2;
int i, j, k;
port_id_t id;
u8 found;
@ -3445,85 +3447,83 @@ static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
if (id.b24 == 0 || wwn == 0)
continue;
if (fc4_type == FC4_TYPE_FCP_SCSI) {
if (cmd == GPN_FT_CMD) {
rp = &vha->scan.l[j];
rp->id = id;
memcpy(rp->port_name, d->port_name, 8);
j++;
rp->fc4type = FS_FC4TYPE_FCP;
} else {
ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2025,
"%s %06x %8ph \n",
__func__, id.b24, d->port_name);
switch (vha->scan.step) {
case FAB_SCAN_GPNFT_FCP:
rp = &vha->scan.l[j];
rp->id = id;
memcpy(rp->port_name, d->port_name, 8);
j++;
rp->fc4type = FS_FC4TYPE_FCP;
break;
case FAB_SCAN_GNNFT_FCP:
for (k = 0; k < num_fibre_dev; k++) {
rp = &vha->scan.l[k];
if (id.b24 == rp->id.b24) {
memcpy(rp->node_name,
d->port_name, 8);
break;
}
}
break;
case FAB_SCAN_GPNFT_NVME:
found = 0;
for (k = 0; k < num_fibre_dev; k++) {
rp = &vha->scan.l[k];
if (!memcmp(rp->port_name, d->port_name, 8)) {
/*
* Supports FC-NVMe & FCP
*/
rp->fc4type |= FS_FC4TYPE_NVME;
found = 1;
break;
}
}
/* We found new FC-NVMe only port */
if (!found) {
for (k = 0; k < num_fibre_dev; k++) {
rp = &vha->scan.l[k];
if (id.b24 == rp->id.b24) {
memcpy(rp->node_name,
d->port_name, 8);
if (wwn_to_u64(rp->port_name)) {
continue;
} else {
rp->id = id;
memcpy(rp->port_name, d->port_name, 8);
rp->fc4type = FS_FC4TYPE_NVME;
break;
}
}
}
} else {
/* Search if the fibre device supports FC4_TYPE_NVME */
if (cmd == GPN_FT_CMD) {
found = 0;
for (k = 0; k < num_fibre_dev; k++) {
rp = &vha->scan.l[k];
if (!memcmp(rp->port_name,
d->port_name, 8)) {
/*
* Supports FC-NVMe & FCP
*/
rp->fc4type |= FS_FC4TYPE_NVME;
found = 1;
break;
}
}
/* We found new FC-NVMe only port */
if (!found) {
for (k = 0; k < num_fibre_dev; k++) {
rp = &vha->scan.l[k];
if (wwn_to_u64(rp->port_name)) {
continue;
} else {
rp->id = id;
memcpy(rp->port_name,
d->port_name, 8);
rp->fc4type =
FS_FC4TYPE_NVME;
break;
}
}
}
} else {
for (k = 0; k < num_fibre_dev; k++) {
rp = &vha->scan.l[k];
if (id.b24 == rp->id.b24) {
memcpy(rp->node_name,
d->port_name, 8);
break;
}
break;
case FAB_SCAN_GNNFT_NVME:
for (k = 0; k < num_fibre_dev; k++) {
rp = &vha->scan.l[k];
if (id.b24 == rp->id.b24) {
memcpy(rp->node_name, d->port_name, 8);
break;
}
}
break;
default:
break;
}
}
}
static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
static void qla_async_scan_sp_done(srb_t *sp, int res)
{
struct scsi_qla_host *vha = sp->vha;
struct ct_sns_req *ct_req =
(struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
u16 cmd = be16_to_cpu(ct_req->command);
u8 fc4_type = sp->gen2;
unsigned long flags;
int rc;
/* gen2 field is holding the fc4type */
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async done-%s res %x FC4Type %x\n",
sp->name, res, sp->gen2);
ql_dbg(ql_dbg_disc, vha, 0x2026,
"Async done-%s res %x step %x\n",
sp->name, res, vha->scan.step);
sp->rc = res;
if (res) {
@ -3547,8 +3547,7 @@ static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
* sp for GNNFT_DONE work. This will allow all
* the resources to get freed up.
*/
rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
QLA_EVT_GNNFT_DONE);
rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_FINISH);
if (rc) {
/* Cleanup here to prevent memory leak */
qla24xx_sp_unmap(vha, sp);
@ -3573,28 +3572,30 @@ static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
qla2x00_find_free_fcp_nvme_slot(vha, sp);
if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
cmd == GNN_FT_CMD) {
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
spin_unlock_irqrestore(&vha->work_lock, flags);
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
spin_unlock_irqrestore(&vha->work_lock, flags);
sp->rc = res;
rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT);
if (rc) {
qla24xx_sp_unmap(vha, sp);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
}
return;
}
switch (vha->scan.step) {
case FAB_SCAN_GPNFT_FCP:
case FAB_SCAN_GPNFT_NVME:
rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_CMD);
break;
case FAB_SCAN_GNNFT_FCP:
if (vha->flags.nvme_enabled)
rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_CMD);
else
rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_FINISH);
if (cmd == GPN_FT_CMD) {
rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
QLA_EVT_GPNFT_DONE);
} else {
rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
QLA_EVT_GNNFT_DONE);
break;
case FAB_SCAN_GNNFT_NVME:
rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_FINISH);
break;
default:
/* should not be here */
WARN_ON(1);
rc = QLA_FUNCTION_FAILED;
break;
}
if (rc) {
@ -3605,127 +3606,16 @@ static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
}
}
/*
* Get WWNN list for fc4_type
*
* It is assumed the same SRB is re-used from GPNFT to avoid
* mem free & re-alloc
*/
static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
u8 fc4_type)
{
int rval = QLA_FUNCTION_FAILED;
struct ct_sns_req *ct_req;
struct ct_sns_pkt *ct_sns;
unsigned long flags;
if (!vha->flags.online) {
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
spin_unlock_irqrestore(&vha->work_lock, flags);
goto done_free_sp;
}
if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
ql_log(ql_log_warn, vha, 0xffff,
"%s: req %p rsp %p are not setup\n",
__func__, sp->u.iocb_cmd.u.ctarg.req,
sp->u.iocb_cmd.u.ctarg.rsp);
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
spin_unlock_irqrestore(&vha->work_lock, flags);
WARN_ON(1);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
goto done_free_sp;
}
ql_dbg(ql_dbg_disc, vha, 0xfffff,
"%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n",
__func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
sp->u.iocb_cmd.u.ctarg.req_size);
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gnnft";
sp->gen1 = vha->hw->base_qpair->chip_reset;
sp->gen2 = fc4_type;
qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
qla2x00_async_gpnft_gnnft_sp_done);
memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
/* CT_IU preamble */
ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
sp->u.iocb_cmd.u.ctarg.rsp_size);
/* GPN_FT req */
ct_req->req.gpn_ft.port_type = fc4_type;
sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s hdl=%x FC4Type %x.\n", sp->name,
sp->handle, ct_req->req.gpn_ft.port_type);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
goto done_free_sp;
}
return rval;
done_free_sp:
if (sp->u.iocb_cmd.u.ctarg.req) {
dma_free_coherent(&vha->hw->pdev->dev,
sp->u.iocb_cmd.u.ctarg.req_allocated_size,
sp->u.iocb_cmd.u.ctarg.req,
sp->u.iocb_cmd.u.ctarg.req_dma);
sp->u.iocb_cmd.u.ctarg.req = NULL;
}
if (sp->u.iocb_cmd.u.ctarg.rsp) {
dma_free_coherent(&vha->hw->pdev->dev,
sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
sp->u.iocb_cmd.u.ctarg.rsp,
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
}
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
if (vha->scan.scan_flags == 0) {
ql_dbg(ql_dbg_disc, vha, 0xffff,
"%s: schedule\n", __func__);
vha->scan.scan_flags |= SF_QUEUED;
schedule_delayed_work(&vha->scan.scan_work, 5);
}
spin_unlock_irqrestore(&vha->work_lock, flags);
return rval;
} /* GNNFT */
void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
{
ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s enter\n", __func__);
qla24xx_async_gnnft(vha, sp, sp->gen2);
}
/* Get WWPN list for certain fc4_type */
int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
int qla_fab_async_scan(scsi_qla_host_t *vha, srb_t *sp)
{
int rval = QLA_FUNCTION_FAILED;
struct ct_sns_req *ct_req;
struct ct_sns_pkt *ct_sns;
u32 rspsz;
u32 rspsz = 0;
unsigned long flags;
ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x200c,
"%s enter\n", __func__);
if (!vha->flags.online)
@ -3734,22 +3624,21 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
spin_lock_irqsave(&vha->work_lock, flags);
if (vha->scan.scan_flags & SF_SCANNING) {
spin_unlock_irqrestore(&vha->work_lock, flags);
ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2012,
"%s: scan active\n", __func__);
return rval;
}
vha->scan.scan_flags |= SF_SCANNING;
if (!sp)
vha->scan.step = FAB_SCAN_START;
spin_unlock_irqrestore(&vha->work_lock, flags);
if (fc4_type == FC4_TYPE_FCP_SCSI) {
ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
switch (vha->scan.step) {
case FAB_SCAN_START:
ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2018,
"%s: Performing FCP Scan\n", __func__);
if (sp) {
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
/* ref: INIT */
sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
if (!sp) {
@ -3765,7 +3654,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
GFP_KERNEL);
sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.req) {
ql_log(ql_log_warn, vha, 0xffff,
ql_log(ql_log_warn, vha, 0x201a,
"Failed to allocate ct_sns request.\n");
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
@ -3773,7 +3662,6 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
qla2x00_rel_sp(sp);
return rval;
}
sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
rspsz = sizeof(struct ct_sns_gpnft_rsp) +
vha->hw->max_fibre_devices *
@ -3785,7 +3673,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
GFP_KERNEL);
sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz;
if (!sp->u.iocb_cmd.u.ctarg.rsp) {
ql_log(ql_log_warn, vha, 0xffff,
ql_log(ql_log_warn, vha, 0x201b,
"Failed to allocate ct_sns request.\n");
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
@ -3805,35 +3693,95 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
"%s scan list size %d\n", __func__, vha->scan.size);
memset(vha->scan.l, 0, vha->scan.size);
} else if (!sp) {
ql_dbg(ql_dbg_disc, vha, 0xffff,
"NVME scan did not provide SP\n");
return rval;
vha->scan.step = FAB_SCAN_GPNFT_FCP;
break;
case FAB_SCAN_GPNFT_FCP:
vha->scan.step = FAB_SCAN_GNNFT_FCP;
break;
case FAB_SCAN_GNNFT_FCP:
vha->scan.step = FAB_SCAN_GPNFT_NVME;
break;
case FAB_SCAN_GPNFT_NVME:
vha->scan.step = FAB_SCAN_GNNFT_NVME;
break;
case FAB_SCAN_GNNFT_NVME:
default:
/* should not be here */
WARN_ON(1);
goto done_free_sp;
}
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gpnft";
sp->gen1 = vha->hw->base_qpair->chip_reset;
sp->gen2 = fc4_type;
qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
qla2x00_async_gpnft_gnnft_sp_done);
if (!sp) {
ql_dbg(ql_dbg_disc, vha, 0x201c,
"scan did not provide SP\n");
return rval;
}
if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
ql_log(ql_log_warn, vha, 0x201d,
"%s: req %p rsp %p are not setup\n",
__func__, sp->u.iocb_cmd.u.ctarg.req,
sp->u.iocb_cmd.u.ctarg.rsp);
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
spin_unlock_irqrestore(&vha->work_lock, flags);
WARN_ON(1);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
goto done_free_sp;
}
rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
sp->type = SRB_CT_PTHRU_CMD;
sp->gen1 = vha->hw->base_qpair->chip_reset;
qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
qla_async_scan_sp_done);
ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
/* CT_IU preamble */
ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
/* GPN_FT req */
ct_req->req.gpn_ft.port_type = fc4_type;
/* CT_IU preamble */
switch (vha->scan.step) {
case FAB_SCAN_GPNFT_FCP:
sp->name = "gpnft";
ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
ct_req->req.gpn_ft.port_type = FC4_TYPE_FCP_SCSI;
sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
break;
case FAB_SCAN_GNNFT_FCP:
sp->name = "gnnft";
ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD, rspsz);
ct_req->req.gpn_ft.port_type = FC4_TYPE_FCP_SCSI;
sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
break;
case FAB_SCAN_GPNFT_NVME:
sp->name = "gpnft";
ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
ct_req->req.gpn_ft.port_type = FC4_TYPE_NVME;
sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
break;
case FAB_SCAN_GNNFT_NVME:
sp->name = "gnnft";
ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD, rspsz);
ct_req->req.gpn_ft.port_type = FC4_TYPE_NVME;
sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
break;
default:
/* should not be here */
WARN_ON(1);
goto done_free_sp;
}
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s hdl=%x FC4Type %x.\n", sp->name,
sp->handle, ct_req->req.gpn_ft.port_type);
ql_dbg(ql_dbg_disc, vha, 0x2003,
"%s: step %d, rsp size %d, req size %d hdl %x %s FC4TYPE %x \n",
__func__, vha->scan.step, sp->u.iocb_cmd.u.ctarg.rsp_size,
sp->u.iocb_cmd.u.ctarg.req_size, sp->handle, sp->name,
ct_req->req.gpn_ft.port_type);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
@ -3864,7 +3812,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
if (vha->scan.scan_flags == 0) {
ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2007,
"%s: Scan scheduled.\n", __func__);
vha->scan.scan_flags |= SF_QUEUED;
schedule_delayed_work(&vha->scan.scan_work, 5);
@ -3875,6 +3823,15 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
return rval;
}
void qla_fab_scan_start(struct scsi_qla_host *vha)
{
int rval;
rval = qla_fab_async_scan(vha, NULL);
if (rval)
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
}
void qla_scan_work_fn(struct work_struct *work)
{
struct fab_scan *s = container_of(to_delayed_work(work),


@ -423,7 +423,7 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
sp->type = SRB_LOGOUT_CMD;
sp->name = "logout";
qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
qla2x00_async_logout_sp_done),
qla2x00_async_logout_sp_done);
ql_dbg(ql_dbg_disc, vha, 0x2070,
"Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n",
@ -1842,10 +1842,18 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
return qla2x00_post_work(vha, e);
}
static void qla_rscn_gen_tick(scsi_qla_host_t *vha, u32 *ret_rscn_gen)
{
*ret_rscn_gen = atomic_inc_return(&vha->rscn_gen);
/* memory barrier */
wmb();
}
void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
{
fc_port_t *fcport;
unsigned long flags;
u32 rscn_gen;
switch (ea->id.b.rsvd_1) {
case RSCN_PORT_ADDR:
@ -1875,15 +1883,16 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
* Otherwise we're already in the middle of a relogin
*/
fcport->scan_needed = 1;
fcport->rscn_gen++;
qla_rscn_gen_tick(vha, &fcport->rscn_gen);
}
} else {
fcport->scan_needed = 1;
fcport->rscn_gen++;
qla_rscn_gen_tick(vha, &fcport->rscn_gen);
}
}
break;
case RSCN_AREA_ADDR:
qla_rscn_gen_tick(vha, &rscn_gen);
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->flags & FCF_FCP2_DEVICE &&
atomic_read(&fcport->state) == FCS_ONLINE)
@ -1891,11 +1900,12 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) {
fcport->scan_needed = 1;
fcport->rscn_gen++;
fcport->rscn_gen = rscn_gen;
}
}
break;
case RSCN_DOM_ADDR:
qla_rscn_gen_tick(vha, &rscn_gen);
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->flags & FCF_FCP2_DEVICE &&
atomic_read(&fcport->state) == FCS_ONLINE)
@ -1903,19 +1913,20 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) {
fcport->scan_needed = 1;
fcport->rscn_gen++;
fcport->rscn_gen = rscn_gen;
}
}
break;
case RSCN_FAB_ADDR:
default:
qla_rscn_gen_tick(vha, &rscn_gen);
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->flags & FCF_FCP2_DEVICE &&
atomic_read(&fcport->state) == FCS_ONLINE)
continue;
fcport->scan_needed = 1;
fcport->rscn_gen++;
fcport->rscn_gen = rscn_gen;
}
break;
}
@ -1924,6 +1935,7 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
if (vha->scan.scan_flags == 0) {
ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__);
vha->scan.scan_flags |= SF_QUEUED;
vha->scan.rscn_gen_start = atomic_read(&vha->rscn_gen);
schedule_delayed_work(&vha->scan.scan_work, 5);
}
spin_unlock_irqrestore(&vha->work_lock, flags);
@ -6393,10 +6405,9 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
qlt_do_generation_tick(vha, &discovery_gen);
if (USE_ASYNC_SCAN(ha)) {
rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
NULL);
if (rval)
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
/* start of scan begins here */
vha->scan.rscn_gen_end = atomic_read(&vha->rscn_gen);
qla_fab_scan_start(vha);
} else {
list_for_each_entry(fcport, &vha->vp_fcports, list)
fcport->scan_state = QLA_FCPORT_SCAN;
@ -8207,15 +8218,21 @@ qla28xx_get_aux_images(
struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status;
bool valid_pri_image = false, valid_sec_image = false;
bool active_pri_image = false, active_sec_image = false;
int rc;
if (!ha->flt_region_aux_img_status_pri) {
ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n");
goto check_sec_image;
}
qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
rc = qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
ha->flt_region_aux_img_status_pri,
sizeof(pri_aux_image_status) >> 2);
if (rc) {
ql_log(ql_log_info, vha, 0x01a1,
"Unable to read Primary aux image(%x).\n", rc);
goto check_sec_image;
}
qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) {
@ -8246,9 +8263,15 @@ qla28xx_get_aux_images(
goto check_valid_image;
}
qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
rc = qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
ha->flt_region_aux_img_status_sec,
sizeof(sec_aux_image_status) >> 2);
if (rc) {
ql_log(ql_log_info, vha, 0x01a2,
"Unable to read Secondary aux image(%x).\n", rc);
goto check_valid_image;
}
qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) {
@ -8306,6 +8329,7 @@ qla27xx_get_active_image(struct scsi_qla_host *vha,
struct qla27xx_image_status pri_image_status, sec_image_status;
bool valid_pri_image = false, valid_sec_image = false;
bool active_pri_image = false, active_sec_image = false;
int rc;
if (!ha->flt_region_img_status_pri) {
ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n");
@ -8347,8 +8371,14 @@ qla27xx_get_active_image(struct scsi_qla_host *vha,
goto check_valid_image;
}
qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
rc = qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2);
if (rc) {
ql_log(ql_log_info, vha, 0x01a3,
"Unable to read Secondary image status(%x).\n", rc);
goto check_valid_image;
}
qla27xx_print_image(vha, "Secondary image", &sec_image_status);
if (qla27xx_check_image_status_signature(&sec_image_status)) {
@ -8420,11 +8450,10 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
"FW: Loading firmware from flash (%x).\n", faddr);
dcode = (uint32_t *)req->ring;
qla24xx_read_flash_data(vha, dcode, faddr, 8);
if (qla24xx_risc_firmware_invalid(dcode)) {
rval = qla24xx_read_flash_data(vha, dcode, faddr, 8);
if (rval || qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_fatal, vha, 0x008c,
"Unable to verify the integrity of flash firmware "
"image.\n");
"Unable to verify the integrity of flash firmware image (rval %x).\n", rval);
ql_log(ql_log_fatal, vha, 0x008d,
"Firmware data: %08x %08x %08x %08x.\n",
dcode[0], dcode[1], dcode[2], dcode[3]);
@ -8438,7 +8467,12 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
for (j = 0; j < segments; j++) {
ql_dbg(ql_dbg_init, vha, 0x008d,
"-> Loading segment %u...\n", j);
qla24xx_read_flash_data(vha, dcode, faddr, 10);
rval = qla24xx_read_flash_data(vha, dcode, faddr, 10);
if (rval) {
ql_log(ql_log_fatal, vha, 0x016a,
"-> Unable to read segment addr + size .\n");
return QLA_FUNCTION_FAILED;
}
risc_addr = be32_to_cpu((__force __be32)dcode[2]);
risc_size = be32_to_cpu((__force __be32)dcode[3]);
if (!*srisc_addr) {
@ -8454,7 +8488,13 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
ql_dbg(ql_dbg_init, vha, 0x008e,
"-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n",
fragment, risc_addr, faddr, dlen);
qla24xx_read_flash_data(vha, dcode, faddr, dlen);
rval = qla24xx_read_flash_data(vha, dcode, faddr, dlen);
if (rval) {
ql_log(ql_log_fatal, vha, 0x016b,
"-> Unable to read fragment(faddr %#x dlen %#lx).\n",
faddr, dlen);
return QLA_FUNCTION_FAILED;
}
for (i = 0; i < dlen; i++)
dcode[i] = swab32(dcode[i]);
@ -8483,7 +8523,14 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
fwdt->length = 0;
dcode = (uint32_t *)req->ring;
qla24xx_read_flash_data(vha, dcode, faddr, 7);
rval = qla24xx_read_flash_data(vha, dcode, faddr, 7);
if (rval) {
ql_log(ql_log_fatal, vha, 0x016c,
"-> Unable to read template size.\n");
goto failed;
}
risc_size = be32_to_cpu((__force __be32)dcode[2]);
ql_dbg(ql_dbg_init, vha, 0x0161,
"-> fwdt%u template array at %#x (%#x dwords)\n",
@ -8509,11 +8556,12 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
}
dcode = fwdt->template;
qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
rval = qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
if (!qla27xx_fwdt_template_valid(dcode)) {
if (rval || !qla27xx_fwdt_template_valid(dcode)) {
ql_log(ql_log_warn, vha, 0x0165,
"-> fwdt%u failed template validate\n", j);
"-> fwdt%u failed template validate (rval %x)\n",
j, rval);
goto failed;
}


@ -631,3 +631,11 @@ static inline int qla_mapq_alloc_qp_cpu_map(struct qla_hw_data *ha)
}
return 0;
}
static inline bool val_is_in_range(u32 val, u32 start, u32 end)
{
if (val >= start && val <= end)
return true;
else
return false;
}


@ -3014,12 +3014,6 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
}
}
struct scsi_dif_tuple {
__be16 guard; /* Checksum */
__be16 app_tag; /* APPL identifier */
__be32 ref_tag; /* Target LBA or indirect LBA */
};
/*
* Checks the guard or meta-data for the type of error
* detected by the HBA. In case of errors, we set the


@ -180,7 +180,7 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
list_for_each_entry(fcport, &vha->vp_fcports, list)
fcport->logout_on_delete = 0;
fcport->logout_on_delete = 1;
if (!vha->hw->flags.edif_enabled)
qla2x00_wait_for_sess_deletion(vha);


@ -49,7 +49,10 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
return 0;
}
if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
if (qla_nvme_register_hba(vha))
return 0;
if (!vha->nvme_local_port)
return 0;
if (!(fcport->nvme_prli_service_param &


@ -1875,14 +1875,9 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
/*
* perform lockless completion during driver unload
*/
if (qla2x00_chip_is_down(vha)) {
req->outstanding_cmds[cnt] = NULL;
spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
sp->done(sp, res);
spin_lock_irqsave(qp->qp_lock_ptr, flags);
continue;
}
@ -4689,7 +4684,7 @@ static void
qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
{
u32 temp;
struct init_cb_81xx *icb = (struct init_cb_81xx *)&vha->hw->init_cb;
struct init_cb_81xx *icb = (struct init_cb_81xx *)vha->hw->init_cb;
*ret_cnt = FW_DEF_EXCHANGES_CNT;
if (max_cnt > vha->hw->max_exchg)
@ -5563,15 +5558,11 @@ qla2x00_do_work(struct scsi_qla_host *vha)
qla2x00_async_prlo_done(vha, e->u.logio.fcport,
e->u.logio.data);
break;
case QLA_EVT_GPNFT:
qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type,
e->u.gpnft.sp);
case QLA_EVT_SCAN_CMD:
qla_fab_async_scan(vha, e->u.iosb.sp);
break;
case QLA_EVT_GPNFT_DONE:
qla24xx_async_gpnft_done(vha, e->u.iosb.sp);
break;
case QLA_EVT_GNNFT_DONE:
qla24xx_async_gnnft_done(vha, e->u.iosb.sp);
case QLA_EVT_SCAN_FINISH:
qla_fab_scan_finish(vha, e->u.iosb.sp);
break;
case QLA_EVT_GFPNID:
qla24xx_async_gfpnid(vha, e->u.fcport.fcport);


@ -555,6 +555,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
struct qla_flt_location *fltl = (void *)req->ring;
uint32_t *dcode = (uint32_t *)req->ring;
uint8_t *buf = (void *)req->ring, *bcode, last_image;
int rc;
/*
* FLT-location structure resides after the last PCI region.
@ -584,14 +585,24 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
pcihdr = 0;
do {
/* Verify PCI expansion ROM header. */
qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
rc = qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
if (rc) {
ql_log(ql_log_info, vha, 0x016d,
"Unable to read PCI Expansion Rom Header (%x).\n", rc);
return QLA_FUNCTION_FAILED;
}
bcode = buf + (pcihdr % 4);
if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa)
goto end;
/* Locate PCI data structure. */
pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
rc = qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
if (rc) {
ql_log(ql_log_info, vha, 0x0179,
"Unable to read PCI Data Structure (%x).\n", rc);
return QLA_FUNCTION_FAILED;
}
bcode = buf + (pcihdr % 4);
/* Validate signature of PCI data structure. */
@ -606,7 +617,12 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
} while (!last_image);
/* Now verify FLT-location structure. */
qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, sizeof(*fltl) >> 2);
rc = qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, sizeof(*fltl) >> 2);
if (rc) {
ql_log(ql_log_info, vha, 0x017a,
"Unable to read FLT (%x).\n", rc);
return QLA_FUNCTION_FAILED;
}
if (memcmp(fltl->sig, "QFLT", 4))
goto end;
@ -2605,13 +2621,18 @@ qla24xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
uint32_t offset, uint32_t length)
{
struct qla_hw_data *ha = vha->hw;
int rc;
/* Suspend HBA. */
scsi_block_requests(vha->host);
set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
/* Go with read. */
qla24xx_read_flash_data(vha, buf, offset >> 2, length >> 2);
rc = qla24xx_read_flash_data(vha, buf, offset >> 2, length >> 2);
if (rc) {
ql_log(ql_log_info, vha, 0x01a0,
"Unable to perform optrom read(%x).\n", rc);
}
/* Resume HBA. */
clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
@ -3412,7 +3433,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
struct active_regions active_regions = { };
if (IS_P3P_TYPE(ha))
return ret;
return QLA_SUCCESS;
if (!mbuf)
return QLA_FUNCTION_FAILED;
@ -3432,20 +3453,31 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
do {
/* Verify PCI expansion ROM header. */
qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
ret = qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
if (ret) {
ql_log(ql_log_info, vha, 0x017d,
"Unable to read PCI EXP Rom Header(%x).\n", ret);
return QLA_FUNCTION_FAILED;
}
bcode = mbuf + (pcihdr % 4);
if (memcmp(bcode, "\x55\xaa", 2)) {
/* No signature */
ql_log(ql_log_fatal, vha, 0x0059,
"No matching ROM signature.\n");
ret = QLA_FUNCTION_FAILED;
break;
return QLA_FUNCTION_FAILED;
}
/* Locate PCI data structure. */
pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
ret = qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
if (ret) {
ql_log(ql_log_info, vha, 0x018e,
"Unable to read PCI Data Structure (%x).\n", ret);
return QLA_FUNCTION_FAILED;
}
bcode = mbuf + (pcihdr % 4);
/* Validate signature of PCI data structure. */
@ -3454,8 +3486,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
ql_log(ql_log_fatal, vha, 0x005a,
"PCI data struct not found pcir_adr=%x.\n", pcids);
ql_dump_buffer(ql_dbg_init, vha, 0x0059, dcode, 32);
ret = QLA_FUNCTION_FAILED;
break;
return QLA_FUNCTION_FAILED;
}
/* Read version */
@ -3507,20 +3538,26 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
faddr = ha->flt_region_fw_sec;
}
qla24xx_read_flash_data(vha, dcode, faddr, 8);
if (qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_warn, vha, 0x005f,
"Unrecognized fw revision at %x.\n",
ha->flt_region_fw * 4);
ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32);
ret = qla24xx_read_flash_data(vha, dcode, faddr, 8);
if (ret) {
ql_log(ql_log_info, vha, 0x019e,
"Unable to read FW version (%x).\n", ret);
return ret;
} else {
for (i = 0; i < 4; i++)
ha->fw_revision[i] =
if (qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_warn, vha, 0x005f,
"Unrecognized fw revision at %x.\n",
ha->flt_region_fw * 4);
ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32);
} else {
for (i = 0; i < 4; i++)
ha->fw_revision[i] =
be32_to_cpu((__force __be32)dcode[4+i]);
ql_dbg(ql_dbg_init, vha, 0x0060,
"Firmware revision (flash) %u.%u.%u (%x).\n",
ha->fw_revision[0], ha->fw_revision[1],
ha->fw_revision[2], ha->fw_revision[3]);
ql_dbg(ql_dbg_init, vha, 0x0060,
"Firmware revision (flash) %u.%u.%u (%x).\n",
ha->fw_revision[0], ha->fw_revision[1],
ha->fw_revision[2], ha->fw_revision[3]);
}
}
/* Check for golden firmware and get version if available */
@ -3531,18 +3568,23 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
memset(ha->gold_fw_version, 0, sizeof(ha->gold_fw_version));
faddr = ha->flt_region_gold_fw;
qla24xx_read_flash_data(vha, dcode, ha->flt_region_gold_fw, 8);
if (qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_warn, vha, 0x0056,
"Unrecognized golden fw at %#x.\n", faddr);
ql_dump_buffer(ql_dbg_init, vha, 0x0056, dcode, 32);
ret = qla24xx_read_flash_data(vha, dcode, ha->flt_region_gold_fw, 8);
if (ret) {
ql_log(ql_log_info, vha, 0x019f,
"Unable to read Gold FW version (%x).\n", ret);
return ret;
} else {
if (qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_warn, vha, 0x0056,
"Unrecognized golden fw at %#x.\n", faddr);
ql_dump_buffer(ql_dbg_init, vha, 0x0056, dcode, 32);
return QLA_FUNCTION_FAILED;
}
for (i = 0; i < 4; i++)
ha->gold_fw_version[i] =
be32_to_cpu((__force __be32)dcode[4+i]);
}
for (i = 0; i < 4; i++)
ha->gold_fw_version[i] =
be32_to_cpu((__force __be32)dcode[4+i]);
return ret;
}


@ -6,9 +6,9 @@
/*
* Driver version
*/
#define QLA2XXX_VERSION "10.02.09.200-k"
#define QLA2XXX_VERSION "10.02.09.300-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 9
#define QLA_DRIVER_BETA_VER 200
#define QLA_DRIVER_BETA_VER 300


@ -12,6 +12,7 @@
#include <asm/unaligned.h>
#include <scsi/scsi_common.h>
MODULE_DESCRIPTION("SCSI functions used by both the initiator and the target code");
MODULE_LICENSE("GPL v2");
/* Command group 3 is reserved and should never be used. */


@ -39,13 +39,12 @@ static LIST_HEAD(scsi_dev_info_list);
static char scsi_dev_flags[256];
/*
* scsi_static_device_list: deprecated list of devices that require
* settings that differ from the default, includes black-listed (broken)
* devices. The entries here are added to the tail of scsi_dev_info_list
* via scsi_dev_info_list_init.
* scsi_static_device_list: list of devices that require settings that differ
* from the default, includes black-listed (broken) devices. The entries here
* are added to the tail of scsi_dev_info_list via scsi_dev_info_list_init.
*
* Do not add to this list, use the command line or proc interface to add
* to the scsi_dev_info_list. This table will eventually go away.
* If possible, set the BLIST_* flags from inside a SCSI LLD rather than
* adding an entry to this list.
*/
static struct {
char *vendor;


@ -334,7 +334,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
sdev->sg_reserved_size = INT_MAX;
scsi_init_limits(shost, &lim);
q = blk_mq_alloc_queue(&sdev->host->tag_set, &lim, NULL);
q = blk_mq_alloc_queue(&sdev->host->tag_set, &lim, sdev);
if (IS_ERR(q)) {
/* release fn is set up in scsi_sysfs_device_initialise, so
* have to free and put manually here */
@ -344,7 +344,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
}
kref_get(&sdev->host->tagset_refcnt);
sdev->request_queue = q;
q->queuedata = sdev;
depth = sdev->host->cmd_per_lun ?: 1;


@ -666,4 +666,5 @@ static struct platform_driver sun3_scsi_driver = {
module_platform_driver_probe(sun3_scsi_driver, sun3_scsi_probe);
MODULE_ALIAS("platform:" DRV_MODULE_NAME);
MODULE_DESCRIPTION("Sun3 NCR5380 SCSI controller driver");
MODULE_LICENSE("GPL");


@ -18,6 +18,7 @@
#include <linux/iopoll.h>
#define MAX_QUEUE_SUP GENMASK(7, 0)
#define QCFGPTR GENMASK(23, 16)
#define UFS_MCQ_MIN_RW_QUEUES 2
#define UFS_MCQ_MIN_READ_QUEUES 0
#define UFS_MCQ_MIN_POLL_QUEUES 0
@ -25,7 +26,6 @@
#define QUEUE_ID_OFFSET 16
#define MCQ_CFG_MAC_MASK GENMASK(16, 8)
#define MCQ_QCFG_SIZE 0x40
#define MCQ_ENTRY_SIZE_IN_DWORD 8
#define CQE_UCD_BA GENMASK_ULL(63, 7)
@ -116,6 +116,19 @@ struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
return hctx ? &hba->uhq[hctx->queue_num] : NULL;
}
/**
* ufshcd_mcq_queue_cfg_addr - get the start address of the MCQ Queue Config
* Registers.
* @hba: per adapter instance
*
* Return: Start address of MCQ Queue Config Registers in HCI
*/
unsigned int ufshcd_mcq_queue_cfg_addr(struct ufs_hba *hba)
{
return FIELD_GET(QCFGPTR, hba->mcq_capabilities) * 0x200;
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_queue_cfg_addr);
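
A quick worked example of the computation, with a hypothetical capability value: QCFGPTR occupies bits 23:16 of the MCQ capabilities register, so a reading of 0x00030000 gives FIELD_GET(QCFGPTR, 0x00030000) = 0x3, and the queue config registers start at 0x3 * 0x200 = 0x600 within the HCI register space.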
/**
* ufshcd_mcq_decide_queue_depth - decide the queue depth
* @hba: per adapter instance
@ -124,7 +137,6 @@ struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
*
* MAC - Max. Active Command of the Host Controller (HC)
* HC wouldn't send more than this commands to the device.
* It is mandatory to implement get_hba_mac() to enable MCQ mode.
* Calculates and adjusts the queue depth based on the depth
* supported by the HC and ufs device.
*/
@ -132,12 +144,21 @@ int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
{
int mac;
/* Mandatory to implement get_hba_mac() */
mac = ufshcd_mcq_vops_get_hba_mac(hba);
if (mac < 0) {
dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
return mac;
if (!hba->vops || !hba->vops->get_hba_mac) {
/*
* Extract the maximum number of active transfer tasks value
* from the host controller capabilities register. This value is
* 0-based.
*/
hba->capabilities =
ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
mac = hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_MCQ;
mac++;
} else {
mac = hba->vops->get_hba_mac(hba);
}
if (mac < 0)
goto err;
WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
/*
@ -146,6 +167,10 @@ int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
* shared queuing architecture is enabled.
*/
return min_t(int, mac, hba->dev_info.bqueuedepth);
err:
dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
return mac;
}
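
The fallback path works because the MCQ slots field in the capabilities register is 0-based, hence the increment: a controller reporting 31 there yields mac = 32, and with an illustrative device bQueueDepth of 16 the resulting queue depth is min(32, 16) = 16. Controllers whose drivers still implement ->get_hba_mac() keep using the vendor-supplied value.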
static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
@ -165,6 +190,15 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
return -EOPNOTSUPP;
}
/*
* Device should support at least one I/O queue to handle device
* commands via hba->dev_cmd_queue.
*/
if (hba_maxq == poll_queues) {
dev_err(hba->dev, "At least one non-poll queue required\n");
return -EOPNOTSUPP;
}
rem = hba_maxq;
if (rw_queues) {
@ -227,12 +261,6 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
return 0;
}
/* Operation and runtime registers configuration */
#define MCQ_CFG_n(r, i) ((r) + MCQ_QCFG_SIZE * (i))
#define MCQ_OPR_OFFSET_n(p, i) \
(hba->mcq_opr[(p)].offset + hba->mcq_opr[(p)].stride * (i))
static void __iomem *mcq_opr_base(struct ufs_hba *hba,
enum ufshcd_mcq_opr n, int i)
{
@ -337,29 +365,29 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
/* Submission Queue Lower Base Address */
ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr),
MCQ_CFG_n(REG_SQLBA, i));
ufshcd_mcq_cfg_offset(REG_SQLBA, i));
/* Submission Queue Upper Base Address */
ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr),
MCQ_CFG_n(REG_SQUBA, i));
ufshcd_mcq_cfg_offset(REG_SQUBA, i));
/* Submission Queue Doorbell Address Offset */
ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQD, i),
MCQ_CFG_n(REG_SQDAO, i));
ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQD, i),
ufshcd_mcq_cfg_offset(REG_SQDAO, i));
/* Submission Queue Interrupt Status Address Offset */
ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQIS, i),
MCQ_CFG_n(REG_SQISAO, i));
ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQIS, i),
ufshcd_mcq_cfg_offset(REG_SQISAO, i));
/* Completion Queue Lower Base Address */
ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr),
MCQ_CFG_n(REG_CQLBA, i));
ufshcd_mcq_cfg_offset(REG_CQLBA, i));
/* Completion Queue Upper Base Address */
ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr),
MCQ_CFG_n(REG_CQUBA, i));
ufshcd_mcq_cfg_offset(REG_CQUBA, i));
/* Completion Queue Doorbell Address Offset */
ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQD, i),
MCQ_CFG_n(REG_CQDAO, i));
ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQD, i),
ufshcd_mcq_cfg_offset(REG_CQDAO, i));
/* Completion Queue Interrupt Status Address Offset */
ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQIS, i),
MCQ_CFG_n(REG_CQISAO, i));
ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQIS, i),
ufshcd_mcq_cfg_offset(REG_CQISAO, i));
/* Save the base addresses for quicker access */
hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP;
@ -376,7 +404,7 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
/* Completion Queue Enable|Size to Completion Queue Attribute */
ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize,
MCQ_CFG_n(REG_CQATTR, i));
ufshcd_mcq_cfg_offset(REG_CQATTR, i));
/*
* Submission Queue Enable|Size|Completion Queue ID to
@ -384,7 +412,7 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
*/
ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize |
(i << QUEUE_ID_OFFSET),
MCQ_CFG_n(REG_SQATTR, i));
ufshcd_mcq_cfg_offset(REG_SQATTR, i));
}
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational);
@ -399,9 +427,16 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi);
void ufshcd_mcq_enable(struct ufs_hba *hba)
{
ufshcd_rmwl(hba, MCQ_MODE_SELECT, MCQ_MODE_SELECT, REG_UFS_MEM_CFG);
hba->mcq_enabled = true;
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_enable);
void ufshcd_mcq_disable(struct ufs_hba *hba)
{
ufshcd_rmwl(hba, MCQ_MODE_SELECT, 0, REG_UFS_MEM_CFG);
hba->mcq_enabled = false;
}
void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
{
ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA);


@ -1340,6 +1340,78 @@ static const struct attribute_group ufs_sysfs_flags_group = {
.attrs = ufs_sysfs_device_flags,
};
static ssize_t max_number_of_rtt_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
u32 rtt;
int ret;
down(&hba->host_sem);
if (!ufshcd_is_user_access_allowed(hba)) {
up(&hba->host_sem);
return -EBUSY;
}
ufshcd_rpm_get_sync(hba);
ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt);
ufshcd_rpm_put_sync(hba);
if (ret)
goto out;
ret = sysfs_emit(buf, "0x%08X\n", rtt);
out:
up(&hba->host_sem);
return ret;
}
static ssize_t max_number_of_rtt_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_dev_info *dev_info = &hba->dev_info;
struct scsi_device *sdev;
unsigned int rtt;
int ret;
if (kstrtouint(buf, 0, &rtt))
return -EINVAL;
if (rtt > dev_info->rtt_cap) {
dev_err(dev, "rtt can be at most bDeviceRTTCap\n");
return -EINVAL;
}
down(&hba->host_sem);
if (!ufshcd_is_user_access_allowed(hba)) {
ret = -EBUSY;
goto out;
}
ufshcd_rpm_get_sync(hba);
shost_for_each_device(sdev, hba->host)
blk_mq_freeze_queue(sdev->request_queue);
ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt);
shost_for_each_device(sdev, hba->host)
blk_mq_unfreeze_queue(sdev->request_queue);
ufshcd_rpm_put_sync(hba);
out:
up(&hba->host_sem);
return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(max_number_of_rtt);
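A worked example of the store path's constraints: on a device reporting bDeviceRTTCap = 32, writing 64 fails immediately with -EINVAL; writing 16 first freezes every SCSI request queue on the host (so no commands are outstanding, as the spec requires) before issuing the WRITE_ATTR query; and any write attempted while user access is blocked, e.g. during error handling, returns -EBUSY.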
static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
{
return idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS &&
@ -1387,7 +1459,6 @@ UFS_ATTRIBUTE(max_data_in_size, _MAX_DATA_IN);
UFS_ATTRIBUTE(max_data_out_size, _MAX_DATA_OUT);
UFS_ATTRIBUTE(reference_clock_frequency, _REF_CLK_FREQ);
UFS_ATTRIBUTE(configuration_descriptor_lock, _CONF_DESC_LOCK);
UFS_ATTRIBUTE(max_number_of_rtt, _MAX_NUM_OF_RTT);
UFS_ATTRIBUTE(exception_event_control, _EE_CONTROL);
UFS_ATTRIBUTE(exception_event_status, _EE_STATUS);
UFS_ATTRIBUTE(ffu_status, _FFU_STATUS);

View file

@ -95,8 +95,12 @@ static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile,
return err;
}
static int ufshcd_clear_keyslot(struct ufs_hba *hba, int slot)
static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot)
{
struct ufs_hba *hba =
container_of(profile, struct ufs_hba, crypto_profile);
/*
* Clear the crypto cfg on the device. Clearing CFGE
* might not be sufficient, so just clear the entire cfg.
@ -106,16 +110,10 @@ static int ufshcd_clear_keyslot(struct ufs_hba *hba, int slot)
return ufshcd_program_key(hba, &cfg, slot);
}
static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot)
{
struct ufs_hba *hba =
container_of(profile, struct ufs_hba, crypto_profile);
return ufshcd_clear_keyslot(hba, slot);
}
/*
* Reprogram the keyslots if needed, and return true if CRYPTO_GENERAL_ENABLE
* should be used in the host controller initialization sequence.
*/
bool ufshcd_crypto_enable(struct ufs_hba *hba)
{
if (!(hba->caps & UFSHCD_CAP_CRYPTO))
@ -123,6 +121,10 @@ bool ufshcd_crypto_enable(struct ufs_hba *hba)
/* Reset might clear all keys, so reprogram all the keys. */
blk_crypto_reprogram_all_keys(&hba->crypto_profile);
if (hba->quirks & UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE)
return false;
return true;
}
@ -159,6 +161,9 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
int err = 0;
enum blk_crypto_mode_num blk_mode_num;
if (hba->quirks & UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE)
return 0;
/*
* Don't use crypto if either the hardware doesn't advertise the
* standard crypto capability bit *or* if the vendor specific driver
@ -228,9 +233,10 @@ void ufshcd_init_crypto(struct ufs_hba *hba)
if (!(hba->caps & UFSHCD_CAP_CRYPTO))
return;
/* Clear all keyslots - the number of keyslots is (CFGC + 1) */
for (slot = 0; slot < hba->crypto_capabilities.config_count + 1; slot++)
ufshcd_clear_keyslot(hba, slot);
/* Clear all keyslots. */
for (slot = 0; slot < hba->crypto_profile.num_slots; slot++)
hba->crypto_profile.ll_ops.keyslot_evict(&hba->crypto_profile,
NULL, slot);
}
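Note the design choice here: slot clearing now goes through the profile's own ll_ops.keyslot_evict with a NULL key instead of calling the former ufshcd_clear_keyslot() directly, so a vendor profile installed under UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE is honored. In the keyslot-less Exynos FMP case further down, num_slots is 0 and the loop is simply a no-op.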
void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q)

View file

@ -37,6 +37,33 @@ ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp,
h->dunu = cpu_to_le32(upper_32_bits(lrbp->data_unit_num));
}
static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp)
{
struct scsi_cmnd *cmd = lrbp->cmd;
const struct bio_crypt_ctx *crypt_ctx = scsi_cmd_to_rq(cmd)->crypt_ctx;
if (crypt_ctx && hba->vops && hba->vops->fill_crypto_prdt)
return hba->vops->fill_crypto_prdt(hba, crypt_ctx,
lrbp->ucd_prdt_ptr,
scsi_sg_count(cmd));
return 0;
}
static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp)
{
if (!(hba->quirks & UFSHCD_QUIRK_KEYS_IN_PRDT))
return;
if (!(scsi_cmd_to_rq(lrbp->cmd)->crypt_ctx))
return;
/* Zeroize the PRDT because it can contain cryptographic keys. */
memzero_explicit(lrbp->ucd_prdt_ptr,
ufshcd_sg_entry_size(hba) * scsi_sg_count(lrbp->cmd));
}
bool ufshcd_crypto_enable(struct ufs_hba *hba);
int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba);
@ -54,6 +81,15 @@ static inline void
ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp,
struct request_desc_header *h) { }
static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp)
{
return 0;
}
static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp) { }
static inline bool ufshcd_crypto_enable(struct ufs_hba *hba)
{
return false;

View file

@ -64,16 +64,11 @@ void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
struct cq_entry *cqe);
int ufshcd_mcq_init(struct ufs_hba *hba);
void ufshcd_mcq_disable(struct ufs_hba *hba);
int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
struct request *req);
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
struct ufs_hw_queue *hwq);
void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
struct ufs_hw_queue *hwq);
bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd);
@ -255,14 +250,6 @@ static inline int ufshcd_vops_mcq_config_resource(struct ufs_hba *hba)
return -EOPNOTSUPP;
}
static inline int ufshcd_mcq_vops_get_hba_mac(struct ufs_hba *hba)
{
if (hba->vops && hba->vops->get_hba_mac)
return hba->vops->get_hba_mac(hba);
return -EOPNOTSUPP;
}
static inline int ufshcd_mcq_vops_op_runtime_config(struct ufs_hba *hba)
{
if (hba->vops && hba->vops->op_runtime_config)

View file

@ -102,6 +102,9 @@
/* Default RTC update every 10 seconds */
#define UFS_RTC_UPDATE_INTERVAL_MS (10 * MSEC_PER_SEC)
/* bMaxNumOfRTT is equal to two after device manufacturing */
#define DEFAULT_MAX_NUM_RTT 2
/* UFSHC 4.0 compliant HCs support this mode. */
static bool use_mcq_mode = true;
@ -161,8 +164,6 @@ EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
enum {
UFSHCD_MAX_CHANNEL = 0,
UFSHCD_MAX_ID = 1,
UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
};
static const char *const ufshcd_state_name[] = {
@ -452,7 +453,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
if (is_mcq_enabled(hba)) {
if (hba->mcq_enabled) {
struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
hwq_id = hwq->id;
@ -1560,7 +1561,8 @@ static int ufshcd_devfreq_target(struct device *dev,
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
out:
if (sched_clk_scaling_suspend_work && !scale_up)
if (sched_clk_scaling_suspend_work &&
(!scale_up || hba->clk_scaling.suspend_on_no_request))
queue_work(hba->clk_scaling.workq,
&hba->clk_scaling.suspend_work);
@ -2300,7 +2302,7 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_start_monitor(hba, lrbp);
if (is_mcq_enabled(hba)) {
if (hba->mcq_enabled) {
int utrd_size = sizeof(struct utp_transfer_req_desc);
struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr;
struct utp_transfer_req_desc *dest;
@ -2400,11 +2402,13 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;
/* nutrs and nutmrs are 0 based values */
hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1;
hba->nutmrs =
((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
hba->reserved_slot = hba->nutrs - 1;
hba->nortt = FIELD_GET(MASK_NUMBER_OUTSTANDING_RTT, hba->capabilities) + 1;
/* Read crypto capabilities */
err = ufshcd_hba_init_crypto_capabilities(hba);
if (err) {
@ -2636,7 +2640,7 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
return 0;
return ufshcd_crypto_fill_prdt(hba, lrbp);
}
/**
@ -2997,7 +3001,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto out;
}
if (is_mcq_enabled(hba))
if (hba->mcq_enabled)
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
ufshcd_send_command(hba, tag, hwq);
@ -3056,7 +3060,7 @@ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
unsigned long flags;
int err;
if (is_mcq_enabled(hba)) {
if (hba->mcq_enabled) {
/*
* MCQ mode. Clean up the MCQ resources similar to
* what the ufshcd_utrl_clear() does for SDB mode.
@ -3166,7 +3170,7 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
__func__, lrbp->task_tag);
/* MCQ mode */
if (is_mcq_enabled(hba)) {
if (hba->mcq_enabled) {
/* successfully cleared the command, retry if needed */
if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
err = -EAGAIN;
@ -3988,11 +3992,11 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
*/
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {0};
struct uic_command uic_cmd = {
.command = UIC_CMD_DME_LINK_STARTUP,
};
int ret;
uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_dbg(hba->dev,
@ -4010,11 +4014,11 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
*/
static int ufshcd_dme_reset(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {0};
struct uic_command uic_cmd = {
.command = UIC_CMD_DME_RESET,
};
int ret;
uic_cmd.command = UIC_CMD_DME_RESET;
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_err(hba->dev,
@ -4049,11 +4053,11 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
*/
static int ufshcd_dme_enable(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {0};
struct uic_command uic_cmd = {
.command = UIC_CMD_DME_ENABLE,
};
int ret;
uic_cmd.command = UIC_CMD_DME_ENABLE;
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_err(hba->dev,
@ -4106,7 +4110,12 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
u8 attr_set, u32 mib_val, u8 peer)
{
struct uic_command uic_cmd = {0};
struct uic_command uic_cmd = {
.command = peer ? UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET,
.argument1 = attr_sel,
.argument2 = UIC_ARG_ATTR_TYPE(attr_set),
.argument3 = mib_val,
};
static const char *const action[] = {
"dme-set",
"dme-peer-set"
@ -4115,12 +4124,6 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
int ret;
int retries = UFS_UIC_COMMAND_RETRIES;
uic_cmd.command = peer ?
UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
uic_cmd.argument1 = attr_sel;
uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
uic_cmd.argument3 = mib_val;
do {
/* for peer attributes we retry upon failure */
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
@ -4150,7 +4153,10 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
u32 *mib_val, u8 peer)
{
struct uic_command uic_cmd = {0};
struct uic_command uic_cmd = {
.command = peer ? UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET,
.argument1 = attr_sel,
};
static const char *const action[] = {
"dme-get",
"dme-peer-get"
@ -4184,10 +4190,6 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
}
}
uic_cmd.command = peer ?
UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
uic_cmd.argument1 = attr_sel;
do {
/* for peer attributes we retry upon failure */
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
@ -4320,7 +4322,11 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
*/
int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
struct uic_command uic_cmd = {0};
struct uic_command uic_cmd = {
.command = UIC_CMD_DME_SET,
.argument1 = UIC_ARG_MIB(PA_PWRMODE),
.argument3 = mode,
};
int ret;
if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
@ -4333,9 +4339,6 @@ int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
}
}
uic_cmd.command = UIC_CMD_DME_SET;
uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
uic_cmd.argument3 = mode;
ufshcd_hold(hba);
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
ufshcd_release(hba);
@ -4376,13 +4379,14 @@ EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
int ret;
struct uic_command uic_cmd = {0};
struct uic_command uic_cmd = {
.command = UIC_CMD_DME_HIBER_ENTER,
};
ktime_t start = ktime_get();
int ret;
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
@ -4400,13 +4404,14 @@ EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {0};
struct uic_command uic_cmd = {
.command = UIC_CMD_DME_HIBER_EXIT,
};
int ret;
ktime_t start = ktime_get();
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
@ -5476,6 +5481,7 @@ void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
struct scsi_cmnd *cmd = lrbp->cmd;
scsi_dma_unmap(cmd);
ufshcd_crypto_clear_prdt(hba, lrbp);
ufshcd_release(hba);
ufshcd_clk_scaling_update_busy(hba);
}
@ -5558,7 +5564,7 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
u32 tr_doorbell;
struct ufs_hw_queue *hwq;
if (is_mcq_enabled(hba)) {
if (hba->mcq_enabled) {
hwq = &hba->uhq[queue_num];
return ufshcd_mcq_poll_cqe_lock(hba, hwq);
@ -6199,7 +6205,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
{
if (is_mcq_enabled(hba))
if (hba->mcq_enabled)
ufshcd_mcq_compl_pending_transfer(hba, force_compl);
else
ufshcd_transfer_req_compl(hba);
@ -6456,7 +6462,7 @@ static bool ufshcd_abort_one(struct request *rq, void *priv)
*ret ? "failed" : "succeeded");
/* Release cmd in MCQ mode if abort succeeds */
if (is_mcq_enabled(hba) && (*ret == 0)) {
if (hba->mcq_enabled && (*ret == 0)) {
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
if (!hwq)
return 0;
@ -7389,7 +7395,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
goto out;
}
if (is_mcq_enabled(hba)) {
if (hba->mcq_enabled) {
for (pos = 0; pos < hba->nutrs; pos++) {
lrbp = &hba->lrb[pos];
if (ufshcd_cmd_inflight(lrbp->cmd) &&
@ -7485,7 +7491,7 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
*/
dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
__func__, tag);
if (is_mcq_enabled(hba)) {
if (hba->mcq_enabled) {
/* MCQ mode */
if (ufshcd_cmd_inflight(lrbp->cmd)) {
/* sleep for max. 200us same delay as in SDB mode */
@ -7563,7 +7569,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
ufshcd_hold(hba);
if (!is_mcq_enabled(hba)) {
if (!hba->mcq_enabled) {
reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
if (!test_bit(tag, &hba->outstanding_reqs)) {
/* If command is already aborted/completed, return FAILED. */
@ -7596,7 +7602,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
}
hba->req_abort_count++;
if (!is_mcq_enabled(hba) && !(reg & (1 << tag))) {
if (!hba->mcq_enabled && !(reg & (1 << tag))) {
/* only execute this code in single doorbell mode */
dev_err(hba->dev,
"%s: cmd was completed, but without a notifying intr, tag = %d",
@ -7623,7 +7629,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
goto release;
}
if (is_mcq_enabled(hba)) {
if (hba->mcq_enabled) {
/* MCQ mode. Branch off to handle abort for mcq mode */
err = ufshcd_mcq_abort(cmd);
goto release;
@ -8125,6 +8131,38 @@ static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
dev_info->b_ext_iid_en = ext_iid_en;
}
static void ufshcd_set_rtt(struct ufs_hba *hba)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
u32 rtt = 0;
u32 dev_rtt = 0;
int host_rtt_cap = hba->vops && hba->vops->max_num_rtt ?
hba->vops->max_num_rtt : hba->nortt;
/* RTT override makes sense only for UFS-4.0 and above */
if (dev_info->wspecversion < 0x400)
return;
if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &dev_rtt)) {
dev_err(hba->dev, "failed reading bMaxNumOfRTT\n");
return;
}
/* do not override if it was already written */
if (dev_rtt != DEFAULT_MAX_NUM_RTT)
return;
rtt = min_t(int, dev_info->rtt_cap, host_rtt_cap);
if (rtt == dev_rtt)
return;
if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt))
dev_err(hba->dev, "failed writing bMaxNumOfRTT\n");
}
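A concrete example of this negotiation: a UFS 4.0 device fresh from manufacturing reports bMaxNumOfRTT = 2 (DEFAULT_MAX_NUM_RTT), so the override proceeds; with bDeviceRTTCap = 64 and a controller whose NORTT capability field yields hba->nortt = 16 (and no max_num_rtt vop), min_t() picks 16 and that value is written back. Had bMaxNumOfRTT already been changed to anything other than 2, say via the sysfs attribute above, the function leaves it alone.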
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
const struct ufs_dev_quirk *fixups)
{
@ -8260,6 +8298,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
dev_info->rtt_cap = desc_buf[DEVICE_DESC_PARAM_RTT_CAP];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
err = ufshcd_read_string_desc(hba, model_index,
@ -8512,6 +8552,8 @@ static int ufshcd_device_params_init(struct ufs_hba *hba)
goto out;
}
ufshcd_set_rtt(hba);
ufshcd_get_ref_clk_gating_wait(hba);
if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
@ -8642,6 +8684,9 @@ static int ufshcd_alloc_mcq(struct ufs_hba *hba)
if (ret)
goto err;
hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
return 0;
err:
hba->nutrs = old_nutrs;
@ -8663,12 +8708,6 @@ static void ufshcd_config_mcq(struct ufs_hba *hba)
ufshcd_mcq_make_queues_operational(hba);
ufshcd_mcq_config_mac(hba, hba->nutrs);
hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
ufshcd_mcq_enable(hba);
hba->mcq_enabled = true;
dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
@ -8696,8 +8735,10 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
ufshcd_set_link_active(hba);
/* Reconfigure MCQ upon reset */
if (is_mcq_enabled(hba) && !init_dev_params)
if (hba->mcq_enabled && !init_dev_params) {
ufshcd_config_mcq(hba);
ufshcd_mcq_enable(hba);
}
/* Verify device initialization by sending NOP OUT UPIU */
ret = ufshcd_verify_dev_init(hba);
@ -8718,11 +8759,13 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
if (ret)
return ret;
if (is_mcq_supported(hba) && !hba->scsi_host_added) {
ufshcd_mcq_enable(hba);
ret = ufshcd_alloc_mcq(hba);
if (!ret) {
ufshcd_config_mcq(hba);
} else {
/* Continue with SDB mode */
ufshcd_mcq_disable(hba);
use_mcq_mode = false;
dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
ret);
@ -8736,6 +8779,7 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
} else if (is_mcq_supported(hba)) {
/* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */
ufshcd_config_mcq(hba);
ufshcd_mcq_enable(hba);
}
}
@ -8921,8 +8965,6 @@ static const struct scsi_host_template ufshcd_driver_template = {
.eh_timed_out = ufshcd_eh_timed_out,
.this_id = -1,
.sg_tablesize = SG_ALL,
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
.can_queue = UFSHCD_CAN_QUEUE,
.max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
.max_sectors = SZ_1M / SECTOR_SIZE,
.max_host_blocked = 1,
@ -10179,7 +10221,8 @@ void ufshcd_remove(struct ufs_hba *hba)
blk_mq_destroy_queue(hba->tmf_queue);
blk_put_queue(hba->tmf_queue);
blk_mq_free_tag_set(&hba->tmf_tag_set);
scsi_remove_host(hba->host);
if (hba->scsi_host_added)
scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba);
@ -10458,6 +10501,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
dev_err(hba->dev, "scsi_add_host failed\n");
goto out_disable;
}
hba->scsi_host_added = true;
}
hba->tmf_tag_set = (struct blk_mq_tag_set) {
@ -10540,7 +10584,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
free_tmf_tag_set:
blk_mq_free_tag_set(&hba->tmf_tag_set);
out_remove_scsi_host:
scsi_remove_host(hba->host);
if (hba->scsi_host_added)
scsi_remove_host(hba->host);
out_disable:
hba->is_irq_enabled = false;
ufshcd_hba_exit(hba);

View file

@ -8,6 +8,9 @@
*
*/
#include <asm/unaligned.h>
#include <crypto/aes.h>
#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
@ -25,12 +28,13 @@
#include "ufs-exynos.h"
#define DATA_UNIT_SIZE 4096
/*
* Exynos's Vendor specific registers for UFSHCI
*/
#define HCI_TXPRDT_ENTRY_SIZE 0x00
#define PRDT_PREFECT_EN BIT(31)
#define PRDT_SET_SIZE(x) ((x) & 0x1F)
#define HCI_RXPRDT_ENTRY_SIZE 0x04
#define HCI_1US_TO_CNT_VAL 0x0C
#define CNT_VAL_1US_MASK 0x3FF
@ -1043,8 +1047,8 @@ static int exynos_ufs_post_link(struct ufs_hba *hba)
exynos_ufs_fit_aggr_timeout(ufs);
hci_writel(ufs, 0xa, HCI_DATA_REORDER);
hci_writel(ufs, PRDT_SET_SIZE(12), HCI_TXPRDT_ENTRY_SIZE);
hci_writel(ufs, PRDT_SET_SIZE(12), HCI_RXPRDT_ENTRY_SIZE);
hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_TXPRDT_ENTRY_SIZE);
hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_RXPRDT_ENTRY_SIZE);
hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN);
@ -1151,6 +1155,227 @@ static inline void exynos_ufs_priv_init(struct ufs_hba *hba,
hba->quirks = ufs->drv_data->quirks;
}
#ifdef CONFIG_SCSI_UFS_CRYPTO
/*
* Support for Flash Memory Protector (FMP), which is the inline encryption
* hardware on Exynos and Exynos-based SoCs. The interface to this hardware is
* not compatible with the standard UFS crypto. It requires that encryption be
* configured in the PRDT using a nonstandard extension.
*/
enum fmp_crypto_algo_mode {
FMP_BYPASS_MODE = 0,
FMP_ALGO_MODE_AES_CBC = 1,
FMP_ALGO_MODE_AES_XTS = 2,
};
enum fmp_crypto_key_length {
FMP_KEYLEN_256BIT = 1,
};
/**
* struct fmp_sg_entry - nonstandard format of PRDT entries when FMP is enabled
*
* @base: The standard PRDT entry, but with nonstandard bitfields in the high
* bits of the 'size' field, i.e. the last 32-bit word. When these
* nonstandard bitfields are zero, the data segment won't be encrypted or
* decrypted. Otherwise they specify the algorithm and key length with
* which the data segment will be encrypted or decrypted.
* @file_iv: The initialization vector (IV) with all bytes reversed
* @file_enckey: The first half of the AES-XTS key with all bytes reversed
* @file_twkey: The second half of the AES-XTS key with all bytes reversed
* @disk_iv: Unused
* @reserved: Unused
*/
struct fmp_sg_entry {
struct ufshcd_sg_entry base;
__be64 file_iv[2];
__be64 file_enckey[4];
__be64 file_twkey[4];
__be64 disk_iv[2];
__be64 reserved[2];
};
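The arithmetic behind the 128-byte BUILD_BUG_ON below: the fourteen __be64 fields (2 + 4 + 4 + 2 + 2) account for 112 bytes, and the embedded standard ufshcd_sg_entry contributes the remaining 16, which is why the init path must switch the controller to the larger descriptor type (CFG_DESCTYPE_3) and call ufshcd_set_sg_entry_size() to match.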
#define SMC_CMD_FMP_SECURITY \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \
ARM_SMCCC_OWNER_SIP, 0x1810)
#define SMC_CMD_SMU \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \
ARM_SMCCC_OWNER_SIP, 0x1850)
#define SMC_CMD_FMP_SMU_RESUME \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \
ARM_SMCCC_OWNER_SIP, 0x1860)
#define SMU_EMBEDDED 0
#define SMU_INIT 0
#define CFG_DESCTYPE_3 3
static void exynos_ufs_fmp_init(struct ufs_hba *hba, struct exynos_ufs *ufs)
{
struct blk_crypto_profile *profile = &hba->crypto_profile;
struct arm_smccc_res res;
int err;
/*
* Check for the standard crypto support bit, since it's available even
* though the rest of the interface to FMP is nonstandard.
*
* This check should have the effect of preventing the driver from
* trying to use FMP on old Exynos SoCs that don't have FMP.
*/
if (!(ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES) &
MASK_CRYPTO_SUPPORT))
return;
/*
* The below sequence of SMC calls to enable FMP can be found in the
* downstream driver source for gs101 and other Exynos-based SoCs. It
* is the only way to enable FMP that works on SoCs such as gs101 that
* don't make the FMP registers accessible to Linux. It probably works
* on other Exynos-based SoCs too, and might even still be the only way
* that works. But this hasn't been properly tested, and this code is
* mutually exclusive with exynos_ufs_config_smu(). So for now only
* enable FMP support on SoCs with EXYNOS_UFS_OPT_UFSPR_SECURE.
*/
if (!(ufs->opts & EXYNOS_UFS_OPT_UFSPR_SECURE))
return;
/*
* This call (which sets DESCTYPE to 0x3 in the FMPSECURITY0 register)
* is needed to make the hardware use the larger PRDT entry size.
*/
BUILD_BUG_ON(sizeof(struct fmp_sg_entry) != 128);
arm_smccc_smc(SMC_CMD_FMP_SECURITY, 0, SMU_EMBEDDED, CFG_DESCTYPE_3,
0, 0, 0, 0, &res);
if (res.a0) {
dev_warn(hba->dev,
"SMC_CMD_FMP_SECURITY failed on init: %ld. Disabling FMP support.\n",
res.a0);
return;
}
ufshcd_set_sg_entry_size(hba, sizeof(struct fmp_sg_entry));
/*
* This is needed to initialize FMP. Without it, errors occur when
* inline encryption is used.
*/
arm_smccc_smc(SMC_CMD_SMU, SMU_INIT, SMU_EMBEDDED, 0, 0, 0, 0, 0, &res);
if (res.a0) {
dev_err(hba->dev,
"SMC_CMD_SMU(SMU_INIT) failed: %ld. Disabling FMP support.\n",
res.a0);
return;
}
/* Advertise crypto capabilities to the block layer. */
err = devm_blk_crypto_profile_init(hba->dev, profile, 0);
if (err) {
/* Only ENOMEM should be possible here. */
dev_err(hba->dev, "Failed to initialize crypto profile: %d\n",
err);
return;
}
profile->max_dun_bytes_supported = AES_BLOCK_SIZE;
profile->dev = hba->dev;
profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] =
DATA_UNIT_SIZE;
/* Advertise crypto support to ufshcd-core. */
hba->caps |= UFSHCD_CAP_CRYPTO;
/* Advertise crypto quirks to ufshcd-core. */
hba->quirks |= UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE |
UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE |
UFSHCD_QUIRK_KEYS_IN_PRDT;
}
static void exynos_ufs_fmp_resume(struct ufs_hba *hba)
{
struct arm_smccc_res res;
arm_smccc_smc(SMC_CMD_FMP_SECURITY, 0, SMU_EMBEDDED, CFG_DESCTYPE_3,
0, 0, 0, 0, &res);
if (res.a0)
dev_err(hba->dev,
"SMC_CMD_FMP_SECURITY failed on resume: %ld\n", res.a0);
arm_smccc_smc(SMC_CMD_FMP_SMU_RESUME, 0, SMU_EMBEDDED, 0, 0, 0, 0, 0,
&res);
if (res.a0)
dev_err(hba->dev,
"SMC_CMD_FMP_SMU_RESUME failed: %ld\n", res.a0);
}
static inline __be64 fmp_key_word(const u8 *key, int j)
{
return cpu_to_be64(get_unaligned_le64(
key + AES_KEYSIZE_256 - (j + 1) * sizeof(u64)));
}
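To see that this implements the "all bytes reversed" layout: for a 32-byte key k[0..31] and j = 0, get_unaligned_le64() reads k[24..31] least-significant-byte first and cpu_to_be64() stores that value most-significant-byte first, so the output begins k[31], k[30], ..., k[24]; iterating j = 0..3 emits the whole key in reverse byte order.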
/* Fill the PRDT for a request according to the given encryption context. */
static int exynos_ufs_fmp_fill_prdt(struct ufs_hba *hba,
const struct bio_crypt_ctx *crypt_ctx,
void *prdt, unsigned int num_segments)
{
struct fmp_sg_entry *fmp_prdt = prdt;
const u8 *enckey = crypt_ctx->bc_key->raw;
const u8 *twkey = enckey + AES_KEYSIZE_256;
u64 dun_lo = crypt_ctx->bc_dun[0];
u64 dun_hi = crypt_ctx->bc_dun[1];
unsigned int i;
/* If FMP wasn't enabled, we shouldn't get any encrypted requests. */
if (WARN_ON_ONCE(!(hba->caps & UFSHCD_CAP_CRYPTO)))
return -EIO;
/* Configure FMP on each segment of the request. */
for (i = 0; i < num_segments; i++) {
struct fmp_sg_entry *prd = &fmp_prdt[i];
int j;
/* Each segment must be exactly one data unit. */
if (prd->base.size != cpu_to_le32(DATA_UNIT_SIZE - 1)) {
dev_err(hba->dev,
"data segment is misaligned for FMP\n");
return -EIO;
}
/* Set the algorithm and key length. */
prd->base.size |= cpu_to_le32((FMP_ALGO_MODE_AES_XTS << 28) |
(FMP_KEYLEN_256BIT << 26));
/* Set the IV. */
prd->file_iv[0] = cpu_to_be64(dun_hi);
prd->file_iv[1] = cpu_to_be64(dun_lo);
/* Set the key. */
for (j = 0; j < AES_KEYSIZE_256 / sizeof(u64); j++) {
prd->file_enckey[j] = fmp_key_word(enckey, j);
prd->file_twkey[j] = fmp_key_word(twkey, j);
}
/* Increment the data unit number. */
dun_lo++;
if (dun_lo == 0)
dun_hi++;
}
return 0;
}
#else /* CONFIG_SCSI_UFS_CRYPTO */
static void exynos_ufs_fmp_init(struct ufs_hba *hba, struct exynos_ufs *ufs)
{
}
static void exynos_ufs_fmp_resume(struct ufs_hba *hba)
{
}
#define exynos_ufs_fmp_fill_prdt NULL
#endif /* !CONFIG_SCSI_UFS_CRYPTO */
static int exynos_ufs_init(struct ufs_hba *hba)
{
struct device *dev = hba->dev;
@ -1198,6 +1423,8 @@ static int exynos_ufs_init(struct ufs_hba *hba)
exynos_ufs_priv_init(hba, ufs);
exynos_ufs_fmp_init(hba, ufs);
if (ufs->drv_data->drv_init) {
ret = ufs->drv_data->drv_init(dev, ufs);
if (ret) {
@ -1213,7 +1440,7 @@ static int exynos_ufs_init(struct ufs_hba *hba)
if (!(ufs->opts & EXYNOS_UFS_OPT_UFSPR_SECURE))
exynos_ufs_config_smu(ufs);
hba->host->dma_alignment = SZ_4K - 1;
hba->host->dma_alignment = DATA_UNIT_SIZE - 1;
return 0;
out:
@ -1332,7 +1559,7 @@ static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba,
* (ufshcd_async_scan()). Note: this callback may also be called
* from other functions than ufshcd_init().
*/
hba->host->max_segment_size = SZ_4K;
hba->host->max_segment_size = DATA_UNIT_SIZE;
if (ufs->drv_data->pre_hce_enable) {
ret = ufs->drv_data->pre_hce_enable(ufs);
@ -1432,7 +1659,7 @@ static int exynos_ufs_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
phy_power_on(ufs->phy);
exynos_ufs_config_smu(ufs);
exynos_ufs_fmp_resume(hba);
return 0;
}
@ -1698,6 +1925,7 @@ static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
.hibern8_notify = exynos_ufs_hibern8_notify,
.suspend = exynos_ufs_suspend,
.resume = exynos_ufs_resume,
.fill_crypto_prdt = exynos_ufs_fmp_fill_prdt,
};
static struct ufs_hba_variant_ops ufs_hba_exynosauto_vh_ops = {

View file

@ -693,7 +693,7 @@ static void ufs_mtk_mcq_disable_irq(struct ufs_hba *hba)
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
u32 irq, i;
if (!is_mcq_enabled(hba))
if (!hba->mcq_enabled)
return;
if (host->mcq_nr_intr == 0)
@ -711,7 +711,7 @@ static void ufs_mtk_mcq_enable_irq(struct ufs_hba *hba)
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
u32 irq, i;
if (!is_mcq_enabled(hba))
if (!hba->mcq_enabled)
return;
if (host->mcq_nr_intr == 0)
@ -1308,7 +1308,7 @@ static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
if (err)
return err;
if (is_mcq_enabled(hba)) {
if (hba->mcq_enabled) {
ufs_mtk_config_mcq(hba, false);
ufshcd_mcq_make_queues_operational(hba);
ufshcd_mcq_config_mac(hba, hba->nutrs);
@ -1785,6 +1785,7 @@ static int ufs_mtk_config_esi(struct ufs_hba *hba)
*/
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.name = "mediatek.ufshci",
.max_num_rtt = MTK_MAX_NUM_RTT,
.init = ufs_mtk_init,
.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
.setup_clocks = ufs_mtk_setup_clocks,

View file

@ -189,4 +189,7 @@ struct ufs_mtk_host {
/* MTK delay of autosuspend: 500 ms */
#define MTK_RPM_AUTOSUSPEND_DELAY_MS 500
/* MTK RTT support number */
#define MTK_MAX_NUM_RTT 2
#endif /* !_UFS_MEDIATEK_H */

View file

@ -1548,6 +1548,8 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
p->timer = DEVFREQ_TIMER_DELAYED;
d->upthreshold = 70;
d->downdifferential = 5;
hba->clk_scaling.suspend_on_no_request = true;
}
#else
static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
@ -1883,4 +1885,5 @@ static struct platform_driver ufs_qcom_pltform = {
};
module_platform_driver(ufs_qcom_pltform);
MODULE_DESCRIPTION("Qualcomm UFS host controller driver");
MODULE_LICENSE("GPL v2");

View file

@ -20,6 +20,8 @@
#include <linux/acpi.h>
#include <linux/gpio/consumer.h>
#define MAX_SUPP_MAC 64
struct ufs_host {
void (*late_init)(struct ufs_hba *hba);
};
@ -446,6 +448,49 @@ static int ufs_intel_mtl_init(struct ufs_hba *hba)
return ufs_intel_common_init(hba);
}
static int ufs_qemu_get_hba_mac(struct ufs_hba *hba)
{
return MAX_SUPP_MAC;
}
static int ufs_qemu_mcq_config_resource(struct ufs_hba *hba)
{
hba->mcq_base = hba->mmio_base + ufshcd_mcq_queue_cfg_addr(hba);
return 0;
}
static int ufs_qemu_op_runtime_config(struct ufs_hba *hba)
{
struct ufshcd_mcq_opr_info_t *opr;
int i;
u32 sqdao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_SQDAO, 0));
u32 sqisao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_SQISAO, 0));
u32 cqdao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_CQDAO, 0));
u32 cqisao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_CQISAO, 0));
hba->mcq_opr[OPR_SQD].offset = sqdao;
hba->mcq_opr[OPR_SQIS].offset = sqisao;
hba->mcq_opr[OPR_CQD].offset = cqdao;
hba->mcq_opr[OPR_CQIS].offset = cqisao;
for (i = 0; i < OPR_MAX; i++) {
opr = &hba->mcq_opr[i];
opr->stride = 48;
opr->base = hba->mmio_base + opr->offset;
}
return 0;
}
static struct ufs_hba_variant_ops ufs_qemu_hba_vops = {
.name = "qemu-pci",
.get_hba_mac = ufs_qemu_get_hba_mac,
.mcq_config_resource = ufs_qemu_mcq_config_resource,
.op_runtime_config = ufs_qemu_op_runtime_config,
};
static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
.name = "intel-pci",
.init = ufs_intel_common_init,
@ -591,7 +636,8 @@ static const struct dev_pm_ops ufshcd_pci_pm_ops = {
};
static const struct pci_device_id ufshcd_pci_tbl[] = {
{ PCI_VENDOR_ID_REDHAT, 0x0013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_REDHAT, 0x0013, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
(kernel_ulong_t)&ufs_qemu_hba_vops },
{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
@ -602,6 +648,7 @@ static const struct pci_device_id ufshcd_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
{ PCI_VDEVICE(INTEL, 0xA847), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x7747), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
{ PCI_VDEVICE(INTEL, 0xE447), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
{ } /* terminate list */
};

View file

@ -296,6 +296,7 @@ struct mpi3mr_hdb_entry {
* multiple hdb entries.
*
* @num_hdb_types: Number of host diag buffer types supported
* @element_trigger_format: Element trigger format
* @rsvd1: Reserved
* @rsvd2: Reserved
* @rsvd3: Reserved
@ -303,7 +304,7 @@ struct mpi3mr_hdb_entry {
*/
struct mpi3mr_bsg_in_hdb_status {
__u8 num_hdb_types;
__u8 rsvd1;
__u8 element_trigger_format;
__u16 rsvd2;
__u32 rsvd3;
struct mpi3mr_hdb_entry entry[1];

View file

@ -592,6 +592,8 @@ struct ufs_dev_info {
enum ufs_rtc_time rtc_type;
time64_t rtc_time_baseline;
u32 rtc_update_period;
u8 rtt_cap; /* bDeviceRTTCap */
};
/*

View file

@ -73,8 +73,8 @@ enum ufs_event_type {
* @done: UIC command completion
*/
struct uic_command {
u32 command;
u32 argument1;
const u32 command;
const u32 argument1;
u32 argument2;
u32 argument3;
int cmd_active;
@ -295,6 +295,7 @@ struct ufs_pwr_mode_info {
/**
* struct ufs_hba_variant_ops - variant specific callbacks
* @name: variant name
* @max_num_rtt: maximum number of outstanding RTTs supported by the host
* @init: called when the driver is initialized
* @exit: called to cleanup everything done in init
* @get_ufs_hci_version: called to get UFS HCI version
@ -321,10 +322,13 @@ struct ufs_pwr_mode_info {
* @device_reset: called to issue a reset pulse on the UFS device
* @config_scaling_param: called to configure clock scaling parameters
* @program_key: program or evict an inline encryption key
* @fill_crypto_prdt: initialize crypto-related fields in the PRDT
* @event_notify: called to notify important events
* @reinit_notify: called to notify reinit of UFSHCD during max gear switch
* @mcq_config_resource: called to configure MCQ platform resources
* @get_hba_mac: called to get vendor specific mac value, mandatory for mcq mode
* @get_hba_mac: reports maximum number of outstanding commands supported by
* the controller. Should be implemented for UFSHCI 4.0 or later
* controllers that are not compliant with the UFSHCI 4.0 specification.
* @op_runtime_config: called to configure the MCQ operation and runtime register pointers
* @get_outstanding_cqs: called to get outstanding completion queues
* @config_esi: called to configure Event Specific Interrupt (ESI)
@ -332,6 +336,7 @@ struct ufs_pwr_mode_info {
*/
struct ufs_hba_variant_ops {
const char *name;
int max_num_rtt;
int (*init)(struct ufs_hba *);
void (*exit)(struct ufs_hba *);
u32 (*get_ufs_hci_version)(struct ufs_hba *);
@ -365,6 +370,9 @@ struct ufs_hba_variant_ops {
struct devfreq_simple_ondemand_data *data);
int (*program_key)(struct ufs_hba *hba,
const union ufs_crypto_cfg_entry *cfg, int slot);
int (*fill_crypto_prdt)(struct ufs_hba *hba,
const struct bio_crypt_ctx *crypt_ctx,
void *prdt, unsigned int num_segments);
void (*event_notify)(struct ufs_hba *hba,
enum ufs_event_type evt, void *data);
void (*reinit_notify)(struct ufs_hba *);
@ -457,6 +465,7 @@ struct ufs_clk_scaling {
bool is_initialized;
bool is_busy_started;
bool is_suspended;
bool suspend_on_no_request;
};
#define UFS_EVENT_HIST_LENGTH 8
@ -643,6 +652,30 @@ enum ufshcd_quirks {
* thus need this quirk to skip related flow.
*/
UFSHCD_QUIRK_MCQ_BROKEN_RTC = 1 << 21,
/*
* This quirk needs to be enabled if the host controller supports inline
* encryption but it needs to initialize the crypto capabilities in a
* nonstandard way and/or needs to override blk_crypto_ll_ops. If
* enabled, the standard code won't initialize the blk_crypto_profile;
* ufs_hba_variant_ops::init() must do it instead.
*/
UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE = 1 << 22,
/*
* This quirk needs to be enabled if the host controller supports inline
* encryption but does not support the CRYPTO_GENERAL_ENABLE bit, i.e.
* host controller initialization fails if that bit is set.
*/
UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE = 1 << 23,
/*
* This quirk needs to be enabled if the host controller driver copies
* cryptographic keys into the PRDT in order to send them to hardware,
* and therefore the PRDT should be zeroized after each request (as per
* the standard best practice for managing keys).
*/
UFSHCD_QUIRK_KEYS_IN_PRDT = 1 << 24,
};
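Taken together, these three quirks let a variant driver supply its own inline-encryption wiring; the Exynos FMP code earlier in this diff is the in-tree user. A minimal sketch of how a variant init might set them up, assuming a hypothetical my_ufs_init() and vendor keyslot ops my_ll_ops (both names illustrative, not part of this series):

static int my_ufs_init(struct ufs_hba *hba)
{
	int err;

	/*
	 * The core skips profile setup under _CUSTOM_CRYPTO_PROFILE,
	 * so the variant initializes it itself (0 keyslots here, as
	 * the keys-in-PRDT model needs none).
	 */
	err = devm_blk_crypto_profile_init(hba->dev,
					   &hba->crypto_profile, 0);
	if (err)
		return err;
	hba->crypto_profile.ll_ops = my_ll_ops;	/* hypothetical vendor ops */
	hba->crypto_profile.dev = hba->dev;

	hba->caps |= UFSHCD_CAP_CRYPTO;
	hba->quirks |= UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE |
		       UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE |
		       UFSHCD_QUIRK_KEYS_IN_PRDT;
	return 0;
}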
enum ufshcd_caps {
@ -819,6 +852,7 @@ enum ufshcd_mcq_opr {
* @capabilities: UFS Controller Capabilities
* @mcq_capabilities: UFS Multi Circular Queue capabilities
* @nutrs: Transfer Request Queue depth supported by controller
* @nortt: Max outstanding RTTs supported by controller
* @nutmrs: Task Management Queue depth supported by controller
* @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock.
* @ufs_version: UFS Version to which controller complies
@ -957,6 +991,7 @@ struct ufs_hba {
u32 capabilities;
int nutrs;
int nortt;
u32 mcq_capabilities;
int nutmrs;
u32 reserved_slot;
@ -1126,9 +1161,17 @@ struct ufs_hw_queue {
struct mutex sq_mutex;
};
static inline bool is_mcq_enabled(struct ufs_hba *hba)
#define MCQ_QCFG_SIZE 0x40
static inline unsigned int ufshcd_mcq_opr_offset(struct ufs_hba *hba,
enum ufshcd_mcq_opr opr, int idx)
{
return hba->mcq_enabled;
return hba->mcq_opr[opr].offset + hba->mcq_opr[opr].stride * idx;
}
static inline unsigned int ufshcd_mcq_cfg_offset(unsigned int reg, int idx)
{
return reg + MCQ_QCFG_SIZE * idx;
}
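A quick worked example of the two helpers that replace the old MCQ_CFG_n() and MCQ_OPR_OFFSET_n() macros: with MCQ_QCFG_SIZE = 0x40, queue 3's submission-queue base register sits at ufshcd_mcq_cfg_offset(REG_SQLBA, 3) = REG_SQLBA + 0xC0, while the operation/runtime registers use the per-controller layout discovered at probe, i.e. ufshcd_mcq_opr_offset(hba, OPR_SQD, 3) = mcq_opr[OPR_SQD].offset + 3 * mcq_opr[OPR_SQD].stride.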
#ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
@ -1261,6 +1304,7 @@ void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
void ufshcd_hba_stop(struct ufs_hba *hba);
void ufshcd_schedule_eh_work(struct ufs_hba *hba);
void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
unsigned int ufshcd_mcq_queue_cfg_addr(struct ufs_hba *hba);
u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,

View file

@ -67,7 +67,9 @@ enum {
/* Controller capability masks */
enum {
MASK_TRANSFER_REQUESTS_SLOTS = 0x0000001F,
MASK_TRANSFER_REQUESTS_SLOTS_SDB = 0x0000001F,
MASK_TRANSFER_REQUESTS_SLOTS_MCQ = 0x000000FF,
MASK_NUMBER_OUTSTANDING_RTT = 0x0000FF00,
MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000,
MASK_EHSLUTRD_SUPPORTED = 0x00400000,
MASK_AUTO_HIBERN8_SUPPORT = 0x00800000,