SCSI fixes on 20220128

Sixteen patches, mostly minor fixes and updates; however there are
 substantive driver bug fixes in pm8001, bnx2fc, zfcp, myrs and qedf.
 
 Signed-off-by: James E.J. Bottomley <jejb@linux.ibm.com>
 -----BEGIN PGP SIGNATURE-----
 
 iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCYfQhcSYcamFtZXMuYm90
 dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pishU8AAQCM0USn
 hET4tokGXRSqnVbf2RpG7PNYiJoQgDT5VfNvaQD/QM5vtPXf95LuEyy+Zm4g0XGk
 dczwQK8gUhC2h/hTbS0=
 =dkxX
 -----END PGP SIGNATURE-----

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "Sixteen patches, mostly minor fixes and updates; however there are
  substantive driver bug fixes in pm8001, bnx2fc, zfcp, myrs and qedf"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: myrs: Fix crash in error case
  scsi: 53c700: Remove redundant assignment to pointer SCp
  scsi: ufs: Treat link loss as fatal error
  scsi: ufs: Use generic error code in ufshcd_set_dev_pwr_mode()
  scsi: bfa: Remove useless DMA-32 fallback configuration
  scsi: hisi_sas: Remove useless DMA-32 fallback configuration
  scsi: 3w-sas: Remove useless DMA-32 fallback configuration
  scsi: bnx2fc: Flush destroy_work queue before calling bnx2fc_interface_put()
  scsi: zfcp: Fix failed recovery on gone remote port with non-NPIV FCP devices
  scsi: pm8001: Fix bogus FW crash for maxcpus=1
  scsi: qedf: Change context reset messages to ratelimited
  scsi: qedf: Fix refcount issue when LOGO is received during TMF
  scsi: qedf: Add stag_work to all the vports
  scsi: ufs: ufshcd-pltfrm: Check the return value of devm_kstrdup()
  scsi: target: iscsi: Make sure the np under each tpg is unique
  scsi: elx: efct: Don't use GFP_KERNEL under spin lock
Linus Torvalds 2022-01-28 21:17:58 +02:00
commit 169387e2aa
17 changed files with 64 additions and 48 deletions

drivers/s390/scsi/zfcp_fc.c

@@ -521,6 +521,8 @@ static void zfcp_fc_adisc_handler(void *data)
 		goto out;
 	}
 
+	/* re-init to undo drop from zfcp_fc_adisc() */
+	port->d_id = ntoh24(adisc_resp->adisc_port_id);
 	/* port is good, unblock rport without going through erp */
 	zfcp_scsi_schedule_rport_register(port);
 out:
@@ -534,6 +536,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
 	struct zfcp_fc_req *fc_req;
 	struct zfcp_adapter *adapter = port->adapter;
 	struct Scsi_Host *shost = adapter->scsi_host;
	u32 d_id;
 	int ret;
 
 	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
@@ -558,7 +561,15 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
 	fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
 	hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
 
-	ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
+	d_id = port->d_id;	/* remember as destination for send els below */
+	/*
+	 * Force fresh GID_PN lookup on next port recovery.
+	 * Must happen after request setup and before sending request,
+	 * to prevent race with port->d_id re-init in zfcp_fc_adisc_handler().
+	 */
+	port->d_id = 0;
+
+	ret = zfcp_fsf_send_els(adapter, d_id, &fc_req->ct_els,
 				ZFCP_FC_CTELS_TMO);
 	if (ret)
 		kmem_cache_free(zfcp_fc_req_cache, fc_req);
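
Note: the fix above is a snapshot-then-invalidate scheme: zfcp_fc_adisc() snapshots the cached destination ID, zeroes port->d_id so a dead remote port forces a fresh GID_PN lookup on the next recovery, and the handler re-initializes it only when the peer actually answers. A standalone C sketch of that ordering (illustrative names, not the zfcp API):

#include <stdio.h>

struct port { unsigned int d_id; };	/* cached destination ID */

/* completion handler: runs only if the peer answered ADISC */
static void adisc_handler(struct port *port, unsigned int resp_id)
{
	port->d_id = resp_id;	/* re-init to undo the drop below */
}

static void send_adisc(struct port *port)
{
	unsigned int d_id = port->d_id;	/* remember the destination */

	port->d_id = 0;	/* drop before sending: forces fresh lookup on failure */
	printf("ADISC to 0x%06x\n", d_id);	/* stand-in for the send */
}

int main(void)
{
	struct port port = { .d_id = 0x012345 };

	send_adisc(&port);
	adisc_handler(&port, 0x012345);	/* peer responded: restore d_id */
	printf("d_id restored: 0x%06x\n", port.d_id);
	return 0;
}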

drivers/scsi/3w-sas.c

@@ -1567,8 +1567,6 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 	pci_try_set_mwi(pdev);
 
 	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-	if (retval)
-		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (retval) {
 		TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
 		retval = -ENODEV;
@@ -1786,8 +1784,6 @@ static int __maybe_unused twl_resume(struct device *dev)
 	pci_try_set_mwi(pdev);
 
 	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-	if (retval)
-		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (retval) {
 		TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
 		retval = -ENODEV;
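
Note: this hunk and the bfa/hisi_sas hunks below delete the same dead code. On current kernels dma_set_mask_and_coherent() with a 64-bit mask fails only when the device has no usable dma_mask at all, and in that case the 32-bit retry fails identically, so the fallback can never rescue a failed first call. A stubbed userspace model of that reasoning (the dma_set_mask_and_coherent() below is a mock, not the kernel implementation):

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) ((n) == 64 ? ~0ULL : (1ULL << (n)) - 1)

struct device { uint64_t *dma_mask; };

/* mock: models the only failure mode, a missing dma_mask pointer */
static int dma_set_mask_and_coherent(struct device *dev, uint64_t mask)
{
	if (!dev->dma_mask)
		return -5;	/* fails for ANY mask width, so no retry helps */
	*dev->dma_mask = mask;
	return 0;
}

int main(void)
{
	uint64_t mask;
	struct device ok = { .dma_mask = &mask };
	struct device dead = { .dma_mask = NULL };

	printf("64-bit on ok: %d\n", dma_set_mask_and_coherent(&ok, DMA_BIT_MASK(64)));
	printf("64-bit on dead: %d\n", dma_set_mask_and_coherent(&dead, DMA_BIT_MASK(64)));
	printf("32-bit on dead: %d\n", dma_set_mask_and_coherent(&dead, DMA_BIT_MASK(32)));
	return 0;
}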

drivers/scsi/53c700.c

@@ -1507,7 +1507,6 @@ NCR_700_intr(int irq, void *dev_id)
 		struct scsi_cmnd *SCp = hostdata->cmd;
 
 		handled = 1;
-		SCp = hostdata->cmd;
 
 		if(istat & SCSI_INT_PENDING) {
 			udelay(10);

drivers/scsi/bfa/bfad.c

@@ -732,9 +732,6 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
 	pci_set_master(pdev);
 
 	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-	if (rc)
-		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-
 	if (rc) {
 		rc = -ENODEV;
 		printk(KERN_ERR "dma_set_mask_and_coherent fail %p\n", pdev);
@@ -1559,9 +1556,6 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
 
 	pci_set_master(pdev);
 
 	rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64));
-	if (rc)
-		rc = dma_set_mask_and_coherent(&bfad->pcidev->dev,
-					       DMA_BIT_MASK(32));
 	if (rc)
 		goto out_disable_device;

drivers/scsi/bnx2fc/bnx2fc_fcoe.c

@@ -82,7 +82,7 @@ static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
 static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
 static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
 				  struct device *parent, int npiv);
-static void bnx2fc_destroy_work(struct work_struct *work);
+static void bnx2fc_port_destroy(struct fcoe_port *port);
 
 static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
 static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
@@ -907,9 +907,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
 			__bnx2fc_destroy(interface);
 		}
 		mutex_unlock(&bnx2fc_dev_lock);
-
-		/* Ensure ALL destroy work has been completed before return */
-		flush_workqueue(bnx2fc_wq);
 		return;
 
 	default:
@@ -1215,8 +1212,8 @@ static int bnx2fc_vport_destroy(struct fc_vport *vport)
 	mutex_unlock(&n_port->lp_mutex);
 	bnx2fc_free_vport(interface->hba, port->lport);
 	bnx2fc_port_shutdown(port->lport);
+	bnx2fc_port_destroy(port);
 	bnx2fc_interface_put(interface);
-	queue_work(bnx2fc_wq, &port->destroy_work);
 	return 0;
 }
 
@@ -1525,7 +1522,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
 	port->lport = lport;
 	port->priv = interface;
 	port->get_netdev = bnx2fc_netdev;
-	INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
 
 	/* Configure fcoe_port */
 	rc = bnx2fc_lport_config(lport);
@@ -1653,8 +1649,8 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
 	bnx2fc_interface_cleanup(interface);
 	bnx2fc_stop(interface);
 	list_del(&interface->list);
+	bnx2fc_port_destroy(port);
 	bnx2fc_interface_put(interface);
-	queue_work(bnx2fc_wq, &port->destroy_work);
 }
 
 /**
@@ -1694,15 +1690,12 @@ static int bnx2fc_destroy(struct net_device *netdev)
 	return rc;
 }
 
-static void bnx2fc_destroy_work(struct work_struct *work)
+static void bnx2fc_port_destroy(struct fcoe_port *port)
 {
-	struct fcoe_port *port;
 	struct fc_lport *lport;
 
-	port = container_of(work, struct fcoe_port, destroy_work);
 	lport = port->lport;
-
-	BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
+	BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, lport);
 
 	bnx2fc_if_destroy(lport);
 }
@@ -2556,9 +2549,6 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
 			__bnx2fc_destroy(interface);
 	mutex_unlock(&bnx2fc_dev_lock);
 
-	/* Ensure ALL destroy work has been completed before return */
-	flush_workqueue(bnx2fc_wq);
-
 	bnx2fc_ulp_stop(hba);
 	/* unregister cnic device */
 	if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
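
Note: the bnx2fc change replaces queue_work() on destroy_work (plus flush_workqueue() calls at a distance) with a direct, synchronous bnx2fc_port_destroy(), so the port teardown always finishes before bnx2fc_interface_put() can release the interface it depends on. A standalone sketch of why that ordering matters (hypothetical types, not the driver's):

#include <stdio.h>
#include <stdlib.h>

struct iface { int refcnt; };
struct port { struct iface *iface; };

static void iface_put(struct iface *i)
{
	if (--i->refcnt == 0) {
		printf("interface freed\n");
		free(i);
	}
}

/* synchronous replacement for the old queued destroy work */
static void port_destroy(struct port *p)
{
	/* safe: runs before the caller's iface_put(), so iface is alive */
	printf("destroying port (iface refcnt %d)\n", p->iface->refcnt);
	free(p);
}

int main(void)
{
	struct iface *i = malloc(sizeof(*i));
	struct port *p = malloc(sizeof(*p));

	i->refcnt = 1;
	p->iface = i;

	port_destroy(p);	/* completes now, not on a workqueue later */
	iface_put(i);		/* only then may the interface go away */
	return 0;
}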

drivers/scsi/elx/libefc/efc_els.c

@@ -46,18 +46,14 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
 
 	efc = node->efc;
 
-	spin_lock_irqsave(&node->els_ios_lock, flags);
-
 	if (!node->els_io_enabled) {
 		efc_log_err(efc, "els io alloc disabled\n");
-		spin_unlock_irqrestore(&node->els_ios_lock, flags);
 		return NULL;
 	}
 
 	els = mempool_alloc(efc->els_io_pool, GFP_ATOMIC);
 	if (!els) {
 		atomic_add_return(1, &efc->els_io_alloc_failed_count);
-		spin_unlock_irqrestore(&node->els_ios_lock, flags);
 		return NULL;
 	}
 
@@ -74,7 +70,6 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
 					      &els->io.req.phys, GFP_KERNEL);
 	if (!els->io.req.virt) {
 		mempool_free(els, efc->els_io_pool);
-		spin_unlock_irqrestore(&node->els_ios_lock, flags);
 		return NULL;
 	}
 
@@ -94,10 +89,11 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
 
 		/* add els structure to ELS IO list */
 		INIT_LIST_HEAD(&els->list_entry);
+
+		spin_lock_irqsave(&node->els_ios_lock, flags);
 		list_add_tail(&els->list_entry, &node->els_ios_list);
+		spin_unlock_irqrestore(&node->els_ios_lock, flags);
 	}
 
-	spin_unlock_irqrestore(&node->els_ios_lock, flags);
-
 	return els;
 }
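
Note: the efct fix moves the allocations (including the GFP_KERNEL dma_alloc_coherent()) out from under the els_ios spinlock and takes the lock only around the list insertion, the one step that needs it. A userspace analogue of the resulting shape, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct els { struct els *next; };

static pthread_mutex_t els_ios_lock = PTHREAD_MUTEX_INITIALIZER;
static struct els *els_ios_list;

static struct els *els_io_alloc(void)
{
	/* possibly-sleeping allocation happens with no lock held */
	struct els *els = malloc(sizeof(*els));

	if (!els)
		return NULL;

	pthread_mutex_lock(&els_ios_lock);	/* lock only the list insert */
	els->next = els_ios_list;
	els_ios_list = els;
	pthread_mutex_unlock(&els_ios_lock);
	return els;
}

int main(void)
{
	printf("allocated %p\n", (void *)els_io_alloc());
	return 0;
}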

drivers/scsi/hisi_sas/hisi_sas_main.c

@@ -2666,9 +2666,6 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
 		goto err_out;
 
 	error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
-	if (error)
-		error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-
 	if (error) {
 		dev_err(dev, "No usable DMA addressing method\n");
 		goto err_out;

drivers/scsi/hisi_sas/hisi_sas_v3_hw.c

@@ -4695,8 +4695,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_out;
 
 	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-	if (rc)
-		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (rc) {
 		dev_err(dev, "No usable DMA addressing method\n");
 		rc = -ENODEV;

drivers/scsi/myrs.c

@@ -2267,7 +2267,8 @@ static void myrs_cleanup(struct myrs_hba *cs)
 	myrs_unmap(cs);
 
 	if (cs->mmio_base) {
-		cs->disable_intr(cs);
+		if (cs->disable_intr)
+			cs->disable_intr(cs);
 		iounmap(cs->mmio_base);
 		cs->mmio_base = NULL;
 	}
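
Note: the myrs crash was a call through a NULL method when probe failed before disable_intr was assigned; the guard is the usual optional-callback check, sketched standalone below (hypothetical struct, not the myrs types):

#include <stdio.h>

struct hba {
	void *mmio_base;
	void (*disable_intr)(struct hba *);
};

static void cleanup(struct hba *cs)
{
	if (cs->mmio_base) {
		if (cs->disable_intr)	/* may still be NULL on early error paths */
			cs->disable_intr(cs);
		/* iounmap() equivalent would go here */
		cs->mmio_base = NULL;
	}
}

int main(void)
{
	struct hba early_fail = { .mmio_base = (void *)0x1 };	/* no callback yet */

	cleanup(&early_fail);	/* previously a NULL deref, now a no-op */
	printf("cleanup done\n");
	return 0;
}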

drivers/scsi/pm8001/pm80xx_hwi.c

@@ -4151,10 +4151,22 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
 	u32 ret = MPI_IO_STATUS_FAIL;
 	u32 regval;
 
+	/*
+	 * Fatal errors are programmed to be signalled in irq vector
+	 * pm8001_ha->max_q_num - 1 through pm8001_ha->main_cfg_tbl.pm80xx_tbl.
+	 * fatal_err_interrupt
+	 */
 	if (vec == (pm8001_ha->max_q_num - 1)) {
+		u32 mipsall_ready;
+
+		if (pm8001_ha->chip_id == chip_8008 ||
+		    pm8001_ha->chip_id == chip_8009)
+			mipsall_ready = SCRATCH_PAD_MIPSALL_READY_8PORT;
+		else
+			mipsall_ready = SCRATCH_PAD_MIPSALL_READY_16PORT;
+
 		regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
-		if ((regval & SCRATCH_PAD_MIPSALL_READY) !=
-					SCRATCH_PAD_MIPSALL_READY) {
+		if ((regval & mipsall_ready) != mipsall_ready) {
 			pm8001_ha->controller_fatal_error = true;
 			pm8001_dbg(pm8001_ha, FAIL,
 				   "Firmware Fatal error! Regval:0x%x\n",

drivers/scsi/pm8001/pm80xx_hwi.h

@@ -1405,8 +1405,12 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
 #define SCRATCH_PAD_BOOT_LOAD_SUCCESS	0x0
 #define SCRATCH_PAD_IOP0_READY		0xC00
 #define SCRATCH_PAD_IOP1_READY		0x3000
-#define SCRATCH_PAD_MIPSALL_READY	(SCRATCH_PAD_IOP1_READY | \
+#define SCRATCH_PAD_MIPSALL_READY_16PORT	(SCRATCH_PAD_IOP1_READY | \
 					 SCRATCH_PAD_IOP0_READY | \
 					 SCRATCH_PAD_ILA_READY | \
 					 SCRATCH_PAD_RAAE_READY)
+#define SCRATCH_PAD_MIPSALL_READY_8PORT	(SCRATCH_PAD_IOP0_READY | \
+					 SCRATCH_PAD_ILA_READY | \
+					 SCRATCH_PAD_RAAE_READY)
+
 /* boot loader state */
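
Note: together the two pm8001 hunks make the firmware-readiness probe chip-aware: 8-port chips (8008/8009) have no IOP1 and never set its ready bit, so the old combined mask misread healthy firmware as crashed; with maxcpus=1 the fatal-error vector is the only vector in use, making the false positive unavoidable. A compact model of the corrected check (bit values illustrative, not the real scratchpad layout):

#include <stdio.h>

#define IOP0_READY	0x1
#define IOP1_READY	0x2	/* absent on 8-port silicon */
#define READY_8PORT	(IOP0_READY)
#define READY_16PORT	(IOP0_READY | IOP1_READY)

static int fw_fatal(unsigned int scratch_pad1, int is_8port)
{
	unsigned int mipsall_ready = is_8port ? READY_8PORT : READY_16PORT;

	return (scratch_pad1 & mipsall_ready) != mipsall_ready;
}

int main(void)
{
	/* healthy 8-port chip: IOP0 up, no IOP1 to wait for */
	printf("8-port, IOP0 only: fatal=%d\n", fw_fatal(IOP0_READY, 1));
	/* the old shared mask would have reported fatal=1 here too */
	printf("16-port, IOP0 only: fatal=%d\n", fw_fatal(IOP0_READY, 0));
	return 0;
}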

drivers/scsi/qedf/qedf_io.c

@@ -2250,6 +2250,7 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
 	    io_req->tm_flags == FCP_TMF_TGT_RESET) {
 		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
 		io_req->sc_cmd = NULL;
+		kref_put(&io_req->refcount, qedf_release_cmd);
 		complete(&io_req->tm_done);
 	}
 
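
Note: a command outstanding to the firmware holds a reference that its normal completion drops; when a LOGO-triggered cleanup completes a TMF command early, that drop was skipped, so the fix adds the missing kref_put() on the path that clears QEDF_CMD_OUTSTANDING. A toy model of the leak (plain counter in place of the kernel kref API):

#include <stdio.h>

struct ioreq { int refcount; int outstanding; };

static void put(struct ioreq *io)
{
	if (--io->refcount == 0)
		printf("io_req freed\n");
}

static void cleanup(struct ioreq *io)
{
	if (io->outstanding) {
		io->outstanding = 0;
		put(io);	/* the put that the skipped completion owed */
	}
	/* complete(&io->tm_done) equivalent follows in the driver */
}

int main(void)
{
	struct ioreq io = { .refcount = 1, .outstanding = 1 };

	cleanup(&io);	/* without put() above, refcount stays 1 forever */
	return 0;
}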

drivers/scsi/qedf/qedf_main.c

@@ -911,7 +911,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
 	struct qed_link_output if_link;
 
 	if (lport->vport) {
-		QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n");
+		printk_ratelimited("Cannot issue host reset on NPIV port.\n");
 		return;
 	}
 
@@ -1864,6 +1864,7 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
 	vport_qedf->cmd_mgr = base_qedf->cmd_mgr;
 	init_completion(&vport_qedf->flogi_compl);
 	INIT_LIST_HEAD(&vport_qedf->fcports);
+	INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work);
 
 	rc = qedf_vport_libfc_config(vport, vn_port);
 	if (rc) {
@@ -3980,7 +3981,9 @@ void qedf_stag_change_work(struct work_struct *work)
 	struct qedf_ctx *qedf =
 	    container_of(work, struct qedf_ctx, stag_work.work);
 
-	QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
+	printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.",
+			dev_name(&qedf->pdev->dev), __func__, __LINE__,
+			qedf->dbg_ctx.host_no);
 	qedf_ctx_soft_reset(qedf->lport);
 }
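
Note: qedf_ctx_soft_reset() runs from stag_work, which was previously initialized only for the base port, so queueing a context reset on an NPIV vport used an uninitialized work item. The vport-create hunk gives every vport the same initialization. A minimal model of that bug class (a bare function pointer standing in for INIT_DELAYED_WORK):

#include <stdio.h>

struct work { void (*fn)(struct work *); };
struct qedf_ctx { struct work stag_work; };

static void stag_change_work(struct work *w)
{
	printf("performing software context reset\n");
}

/* the fix: vports now get the same init as the base port */
static void vport_create(struct qedf_ctx *vport)
{
	vport->stag_work.fn = stag_change_work;
}

static void queue_stag_work(struct qedf_ctx *ctx)
{
	ctx->stag_work.fn(&ctx->stag_work);	/* crashes if fn was never set */
}

int main(void)
{
	struct qedf_ctx vport = { { 0 } };

	vport_create(&vport);	/* without this, queue_stag_work() derefs NULL */
	queue_stag_work(&vport);
	return 0;
}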

drivers/scsi/ufs/ufshcd-pltfrm.c

@@ -92,6 +92,11 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
 		clki->min_freq = clkfreq[i];
 		clki->max_freq = clkfreq[i+1];
 		clki->name = devm_kstrdup(dev, name, GFP_KERNEL);
+		if (!clki->name) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
 		if (!strcmp(name, "ref_clk"))
 			clki->keep_link_active = true;
 		dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
@@ -127,6 +132,8 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
 		return -ENOMEM;
 
 	vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
+	if (!vreg->name)
+		return -ENOMEM;
 
 	snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
 	if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {

drivers/scsi/ufs/ufshcd.c

@@ -8613,7 +8613,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
  * @pwr_mode: device power mode to set
  *
  * Returns 0 if requested power mode is set successfully
- * Returns non-zero if failed to set the requested power mode
+ * Returns < 0 if failed to set the requested power mode
  */
 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
 				     enum ufs_dev_pwr_mode pwr_mode)
@@ -8667,8 +8667,11 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
 		sdev_printk(KERN_WARNING, sdp,
 			    "START_STOP failed for power mode: %d, result %x\n",
 			    pwr_mode, ret);
-		if (ret > 0 && scsi_sense_valid(&sshdr))
-			scsi_print_sense_hdr(sdp, NULL, &sshdr);
+		if (ret > 0) {
+			if (scsi_sense_valid(&sshdr))
+				scsi_print_sense_hdr(sdp, NULL, &sshdr);
+			ret = -EIO;
+		}
 	}
 
 	if (!ret)
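
Note: scsi_execute()-style helpers return a negative errno for transport problems but a positive SCSI result otherwise; callers of ufshcd_set_dev_pwr_mode() compare its return value against errnos, so any positive result is now folded to -EIO once the sense data has been printed. A sketch of the convention (simulated result codes, not the SCSI midlayer):

#include <errno.h>
#include <stdio.h>

/* stand-in for scsi_execute(): <0 errno, 0 success, >0 SCSI result */
static int start_stop_unit(int simulated_result)
{
	return simulated_result;
}

static int set_dev_pwr_mode(int simulated_result)
{
	int ret = start_stop_unit(simulated_result);

	if (ret) {
		fprintf(stderr, "START_STOP failed, result %x\n", ret);
		if (ret > 0) {
			/* sense data would be printed here */
			ret = -EIO;	/* callers only understand errnos */
		}
	}
	return ret;
}

int main(void)
{
	printf("result 0x2 -> %d\n", set_dev_pwr_mode(0x2));	/* -EIO (-5) */
	printf("result 0   -> %d\n", set_dev_pwr_mode(0));	/* 0 */
	return 0;
}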

drivers/scsi/ufs/ufshci.h

@@ -142,7 +142,8 @@ static inline u32 ufshci_version(u32 major, u32 minor)
 #define INT_FATAL_ERRORS	(DEVICE_FATAL_ERROR |\
 				CONTROLLER_FATAL_ERROR |\
 				SYSTEM_BUS_FATAL_ERROR |\
-				CRYPTO_ENGINE_FATAL_ERROR)
+				CRYPTO_ENGINE_FATAL_ERROR |\
+				UIC_LINK_LOST)
 
 /* HCS - Host Controller Status 30h */
 #define DEVICE_PRESENT		0x1

drivers/target/iscsi/iscsi_target_tpg.c

@@ -443,6 +443,9 @@ static bool iscsit_tpg_check_network_portal(
 			if (match)
 				break;
 		}
 		spin_unlock(&tpg->tpg_np_lock);
+
+		if (match)
+			break;
 	}
 	spin_unlock(&tiqn->tiqn_tpg_lock);
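
Note: the target fix is a classic nested-loop break. The inner scan assigns match on every portal, so without the added outer-loop break a later TPG's scan could overwrite a found match with false; now the first hit ends both loops. The shape, standalone (arrays in place of the locked kernel lists):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	int tpg_portals[2][3] = { { 1, 2, 3 }, { 4, 5, 6 } };
	int wanted = 2;
	bool match = false;

	for (int tpg = 0; tpg < 2; tpg++) {
		for (int np = 0; np < 3; np++) {
			match = (tpg_portals[tpg][np] == wanted);
			if (match)
				break;
		}
		if (match)	/* the added outer-loop break */
			break;	/* otherwise the next TPG resets match */
	}
	printf("portal %d %s\n", wanted, match ? "exists" : "not found");
	return 0;
}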