Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull more SCSI updates from James Bottomley:
 "This is a set of minor fixes and clean ups in the core and various
  drivers.

  The only core change in behaviour is the I/O retry for spinup notify,
  but that shouldn't impact anything other than the failing case"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (23 commits)
  scsi: virtio_scsi: Add validation for residual bytes from response
  scsi: ipr: System crashes when seeing type 20 error
  scsi: core: Retry I/O for Notify (Enable Spinup) Required error
  scsi: mpi3mr: Fix warnings reported by smatch
  scsi: qedf: Add check to synchronize abort and flush
  scsi: MAINTAINERS: Add mpi3mr driver maintainers
  scsi: libfc: Fix array index out of bound exception
  scsi: mvsas: Use DEVICE_ATTR_RO()/RW() macro
  scsi: megaraid_mbox: Use DEVICE_ATTR_ADMIN_RO() macro
  scsi: qedf: Use DEVICE_ATTR_RO() macro
  scsi: qedi: Use DEVICE_ATTR_RO() macro
  scsi: message: mptfc: Switch from pci_ to dma_ API
  scsi: be2iscsi: Fix some missing space in some messages
  scsi: be2iscsi: Fix an error handling path in beiscsi_dev_probe()
  scsi: ufs: Fix build warning without CONFIG_PM
  scsi: bnx2fc: Remove meaningless bnx2fc_abts_cleanup() return value assignment
  scsi: qla2xxx: Add heartbeat check
  scsi: virtio_scsi: Do not overwrite SCSI status
  scsi: libsas: Add LUN number check in .slave_alloc callback
  scsi: core: Inline scsi_mq_alloc_queue()
  ...
Linus Torvalds 2021-07-11 10:59:53 -07:00
commit 8b9cc17a46
36 changed files with 287 additions and 162 deletions


@ -3781,6 +3781,17 @@ S: Supported
F: Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt
F: drivers/gpio/gpio-bcm-kona.c
BROADCOM MPI3 STORAGE CONTROLLER DRIVER
M: Sathya Prakash Veerichetty <sathya.prakash@broadcom.com>
M: Kashyap Desai <kashyap.desai@broadcom.com>
M: Sumit Saxena <sumit.saxena@broadcom.com>
M: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
L: mpi3mr-linuxdrv.pdl@broadcom.com
L: linux-scsi@vger.kernel.org
S: Supported
W: https://www.broadcom.com/support/storage
F: drivers/scsi/mpi3mr/
BROADCOM NETXTREME-E ROCE DRIVER
M: Selvin Xavier <selvin.xavier@broadcom.com>
M: Naresh Kumar PBS <nareshkumar.pbs@broadcom.com>


@ -331,8 +331,8 @@ mptfc_GetFcDevPage0(MPT_ADAPTER *ioc, int ioc_port,
break;
data_sz = hdr.PageLength * 4;
ppage0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz,
&page0_dma);
ppage0_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
&page0_dma, GFP_KERNEL);
rc = -ENOMEM;
if (!ppage0_alloc)
break;
@ -367,8 +367,8 @@ mptfc_GetFcDevPage0(MPT_ADAPTER *ioc, int ioc_port,
*p_p0 = *ppage0_alloc; /* save data */
*p_pp0++ = p_p0++; /* save addr */
}
pci_free_consistent(ioc->pcidev, data_sz,
(u8 *) ppage0_alloc, page0_dma);
dma_free_coherent(&ioc->pcidev->dev, data_sz,
ppage0_alloc, page0_dma);
if (rc != 0)
break;
@ -763,7 +763,8 @@ mptfc_GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
data_sz = hdr.PageLength * 4;
rc = -ENOMEM;
ppage0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
ppage0_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
&page0_dma, GFP_KERNEL);
if (ppage0_alloc) {
try_again:
@ -817,7 +818,8 @@ mptfc_GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
mptfc_display_port_link_speed(ioc, portnum, pp0dest);
}
pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma);
dma_free_coherent(&ioc->pcidev->dev, data_sz, ppage0_alloc,
page0_dma);
}
return rc;
@ -904,9 +906,8 @@ start_over:
if (data_sz < sizeof(FCPortPage1_t))
data_sz = sizeof(FCPortPage1_t);
page1_alloc = pci_alloc_consistent(ioc->pcidev,
data_sz,
&page1_dma);
page1_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
&page1_dma, GFP_KERNEL);
if (!page1_alloc)
return -ENOMEM;
}
@ -916,8 +917,8 @@ start_over:
data_sz = ioc->fc_data.fc_port_page1[portnum].pg_sz;
if (hdr.PageLength * 4 > data_sz) {
ioc->fc_data.fc_port_page1[portnum].data = NULL;
pci_free_consistent(ioc->pcidev, data_sz, (u8 *)
page1_alloc, page1_dma);
dma_free_coherent(&ioc->pcidev->dev, data_sz,
page1_alloc, page1_dma);
goto start_over;
}
}
@ -932,8 +933,8 @@ start_over:
}
else {
ioc->fc_data.fc_port_page1[portnum].data = NULL;
pci_free_consistent(ioc->pcidev, data_sz, (u8 *)
page1_alloc, page1_dma);
dma_free_coherent(&ioc->pcidev->dev, data_sz, page1_alloc,
page1_dma);
}
return rc;
@ -1514,10 +1515,10 @@ static void mptfc_remove(struct pci_dev *pdev)
for (ii=0; ii<ioc->facts.NumberOfPorts; ii++) {
if (ioc->fc_data.fc_port_page1[ii].data) {
pci_free_consistent(ioc->pcidev,
ioc->fc_data.fc_port_page1[ii].pg_sz,
(u8 *) ioc->fc_data.fc_port_page1[ii].data,
ioc->fc_data.fc_port_page1[ii].dma);
dma_free_coherent(&ioc->pcidev->dev,
ioc->fc_data.fc_port_page1[ii].pg_sz,
ioc->fc_data.fc_port_page1[ii].data,
ioc->fc_data.fc_port_page1[ii].dma);
ioc->fc_data.fc_port_page1[ii].data = NULL;
}
}
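
For reference, the conversion applied throughout this file follows the usual recipe for retiring the legacy PCI DMA wrappers. A minimal sketch of the mapping (illustrative helper names, not the driver's own functions):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* pci_alloc_consistent() took the pci_dev and implied GFP_ATOMIC; the dma_* API
 * takes the underlying struct device and an explicit gfp_t, so sleepable callers
 * such as these config-page fetches can pass GFP_KERNEL. */
static void *example_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t *dma)
{
	/* was: pci_alloc_consistent(pdev, sz, dma) */
	return dma_alloc_coherent(&pdev->dev, sz, dma, GFP_KERNEL);
}

static void example_free(struct pci_dev *pdev, size_t sz, void *cpu, dma_addr_t dma)
{
	/* was: pci_free_consistent(pdev, sz, cpu, dma) */
	dma_free_coherent(&pdev->dev, sz, cpu, dma);
}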


@ -493,7 +493,7 @@ ahc_inq(struct ahc_softc *ahc, u_int port)
return ((ahc_inb(ahc, port))
| (ahc_inb(ahc, port+1) << 8)
| (ahc_inb(ahc, port+2) << 16)
| (ahc_inb(ahc, port+3) << 24)
| (((uint64_t)ahc_inb(ahc, port+3)) << 24)
| (((uint64_t)ahc_inb(ahc, port+4)) << 32)
| (((uint64_t)ahc_inb(ahc, port+5)) << 40)
| (((uint64_t)ahc_inb(ahc, port+6)) << 48)
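
The cast added here matters because ahc_inb() returns an 8-bit value that is promoted to a signed int: shifting it left by 24 can set the sign bit (and a shift of 32 or more would be undefined), so the later widening to 64 bits sign-extends and corrupts the upper half of the assembled value. A small userspace illustration of the promotion pitfall (not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t b = 0x80;

	/* b is promoted to int before the shift; with bit 31 set the intermediate
	 * is negative, and the conversion to uint64_t sign-extends the high word. */
	uint64_t wrong = b << 24;
	/* Widening first keeps the value (and any shift of 32 or more) well defined. */
	uint64_t right = (uint64_t)b << 24;

	printf("wrong=%llx right=%llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}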


@ -53,6 +53,7 @@ static struct scsi_host_template aic94xx_sht = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT


@ -143,8 +143,7 @@ DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
beiscsi_##_name##_disp, beiscsi_##_name##_store)
/*
* When new log level added update the
* the MAX allowed value for log_enable
* When new log level added update MAX allowed value for log_enable
*/
BEISCSI_RW_ATTR(log_enable, 0x00,
0xFF, 0x00, "Enable logging Bit Mask\n"
@ -825,9 +824,8 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
&phwi_context->be_eq[i]);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : beiscsi_init_irqs-Failed to"
"register msix for i = %d\n",
i);
"BM_%d : %s-Failed to register msix for i = %d\n",
__func__, i);
kfree(phba->msi_name[i]);
goto free_msix_irqs;
}
@ -841,9 +839,9 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
ret = request_irq(pci_irq_vector(pcidev, i), be_isr_mcc, 0,
phba->msi_name[i], &phwi_context->be_eq[i]);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT ,
"BM_%d : beiscsi_init_irqs-"
"Failed to register beiscsi_msix_mcc\n");
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : %s-Failed to register beiscsi_msix_mcc\n",
__func__);
kfree(phba->msi_name[i]);
goto free_msix_irqs;
}
@ -853,8 +851,8 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
"beiscsi", phba);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : beiscsi_init_irqs-"
"Failed to register irq\\n");
"BM_%d : %s-Failed to register irq\n",
__func__);
return ret;
}
}
@ -1030,7 +1028,7 @@ free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
phba->params.wrbs_per_cxn);
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
"BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x"
"BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x "
"wrb_handles_available=%d\n",
pwrb_handle, pwrb_context->free_index,
pwrb_context->wrb_handles_available);
@ -1374,7 +1372,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
"BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
" hwi_complete_cmd- Solicited path\n");
" %s- Solicited path\n", __func__);
break;
case HWH_TYPE_NOP:
@ -1384,8 +1382,8 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
default:
beiscsi_log(phba, KERN_WARNING,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
"BM_%d : In hwi_complete_cmd, unknown type = %d"
"wrb_index 0x%x CID 0x%x\n", type,
"BM_%d : In %s, unknown type = %d "
"wrb_index 0x%x CID 0x%x\n", __func__, type,
csol_cqe.wrb_index,
csol_cqe.cid);
break;
@ -1883,9 +1881,9 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
cid = AMAP_GET_BITS(
struct amap_i_t_dpdu_cqe_v2,
cid, sol);
else
cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
cid, sol);
else
cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
cid, sol);
}
cri_index = BE_GET_CRI_FROM_CID(cid);
@ -2010,8 +2008,7 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
default:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
"BM_%d : Invalid CQE Event Received Code : %d"
"CID 0x%x...\n",
"BM_%d : Invalid CQE Event Received Code : %d CID 0x%x...\n",
code, cid);
break;
}
@ -3001,7 +2998,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
void *eq_vaddress;
dma_addr_t paddr;
num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries *
sizeof(struct be_eq_entry));
if (phba->pcidev->msix_enabled)
@ -3034,8 +3031,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
BEISCSI_EQ_DELAY_DEF);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : beiscsi_cmd_eq_create"
"Failed for EQ\n");
"BM_%d : beiscsi_cmd_eq_create Failed for EQ\n");
goto create_eq_error;
}
@ -3068,7 +3064,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
int ret = -ENOMEM;
dma_addr_t paddr;
num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries *
sizeof(struct sol_cqe));
for (i = 0; i < phba->num_cpus; i++) {
@ -3090,8 +3086,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
sizeof(struct sol_cqe), cq_vaddress);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : be_fill_queue Failed "
"for ISCSI CQ\n");
"BM_%d : be_fill_queue Failed for ISCSI CQ\n");
goto create_cq_error;
}
@ -3100,8 +3095,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
false, 0);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : beiscsi_cmd_eq_create"
"Failed for ISCSI CQ\n");
"BM_%d : beiscsi_cmd_eq_create Failed for ISCSI CQ\n");
goto create_cq_error;
}
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
@ -3226,8 +3220,8 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
phwi_context->be_def_dataq[ulp_num].id);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"BM_%d : DEFAULT PDU DATA RING CREATED"
"on ULP : %d\n", ulp_num);
"BM_%d : DEFAULT PDU DATA RING CREATED on ULP : %d\n",
ulp_num);
return 0;
}
@ -3253,13 +3247,13 @@ beiscsi_post_template_hdr(struct beiscsi_hba *phba)
if (status != 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : Post Template HDR Failed for"
"BM_%d : Post Template HDR Failed for "
"ULP_%d\n", ulp_num);
return status;
}
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"BM_%d : Template HDR Pages Posted for"
"BM_%d : Template HDR Pages Posted for "
"ULP_%d\n", ulp_num);
}
}
@ -3374,18 +3368,17 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
} else {
idx++;
wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
pa_addr_lo = mem_descr->mem_array[idx].\
pa_addr_lo = mem_descr->mem_array[idx].
bus_address.u.a64.address;
num_wrb_rings = mem_descr->mem_array[idx].size /
(phba->params.wrbs_per_cxn *
sizeof(struct iscsi_wrb));
pwrb_arr[num].virtual_address = wrb_vaddr;
pwrb_arr[num].bus_address.u.a64.address\
= pa_addr_lo;
pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
pwrb_arr[num].size = phba->params.wrbs_per_cxn *
sizeof(struct iscsi_wrb);
wrb_vaddr += pwrb_arr[num].size;
pa_addr_lo += pwrb_arr[num].size;
pa_addr_lo += pwrb_arr[num].size;
num_wrb_rings--;
}
}
@ -3937,7 +3930,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
idx++;
}
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"BM_%d : phba->io_sgl_hndl_avbl=%d"
"BM_%d : phba->io_sgl_hndl_avbl=%d "
"phba->eh_sgl_hndl_avbl=%d\n",
phba->io_sgl_hndl_avbl,
phba->eh_sgl_hndl_avbl);
@ -3995,13 +3988,8 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
GFP_KERNEL);
if (!ptr_cid_info) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : Failed to allocate memory"
"for ULP_CID_INFO for ULP : %d\n",
ulp_num);
ret = -ENOMEM;
goto free_memory;
}
/* Allocate memory for CID array */
@ -4010,10 +3998,6 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
sizeof(*ptr_cid_info->cid_array),
GFP_KERNEL);
if (!ptr_cid_info->cid_array) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : Failed to allocate memory"
"for CID_ARRAY for ULP : %d\n",
ulp_num);
kfree(ptr_cid_info);
ptr_cid_info = NULL;
ret = -ENOMEM;
@ -4031,9 +4015,6 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
sizeof(struct iscsi_endpoint *),
GFP_KERNEL);
if (!phba->ep_array) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : Failed to allocate memory in "
"hba_setup_cid_tbls\n");
ret = -ENOMEM;
goto free_memory;
@ -4043,10 +4024,6 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
sizeof(struct beiscsi_conn *),
GFP_KERNEL);
if (!phba->conn_table) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : Failed to allocate memory in"
"hba_setup_cid_tbls\n");
kfree(phba->ep_array);
phba->ep_array = NULL;
ret = -ENOMEM;
@ -4399,7 +4376,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
if (!io_task->psgl_handle) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
"BM_%d : Alloc of IO_SGL_ICD Failed"
"BM_%d : Alloc of IO_SGL_ICD Failed "
"for the CID : %d\n",
beiscsi_conn->beiscsi_conn_cid);
goto free_hndls;
@ -4410,7 +4387,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
if (!io_task->pwrb_handle) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
"BM_%d : Alloc of WRB_HANDLE Failed"
"BM_%d : Alloc of WRB_HANDLE Failed "
"for the CID : %d\n",
beiscsi_conn->beiscsi_conn_cid);
goto free_io_hndls;
@ -4426,10 +4403,9 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO |
BEISCSI_LOG_CONFIG,
"BM_%d : Alloc of MGMT_SGL_ICD Failed"
"BM_%d : Alloc of MGMT_SGL_ICD Failed "
"for the CID : %d\n",
beiscsi_conn->
beiscsi_conn_cid);
beiscsi_conn->beiscsi_conn_cid);
goto free_hndls;
}
@ -4444,10 +4420,9 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO |
BEISCSI_LOG_CONFIG,
"BM_%d : Alloc of WRB_HANDLE Failed"
"BM_%d : Alloc of WRB_HANDLE Failed "
"for the CID : %d\n",
beiscsi_conn->
beiscsi_conn_cid);
beiscsi_conn->beiscsi_conn_cid);
goto free_mgmt_hndls;
}
beiscsi_conn->plogin_wrb_handle =
@ -4465,10 +4440,9 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO |
BEISCSI_LOG_CONFIG,
"BM_%d : Alloc of MGMT_SGL_ICD Failed"
"BM_%d : Alloc of MGMT_SGL_ICD Failed "
"for the CID : %d\n",
beiscsi_conn->
beiscsi_conn_cid);
beiscsi_conn->beiscsi_conn_cid);
goto free_hndls;
}
io_task->pwrb_handle =
@ -4478,7 +4452,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
if (!io_task->pwrb_handle) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
"BM_%d : Alloc of WRB_HANDLE Failed"
"BM_%d : Alloc of WRB_HANDLE Failed "
"for the CID : %d\n",
beiscsi_conn->beiscsi_conn_cid);
goto free_mgmt_hndls;
@ -5743,6 +5717,7 @@ free_hba:
pci_disable_msix(phba->pcidev);
pci_dev_put(phba->pcidev);
iscsi_host_free(phba->shost);
pci_disable_pcie_error_reporting(pcidev);
pci_set_drvdata(pcidev, NULL);
disable_pci:
pci_release_regions(pcidev);


@ -1213,7 +1213,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
* cleanup the command and return that I/O was successfully
* aborted.
*/
rc = bnx2fc_abts_cleanup(io_req);
bnx2fc_abts_cleanup(io_req);
/* This only occurs when an task abort was requested while ABTS
is in progress. Setting the IO_CLEANUP flag will skip the
RRQ process in the case when the fw generated SCSI_CMD cmpl


@ -832,10 +832,6 @@ efct_lio_npiv_make_nport(struct target_fabric_configfs *tf,
}
vport_list->lio_vport = lio_vport;
spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
INIT_LIST_HEAD(&vport_list->list_entry);
list_add_tail(&vport_list->list_entry, &efct->tgt_efct.vport_list);
spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
memset(&vport_id, 0, sizeof(vport_id));
vport_id.port_name = npiv_wwpn;
@ -853,6 +849,10 @@ efct_lio_npiv_make_nport(struct target_fabric_configfs *tf,
}
lio_vport->fc_vport = new_fc_vport;
spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
INIT_LIST_HEAD(&vport_list->list_entry);
list_add_tail(&vport_list->list_entry, &efct->tgt_efct.vport_list);
spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
return &lio_vport->vport_wwn;
}


@ -2381,8 +2381,6 @@ sli_xmit_els_rsp64_wqe(struct sli4 *sli, void *buf, struct efc_dma *rsp,
els->ox_id = cpu_to_le16(params->ox_id);
els->flags2 |= SLI4_ELS_IOD & SLI4_ELS_REQUEST64_DIR_WRITE;
els->flags2 |= SLI4_ELS_QOSD;
els->cmd_type_wqec = SLI4_ELS_REQUEST64_CMD_GEN;


@ -1771,6 +1771,7 @@ static struct scsi_host_template sht_v1_hw = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT


@ -3584,6 +3584,7 @@ static struct scsi_host_template sht_v2_hw = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT


@ -3155,6 +3155,7 @@ static struct scsi_host_template sht_v3_hw = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT


@ -1300,7 +1300,7 @@ static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
*p = '\0';
p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
for (i = 1; res_path[i] != 0xff && i < IPR_RES_PATH_BYTES; i++)
p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);
return buffer;
@ -1323,7 +1323,7 @@ static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
*p = '\0';
p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
__ipr_format_res_path(res_path, p, len - (buffer - p));
__ipr_format_res_path(res_path, p, len - (p - buffer));
return buffer;
}
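
The second ipr.c hunk fixes the remaining-space calculation passed to __ipr_format_res_path(): p has already advanced past buffer, so the room left is len - (p - buffer); the old operand order subtracted a negative offset and over-reported the space. A stand-alone sketch of the arithmetic (hypothetical values, not driver code):

#include <stdio.h>

int main(void)
{
	char buffer[48];
	char *p = buffer;
	size_t len = sizeof(buffer);

	/* Consume part of the buffer, as ipr_format_res_path() does with the host number. */
	p += snprintf(p, len, "%d/", 2);

	size_t wrong = len - (buffer - p);	/* buffer - p is negative, so this over-counts */
	size_t right = len - (p - buffer);	/* bytes actually left after the prefix */

	printf("wrong=%zu right=%zu\n", wrong, right);
	return 0;
}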


@ -428,6 +428,7 @@ struct ipr_config_table_entry64 {
__be64 lun;
__be64 lun_wwn[2];
#define IPR_MAX_RES_PATH_LENGTH 48
#define IPR_RES_PATH_BYTES 8
__be64 res_path;
struct ipr_std_inq_data std_inq_data;
u8 reserved2[4];


@ -167,6 +167,7 @@ static struct scsi_host_template isci_sht = {
.eh_abort_handler = sas_eh_abort_handler,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT


@ -1162,6 +1162,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x spp_type 0x%x\n",
pp->spp.spp_flags, pp->spp.spp_type);
rdata->spp_type = pp->spp.spp_type;
if (resp_code != FC_SPP_RESP_ACK) {
if (resp_code == FC_SPP_RESP_CONF)
@ -1184,11 +1185,13 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
/*
* Call prli provider if we should act as a target
*/
prov = fc_passive_prov[rdata->spp_type];
if (prov) {
memset(&temp_spp, 0, sizeof(temp_spp));
prov->prli(rdata, pp->prli.prli_spp_len,
&pp->spp, &temp_spp);
if (rdata->spp_type < FC_FC4_PROV_SIZE) {
prov = fc_passive_prov[rdata->spp_type];
if (prov) {
memset(&temp_spp, 0, sizeof(temp_spp));
prov->prli(rdata, pp->prli.prli_spp_len,
&pp->spp, &temp_spp);
}
}
/*
* Check if the image pair could be established


@ -911,6 +911,14 @@ void sas_task_abort(struct sas_task *task)
blk_abort_request(sc->request);
}
int sas_slave_alloc(struct scsi_device *sdev)
{
if (dev_is_sata(sdev_to_domain_dev(sdev)) && sdev->lun)
return -ENXIO;
return 0;
}
void sas_target_destroy(struct scsi_target *starget)
{
struct domain_device *found_dev = starget->hostdata;
@ -957,5 +965,6 @@ EXPORT_SYMBOL_GPL(sas_task_abort);
EXPORT_SYMBOL_GPL(sas_phy_reset);
EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler);
EXPORT_SYMBOL_GPL(sas_slave_alloc);
EXPORT_SYMBOL_GPL(sas_target_destroy);
EXPORT_SYMBOL_GPL(sas_ioctl);


@ -121,8 +121,8 @@ static irqreturn_t megaraid_isr(int, void *);
static void megaraid_mbox_dpc(unsigned long);
static ssize_t megaraid_sysfs_show_app_hndl(struct device *, struct device_attribute *attr, char *);
static ssize_t megaraid_sysfs_show_ldnum(struct device *, struct device_attribute *attr, char *);
static ssize_t megaraid_mbox_app_hndl_show(struct device *, struct device_attribute *attr, char *);
static ssize_t megaraid_mbox_ld_show(struct device *, struct device_attribute *attr, char *);
static int megaraid_cmm_register(adapter_t *);
static int megaraid_cmm_unregister(adapter_t *);
@ -302,8 +302,7 @@ static struct pci_driver megaraid_pci_driver = {
// definitions for the device attributes for exporting logical drive number
// for a scsi address (Host, Channel, Id, Lun)
static DEVICE_ATTR(megaraid_mbox_app_hndl, S_IRUSR, megaraid_sysfs_show_app_hndl,
NULL);
static DEVICE_ATTR_ADMIN_RO(megaraid_mbox_app_hndl);
// Host template initializer for megaraid mbox sysfs device attributes
static struct device_attribute *megaraid_shost_attrs[] = {
@ -312,7 +311,7 @@ static struct device_attribute *megaraid_shost_attrs[] = {
};
static DEVICE_ATTR(megaraid_mbox_ld, S_IRUSR, megaraid_sysfs_show_ldnum, NULL);
static DEVICE_ATTR_ADMIN_RO(megaraid_mbox_ld);
// Host template initializer for megaraid mbox sysfs device attributes
static struct device_attribute *megaraid_sdev_attrs[] = {
@ -3961,7 +3960,7 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter)
/**
* megaraid_sysfs_show_app_hndl - display application handle for this adapter
* megaraid_mbox_app_hndl_show - display application handle for this adapter
* @dev : class device object representation for the host
* @attr : device attribute (unused)
* @buf : buffer to send data to
@ -3971,8 +3970,7 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter)
* handle, since we do not interface with applications directly.
*/
static ssize_t
megaraid_sysfs_show_app_hndl(struct device *dev, struct device_attribute *attr,
char *buf)
megaraid_mbox_app_hndl_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
adapter_t *adapter = (adapter_t *)SCSIHOST2ADAP(shost);
@ -3985,7 +3983,7 @@ megaraid_sysfs_show_app_hndl(struct device *dev, struct device_attribute *attr,
/**
* megaraid_sysfs_show_ldnum - display the logical drive number for this device
* megaraid_mbox_ld_show - display the logical drive number for this device
* @dev : device object representation for the scsi device
* @attr : device attribute to show
* @buf : buffer to send data to
@ -4000,7 +3998,7 @@ megaraid_sysfs_show_app_hndl(struct device *dev, struct device_attribute *attr,
* <int> <int> <int> <int>
*/
static ssize_t
megaraid_sysfs_show_ldnum(struct device *dev, struct device_attribute *attr, char *buf)
megaraid_mbox_ld_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
adapter_t *adapter = (adapter_t *)SCSIHOST2ADAP(sdev->host);
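
This megaraid_mbox hunk, and the mvsas, qedf and qedi hunks further down, all make the same sysfs cleanup: rename the callbacks to the <name>_show / <name>_store convention and let DEVICE_ATTR_RO()/DEVICE_ATTR_RW()/DEVICE_ATTR_ADMIN_RO() declare dev_attr_<name> with the matching mode. A minimal sketch with a made-up attribute name:

#include <linux/device.h>

/* "example_value" is illustrative only, not an attribute of these drivers. */
static ssize_t example_value_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", 42);
}
static DEVICE_ATTR_RO(example_value);	/* declares dev_attr_example_value, mode 0444 */

/* DEVICE_ATTR_ADMIN_RO() has the same shape but uses mode 0400 (root-only read),
 * which is what the two megaraid_mbox attributes above switch to. */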


@ -867,8 +867,9 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
if ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)
tgtdev->is_hidden = 1;
if (mrioc->shost)
prot_mask = scsi_host_get_prot(mrioc->shost);
if (!mrioc->shost)
break;
prot_mask = scsi_host_get_prot(mrioc->shost);
if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
ioc_info(mrioc,


@ -46,6 +46,7 @@ static struct scsi_host_template mvs_sht = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT
@ -692,22 +693,17 @@ static struct pci_driver mvs_pci_driver = {
.remove = mvs_pci_remove,
};
static ssize_t
mvs_show_driver_version(struct device *cdev,
struct device_attribute *attr, char *buffer)
static ssize_t driver_version_show(struct device *cdev,
struct device_attribute *attr, char *buffer)
{
return snprintf(buffer, PAGE_SIZE, "%s\n", DRV_VERSION);
}
static DEVICE_ATTR(driver_version,
S_IRUGO,
mvs_show_driver_version,
NULL);
static DEVICE_ATTR_RO(driver_version);
static ssize_t
mvs_store_interrupt_coalescing(struct device *cdev,
struct device_attribute *attr,
const char *buffer, size_t size)
static ssize_t interrupt_coalescing_store(struct device *cdev,
struct device_attribute *attr,
const char *buffer, size_t size)
{
unsigned int val = 0;
struct mvs_info *mvi = NULL;
@ -745,16 +741,13 @@ mvs_store_interrupt_coalescing(struct device *cdev,
return strlen(buffer);
}
static ssize_t mvs_show_interrupt_coalescing(struct device *cdev,
struct device_attribute *attr, char *buffer)
static ssize_t interrupt_coalescing_show(struct device *cdev,
struct device_attribute *attr, char *buffer)
{
return snprintf(buffer, PAGE_SIZE, "%d\n", interrupt_coalescing);
}
static DEVICE_ATTR(interrupt_coalescing,
S_IRUGO|S_IWUSR,
mvs_show_interrupt_coalescing,
mvs_store_interrupt_coalescing);
static DEVICE_ATTR_RW(interrupt_coalescing);
static int __init mvs_init(void)
{


@ -101,6 +101,7 @@ static struct scsi_host_template pm8001_sht = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT


@ -24,9 +24,8 @@ static struct qedf_ctx *qedf_get_base_qedf(struct qedf_ctx *qedf)
return lport_priv(base_lport);
}
static ssize_t
qedf_fcoe_mac_show(struct device *dev,
struct device_attribute *attr, char *buf)
static ssize_t fcoe_mac_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fc_lport *lport = shost_priv(class_to_shost(dev));
u32 port_id;
@ -42,9 +41,8 @@ qedf_fcoe_mac_show(struct device *dev,
return scnprintf(buf, PAGE_SIZE, "%pM\n", fcoe_mac);
}
static ssize_t
qedf_fka_period_show(struct device *dev,
struct device_attribute *attr, char *buf)
static ssize_t fka_period_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fc_lport *lport = shost_priv(class_to_shost(dev));
struct qedf_ctx *qedf = lport_priv(lport);
@ -59,8 +57,8 @@ qedf_fka_period_show(struct device *dev,
return scnprintf(buf, PAGE_SIZE, "%d\n", fka_period);
}
static DEVICE_ATTR(fcoe_mac, S_IRUGO, qedf_fcoe_mac_show, NULL);
static DEVICE_ATTR(fka_period, S_IRUGO, qedf_fka_period_show, NULL);
static DEVICE_ATTR_RO(fcoe_mac);
static DEVICE_ATTR_RO(fka_period);
struct device_attribute *qedf_host_attrs[] = {
&dev_attr_fcoe_mac,


@ -1515,9 +1515,19 @@ void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
{
int rval;
if (io_req == NULL) {
QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
return;
}
if (io_req->fcport == NULL) {
QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
return;
}
if (!cqe) {
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
"cqe is NULL for io_req %p\n", io_req);
"cqe is NULL for io_req %p\n", io_req);
return;
}
@ -1533,6 +1543,16 @@ void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
le32_to_cpu(cqe->cqe_info.err_info.rx_id));
/* When flush is active, let the cmds be flushed out from the cleanup context */
if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) ||
(test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) &&
io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) {
QEDF_ERR(&qedf->dbg_ctx,
"Dropping EQE for xid=0x%x as fcport is flushing",
io_req->xid);
return;
}
if (qedf->stop_io_on_error) {
qedf_stop_all_io(qedf);
return;


@ -16,9 +16,9 @@ static inline struct qedi_ctx *qedi_dev_to_hba(struct device *dev)
return iscsi_host_priv(shost);
}
static ssize_t qedi_show_port_state(struct device *dev,
struct device_attribute *attr,
char *buf)
static ssize_t port_state_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct qedi_ctx *qedi = qedi_dev_to_hba(dev);
@ -28,8 +28,8 @@ static ssize_t qedi_show_port_state(struct device *dev,
return sprintf(buf, "Linkdown\n");
}
static ssize_t qedi_show_speed(struct device *dev,
struct device_attribute *attr, char *buf)
static ssize_t speed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qedi_ctx *qedi = qedi_dev_to_hba(dev);
struct qed_link_output if_link;
@ -39,8 +39,8 @@ static ssize_t qedi_show_speed(struct device *dev,
return sprintf(buf, "%d Gbit\n", if_link.speed / 1000);
}
static DEVICE_ATTR(port_state, 0444, qedi_show_port_state, NULL);
static DEVICE_ATTR(speed, 0444, qedi_show_speed, NULL);
static DEVICE_ATTR_RO(port_state);
static DEVICE_ATTR_RO(speed);
struct device_attribute *qedi_shost_attrs[] = {
&dev_attr_port_state,


@ -3660,6 +3660,8 @@ struct qla_qpair {
struct qla_tgt_counters tgt_counters;
uint16_t cpuid;
struct qla_fw_resources fwres ____cacheline_aligned;
u32 cmd_cnt;
u32 cmd_completion_cnt;
};
/* Place holder for FW buffer parameters */
@ -4616,6 +4618,7 @@ struct qla_hw_data {
struct qla_hw_data_stat stat;
pci_error_state_t pci_error_state;
u64 prev_cmd_cnt;
};
struct active_regions {
@ -4743,6 +4746,7 @@ typedef struct scsi_qla_host {
#define SET_ZIO_THRESHOLD_NEEDED 32
#define ISP_ABORT_TO_ROM 33
#define VPORT_DELETE 34
#define HEARTBEAT_CHK 38
#define PROCESS_PUREX_IOCB 63


@ -549,6 +549,7 @@ extern int qla2xxx_read_remote_register(scsi_qla_host_t *, uint32_t,
uint32_t *);
extern int qla2xxx_write_remote_register(scsi_qla_host_t *, uint32_t,
uint32_t);
void qla_no_op_mb(struct scsi_qla_host *vha);
/*
* Global Function Prototypes in qla_isr.c source file.


@ -6870,10 +6870,14 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
ha->flags.fw_init_done = 0;
ha->chip_reset++;
ha->base_qpair->chip_reset = ha->chip_reset;
ha->base_qpair->cmd_cnt = ha->base_qpair->cmd_completion_cnt = 0;
for (i = 0; i < ha->max_qpairs; i++) {
if (ha->queue_pair_map[i])
if (ha->queue_pair_map[i]) {
ha->queue_pair_map[i]->chip_reset =
ha->base_qpair->chip_reset;
ha->queue_pair_map[i]->cmd_cnt =
ha->queue_pair_map[i]->cmd_completion_cnt = 0;
}
}
/* purge MBox commands */


@ -1710,6 +1710,7 @@ qla24xx_start_scsi(srb_t *sp)
} else
req->ring_ptr++;
sp->qpair->cmd_cnt++;
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
@ -1912,6 +1913,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
} else
req->ring_ptr++;
sp->qpair->cmd_cnt++;
/* Set chip new ring index. */
wrt_reg_dword(req->req_q_in, req->ring_index);
@ -2068,6 +2070,7 @@ qla2xxx_start_scsi_mq(srb_t *sp)
} else
req->ring_ptr++;
sp->qpair->cmd_cnt++;
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
@ -2284,6 +2287,7 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
} else
req->ring_ptr++;
sp->qpair->cmd_cnt++;
/* Set chip new ring index. */
wrt_reg_dword(req->req_q_in, req->ring_index);


@ -2322,6 +2322,8 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
if (unlikely(iocb->u.nvme.aen_op))
atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
else
sp->qpair->cmd_completion_cnt++;
if (unlikely(comp_status != CS_COMPLETE))
logit = 1;
@ -2967,6 +2969,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
return;
}
sp->qpair->cmd_completion_cnt++;
/* Fast path completion. */
if (comp_status == CS_COMPLETE && scsi_status == 0) {
qla2x00_process_completed_request(vha, req, handle);


@ -6939,3 +6939,30 @@ ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
return rval;
}
/**
* qla_no_op_mb(): This MB is used to check if FW is still alive and
* able to generate an interrupt. Otherwise, a timeout will trigger
* FW dump + reset
* @vha: host adapter pointer
* Return: None
*/
void qla_no_op_mb(struct scsi_qla_host *vha)
{
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
int rval;
memset(&mc, 0, sizeof(mc));
mcp->mb[0] = 0; // noop cmd= 0
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_0;
mcp->tov = 5;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval) {
ql_dbg(ql_dbg_async, vha, 0x7071,
"Failed %s %x\n", __func__, rval);
}
}


@ -536,6 +536,10 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
req->ring_ptr++;
}
/* ignore nvme async cmd due to long timeout */
if (!nvme->u.nvme.aen_op)
sp->qpair->cmd_cnt++;
/* Set chip new ring index. */
wrt_reg_dword(req->req_q_in, req->ring_index);


@ -6969,6 +6969,17 @@ intr_on_check:
qla2x00_lip_reset(base_vha);
}
if (test_bit(HEARTBEAT_CHK, &base_vha->dpc_flags)) {
/*
* if there is a mb in progress then that's
* enough of a check to see if fw is still ticking.
*/
if (!ha->flags.mbox_busy && base_vha->flags.init_done)
qla_no_op_mb(base_vha);
clear_bit(HEARTBEAT_CHK, &base_vha->dpc_flags);
}
ha->dpc_active = 0;
end_loop:
set_current_state(TASK_INTERRUPTIBLE);
@ -7025,6 +7036,61 @@ qla2x00_rst_aen(scsi_qla_host_t *vha)
}
}
static bool qla_do_heartbeat(struct scsi_qla_host *vha)
{
u64 cmd_cnt, prev_cmd_cnt;
bool do_hb = false;
struct qla_hw_data *ha = vha->hw;
int i;
/* if cmds are still pending down in fw, then do hb */
if (ha->base_qpair->cmd_cnt != ha->base_qpair->cmd_completion_cnt) {
do_hb = true;
goto skip;
}
for (i = 0; i < ha->max_qpairs; i++) {
if (ha->queue_pair_map[i] &&
ha->queue_pair_map[i]->cmd_cnt !=
ha->queue_pair_map[i]->cmd_completion_cnt) {
do_hb = true;
break;
}
}
skip:
prev_cmd_cnt = ha->prev_cmd_cnt;
cmd_cnt = ha->base_qpair->cmd_cnt;
for (i = 0; i < ha->max_qpairs; i++) {
if (ha->queue_pair_map[i])
cmd_cnt += ha->queue_pair_map[i]->cmd_cnt;
}
ha->prev_cmd_cnt = cmd_cnt;
if (!do_hb && ((cmd_cnt - prev_cmd_cnt) > 50))
/*
* IOs are completing before periodic hb check.
* IOs seems to be running, do hb for sanity check.
*/
do_hb = true;
return do_hb;
}
static void qla_heart_beat(struct scsi_qla_host *vha)
{
if (vha->vp_idx)
return;
if (vha->hw->flags.eeh_busy || qla2x00_chip_is_down(vha))
return;
if (qla_do_heartbeat(vha)) {
set_bit(HEARTBEAT_CHK, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
}
/**************************************************************************
* qla2x00_timer
*
@ -7243,6 +7309,8 @@ qla2x00_timer(struct timer_list *t)
qla2xxx_wake_dpc(vha);
}
qla_heart_beat(vha);
qla2x00_restart_timer(vha, WATCH_INTERVAL);
}


@ -726,6 +726,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
case 0x07: /* operation in progress */
case 0x08: /* Long write in progress */
case 0x09: /* self test in progress */
case 0x11: /* notify (enable spinup) required */
case 0x14: /* space allocation in progress */
case 0x1a: /* start stop unit in progress */
case 0x1b: /* sanitize in progress */
@ -1897,18 +1898,6 @@ static const struct blk_mq_ops scsi_mq_ops = {
.get_rq_budget_token = scsi_mq_get_rq_budget_token,
};
struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
{
sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);
if (IS_ERR(sdev->request_queue))
return NULL;
sdev->request_queue->queuedata = sdev;
__scsi_init_queue(sdev->host, sdev->request_queue);
blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, sdev->request_queue);
return sdev->request_queue;
}
int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
unsigned int cmd_size, sgl_size;


@ -91,7 +91,6 @@ extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
extern void scsi_run_host_queues(struct Scsi_Host *shost);
extern void scsi_requeue_run_queue(struct work_struct *work);
extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev);
extern void scsi_start_queue(struct scsi_device *sdev);
extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);


@ -217,6 +217,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
{
unsigned int depth;
struct scsi_device *sdev;
struct request_queue *q;
int display_failure_msg = 1, ret;
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
@ -266,16 +267,19 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
*/
sdev->borken = 1;
sdev->request_queue = scsi_mq_alloc_queue(sdev);
if (!sdev->request_queue) {
q = blk_mq_init_queue(&sdev->host->tag_set);
if (IS_ERR(q)) {
/* release fn is set up in scsi_sysfs_device_initialise, so
* have to free and put manually here */
put_device(&starget->dev);
kfree(sdev);
goto out;
}
WARN_ON_ONCE(!blk_get_queue(sdev->request_queue));
sdev->request_queue->queuedata = sdev;
sdev->request_queue = q;
q->queuedata = sdev;
__scsi_init_queue(sdev->host, q);
blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
WARN_ON_ONCE(!blk_get_queue(q));
depth = sdev->host->cmd_per_lun ?: 1;


@ -8926,6 +8926,7 @@ out:
return ret;
}
#ifdef CONFIG_PM
static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int ret;
@ -9052,6 +9053,7 @@ static int ufshcd_wl_runtime_resume(struct device *dev)
return ret;
}
#endif
#ifdef CONFIG_PM_SLEEP
static int ufshcd_wl_suspend(struct device *dev)
@ -9766,6 +9768,7 @@ static inline int ufshcd_clear_rpmb_uac(struct ufs_hba *hba)
return ret;
}
#ifdef CONFIG_PM
static int ufshcd_rpmb_resume(struct device *dev)
{
struct ufs_hba *hba = wlun_dev_to_hba(dev);
@ -9774,6 +9777,7 @@ static int ufshcd_rpmb_resume(struct device *dev)
ufshcd_clear_rpmb_uac(hba);
return 0;
}
#endif
static const struct dev_pm_ops ufs_rpmb_pm_ops = {
SET_RUNTIME_PM_OPS(NULL, ufshcd_rpmb_resume, NULL)


@ -97,7 +97,7 @@ static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
{
if (resid)
scsi_set_resid(sc, resid);
scsi_set_resid(sc, min(resid, scsi_bufflen(sc)));
}
/*
@ -156,12 +156,11 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) >
VIRTIO_SCSI_SENSE_SIZE);
if (sc->sense_buffer) {
if (resp->sense_len) {
memcpy(sc->sense_buffer, resp->sense,
min_t(u32,
virtio32_to_cpu(vscsi->vdev, resp->sense_len),
VIRTIO_SCSI_SENSE_SIZE));
set_status_byte(sc, SAM_STAT_CHECK_CONDITION);
}
sc->scsi_done(sc);