SCSI misc on 20240120

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "Final round of fixes that came in too late to send in the first
  request.

  It's nine bug fixes and one version update (because of a bug fix) and
  one set of PCI ID additions. There's one bug fix in the core which is
  really a one liner (except that an additional sdev pointer was added
  for convenience) and the rest are in drivers"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: target: core: Add TMF to tmr_list handling
  scsi: core: Kick the requeue list after inserting when flushing
  scsi: fnic: unlock on error path in fnic_queuecommand()
  scsi: fcoe: Fix unsigned comparison with zero in store_ctlr_mode()
  scsi: mpi3mr: Fix mpi3mr_fw.c kernel-doc warnings
  scsi: smartpqi: Bump driver version to 2.1.26-030
  scsi: smartpqi: Fix logical volume rescan race condition
  scsi: smartpqi: Add new controller PCI IDs
  scsi: ufs: qcom: Remove unnecessary goto statement from ufs_qcom_config_esi()
  scsi: ufs: core: Remove the ufshcd_hba_exit() call from ufshcd_async_scan()
  scsi: ufs: core: Simplify power management during async scan
Merge commit c25b24fa72 by Linus Torvalds, 2024-01-20 09:42:32 -08:00
10 changed files with 103 additions and 39 deletions

diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c

@@ -263,6 +263,7 @@ static ssize_t store_ctlr_mode(struct device *dev,
 				const char *buf, size_t count)
 {
 	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+	int res;
 
 	if (count > FCOE_MAX_MODENAME_LEN)
 		return -EINVAL;
@@ -279,12 +280,13 @@ static ssize_t store_ctlr_mode(struct device *dev,
 			return -ENOTSUPP;
 		}
 
-		ctlr->mode = sysfs_match_string(fip_conn_type_names, buf);
-		if (ctlr->mode < 0 || ctlr->mode == FIP_CONN_TYPE_UNKNOWN) {
+		res = sysfs_match_string(fip_conn_type_names, buf);
+		if (res < 0 || res == FIP_CONN_TYPE_UNKNOWN) {
			LIBFCOE_SYSFS_DBG(ctlr, "Unknown mode %s provided.\n",
					  buf);
			return -EINVAL;
		}
+		ctlr->mode = res;
 
		ctlr->f->set_fcoe_ctlr_mode(ctlr);
		LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.\n", buf);

diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c

@@ -546,6 +546,7 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
	if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL) {
		WARN(1, "fnic<%d>: %s: hwq: %d tag 0x%x already exists\n",
			fnic->fnic_num, __func__, hwq, blk_mq_unique_tag_to_tag(mqtag));
+		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c

@@ -475,7 +475,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
  * @op_reply_q: op_reply_qinfo object
  * @reply_ci: operational reply descriptor's queue consumer index
  *
- * Returns reply descriptor frame address
+ * Returns: reply descriptor frame address
  */
 static inline struct mpi3_default_reply_descriptor *
 mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
@@ -1063,7 +1063,6 @@ enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
  * @mrioc: Adapter instance reference
  *
  * Free the DMA memory allocated for IOCTL handling purpose.
- *
  * Return: None
  */
@@ -1106,7 +1105,6 @@ static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
 /**
  * mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
  * @mrioc: Adapter instance reference
- *
  * This function allocates dmaable memory required to handle the
  * application issued MPI3 IOCTL requests.
@@ -1241,7 +1239,7 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
  *	during reset/resume
  * @mrioc: Adapter instance reference
  *
- * Return zero if the new IOCFacts parameters value is compatible with
+ * Return: zero if the new IOCFacts parameters value is compatible with
  * older values else return -EPERM
  */
 static int
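
The mpi3mr hunks above only adjust comment layout so that kernel-doc parses the return-value notes cleanly. For reference, a minimal block in the expected kernel-doc shape (the helper and parameter names below are invented purely for illustration):

  /**
   * example_reply_count() - Count outstanding replies (illustrative helper)
   * @nposted: number of reply descriptors posted by the firmware
   * @nconsumed: number of reply descriptors already consumed
   *
   * A short summary line, one "@name:" line per parameter, an optional
   * free-form description, and a "Return:" section documenting the result.
   *
   * Return: the number of replies still waiting to be processed.
   */
  static inline unsigned int example_reply_count(unsigned int nposted, unsigned int nconsumed)
  {
      return nposted - nconsumed;
  }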

diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c

@@ -2197,15 +2197,18 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
	struct scsi_cmnd *scmd, *next;
 
	list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
+		struct scsi_device *sdev = scmd->device;
+
		list_del_init(&scmd->eh_entry);
-		if (scsi_device_online(scmd->device) &&
-		    !scsi_noretry_cmd(scmd) && scsi_cmd_retry_allowed(scmd) &&
-		    scsi_eh_should_retry_cmd(scmd)) {
+		if (scsi_device_online(sdev) && !scsi_noretry_cmd(scmd) &&
+		    scsi_cmd_retry_allowed(scmd) &&
+		    scsi_eh_should_retry_cmd(scmd)) {
			SCSI_LOG_ERROR_RECOVERY(3,
				scmd_printk(KERN_INFO, scmd,
					 "%s: flush retry cmd\n",
					 current->comm));
			scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
+			blk_mq_kick_requeue_list(sdev->request_queue);
		} else {
			/*
			 * If just we got sense for the device (called

diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h

@@ -1347,7 +1347,6 @@ struct pqi_ctrl_info {
	bool controller_online;
	bool block_requests;
	bool scan_blocked;
-	u8 logical_volume_rescan_needed : 1;
	u8 inbound_spanning_supported : 1;
	u8 outbound_spanning_supported : 1;
	u8 pqi_mode_enabled : 1;

diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c

@@ -33,11 +33,11 @@
 #define BUILD_TIMESTAMP
 #endif
 
-#define DRIVER_VERSION		"2.1.24-046"
+#define DRIVER_VERSION		"2.1.26-030"
 #define DRIVER_MAJOR		2
 #define DRIVER_MINOR		1
-#define DRIVER_RELEASE		24
-#define DRIVER_REVISION		46
+#define DRIVER_RELEASE		26
+#define DRIVER_REVISION		30
 
 #define DRIVER_NAME		"Microchip SmartPQI Driver (v" \
	DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -2093,8 +2093,6 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
	if (existing_device->devtype == TYPE_DISK) {
		existing_device->raid_level = new_device->raid_level;
		existing_device->volume_status = new_device->volume_status;
-		if (ctrl_info->logical_volume_rescan_needed)
-			existing_device->rescan = true;
		memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
		if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
			kfree(existing_device->raid_map);
@@ -2164,6 +2162,20 @@ static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device)
		INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker);
 }
 
+static inline bool pqi_volume_rescan_needed(struct pqi_scsi_dev *device)
+{
+	if (pqi_device_in_remove(device))
+		return false;
+
+	if (device->sdev == NULL)
+		return false;
+
+	if (!scsi_device_online(device->sdev))
+		return false;
+
+	return device->rescan;
+}
+
 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
 {
@@ -2284,9 +2296,13 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
		if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
-			if (device->rescan) {
-				scsi_rescan_device(device->sdev);
+			spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+			if (pqi_volume_rescan_needed(device)) {
				device->rescan = false;
+				spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+				scsi_rescan_device(device->sdev);
+			} else {
+				spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
			}
		}
	}
@@ -2308,8 +2324,6 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
		}
	}
 
-	ctrl_info->logical_volume_rescan_needed = false;
-
 }
 
 static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
@@ -3702,6 +3716,21 @@ static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
	return ack_event;
 }
 
+static void pqi_mark_volumes_for_rescan(struct pqi_ctrl_info *ctrl_info)
+{
+	unsigned long flags;
+	struct pqi_scsi_dev *device;
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+		if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
+			device->rescan = true;
+	}
+
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+}
+
 static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
 {
	unsigned long flags;
@@ -3742,7 +3771,7 @@ static void pqi_event_worker(struct work_struct *work)
			ack_event = true;
			rescan_needed = true;
			if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
-				ctrl_info->logical_volume_rescan_needed = true;
+				pqi_mark_volumes_for_rescan(ctrl_info);
			else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
				pqi_disable_raid_bypass(ctrl_info);
		}
@@ -10142,6 +10171,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1014, 0x0718)
	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1137, 0x02f8)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1137, 0x02f9)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1137, 0x02fa)
+	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1e93, 0x1000)
@@ -10198,6 +10239,34 @@ static const struct pci_device_id pqi_pci_id_table[] = {
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       0x1f51, 0x100a)
	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1f51, 0x100e)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1f51, 0x100f)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1f51, 0x1010)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1f51, 0x1011)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1f51, 0x1043)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1f51, 0x1044)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1f51, 0x1045)
+	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_ANY_ID, PCI_ANY_ID)
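
The smartpqi rescan hunks above follow a common locking pattern: test and clear the per-device rescan flag while holding the device-list spinlock, then drop the lock before calling the slow scsi_rescan_device(). A small self-contained userspace sketch of that pattern (pthreads, invented names; not the driver code):

  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>

  struct device_state {
      pthread_mutex_t lock;    /* plays the role of scsi_device_list_lock */
      bool rescan_needed;      /* plays the role of device->rescan */
      bool being_removed;      /* plays the role of pqi_device_in_remove() */
  };

  /* Stand-in for scsi_rescan_device(): slow work that must run unlocked. */
  static void slow_rescan(struct device_state *dev)
  {
      printf("rescanning device %p\n", (void *)dev);
  }

  static void maybe_rescan(struct device_state *dev)
  {
      bool do_it = false;

      pthread_mutex_lock(&dev->lock);
      if (dev->rescan_needed && !dev->being_removed) {
          dev->rescan_needed = false;    /* claim the work while the lock is held */
          do_it = true;
      }
      pthread_mutex_unlock(&dev->lock);

      if (do_it)
          slow_rescan(dev);    /* lock dropped: safe to do the slow work */
  }

  int main(void)
  {
      struct device_state dev = {
          .lock = PTHREAD_MUTEX_INITIALIZER,
          .rescan_needed = true,
          .being_removed = false,
      };

      maybe_rescan(&dev);    /* rescans exactly once */
      maybe_rescan(&dev);    /* flag already cleared: nothing to do */
      return 0;
  }

In the driver, the check and the clearing happen under ctrl_info->scsi_device_list_lock, the same lock pqi_mark_volumes_for_rescan() takes when setting the per-volume flags.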

diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c

@@ -147,7 +147,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
-	unsigned long flags;
 
	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
@@ -178,10 +177,6 @@ out_unlock:
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 
-	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
-	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
-	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
-
	return 0;
 }
 EXPORT_SYMBOL(transport_lookup_tmr_lun);

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c

@@ -3629,6 +3629,10 @@ int transport_generic_handle_tmr(
	unsigned long flags;
	bool aborted = false;
 
+	spin_lock_irqsave(&cmd->se_dev->se_tmr_lock, flags);
+	list_add_tail(&cmd->se_tmr_req->tmr_list, &cmd->se_dev->dev_tmr_list);
+	spin_unlock_irqrestore(&cmd->se_dev->se_tmr_lock, flags);
+
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_ABORTED) {
		aborted = true;

diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c

@@ -8725,7 +8725,6 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
 
	ufs_bsg_probe(hba);
	scsi_scan_host(hba->host);
-	pm_runtime_put_sync(hba->dev);
 
 out:
	return ret;
@@ -8994,15 +8993,12 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
 
	/* Probe and add UFS logical units */
	ret = ufshcd_add_lus(hba);
+
 out:
-	/*
-	 * If we failed to initialize the device or the device is not
-	 * present, turn off the power/clocks etc.
-	 */
-	if (ret) {
-		pm_runtime_put_sync(hba->dev);
-		ufshcd_hba_exit(hba);
-	}
+	pm_runtime_put_sync(hba->dev);
+
+	if (ret)
+		dev_err(hba->dev, "%s failed: %d\n", __func__, ret);
 }
 
 static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)

diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c

@@ -1716,7 +1716,7 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
					     ufs_qcom_write_msi_msg);
	if (ret) {
		dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
-		goto out;
+		return ret;
	}
 
	msi_lock_descs(hba->dev);
@@ -1750,11 +1750,8 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
			    FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
			    REG_UFS_CFG3);
		ufshcd_mcq_enable_esi(hba);
-	}
-
-out:
-	if (!ret)
		host->esi_enabled = true;
+	}
 
	return ret;
 }