scsi: pm8001: Simplify pm8001_ccb_task_free()

The task argument of the pm8001_ccb_task_free() function can always be
inferred from the task field of the ccb argument, so there is no need to
pass it. Likewise, the ccb_idx argument is always equal to the ccb_tag
field of the ccb and is not needed either. Remove both arguments and
update all call sites. The pm8001_ccb_task_free_done() helper is also
modified to match this change.

Link: https://lore.kernel.org/r/20220220031810.738362-30-damien.lemoal@opensource.wdc.com
Reviewed-by: Jack Wang <jinpu.wang@ionos.com>
Signed-off-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit 304fe11bdc, parent e29c47fe89
Damien Le Moal, 2022-02-20 12:18:08 +09:00, committed by Martin K. Petersen
4 changed files with 52 additions and 62 deletions
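For reference, the shape of the interface change, condensed from the
pm8001_sas.h hunk and a representative call site below (a sketch of the
before/after prototypes, not the complete patch):

    /* Before: callers passed the task and the tag alongside the ccb. */
    void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
    		struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx);

    /* After: both values are derived from the ccb itself (ccb->task, ccb->ccb_tag). */
    void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
    		struct pm8001_ccb_info *ccb);

    /* Typical call-site update in the completion handlers: */
    pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);	/* old */
    pm8001_ccb_task_free(pm8001_ha, ccb);		/* new */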


@@ -1564,11 +1564,11 @@ void pm8001_work_fn(struct work_struct *work)
spin_unlock_irqrestore(&t->task_state_lock, flags1);
pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, pw->handler, ts->resp, ts->stat);
pm8001_ccb_task_free(pm8001_ha, t, ccb, ccb->ccb_tag);
pm8001_ccb_task_free(pm8001_ha, ccb);
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags1);
pm8001_ccb_task_free(pm8001_ha, t, ccb, ccb->ccb_tag);
pm8001_ccb_task_free(pm8001_ha, ccb);
mb();/* in order to force CPU ordering */
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
t->task_done(t);
@@ -1697,8 +1697,7 @@ void pm8001_work_fn(struct work_struct *work)
continue;
}
/*complete sas task and update to top layer */
pm8001_ccb_task_free(pm8001_ha, task, ccb,
ccb->ccb_tag);
pm8001_ccb_task_free(pm8001_ha, ccb);
ts->resp = SAS_TASK_COMPLETE;
task->task_done(task);
} else if (ccb->ccb_tag != PM8001_INVALID_TAG) {
@@ -2084,10 +2083,10 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free(pm8001_ha, ccb);
mb();/* in order to force CPU ordering */
t->task_done(t);
}
@@ -2251,10 +2250,10 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, event, ts->resp, ts->stat);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free(pm8001_ha, ccb);
mb();/* in order to force CPU ordering */
t->task_done(t);
}
@@ -2480,7 +2479,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free_done(pm8001_ha, ccb);
return;
}
break;
@@ -2496,7 +2495,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free_done(pm8001_ha, ccb);
return;
}
break;
@@ -2518,7 +2517,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free_done(pm8001_ha, ccb);
return;
}
break;
@@ -2589,7 +2588,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_DS_NON_OPERATIONAL);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free_done(pm8001_ha, ccb);
return;
}
break;
@@ -2609,7 +2608,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_DS_IN_ERROR);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free_done(pm8001_ha, ccb);
return;
}
break;
@@ -2639,10 +2638,10 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free_done(pm8001_ha, ccb);
}
}
@@ -2994,12 +2993,10 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/* in order to force CPU ordering */
t->task_done(t);
pm8001_ccb_task_free_done(pm8001_ha, ccb);
}
}
@@ -3649,7 +3646,7 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free(pm8001_ha, ccb);
mb();
if (pm8001_dev->id & NCQ_ABORT_ALL_FLAG) {
@@ -4287,12 +4284,11 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
"task 0x%p resp 0x%x stat 0x%x but aborted by upper layer\n",
task, ts->resp,
ts->stat);
pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&task->task_state_lock,
flags);
pm8001_ccb_task_free_done(pm8001_ha, task,
ccb, tag);
pm8001_ccb_task_free_done(pm8001_ha, ccb);
return 0;
}
}


@@ -489,22 +489,21 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
/**
* pm8001_ccb_task_free - free the sg for ssp and smp command, free the ccb.
* @pm8001_ha: our hba card information
* @ccb: the ccb which attached to ssp task
* @task: the task to be free.
* @ccb_idx: ccb index.
* @ccb: the ccb which attached to ssp task to free
*/
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx)
struct pm8001_ccb_info *ccb)
{
struct sas_task *task = ccb->task;
struct ata_queued_cmd *qc;
struct pm8001_device *pm8001_dev;
if (!ccb->task)
if (!task)
return;
if (!sas_protocol_ata(task->task_proto))
if (ccb->n_elem)
dma_unmap_sg(pm8001_ha->dev, task->scatter,
task->num_scatter, task->data_dir);
if (!sas_protocol_ata(task->task_proto) && ccb->n_elem)
dma_unmap_sg(pm8001_ha->dev, task->scatter,
task->num_scatter, task->data_dir);
switch (task->task_proto) {
case SAS_PROTOCOL_SMP:
@@ -523,12 +522,12 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
}
if (sas_protocol_ata(task->task_proto)) {
// For SCSI/ATA commands uldd_task points to ata_queued_cmd
/* For SCSI/ATA commands uldd_task points to ata_queued_cmd */
qc = task->uldd_task;
pm8001_dev = ccb->device;
trace_pm80xx_request_complete(pm8001_ha->id,
pm8001_dev ? pm8001_dev->attached_phy : PM8001_MAX_PHYS,
ccb_idx, 0 /* ctlr_opcode not known */,
ccb->ccb_tag, 0 /* ctlr_opcode not known */,
qc ? qc->tf.command : 0, // ata opcode
pm8001_dev ? atomic_read(&pm8001_dev->running_req) : -1);
}
@@ -844,11 +843,11 @@ void pm8001_open_reject_retry(
& SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&task->task_state_lock,
flags1);
pm8001_ccb_task_free(pm8001_ha, task, ccb, ccb->ccb_tag);
pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&task->task_state_lock,
flags1);
pm8001_ccb_task_free(pm8001_ha, task, ccb, ccb->ccb_tag);
pm8001_ccb_task_free(pm8001_ha, ccb);
mb();/* in order to force CPU ordering */
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
task->task_done(task);


@@ -637,7 +637,7 @@ int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out);
void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha);
u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag);
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx);
struct pm8001_ccb_info *ccb);
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata);
void pm8001_scan_start(struct Scsi_Host *shost);
@@ -780,12 +780,12 @@ static inline void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha,
pm8001_tag_free(pm8001_ha, tag);
}
static inline void
pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
struct sas_task *task, struct pm8001_ccb_info *ccb,
u32 ccb_idx)
static inline void pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
struct pm8001_ccb_info *ccb)
{
pm8001_ccb_task_free(pm8001_ha, task, ccb, ccb_idx);
struct sas_task *task = ccb->task;
pm8001_ccb_task_free(pm8001_ha, ccb);
smp_mb(); /*in order to force CPU ordering*/
task->task_done(task);
}
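The inline helper above reads ccb->task into a local before calling
pm8001_ccb_task_free(), presumably because that call releases the ccb (and
its task pointer), so the completion callback has to run on the saved
pointer. This is what lets the completion handlers replace their open-coded
free/barrier/task_done sequences with a single call; a condensed
before/after sketch of that caller pattern, reusing the t, ccb and flags
locals from the hunks in this patch:

    /* Before: each completion path open-coded the sequence. */
    spin_unlock_irqrestore(&t->task_state_lock, flags);
    pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
    mb();	/* in order to force CPU ordering */
    t->task_done(t);

    /* After: one helper, deriving the task and tag from the ccb. */
    spin_unlock_irqrestore(&t->task_state_lock, flags);
    pm8001_ccb_task_free_done(pm8001_ha, ccb);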


@@ -2157,14 +2157,12 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free(pm8001_ha, ccb);
if (t->slow_task)
complete(&t->slow_task->completion);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/* in order to force CPU ordering */
t->task_done(t);
pm8001_ccb_task_free_done(pm8001_ha, ccb);
}
}
@@ -2340,12 +2338,10 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, event, ts->resp, ts->stat);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/* in order to force CPU ordering */
t->task_done(t);
pm8001_ccb_task_free_done(pm8001_ha, ccb);
}
}
@@ -2579,7 +2575,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
ts->stat = SAS_QUEUE_FULL;
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free_done(pm8001_ha, ccb);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
return;
@@ -2599,7 +2595,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
ts->stat = SAS_QUEUE_FULL;
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free_done(pm8001_ha, ccb);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
return;
@@ -2627,7 +2623,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
ts->stat = SAS_QUEUE_FULL;
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free_done(pm8001_ha, ccb);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
return;
@@ -2702,7 +2698,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
ts->stat = SAS_QUEUE_FULL;
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free_done(pm8001_ha, ccb);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
return;
@@ -2726,7 +2722,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
ts->stat = SAS_QUEUE_FULL;
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free_done(pm8001_ha, ccb);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
return;
@@ -2760,14 +2756,14 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free(pm8001_ha, ccb);
if (t->slow_task)
complete(&t->slow_task->completion);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free_done(pm8001_ha, ccb);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
}
@@ -3171,10 +3167,10 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with io_status 0x%x resp 0x%xstat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
pm8001_ccb_task_free(pm8001_ha, ccb);
mb();/* in order to force CPU ordering */
t->task_done(t);
}
@@ -4702,13 +4698,12 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
"task 0x%p resp 0x%x stat 0x%x but aborted by upper layer\n",
task, ts->resp,
ts->stat);
pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
pm8001_ccb_task_free(pm8001_ha, ccb);
return 0;
} else {
spin_unlock_irqrestore(&task->task_state_lock,
flags);
pm8001_ccb_task_free_done(pm8001_ha, task,
ccb, tag);
pm8001_ccb_task_free_done(pm8001_ha, ccb);
atomic_dec(&pm8001_ha_dev->running_req);
return 0;
}