dmaengine: idxd: rework descriptor free path on failure

Refactor the completion function to allow skipping of descriptor freeing on
the submission failure path. This completely removes descriptor freeing
from the submit failure path and leaves the responsibility to the caller.

Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/163528416222.3925689.12859769271667814762.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
Author: Dave Jiang <dave.jiang@intel.com>
Date: 2021-10-26 14:36:02 -07:00
Committer: Vinod Koul <vkoul@kernel.org>
Commit: 5d78abb6fb (parent: 365fceecd6)

5 files changed, 19 insertions(+), 28 deletions(-)
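The rule the hunks below implement, in one minimal standalone sketch (plain C, runnable in userspace; the mock_* names are simplified stand-ins for idxd_free_desc(), idxd_dma_complete_txd() and idxd_submit_desc(), not driver code): a failed submit returns an error and never frees, the allocating caller frees exactly once, and the completion path frees only when its new free_desc flag is true. The abort path passes free_desc = false for the same reason: the failing submit's caller is about to free the descriptor itself.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct mock_desc {
	int id;
};

/* Stand-in for idxd_free_desc(): return the descriptor to its pool. */
static void mock_free_desc(struct mock_desc *desc)
{
	printf("desc %d: freed\n", desc->id);
	free(desc);
}

/* Stand-in for idxd_dma_complete_txd(): run the completion callback,
 * then free only if the caller asked for it via free_desc. */
static void mock_complete_txd(struct mock_desc *desc, bool free_desc)
{
	printf("desc %d: completion callback\n", desc->id);
	if (free_desc)
		mock_free_desc(desc);
}

/* Stand-in for idxd_submit_desc(): after this patch, failure paths
 * only return an error and leave the descriptor untouched. */
static int mock_submit_desc(struct mock_desc *desc, bool dev_enabled)
{
	if (!dev_enabled)
		return -1;	/* like -EIO: no idxd_free_desc() here anymore */
	printf("desc %d: submitted\n", desc->id);
	return 0;
}

int main(void)
{
	struct mock_desc *a = malloc(sizeof(*a));
	struct mock_desc *b = malloc(sizeof(*b));

	a->id = 1;
	b->id = 2;

	/* Failure path, as in idxd_dma_tx_submit(): the caller that
	 * allocated the descriptor frees it, exactly once. */
	if (mock_submit_desc(a, false) < 0)
		mock_free_desc(a);

	/* Success path: the interrupt thread completes the descriptor
	 * and frees it by passing free_desc = true. */
	if (mock_submit_desc(b, true) == 0)
		mock_complete_txd(b, true);

	return 0;
}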

--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c

@@ -21,7 +21,8 @@ static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
 }
 
 void idxd_dma_complete_txd(struct idxd_desc *desc,
-			   enum idxd_complete_type comp_type)
+			   enum idxd_complete_type comp_type,
+			   bool free_desc)
 {
 	struct dma_async_tx_descriptor *tx;
 	struct dmaengine_result res;
@@ -44,6 +45,9 @@ void idxd_dma_complete_txd(struct idxd_desc *desc,
 		tx->callback = NULL;
 		tx->callback_result = NULL;
 	}
+
+	if (free_desc)
+		idxd_free_desc(desc->wq, desc);
 }
 
 static void op_flag_setup(unsigned long flags, u32 *desc_flags)
@@ -153,8 +157,10 @@ static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	cookie = dma_cookie_assign(tx);
 
 	rc = idxd_submit_desc(wq, desc);
-	if (rc < 0)
+	if (rc < 0) {
+		idxd_free_desc(wq, desc);
 		return rc;
+	}
 
 	return cookie;
 }

--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h

@@ -579,7 +579,7 @@ int idxd_register_dma_channel(struct idxd_wq *wq);
 void idxd_unregister_dma_channel(struct idxd_wq *wq);
 void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
 void idxd_dma_complete_txd(struct idxd_desc *desc,
-			   enum idxd_complete_type comp_type);
+			   enum idxd_complete_type comp_type, bool free_desc);
 
 /* cdev */
 int idxd_cdev_register(void);
@@ -603,10 +603,4 @@ static inline void perfmon_init(void) {}
 static inline void perfmon_exit(void) {}
 #endif
 
-static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
-{
-	idxd_dma_complete_txd(desc, reason);
-	idxd_free_desc(desc->wq, desc);
-}
-
 #endif

--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c

@@ -717,10 +717,8 @@ static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
 	if (!head)
 		return;
 
-	llist_for_each_entry_safe(desc, itr, head, llnode) {
-		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
-		idxd_free_desc(desc->wq, desc);
-	}
+	llist_for_each_entry_safe(desc, itr, head, llnode)
+		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
 }
 
 static void idxd_flush_work_list(struct idxd_irq_entry *ie)
@@ -729,8 +727,7 @@ static void idxd_flush_work_list(struct idxd_irq_entry *ie)
 
 	list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
 		list_del(&desc->list);
-		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
-		idxd_free_desc(desc->wq, desc);
+		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
 	}
 }

--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c

@@ -195,11 +195,11 @@ static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry)
 		 * and 0xff, which DSA_COMP_STATUS_MASK can mask out.
 		 */
 		if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
-			complete_desc(desc, IDXD_COMPLETE_ABORT);
+			idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
 			continue;
 		}
 
-		complete_desc(desc, IDXD_COMPLETE_NORMAL);
+		idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true);
 	} else {
 		spin_lock(&irq_entry->list_lock);
 		list_add_tail(&desc->list,
@@ -238,11 +238,11 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
 		 * and 0xff, which DSA_COMP_STATUS_MASK can mask out.
 		 */
 		if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
-			complete_desc(desc, IDXD_COMPLETE_ABORT);
+			idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
 			continue;
 		}
 
-		complete_desc(desc, IDXD_COMPLETE_NORMAL);
+		idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true);
 	}
 }

--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c

@@ -129,7 +129,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
 	spin_unlock(&ie->list_lock);
 
 	if (found)
-		complete_desc(found, IDXD_COMPLETE_ABORT);
+		idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, false);
 }
 
 int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
@@ -139,15 +139,11 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
 	struct idxd_irq_entry *ie = NULL;
 	void __iomem *portal;
 	int rc;
 
-	if (idxd->state != IDXD_DEV_ENABLED) {
-		idxd_free_desc(wq, desc);
+	if (idxd->state != IDXD_DEV_ENABLED)
 		return -EIO;
-	}
 
-	if (!percpu_ref_tryget_live(&wq->wq_active)) {
-		idxd_free_desc(wq, desc);
+	if (!percpu_ref_tryget_live(&wq->wq_active))
 		return -ENXIO;
-	}
 
 	portal = idxd_wq_portal_addr(wq);
@@ -182,8 +178,6 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
 			/* abort operation frees the descriptor */
 			if (ie)
 				llist_abort_desc(wq, ie, desc);
-			else
-				idxd_free_desc(wq, desc);
 			return rc;
 		}
 	}