dmaengine fixes for v6.8

Core:
  - fix return value of is_slave_direction() for D2D dma
 
 Driver fixes for:
  - Documentation fixes to resolve warnings for the at_hdmac driver
  - a bunch of fsl driver fixes for memory leaks and a useless kfree
  - TI edma and k3 fixes for short packet errors and missing null pointer checks
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEE+vs47OPLdNbVcHzyfBQHDyUjg0cFAmW+SIsACgkQfBQHDyUj
 g0fsSA/9GP0AcZFS5c6PCZak99EI1ldMBDiKHCqQM4CZBYC3B/eg6PrFSiBDmjfU
 CCGs8h1wYPUWzuTmABFcK00rHyewIQ7OfDaEcFs8BdhfQsywvUSoKuf332Qs8ZaM
 YIG+jlAs4CtCTyP0cvS2sDmX6MFocfXZFIGwe9dqS+kJxPoPxxI9a/OvvOZnAawf
 fwqirJvPmLij37g7j2r1zJIIAyvIq1J1Q7txdWWp7AqJyhH5pv7IEaHB2asy4iDx
 OpzhkW+/MuHQXg4B1HizNvwhrtfAGxfHvN7PI9Gy1qlak7cbMqJS6hAZ3mdxAzQc
 hdei+KUH/eKCL9n1pOLe9pR83KT/ktJuIhp4KIikQC2JElQFmy0A60kB23hQ3JHl
 FRMk8KTqQRRxMSrPe4CmkSIe/HAIyNjroq8MyaUd3PsTl9T5netgx5Za9xDqQAhw
 wkTVv7V+5KOdiEk20UcUMI3u0N4c2bX5L58gHOjV5lNssuvetxdQXCQw4n9sMi5N
 CY2dBPNEEKfmWCgXvhRgqdoUrvk/gm1YucJIe8wvMnzTBYLVD9b3Khs13VwfVEVD
 2coFxm2ssvvbbl+kkBnUjnKbn4soO0g+RpKh1v9JCDopmu82jP/ELM3mXJcN+mot
 jEp8uEL8LXow5ALLQgqAWA5345ycI1u5S3lF43qPLvvpaEyXbzg=
 =6kH5
 -----END PGP SIGNATURE-----

Merge tag 'dmaengine-fix-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine fixes from Vinod Koul:
 "Core:

   - fix return value of is_slave_direction() for D2D dma

  Driver fixes for:

   - Documentation fixes to resolve warnings for the at_hdmac driver

   - a bunch of fsl driver fixes for memory leaks and a useless kfree

   - TI edma and k3 fixes for short packet errors and missing null pointer checks"

* tag 'dmaengine-fix-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine:
  dmaengine: at_hdmac: add missing kernel-doc style description
  dmaengine: fix is_slave_direction() return false when DMA_DEV_TO_DEV
  dmaengine: fsl-qdma: Remove a useless devm_kfree()
  dmaengine: fsl-qdma: Fix a memory leak related to the queue command DMA
  dmaengine: fsl-qdma: Fix a memory leak related to the status queue DMA
  dmaengine: ti: k3-udma: Report short packet errors
  dmaengine: ti: edma: Add some null pointer checks to the edma_probe
  dmaengine: fsl-dpaa2-qdma: Fix the size of dma pools
  dmaengine: at_hdmac: fix some kernel-doc warnings
Linus Torvalds 2024-02-04 06:37:38 +00:00
commit 8a0c60a0e4
6 changed files with 59 additions and 39 deletions

drivers/dma/at_hdmac.c

@@ -222,8 +222,14 @@ struct atdma_sg {
* @vd: pointer to the virtual dma descriptor.
* @atchan: pointer to the atmel dma channel.
* @total_len: total transaction byte count
* @sg_len: number of sg entries.
* @sglen: number of sg entries.
* @sg: array of sgs.
* @boundary: number of transfers to perform before the automatic address increment operation
* @dst_hole: value to add to the destination address when the boundary has been reached
* @src_hole: value to add to the source address when the boundary has been reached
* @memset_buffer: buffer used for the memset operation
* @memset_paddr: physical address of the buffer used for the memset operation
* @memset_vaddr: virtual address of the buffer used for the memset operation
*/
struct at_desc {
struct virt_dma_desc vd;
@@ -245,7 +251,10 @@ struct at_desc {
/*-- Channels --------------------------------------------------------*/
/**
* atc_status - information bits stored in channel status flag
* enum atc_status - information bits stored in channel status flag
*
* @ATC_IS_PAUSED: If channel is pauses
* @ATC_IS_CYCLIC: If channel is cyclic
*
* Manipulated with atomic operations.
*/
@@ -282,7 +291,6 @@ struct at_dma_chan {
u32 save_cfg;
u32 save_dscr;
struct dma_slave_config dma_sconfig;
bool cyclic;
struct at_desc *desc;
};
@@ -328,12 +336,12 @@ static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
/**
* struct at_dma - internal representation of an Atmel HDMA Controller
* @dma_device: dmaengine dma_device object members
* @atdma_devtype: identifier of DMA controller compatibility
* @ch_regs: memory mapped register base
* @regs: memory mapped register base
* @clk: dma controller clock
* @save_imr: interrupt mask register that is saved on suspend/resume cycle
* @all_chan_mask: all channels availlable in a mask
* @lli_pool: hw lli table
* @memset_pool: hw memset pool
* @chan: channels table to store at_dma_chan structures
*/
struct at_dma {
@@ -626,6 +634,9 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla)
/**
* atc_get_llis_residue - Get residue for a hardware linked list transfer
* @atchan: pointer to an atmel hdmac channel.
* @desc: pointer to the descriptor for which the residue is calculated.
* @residue: residue to be set to dma_tx_state.
*
* Calculate the residue by removing the length of the Linked List Item (LLI)
* already transferred from the total length. To get the current LLI we can use
@@ -661,10 +672,8 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla)
* two DSCR values are different, we read again the CTRLA then the DSCR till two
* consecutive read values from DSCR are equal or till the maximum trials is
* reach. This algorithm is very unlikely not to find a stable value for DSCR.
* @atchan: pointer to an atmel hdmac channel.
* @desc: pointer to the descriptor for which the residue is calculated.
* @residue: residue to be set to dma_tx_state.
* Returns 0 on success, -errno otherwise.
*
* Returns: %0 on success, -errno otherwise.
*/
static int atc_get_llis_residue(struct at_dma_chan *atchan,
struct at_desc *desc, u32 *residue)
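
The stable-read loop described in the kernel-doc above can be sketched in isolation. This is a minimal illustration only, not the at_hdmac implementation: channel_readl() and ATC_MAX_DSCR_TRIALS are assumed stand-ins for the driver's register accessor and retry limit.

static int atc_read_stable_dscr(struct at_dma_chan *atchan, u32 *ctrla, u32 *dscr)
{
	u32 prev, cur;
	int trials;

	prev = channel_readl(atchan, DSCR);	/* assumed register accessor */
	for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; trials++) {
		*ctrla = channel_readl(atchan, CTRLA);
		cur = channel_readl(atchan, DSCR);
		if (cur == prev) {
			/* two consecutive DSCR reads match: value is stable */
			*dscr = cur;
			return 0;
		}
		prev = cur;
	}

	/* very unlikely: DSCR never settled within the retry budget */
	return -ETIMEDOUT;
}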
@@ -731,7 +740,8 @@ static int atc_get_llis_residue(struct at_dma_chan *atchan,
* @chan: DMA channel
* @cookie: transaction identifier to check status of
* @residue: residue to be updated.
* Return 0 on success, -errono otherwise.
*
* Return: %0 on success, -errno otherwise.
*/
static int atc_get_residue(struct dma_chan *chan, dma_cookie_t cookie,
u32 *residue)
@@ -1710,7 +1720,7 @@ static void atc_issue_pending(struct dma_chan *chan)
* atc_alloc_chan_resources - allocate resources for DMA channel
* @chan: allocate descriptor resources for this channel
*
* return - the number of allocated descriptors
* Return: the number of allocated descriptors
*/
static int atc_alloc_chan_resources(struct dma_chan *chan)
{

drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c

@@ -38,15 +38,17 @@ static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
if (!dpaa2_chan->fd_pool)
goto err;
dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
sizeof(struct dpaa2_fl_entry),
sizeof(struct dpaa2_fl_entry), 0);
dpaa2_chan->fl_pool =
dma_pool_create("fl_pool", dev,
sizeof(struct dpaa2_fl_entry) * 3,
sizeof(struct dpaa2_fl_entry), 0);
if (!dpaa2_chan->fl_pool)
goto err_fd;
dpaa2_chan->sdd_pool =
dma_pool_create("sdd_pool", dev,
sizeof(struct dpaa2_qdma_sd_d),
sizeof(struct dpaa2_qdma_sd_d) * 2,
sizeof(struct dpaa2_qdma_sd_d), 0);
if (!dpaa2_chan->sdd_pool)
goto err_fl;

drivers/dma/fsl-qdma.c

@@ -514,11 +514,11 @@ static struct fsl_qdma_queue
queue_temp = queue_head + i + (j * queue_num);
queue_temp->cq =
dma_alloc_coherent(&pdev->dev,
sizeof(struct fsl_qdma_format) *
queue_size[i],
&queue_temp->bus_addr,
GFP_KERNEL);
dmam_alloc_coherent(&pdev->dev,
sizeof(struct fsl_qdma_format) *
queue_size[i],
&queue_temp->bus_addr,
GFP_KERNEL);
if (!queue_temp->cq)
return NULL;
queue_temp->block_base = fsl_qdma->block_base +
@@ -563,15 +563,14 @@ static struct fsl_qdma_queue
/*
* Buffer for queue command
*/
status_head->cq = dma_alloc_coherent(&pdev->dev,
sizeof(struct fsl_qdma_format) *
status_size,
&status_head->bus_addr,
GFP_KERNEL);
if (!status_head->cq) {
devm_kfree(&pdev->dev, status_head);
status_head->cq = dmam_alloc_coherent(&pdev->dev,
sizeof(struct fsl_qdma_format) *
status_size,
&status_head->bus_addr,
GFP_KERNEL);
if (!status_head->cq)
return NULL;
}
status_head->n_cq = status_size;
status_head->virt_head = status_head->cq;
status_head->virt_tail = status_head->cq;
@@ -1268,8 +1267,6 @@ static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
static void fsl_qdma_remove(struct platform_device *pdev)
{
int i;
struct fsl_qdma_queue *status;
struct device_node *np = pdev->dev.of_node;
struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
@@ -1277,12 +1274,6 @@ static void fsl_qdma_remove(struct platform_device *pdev)
fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
of_dma_controller_free(np);
dma_async_device_unregister(&fsl_qdma->dma_dev);
for (i = 0; i < fsl_qdma->block_number; i++) {
status = fsl_qdma->status[i];
dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
status->n_cq, status->cq, status->bus_addr);
}
}
static const struct of_device_id fsl_qdma_dt_ids[] = {

drivers/dma/ti/edma.c

@@ -2404,6 +2404,11 @@ static int edma_probe(struct platform_device *pdev)
if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
dev_name(dev));
if (!irq_name) {
ret = -ENOMEM;
goto err_disable_pm;
}
ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
ecc);
if (ret) {
@@ -2420,6 +2425,11 @@ static int edma_probe(struct platform_device *pdev)
if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
dev_name(dev));
if (!irq_name) {
ret = -ENOMEM;
goto err_disable_pm;
}
ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
ecc);
if (ret) {

drivers/dma/ti/k3-udma.c

@@ -3968,6 +3968,7 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
{
struct udma_chan *uc = to_udma_chan(&vc->chan);
struct udma_desc *d;
u8 status;
if (!vd)
return;
@@ -3977,12 +3978,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
if (d->metadata_size)
udma_fetch_epib(uc, d);
/* Provide residue information for the client */
if (result) {
void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
if (cppi5_desc_get_type(desc_vaddr) ==
CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
/* Provide residue information for the client */
result->residue = d->residue -
cppi5_hdesc_get_pktlen(desc_vaddr);
if (result->residue)
@@ -3991,7 +3992,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
result->result = DMA_TRANS_NOERROR;
} else {
result->residue = 0;
result->result = DMA_TRANS_NOERROR;
/* Propagate TR Response errors to the client */
status = d->hwdesc[0].tr_resp_base->status;
if (status)
result->result = DMA_TRANS_ABORTED;
else
result->result = DMA_TRANS_NOERROR;
}
}
}
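
With the hunk above, a bad TR response status now reaches the client as DMA_TRANS_ABORTED instead of an unconditional DMA_TRANS_NOERROR. A minimal, hypothetical client-side completion handler (attached through the descriptor's callback_result; channel setup and descriptor prep omitted) would observe it like this:

#include <linux/dmaengine.h>
#include <linux/printk.h>

/* Hypothetical handler name; only the result check is the point. */
static void my_dma_done(void *param, const struct dmaengine_result *result)
{
	if (result->result != DMA_TRANS_NOERROR) {
		/* e.g. a short packet: residue is the untransferred byte count */
		pr_warn("DMA transfer failed (%d), residue %u\n",
			result->result, result->residue);
		return;
	}
	/* clean completion, the whole packet was transferred */
}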

include/linux/dmaengine.h

@@ -953,7 +953,8 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM) ||
(direction == DMA_DEV_TO_DEV);
}
static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
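
For reference, with the widened check above a device-to-device transfer direction now passes a driver's direction sanity test as well. A minimal sketch (the validation helper is hypothetical, not part of this series):

#include <linux/dmaengine.h>

/* Hypothetical prep-path check; DMA_DEV_TO_DEV is now accepted. */
static int my_validate_direction(enum dma_transfer_direction dir)
{
	/* true for DMA_MEM_TO_DEV, DMA_DEV_TO_MEM and, after this fix, DMA_DEV_TO_DEV */
	if (!is_slave_direction(dir))
		return -EINVAL;
	return 0;
}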