dmaengine: xilinx_dma: commonize DMA copy size calculation

This patch removes a bit of duplicated code by introducing a new
function that implements the DMA copy size calculation, and it
prepares for changes to that calculation in following patches.
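
Illustration (not part of the patch): a minimal, self-contained sketch of
the per-segment splitting that the new helper centralizes. The loop, the
types and the stand-in calc_copysize() below are simplified assumptions
that mirror xilinx_dma_calc_copysize() and the segment loops in the prep
routines; the 23-bit default mirrors XILINX_DMA_MAX_TRANS_LEN.

#include <stddef.h>
#include <stdio.h>

/* Stand-in for the per-device cap this patch introduces
 * (xdev->max_buffer_len, initialised from XILINX_DMA_MAX_TRANS_LEN). */
static const size_t max_buffer_len = (1u << 23) - 1;

/* Mirrors xilinx_dma_calc_copysize(): bytes to place in the next segment. */
static size_t calc_copysize(size_t size, size_t done)
{
	size_t left = size - done;

	return left < max_buffer_len ? left : max_buffer_len;
}

int main(void)
{
	size_t total = 20u * 1024 * 1024;	/* e.g. one 20 MiB sg entry */
	size_t used = 0;

	/* Same shape as the loops in prep_slave_sg()/prep_dma_cyclic():
	 * one descriptor segment per iteration, each capped at the limit. */
	while (used < total) {
		size_t copy = calc_copysize(total, used);

		printf("segment of %zu bytes at offset %zu\n", copy, used);
		used += copy;
	}
	return 0;
}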

Suggested-by: Vinod Koul <vkoul@kernel.org>
Signed-off-by: Andrea Merello <andrea.merello@gmail.com>
Reviewed-by: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
Signed-off-by: Vinod Koul <vkoul@kernel.org>
commit:    616f0f81d8
parent:    bfeffd1552
Author:    Andrea Merello <andrea.merello@gmail.com>
Date:      2018-11-20 16:31:45 +01:00
Committer: Vinod Koul <vkoul@kernel.org>


@@ -425,6 +425,7 @@ struct xilinx_dma_config {
  * @rxs_clk: DMA s2mm stream clock
  * @nr_channels: Number of channels DMA device supports
  * @chan_id: DMA channel identifier
+ * @max_buffer_len: Max buffer length
  */
 struct xilinx_dma_device {
 	void __iomem *regs;
@@ -444,6 +445,7 @@ struct xilinx_dma_device {
 	struct clk *rxs_clk;
 	u32 nr_channels;
 	u32 chan_id;
+	u32 max_buffer_len;
 };
 
 /* Macros */
@@ -959,6 +961,25 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 	return 0;
 }
 
+/**
+ * xilinx_dma_calc_copysize - Calculate the amount of data to copy
+ * @chan: Driver specific DMA channel
+ * @size: Total data that needs to be copied
+ * @done: Amount of data that has been already copied
+ *
+ * Return: Amount of data that has to be copied
+ */
+static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
+				    int size, int done)
+{
+	size_t copy;
+
+	copy = min_t(size_t, size - done,
+		     chan->xdev->max_buffer_len);
+
+	return copy;
+}
+
 /**
  * xilinx_dma_tx_status - Get DMA transaction status
  * @dchan: DMA channel
@@ -992,7 +1013,7 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
 		list_for_each_entry(segment, &desc->segments, node) {
 			hw = &segment->hw;
 			residue += (hw->control - hw->status) &
-				   XILINX_DMA_MAX_TRANS_LEN;
+				   chan->xdev->max_buffer_len;
 		}
 	}
 	spin_unlock_irqrestore(&chan->lock, flags);
@@ -1254,7 +1275,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
 
 		/* Start the transfer */
 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
-				hw->control & XILINX_DMA_MAX_TRANS_LEN);
+				hw->control & chan->xdev->max_buffer_len);
 	}
 
 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1357,7 +1378,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 
 		/* Start the transfer */
 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
-			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
+			       hw->control & chan->xdev->max_buffer_len);
 	}
 
 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1718,7 +1739,7 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
 	struct xilinx_cdma_tx_segment *segment;
 	struct xilinx_cdma_desc_hw *hw;
 
-	if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
+	if (!len || len > chan->xdev->max_buffer_len)
 		return NULL;
 
 	desc = xilinx_dma_alloc_tx_descriptor(chan);
@@ -1808,8 +1829,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
 			 * Calculate the maximum number of bytes to transfer,
 			 * making sure it is less than the hw limit
 			 */
-			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
-				     XILINX_DMA_MAX_TRANS_LEN);
+			copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
+							sg_used);
 			hw = &segment->hw;
 
 			/* Fill in the descriptor */
@@ -1913,8 +1934,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
 			 * Calculate the maximum number of bytes to transfer,
 			 * making sure it is less than the hw limit
 			 */
-			copy = min_t(size_t, period_len - sg_used,
-				     XILINX_DMA_MAX_TRANS_LEN);
+			copy = xilinx_dma_calc_copysize(chan, period_len,
+							sg_used);
 			hw = &segment->hw;
 			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
 					  period_len * i);
@@ -2628,6 +2649,8 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 
 	/* Retrieve the DMA engine properties from the device tree */
 	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
+	xdev->max_buffer_len = XILINX_DMA_MAX_TRANS_LEN;
+
 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
 		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
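
Worth noting: the BTT programming and the residue computation above keep
using the new field as a bit mask (hw->control & chan->xdev->max_buffer_len),
which relies on max_buffer_len being a contiguous low-bit mask covering the
length field, as the probe-time default XILINX_DMA_MAX_TRANS_LEN is. A
standalone check of that assumption; the constant and the example control
value below are illustrative, not taken from the driver.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Assumed default, mirroring XILINX_DMA_MAX_TRANS_LEN (23 low bits set). */
	const uint32_t max_buffer_len = (1u << 23) - 1;

	/* A contiguous low-bit mask satisfies (m & (m + 1)) == 0. */
	assert((max_buffer_len & (max_buffer_len + 1)) == 0);

	/* Masking then extracts exactly the low length field and discards any
	 * higher control bits, so the masked value never exceeds the cap
	 * enforced by xilinx_dma_calc_copysize(). */
	const uint32_t control = (1u << 26) | 0x1000;	/* high bit + 4 KiB length, illustrative */

	assert((control & max_buffer_len) == 0x1000);
	assert((control & max_buffer_len) <= max_buffer_len);

	return 0;
}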