dmaengine: PL08x: use vchan's spinlock

Initialize the vchan struct, and use the provided spinlock rather than
our own.

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
This commit is contained in:
Russell King 2012-05-26 14:09:53 +01:00
parent 01d8dc64e9
commit 083be28a10
2 changed files with 21 additions and 25 deletions

View file

@@ -53,6 +53,7 @@ config AMBA_PL08X
 	bool "ARM PrimeCell PL080 or PL081 support"
 	depends on ARM_AMBA && EXPERIMENTAL
 	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
 	help
 	  Platform has a PL08x DMAC device
 	  which can provide DMA engine support

View file

@@ -237,7 +237,6 @@ struct pl08x_dma_chan {
 	struct list_head issued_list;
 	struct list_head done_list;
 	struct pl08x_txd *at;
-	spinlock_t lock;
 	struct pl08x_driver_data *host;
 	enum pl08x_dma_chan_state state;
 	bool slave;
@@ -484,7 +483,7 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 	unsigned long flags;
 	size_t bytes = 0;
-	spin_lock_irqsave(&plchan->lock, flags);
+	spin_lock_irqsave(&plchan->vc.lock, flags);
 	ch = plchan->phychan;
 	txd = plchan->at;
@@ -543,7 +542,7 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 		}
 	}
-	spin_unlock_irqrestore(&plchan->lock, flags);
+	spin_unlock_irqrestore(&plchan->vc.lock, flags);
 	return bytes;
 }
@@ -673,12 +672,12 @@ static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
		 * Eww. We know this isn't going to deadlock
		 * but lockdep probably doesn't.
		 */
-		spin_lock(&next->lock);
+		spin_lock(&next->vc.lock);
		/* Re-check the state now that we have the lock */
		success = next->state == PL08X_CHAN_WAITING;
		if (success)
			pl08x_phy_reassign_start(plchan->phychan, next);
-		spin_unlock(&next->lock);
+		spin_unlock(&next->vc.lock);
		/* If the state changed, try to find another channel */
		if (!success)
@@ -1125,12 +1124,12 @@ static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
 	unsigned long flags;
 	dma_cookie_t cookie;
-	spin_lock_irqsave(&plchan->lock, flags);
+	spin_lock_irqsave(&plchan->vc.lock, flags);
 	cookie = dma_cookie_assign(tx);
 	/* Put this onto the pending list */
 	list_add_tail(&txd->node, &plchan->pend_list);
-	spin_unlock_irqrestore(&plchan->lock, flags);
+	spin_unlock_irqrestore(&plchan->vc.lock, flags);
 	return cookie;
 }
@@ -1318,13 +1317,13 @@ static void pl08x_issue_pending(struct dma_chan *chan)
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	unsigned long flags;
-	spin_lock_irqsave(&plchan->lock, flags);
+	spin_lock_irqsave(&plchan->vc.lock, flags);
 	list_splice_tail_init(&plchan->pend_list, &plchan->issued_list);
 	if (!list_empty(&plchan->issued_list)) {
 		if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
 			pl08x_phy_alloc_and_start(plchan);
 	}
-	spin_unlock_irqrestore(&plchan->lock, flags);
+	spin_unlock_irqrestore(&plchan->vc.lock, flags);
 }

 static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
@@ -1337,9 +1336,9 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
 	if (!num_llis) {
 		unsigned long flags;
-		spin_lock_irqsave(&plchan->lock, flags);
+		spin_lock_irqsave(&plchan->vc.lock, flags);
 		pl08x_free_txd(pl08x, txd);
-		spin_unlock_irqrestore(&plchan->lock, flags);
+		spin_unlock_irqrestore(&plchan->vc.lock, flags);
 		return -EINVAL;
 	}
@@ -1551,9 +1550,9 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
-	spin_lock_irqsave(&plchan->lock, flags);
+	spin_lock_irqsave(&plchan->vc.lock, flags);
 	if (!plchan->phychan && !plchan->at) {
-		spin_unlock_irqrestore(&plchan->lock, flags);
+		spin_unlock_irqrestore(&plchan->vc.lock, flags);
 		return 0;
 	}
@@ -1592,7 +1591,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		break;
 	}
-	spin_unlock_irqrestore(&plchan->lock, flags);
+	spin_unlock_irqrestore(&plchan->vc.lock, flags);
 	return ret;
 }
@@ -1664,9 +1663,9 @@ static void pl08x_tasklet(unsigned long data)
 	unsigned long flags;
 	LIST_HEAD(head);
-	spin_lock_irqsave(&plchan->lock, flags);
+	spin_lock_irqsave(&plchan->vc.lock, flags);
 	list_splice_tail_init(&plchan->done_list, &head);
-	spin_unlock_irqrestore(&plchan->lock, flags);
+	spin_unlock_irqrestore(&plchan->vc.lock, flags);
 	while (!list_empty(&head)) {
 		struct pl08x_txd *txd = list_first_entry(&head,
@@ -1681,9 +1680,9 @@ static void pl08x_tasklet(unsigned long data)
 		pl08x_unmap_buffers(txd);
 		/* Free the descriptor */
-		spin_lock_irqsave(&plchan->lock, flags);
+		spin_lock_irqsave(&plchan->vc.lock, flags);
 		pl08x_free_txd(pl08x, txd);
-		spin_unlock_irqrestore(&plchan->lock, flags);
+		spin_unlock_irqrestore(&plchan->vc.lock, flags);
 		/* Callback to signal completion */
 		if (callback)
@@ -1724,7 +1723,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 			continue;
 		}
-		spin_lock(&plchan->lock);
+		spin_lock(&plchan->vc.lock);
 		tx = plchan->at;
 		if (tx) {
 			plchan->at = NULL;
@@ -1745,7 +1744,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 		else
 			pl08x_phy_free(plchan);
-		spin_unlock(&plchan->lock);
+		spin_unlock(&plchan->vc.lock);
 		/* Schedule tasklet on this channel */
 		tasklet_schedule(&plchan->tasklet);
@@ -1808,17 +1807,13 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
			 "initialize virtual channel \"%s\"\n",
			 chan->name);
-		chan->vc.chan.device = dmadev;
-		dma_cookie_init(&chan->vc.chan);
-		spin_lock_init(&chan->lock);
 		INIT_LIST_HEAD(&chan->pend_list);
 		INIT_LIST_HEAD(&chan->issued_list);
 		INIT_LIST_HEAD(&chan->done_list);
 		tasklet_init(&chan->tasklet, pl08x_tasklet,
 			     (unsigned long) chan);
-		list_add_tail(&chan->vc.chan.device_node, &dmadev->channels);
+		vchan_init(&chan->vc, dmadev);
 	}
 	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
 		 i, slave ? "slave" : "memcpy");