spi: rockchip: use atomic_t state
The state field is currently only used to make sure that only the last of the tx and rx dma callbacks issues a call to spi_finalize_current_transfer. Rather than using a spinlock, we can get away with just turning the state field into an atomic_t. Signed-off-by: Emil Renner Berthing <kernel@esmil.dk> Tested-by: Heiko Stuebner <heiko@sntech.de> Signed-off-by: Mark Brown <broonie@kernel.org>
This commit is contained in:
parent
2410d6a3c3
commit
fab3e4871f
|
@ -142,8 +142,9 @@
|
||||||
#define RF_DMA_EN (1 << 0)
|
#define RF_DMA_EN (1 << 0)
|
||||||
#define TF_DMA_EN (1 << 1)
|
#define TF_DMA_EN (1 << 1)
|
||||||
|
|
||||||
#define RXBUSY (1 << 0)
|
/* Driver state flags */
|
||||||
#define TXBUSY (1 << 1)
|
#define RXDMA (1 << 0)
|
||||||
|
#define TXDMA (1 << 1)
|
||||||
|
|
||||||
/* sclk_out: spi master internal logic in rk3x can support 50Mhz */
|
/* sclk_out: spi master internal logic in rk3x can support 50Mhz */
|
||||||
#define MAX_SCLK_OUT 50000000
|
#define MAX_SCLK_OUT 50000000
|
||||||
|
@ -169,6 +170,9 @@ struct rockchip_spi {
|
||||||
struct clk *apb_pclk;
|
struct clk *apb_pclk;
|
||||||
|
|
||||||
void __iomem *regs;
|
void __iomem *regs;
|
||||||
|
|
||||||
|
atomic_t state;
|
||||||
|
|
||||||
/*depth of the FIFO buffer */
|
/*depth of the FIFO buffer */
|
||||||
u32 fifo_len;
|
u32 fifo_len;
|
||||||
/* max bus freq supported */
|
/* max bus freq supported */
|
||||||
|
@ -187,10 +191,6 @@ struct rockchip_spi {
|
||||||
void *rx;
|
void *rx;
|
||||||
void *rx_end;
|
void *rx_end;
|
||||||
|
|
||||||
u32 state;
|
|
||||||
/* protect state */
|
|
||||||
spinlock_t lock;
|
|
||||||
|
|
||||||
bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];
|
bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];
|
||||||
|
|
||||||
bool use_dma;
|
bool use_dma;
|
||||||
|
@ -302,28 +302,21 @@ static int rockchip_spi_prepare_message(struct spi_master *master,
|
||||||
static void rockchip_spi_handle_err(struct spi_master *master,
|
static void rockchip_spi_handle_err(struct spi_master *master,
|
||||||
struct spi_message *msg)
|
struct spi_message *msg)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
|
||||||
struct rockchip_spi *rs = spi_master_get_devdata(master);
|
struct rockchip_spi *rs = spi_master_get_devdata(master);
|
||||||
|
|
||||||
spin_lock_irqsave(&rs->lock, flags);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* For DMA mode, we need terminate DMA channel and flush
|
* For DMA mode, we need terminate DMA channel and flush
|
||||||
* fifo for the next transfer if DMA thansfer timeout.
|
* fifo for the next transfer if DMA thansfer timeout.
|
||||||
* handle_err() was called by core if transfer failed.
|
* handle_err() was called by core if transfer failed.
|
||||||
* Maybe it is reasonable for error handling here.
|
* Maybe it is reasonable for error handling here.
|
||||||
*/
|
*/
|
||||||
if (rs->use_dma) {
|
if (atomic_read(&rs->state) & TXDMA)
|
||||||
if (rs->state & RXBUSY) {
|
dmaengine_terminate_async(rs->dma_tx.ch);
|
||||||
dmaengine_terminate_async(rs->dma_rx.ch);
|
|
||||||
flush_fifo(rs);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (rs->state & TXBUSY)
|
if (atomic_read(&rs->state) & RXDMA) {
|
||||||
dmaengine_terminate_async(rs->dma_tx.ch);
|
dmaengine_terminate_async(rs->dma_rx.ch);
|
||||||
|
flush_fifo(rs);
|
||||||
}
|
}
|
||||||
|
|
||||||
spin_unlock_irqrestore(&rs->lock, flags);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int rockchip_spi_unprepare_message(struct spi_master *master,
|
static int rockchip_spi_unprepare_message(struct spi_master *master,
|
||||||
|
@ -398,48 +391,36 @@ static int rockchip_spi_pio_transfer(struct rockchip_spi *rs)
|
||||||
|
|
||||||
static void rockchip_spi_dma_rxcb(void *data)
|
static void rockchip_spi_dma_rxcb(void *data)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
|
||||||
struct rockchip_spi *rs = data;
|
struct rockchip_spi *rs = data;
|
||||||
|
int state = atomic_fetch_andnot(RXDMA, &rs->state);
|
||||||
|
|
||||||
spin_lock_irqsave(&rs->lock, flags);
|
if (state & TXDMA)
|
||||||
|
return;
|
||||||
|
|
||||||
rs->state &= ~RXBUSY;
|
spi_enable_chip(rs, false);
|
||||||
if (!(rs->state & TXBUSY)) {
|
spi_finalize_current_transfer(rs->master);
|
||||||
spi_enable_chip(rs, false);
|
|
||||||
spi_finalize_current_transfer(rs->master);
|
|
||||||
}
|
|
||||||
|
|
||||||
spin_unlock_irqrestore(&rs->lock, flags);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void rockchip_spi_dma_txcb(void *data)
|
static void rockchip_spi_dma_txcb(void *data)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
|
||||||
struct rockchip_spi *rs = data;
|
struct rockchip_spi *rs = data;
|
||||||
|
int state = atomic_fetch_andnot(TXDMA, &rs->state);
|
||||||
|
|
||||||
|
if (state & RXDMA)
|
||||||
|
return;
|
||||||
|
|
||||||
/* Wait until the FIFO data completely. */
|
/* Wait until the FIFO data completely. */
|
||||||
wait_for_idle(rs);
|
wait_for_idle(rs);
|
||||||
|
|
||||||
spin_lock_irqsave(&rs->lock, flags);
|
spi_enable_chip(rs, false);
|
||||||
|
spi_finalize_current_transfer(rs->master);
|
||||||
rs->state &= ~TXBUSY;
|
|
||||||
if (!(rs->state & RXBUSY)) {
|
|
||||||
spi_enable_chip(rs, false);
|
|
||||||
spi_finalize_current_transfer(rs->master);
|
|
||||||
}
|
|
||||||
|
|
||||||
spin_unlock_irqrestore(&rs->lock, flags);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
|
static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
|
||||||
struct dma_async_tx_descriptor *rxdesc, *txdesc;
|
struct dma_async_tx_descriptor *rxdesc, *txdesc;
|
||||||
|
|
||||||
spin_lock_irqsave(&rs->lock, flags);
|
atomic_set(&rs->state, 0);
|
||||||
rs->state &= ~RXBUSY;
|
|
||||||
rs->state &= ~TXBUSY;
|
|
||||||
spin_unlock_irqrestore(&rs->lock, flags);
|
|
||||||
|
|
||||||
rxdesc = NULL;
|
rxdesc = NULL;
|
||||||
if (rs->rx) {
|
if (rs->rx) {
|
||||||
|
@ -490,9 +471,7 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
|
||||||
|
|
||||||
/* rx must be started before tx due to spi instinct */
|
/* rx must be started before tx due to spi instinct */
|
||||||
if (rxdesc) {
|
if (rxdesc) {
|
||||||
spin_lock_irqsave(&rs->lock, flags);
|
atomic_or(RXDMA, &rs->state);
|
||||||
rs->state |= RXBUSY;
|
|
||||||
spin_unlock_irqrestore(&rs->lock, flags);
|
|
||||||
dmaengine_submit(rxdesc);
|
dmaengine_submit(rxdesc);
|
||||||
dma_async_issue_pending(rs->dma_rx.ch);
|
dma_async_issue_pending(rs->dma_rx.ch);
|
||||||
}
|
}
|
||||||
|
@ -500,9 +479,7 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
|
||||||
spi_enable_chip(rs, true);
|
spi_enable_chip(rs, true);
|
||||||
|
|
||||||
if (txdesc) {
|
if (txdesc) {
|
||||||
spin_lock_irqsave(&rs->lock, flags);
|
atomic_or(TXDMA, &rs->state);
|
||||||
rs->state |= TXBUSY;
|
|
||||||
spin_unlock_irqrestore(&rs->lock, flags);
|
|
||||||
dmaengine_submit(txdesc);
|
dmaengine_submit(txdesc);
|
||||||
dma_async_issue_pending(rs->dma_tx.ch);
|
dma_async_issue_pending(rs->dma_tx.ch);
|
||||||
}
|
}
|
||||||
|
@ -716,8 +693,6 @@ static int rockchip_spi_probe(struct platform_device *pdev)
|
||||||
goto err_disable_spiclk;
|
goto err_disable_spiclk;
|
||||||
}
|
}
|
||||||
|
|
||||||
spin_lock_init(&rs->lock);
|
|
||||||
|
|
||||||
pm_runtime_set_active(&pdev->dev);
|
pm_runtime_set_active(&pdev->dev);
|
||||||
pm_runtime_enable(&pdev->dev);
|
pm_runtime_enable(&pdev->dev);
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue