serial: stm32: replace access to DMAR bit by dmaengine_pause/resume

It is advisable to rely on DMA pause/resume instead of clearing/setting
the DMA request enable bit (DMAR) for this purpose, as toggling that bit
can cause a DMA request/acknowledge race. We prefer to use
dmaengine_pause() and dmaengine_resume() to pause a DMA transfer when
necessary.
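
For illustration, the change boils down to the following pattern (a
sketch using the driver's existing helpers, not code taken verbatim
from this patch):

  /* Previous approach: mask the request line in the USART itself */
  stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
  ...
  stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);

  /* Preferred approach: let the DMA controller quiesce the channel */
  dmaengine_pause(stm32_port->rx_ch);
  ...
  dmaengine_resume(stm32_port->rx_ch);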

Create two new functions (stm32_usart_rx_dma_pause(),
stm32_usart_rx_dma_resume()) that handle DMA errors when
pausing/resuming.
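
These wrappers delegate to stm32_usart_dma_pause_resume(), whose body is
introduced by the preceding patch in this series and is therefore not part
of this diff. Inferred from the call sites below, it is expected to look
roughly like this (a sketch, not the committed code):

  /* Sketch only: signature and body inferred from the call sites in this patch. */
  static int stm32_usart_dma_pause_resume(struct stm32_port *stm32_port,
  					struct dma_chan *chan,
  					enum dma_status expected_status,
  					int dmaengine_pause_or_resume(struct dma_chan *),
  					bool stm32_usart_xfer_ongoing(struct stm32_port *stm32_port),
  					void stm32_usart_xfer_terminate(struct stm32_port *stm32_port))
  {
  	struct uart_port *port = &stm32_port->port;
  	int ret;

  	/* Nothing to do if no transfer is ongoing/paused on this channel */
  	if (!stm32_usart_xfer_ongoing(stm32_port))
  		return 0;

  	/* Only act when the channel is in the expected DMA state */
  	if (dmaengine_tx_status(chan, chan->cookie, NULL) != expected_status)
  		return -EPERM;

  	ret = dmaengine_pause_or_resume(chan);
  	if (ret) {
  		dev_err(port->dev, "DMA failed with error code: %d\n", ret);
  		stm32_usart_xfer_terminate(stm32_port);
  	}

  	return ret;
  }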

Also rename stm32_usart_start_rx_dma_cyclic() to
stm32_usart_rx_dma_start_or_resume() and use this function to resume an
RX DMA transfer. If the resume fails, stm32_usart_rx_dma_start_or_resume()
can create a new transfer to continue.

It is also safer to stop DMA before clearing DMAR in stm32_usart_shutdown().

Signed-off-by: Valentin Caron <valentin.caron@foss.st.com>
Link: https://lore.kernel.org/r/20230808161906.178996-6-valentin.caron@foss.st.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

@@ -326,6 +326,22 @@ static int stm32_usart_dma_pause_resume(struct stm32_port *stm32_port,
return ret;
}
static int stm32_usart_rx_dma_pause(struct stm32_port *stm32_port)
{
return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch,
DMA_IN_PROGRESS, dmaengine_pause,
stm32_usart_rx_dma_started,
stm32_usart_rx_dma_terminate);
}
static int stm32_usart_rx_dma_resume(struct stm32_port *stm32_port)
{
return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch,
DMA_PAUSED, dmaengine_resume,
stm32_usart_rx_dma_started,
stm32_usart_rx_dma_terminate);
}
/* Return true when data is pending (in pio mode), and false when no data is pending. */
static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr)
{
@@ -485,7 +501,8 @@ static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force
rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
stm32_port->rx_ch->cookie,
&stm32_port->rx_dma_state);
if (rx_dma_status == DMA_IN_PROGRESS) {
if (rx_dma_status == DMA_IN_PROGRESS ||
rx_dma_status == DMA_PAUSED) {
/* Empty DMA buffer */
size = stm32_usart_receive_chars_dma(port);
sr = readl_relaxed(port->membase + ofs->isr);
@@ -502,7 +519,6 @@ static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force
} else {
/* Disable RX DMA */
stm32_usart_rx_dma_terminate(stm32_port);
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
/* Fall back to interrupt mode */
dev_dbg(port->dev, "DMA error, fallback to irq mode\n");
size = stm32_usart_receive_chars_pio(port);
@@ -514,6 +530,76 @@ static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force
return size;
}
static void stm32_usart_rx_dma_complete(void *arg)
{
struct uart_port *port = arg;
struct tty_port *tport = &port->state->port;
unsigned int size;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
size = stm32_usart_receive_chars(port, false);
uart_unlock_and_check_sysrq_irqrestore(port, flags);
if (size)
tty_flip_buffer_push(tport);
}
static int stm32_usart_rx_dma_start_or_resume(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
struct dma_async_tx_descriptor *desc;
enum dma_status rx_dma_status;
int ret;
if (stm32_port->throttled)
return 0;
if (stm32_port->rx_dma_busy) {
rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
stm32_port->rx_ch->cookie,
NULL);
if (rx_dma_status == DMA_IN_PROGRESS)
return 0;
if (rx_dma_status == DMA_PAUSED && !stm32_usart_rx_dma_resume(stm32_port))
return 0;
dev_err(port->dev, "DMA failed : status error.\n");
stm32_usart_rx_dma_terminate(stm32_port);
}
stm32_port->rx_dma_busy = true;
stm32_port->last_res = RX_BUF_L;
/* Prepare a DMA cyclic transaction */
desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch,
stm32_port->rx_dma_buf,
RX_BUF_L, RX_BUF_P,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(port->dev, "rx dma prep cyclic failed\n");
stm32_port->rx_dma_busy = false;
return -ENODEV;
}
desc->callback = stm32_usart_rx_dma_complete;
desc->callback_param = port;
/* Push current DMA transaction in the pending queue */
ret = dma_submit_error(dmaengine_submit(desc));
if (ret) {
dmaengine_terminate_sync(stm32_port->rx_ch);
stm32_port->rx_dma_busy = false;
return ret;
}
/* Issue pending DMA requests */
dma_async_issue_pending(stm32_port->rx_ch);
return 0;
}
static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port)
{
dmaengine_terminate_async(stm32_port->tx_ch);
@@ -585,20 +671,6 @@ static void stm32_usart_tc_interrupt_enable(struct uart_port *port)
stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE);
}
static void stm32_usart_rx_dma_complete(void *arg)
{
struct uart_port *port = arg;
struct tty_port *tport = &port->state->port;
unsigned int size;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
size = stm32_usart_receive_chars(port, false);
uart_unlock_and_check_sysrq_irqrestore(port, flags);
if (size)
tty_flip_buffer_push(tport);
}
static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
@@ -924,11 +996,10 @@ static void stm32_usart_throttle(struct uart_port *port)
spin_lock_irqsave(&port->lock, flags);
/*
* Disable DMA request line if enabled, so the RX data gets queued into the FIFO.
* Pause DMA transfer, so the RX data gets queued into the FIFO.
* Hardware flow control is triggered when RX FIFO is full.
*/
if (stm32_usart_rx_dma_started(stm32_port))
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
stm32_usart_rx_dma_pause(stm32_port);
stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
if (stm32_port->cr3_irq)
@@ -950,14 +1021,15 @@ static void stm32_usart_unthrottle(struct uart_port *port)
if (stm32_port->cr3_irq)
stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
stm32_port->throttled = false;
/*
* Switch back to DMA mode (re-enable DMA request line).
* Switch back to DMA mode (resume DMA).
* Hardware flow control is stopped when FIFO is not full any more.
*/
if (stm32_port->rx_ch)
stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);
stm32_usart_rx_dma_start_or_resume(port);
stm32_port->throttled = false;
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -968,8 +1040,7 @@ static void stm32_usart_stop_rx(struct uart_port *port)
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
/* Disable DMA request line. */
if (stm32_port->rx_ch)
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
stm32_usart_rx_dma_pause(stm32_port);
stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
if (stm32_port->cr3_irq)
@@ -981,64 +1052,6 @@ static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
{
}
static int stm32_usart_start_rx_dma_cyclic(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
struct dma_async_tx_descriptor *desc;
enum dma_status rx_dma_status;
int ret;
if (stm32_port->rx_dma_busy) {
rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
stm32_port->rx_ch->cookie,
NULL);
if (rx_dma_status == DMA_IN_PROGRESS)
return 0;
dev_err(port->dev, "DMA failed : status error.\n");
stm32_usart_rx_dma_terminate(stm32_port);
}
stm32_port->rx_dma_busy = true;
stm32_port->last_res = RX_BUF_L;
/* Prepare a DMA cyclic transaction */
desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch,
stm32_port->rx_dma_buf,
RX_BUF_L, RX_BUF_P,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(port->dev, "rx dma prep cyclic failed\n");
stm32_port->rx_dma_busy = false;
return -ENODEV;
}
desc->callback = stm32_usart_rx_dma_complete;
desc->callback_param = port;
/* Push current DMA transaction in the pending queue */
ret = dma_submit_error(dmaengine_submit(desc));
if (ret) {
dmaengine_terminate_sync(stm32_port->rx_ch);
stm32_port->rx_dma_busy = false;
return ret;
}
/* Issue pending DMA requests */
dma_async_issue_pending(stm32_port->rx_ch);
/*
* DMA request line not re-enabled at resume when port is throttled.
* It will be re-enabled by unthrottle ops.
*/
if (!stm32_port->throttled)
stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);
return 0;
}
static int stm32_usart_startup(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
@@ -1064,7 +1077,7 @@ static int stm32_usart_startup(struct uart_port *port)
writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);
if (stm32_port->rx_ch) {
ret = stm32_usart_start_rx_dma_cyclic(port);
ret = stm32_usart_rx_dma_start_or_resume(port);
if (ret) {
free_irq(port->irq, port);
return ret;
@@ -1811,11 +1824,6 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
pm_runtime_put_noidle(&pdev->dev);
stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE);
cr3 = readl_relaxed(port->membase + ofs->cr3);
cr3 &= ~USART_CR3_EIE;
cr3 &= ~USART_CR3_DMAR;
cr3 &= ~USART_CR3_DDRE;
writel_relaxed(cr3, port->membase + ofs->cr3);
if (stm32_port->tx_ch) {
stm32_usart_of_dma_tx_remove(stm32_port, pdev);
@@ -1827,7 +1835,12 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
dma_release_channel(stm32_port->rx_ch);
}
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
cr3 = readl_relaxed(port->membase + ofs->cr3);
cr3 &= ~USART_CR3_EIE;
cr3 &= ~USART_CR3_DMAR;
cr3 &= ~USART_CR3_DMAT;
cr3 &= ~USART_CR3_DDRE;
writel_relaxed(cr3, port->membase + ofs->cr3);
if (stm32_port->wakeup_src) {
dev_pm_clear_wake_irq(&pdev->dev);
@@ -1999,7 +2012,7 @@ static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
struct tty_port *tport = &port->state->port;
int ret;
unsigned int size;
unsigned int size = 0;
unsigned long flags;
if (!stm32_port->wakeup_src || !tty_port_initialized(tport))
@@ -2021,10 +2034,9 @@ static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
*/
if (stm32_port->rx_ch) {
spin_lock_irqsave(&port->lock, flags);
/* Avoid race with RX IRQ when DMAR is cleared */
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
/* Poll data from DMA RX buffer if any */
size = stm32_usart_receive_chars(port, true);
if (!stm32_usart_rx_dma_pause(stm32_port))
size += stm32_usart_receive_chars(port, true);
stm32_usart_rx_dma_terminate(stm32_port);
uart_unlock_and_check_sysrq_irqrestore(port, flags);
if (size)
@@ -2035,7 +2047,7 @@ static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
stm32_usart_receive_chars(port, false);
} else {
if (stm32_port->rx_ch) {
ret = stm32_usart_start_rx_dma_cyclic(port);
ret = stm32_usart_rx_dma_start_or_resume(port);
if (ret)
return ret;
}