spi: pl022: switch to use default spi_transfer_one_message()

Except for polling mode, this driver's transfer_one_message() relies on
an interrupt handler and a tasklet. This is problematic because
spi_transfer_delay_exec(), which may sleep, is called from both the
interrupt handler and the tasklet. This causes the following warning:
BUG: sleeping function called from invalid context at drivers/spi/spi.c:1428

Switch to the default spi_transfer_one_message() instead, which calls
spi_transfer_delay_exec() from a context where sleeping is allowed.

Signed-off-by: Nam Cao <namcao@linutronix.de>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Link: https://lore.kernel.org/r/ae1940abd6ff6a9e77b4373cff60007c641a0c6c.1701274975.git.namcao@linutronix.de
Signed-off-by: Mark Brown <broonie@kernel.org>
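
For context on why the switch fixes the warning: the SPI core's default
spi_transfer_one_message() walks the message from the controller's
message-pump kthread, so the inter-transfer delay executes in process
context. The following is a rough sketch of that per-transfer loop
(simplified, not the actual drivers/spi/spi.c code; chip-select handling,
statistics and timeouts are omitted):

#include <linux/spi/spi.h>

/*
 * Simplified sketch of the core's default per-transfer loop (NOT the
 * verbatim drivers/spi/spi.c implementation). The key point is that
 * spi_transfer_delay_exec() runs in the message-pump kthread, i.e. in
 * sleepable process context, unlike the old pl022 IRQ/tasklet path.
 */
static int default_transfer_one_message(struct spi_controller *ctlr,
					struct spi_message *msg)
{
	struct spi_transfer *xfer;
	int ret = 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		reinit_completion(&ctlr->xfer_completion);

		ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
		if (ret < 0)
			break;
		if (ret > 0) {
			/* driver signals this via spi_finalize_current_transfer() */
			wait_for_completion(&ctlr->xfer_completion);
			ret = 0;
		}

		/* sleeping is fine here */
		spi_transfer_delay_exec(xfer);

		msg->actual_length += xfer->len;
	}

	msg->status = ret;
	spi_finalize_current_message(ctlr);
	return ret;
}

With that loop in the core, the driver only has to implement transfer_one()
(plus set_cs() and handle_err()), and to signal asynchronous completion with
spi_finalize_current_transfer(), which is what the diff below does.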
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
@@ -338,7 +338,6 @@ struct vendor_data {
  * @clk: outgoing clock "SPICLK" for the SPI bus
  * @host: SPI framework hookup
  * @host_info: controller-specific data from machine setup
- * @pump_transfers: Tasklet used in Interrupt Transfer mode
  * @cur_msg: Pointer to current spi_message being processed
  * @cur_transfer: Pointer to current spi_transfer
  * @cur_chip: pointer to current clients chip(assigned from controller_state)
@@ -372,9 +371,6 @@ struct pl022 {
 	struct clk *clk;
 	struct spi_controller *host;
 	struct pl022_ssp_controller *host_info;
-	/* Message per-transfer pump */
-	struct tasklet_struct pump_transfers;
-	struct spi_message *cur_msg;
 	struct spi_transfer *cur_transfer;
 	struct chip_data *cur_chip;
 	bool next_msg_cs_active;
@@ -437,93 +433,23 @@ struct chip_data {
  * (vendor extension). Each of the 5 LSB in the register controls one chip
  * select signal.
  */
-static void internal_cs_control(struct pl022 *pl022, u32 command)
+static void internal_cs_control(struct pl022 *pl022, bool enable)
 {
 	u32 tmp;

 	tmp = readw(SSP_CSR(pl022->virtbase));
-	if (command == SSP_CHIP_SELECT)
+	if (enable)
 		tmp &= ~BIT(pl022->cur_cs);
 	else
 		tmp |= BIT(pl022->cur_cs);
 	writew(tmp, SSP_CSR(pl022->virtbase));
 }

-static void pl022_cs_control(struct pl022 *pl022, u32 command)
+static void pl022_cs_control(struct spi_device *spi, bool enable)
 {
+	struct pl022 *pl022 = spi_controller_get_devdata(spi->controller);
+
 	if (pl022->vendor->internal_cs_ctrl)
-		internal_cs_control(pl022, command);
-	else if (pl022->cur_gpiod)
-		/*
-		 * This needs to be inverted since with GPIOLIB in
-		 * control, the inversion will be handled by
-		 * GPIOLIB's active low handling. The "command"
-		 * passed into this function will be SSP_CHIP_SELECT
-		 * which is enum:ed to 0, so we need the inverse
-		 * (1) to activate chip select.
-		 */
-		gpiod_set_value(pl022->cur_gpiod, !command);
-}
-
-/**
- * giveback - current spi_message is over, schedule next message and call
- *	callback of this message. Assumes that caller already
- *	set message->status; dma and pio irqs are blocked
- * @pl022: SSP driver private data structure
- */
-static void giveback(struct pl022 *pl022)
-{
-	struct spi_transfer *last_transfer;
-
-	pl022->next_msg_cs_active = false;
-
-	last_transfer = list_last_entry(&pl022->cur_msg->transfers,
-					struct spi_transfer, transfer_list);
-
-	/* Delay if requested before any change in chip select */
-	/*
-	 * FIXME: This runs in interrupt context.
-	 * Is this really smart?
-	 */
-	spi_transfer_delay_exec(last_transfer);
-
-	if (!last_transfer->cs_change) {
-		struct spi_message *next_msg;
-
-		/*
-		 * cs_change was not set. We can keep the chip select
-		 * enabled if there is message in the queue and it is
-		 * for the same spi device.
-		 *
-		 * We cannot postpone this until pump_messages, because
-		 * after calling msg->complete (below) the driver that
-		 * sent the current message could be unloaded, which
-		 * could invalidate the cs_control() callback...
-		 */
-
-		/* get a pointer to the next message, if any */
-		next_msg = spi_get_next_queued_message(pl022->host);
-
-		/*
-		 * see if the next and current messages point
-		 * to the same spi device.
-		 */
-		if (next_msg && next_msg->spi != pl022->cur_msg->spi)
-			next_msg = NULL;
-		if (!next_msg || pl022->cur_msg->state == STATE_ERROR)
-			pl022_cs_control(pl022, SSP_CHIP_DESELECT);
-		else
-			pl022->next_msg_cs_active = true;
-	}
-
-	pl022->cur_msg = NULL;
-	pl022->cur_transfer = NULL;
-	pl022->cur_chip = NULL;
-
-	/* disable the SPI/SSP operation */
-	writew((readw(SSP_CR1(pl022->virtbase)) &
-		(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
-
-	spi_finalize_current_message(pl022->host);
 }

 /**
@@ -757,30 +683,6 @@ static void readwriter(struct pl022 *pl022)
 	 */
 }

-/**
- * next_transfer - Move to the Next transfer in the current spi message
- * @pl022: SSP driver private data structure
- *
- * This function moves though the linked list of spi transfers in the
- * current spi message and returns with the state of current spi
- * message i.e whether its last transfer is done(STATE_DONE) or
- * Next transfer is ready(STATE_RUNNING)
- */
-static void *next_transfer(struct pl022 *pl022)
-{
-	struct spi_message *msg = pl022->cur_msg;
-	struct spi_transfer *trans = pl022->cur_transfer;
-
-	/* Move to next transfer */
-	if (trans->transfer_list.next != &msg->transfers) {
-		pl022->cur_transfer =
-		    list_entry(trans->transfer_list.next,
-			       struct spi_transfer, transfer_list);
-		return STATE_RUNNING;
-	}
-	return STATE_DONE;
-}
-
 /*
  * This DMA functionality is only compiled in if we have
  * access to the generic DMA devices/DMA engine.
@@ -800,7 +702,6 @@ static void unmap_free_dma_scatter(struct pl022 *pl022)
 static void dma_callback(void *data)
 {
 	struct pl022 *pl022 = data;
-	struct spi_message *msg = pl022->cur_msg;

 	BUG_ON(!pl022->sgt_rx.sgl);
@@ -845,13 +746,7 @@ static void dma_callback(void *data)

 	unmap_free_dma_scatter(pl022);

-	/* Update total bytes transferred */
-	msg->actual_length += pl022->cur_transfer->len;
-
-	/* Move to next transfer */
-	msg->state = next_transfer(pl022);
-	if (msg->state != STATE_DONE && pl022->cur_transfer->cs_change)
-		pl022_cs_control(pl022, SSP_CHIP_DESELECT);
-	tasklet_schedule(&pl022->pump_transfers);
+	spi_finalize_current_transfer(pl022->host);
 }

 static void setup_dma_scatter(struct pl022 *pl022,
@@ -1189,6 +1084,9 @@ static int pl022_dma_autoprobe(struct pl022 *pl022)

 static void terminate_dma(struct pl022 *pl022)
 {
+	if (!pl022->dma_running)
+		return;
+
 	struct dma_chan *rxchan = pl022->dma_rx_channel;
 	struct dma_chan *txchan = pl022->dma_tx_channel;
@@ -1200,8 +1098,7 @@ static void terminate_dma(struct pl022 *pl022)

 static void pl022_dma_remove(struct pl022 *pl022)
 {
-	if (pl022->dma_running)
-		terminate_dma(pl022);
+	terminate_dma(pl022);
 	if (pl022->dma_tx_channel)
 		dma_release_channel(pl022->dma_tx_channel);
 	if (pl022->dma_rx_channel)
@@ -1225,6 +1122,10 @@ static inline int pl022_dma_probe(struct pl022 *pl022)
 	return 0;
 }

+static inline void terminate_dma(struct pl022 *pl022)
+{
+}
+
 static inline void pl022_dma_remove(struct pl022 *pl022)
 {
 }
@@ -1246,16 +1147,7 @@ static inline void pl022_dma_remove(struct pl022 *pl022)
 static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
 {
 	struct pl022 *pl022 = dev_id;
-	struct spi_message *msg = pl022->cur_msg;
 	u16 irq_status = 0;

-	if (unlikely(!msg)) {
-		dev_err(&pl022->adev->dev,
-			"bad message state in interrupt handler");
-		/* Never fail */
-		return IRQ_HANDLED;
-	}
-
 	/* Read the Interrupt Status Register */
 	irq_status = readw(SSP_MIS(pl022->virtbase));
@@ -1287,10 +1179,8 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
 		writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
 		writew((readw(SSP_CR1(pl022->virtbase)) &
 			(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
-		msg->state = STATE_ERROR;
-
-		/* Schedule message queue handler */
-		tasklet_schedule(&pl022->pump_transfers);
+		pl022->cur_transfer->error |= SPI_TRANS_FAIL_IO;
+		spi_finalize_current_transfer(pl022->host);
 		return IRQ_HANDLED;
 	}
@@ -1318,13 +1208,7 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
 				"number of bytes on a 16bit bus?)\n",
 				(u32) (pl022->rx - pl022->rx_end));
 		}
-		/* Update total bytes transferred */
-		msg->actual_length += pl022->cur_transfer->len;
-
-		/* Move to next transfer */
-		msg->state = next_transfer(pl022);
-		if (msg->state != STATE_DONE && pl022->cur_transfer->cs_change)
-			pl022_cs_control(pl022, SSP_CHIP_DESELECT);
-		tasklet_schedule(&pl022->pump_transfers);
+		spi_finalize_current_transfer(pl022->host);
 		return IRQ_HANDLED;
 	}
@@ -1361,98 +1245,20 @@ static int set_up_next_transfer(struct pl022 *pl022,
 	return 0;
 }

-/**
- * pump_transfers - Tasklet function which schedules next transfer
- * when running in interrupt or DMA transfer mode.
- * @data: SSP driver private data structure
- *
- */
-static void pump_transfers(unsigned long data)
+static int do_interrupt_dma_transfer(struct pl022 *pl022)
 {
-	struct pl022 *pl022 = (struct pl022 *) data;
-	struct spi_message *message = NULL;
-	struct spi_transfer *transfer = NULL;
-	struct spi_transfer *previous = NULL;
-
-	/* Get current state information */
-	message = pl022->cur_msg;
-	transfer = pl022->cur_transfer;
-
-	/* Handle for abort */
-	if (message->state == STATE_ERROR) {
-		message->status = -EIO;
-		giveback(pl022);
-		return;
-	}
-
-	/* Handle end of message */
-	if (message->state == STATE_DONE) {
-		message->status = 0;
-		giveback(pl022);
-		return;
-	}
-
-	/* Delay if requested at end of transfer before CS change */
-	if (message->state == STATE_RUNNING) {
-		previous = list_entry(transfer->transfer_list.prev,
-					struct spi_transfer,
-					transfer_list);
-		/*
-		 * FIXME: This runs in interrupt context.
-		 * Is this really smart?
-		 */
-		spi_transfer_delay_exec(previous);
-
-		/* Reselect chip select only if cs_change was requested */
-		if (previous->cs_change)
-			pl022_cs_control(pl022, SSP_CHIP_SELECT);
-	} else {
-		/* STATE_START */
-		message->state = STATE_RUNNING;
-	}
-
-	if (set_up_next_transfer(pl022, transfer)) {
-		message->state = STATE_ERROR;
-		message->status = -EIO;
-		giveback(pl022);
-		return;
-	}
-
-	/* Flush the FIFOs and let's go! */
-	flush(pl022);
-
-	if (pl022->cur_chip->enable_dma) {
-		if (configure_dma(pl022)) {
-			dev_dbg(&pl022->adev->dev,
-				"configuration of DMA failed, fall back to interrupt mode\n");
-			goto err_config_dma;
-		}
-		return;
-	}
-
-err_config_dma:
-	/* enable all interrupts except RX */
-	writew(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM, SSP_IMSC(pl022->virtbase));
-}
-
-static void do_interrupt_dma_transfer(struct pl022 *pl022)
-{
+	int ret;
+
 	/*
 	 * Default is to enable all interrupts except RX -
 	 * this will be enabled once TX is complete
 	 */
 	u32 irqflags = (u32)(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM);

-	/* Enable target chip, if not already active */
-	if (!pl022->next_msg_cs_active)
-		pl022_cs_control(pl022, SSP_CHIP_SELECT);
-
-	if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
-		/* Error path */
-		pl022->cur_msg->state = STATE_ERROR;
-		pl022->cur_msg->status = -EIO;
-		giveback(pl022);
-		return;
-	}
+	ret = set_up_next_transfer(pl022, pl022->cur_transfer);
+	if (ret)
+		return ret;

 	/* If we're using DMA, set up DMA here */
 	if (pl022->cur_chip->enable_dma) {
 		/* Configure DMA transfer */
@@ -1469,6 +1275,7 @@ static void do_interrupt_dma_transfer(struct pl022 *pl022)
 	writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
 	       SSP_CR1(pl022->virtbase));
 	writew(irqflags, SSP_IMSC(pl022->virtbase));
+	return 1;
 }

 static void print_current_status(struct pl022 *pl022)
@@ -1495,111 +1302,67 @@ static void print_current_status(struct pl022 *pl022)
 }

-static void do_polling_transfer(struct pl022 *pl022)
+static int do_polling_transfer(struct pl022 *pl022)
 {
-	struct spi_message *message = NULL;
-	struct spi_transfer *transfer = NULL;
-	struct spi_transfer *previous = NULL;
+	int ret;
 	unsigned long time, timeout;

-	message = pl022->cur_msg;
-
-	while (message->state != STATE_DONE) {
-		/* Handle for abort */
-		if (message->state == STATE_ERROR)
-			break;
-		transfer = pl022->cur_transfer;
-
-		/* Delay if requested at end of transfer */
-		if (message->state == STATE_RUNNING) {
-			previous =
-			    list_entry(transfer->transfer_list.prev,
-				       struct spi_transfer, transfer_list);
-			spi_transfer_delay_exec(previous);
-			if (previous->cs_change)
-				pl022_cs_control(pl022, SSP_CHIP_SELECT);
-		} else {
-			/* STATE_START */
-			message->state = STATE_RUNNING;
-			if (!pl022->next_msg_cs_active)
-				pl022_cs_control(pl022, SSP_CHIP_SELECT);
+	/* Configuration Changing Per Transfer */
+	ret = set_up_next_transfer(pl022, pl022->cur_transfer);
+	if (ret)
+		return ret;
+
+	/* Flush FIFOs and enable SSP */
+	flush(pl022);
+	writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
+	       SSP_CR1(pl022->virtbase));
+
+	dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n");
+
+	timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT);
+	while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) {
+		time = jiffies;
+		readwriter(pl022);
+		if (time_after(time, timeout)) {
+			dev_warn(&pl022->adev->dev,
+				"%s: timeout!\n", __func__);
+			print_current_status(pl022);
+			return -ETIMEDOUT;
 		}
+		cpu_relax();
+	}

-		/* Configuration Changing Per Transfer */
-		if (set_up_next_transfer(pl022, transfer)) {
-			/* Error path */
-			message->state = STATE_ERROR;
-			break;
-		}
-		/* Flush FIFOs and enable SSP */
-		flush(pl022);
-		writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
-		       SSP_CR1(pl022->virtbase));
-
-		dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n");
-
-		timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT);
-		while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) {
-			time = jiffies;
-			readwriter(pl022);
-			if (time_after(time, timeout)) {
-				dev_warn(&pl022->adev->dev,
-					"%s: timeout!\n", __func__);
-				message->state = STATE_TIMEOUT;
-				print_current_status(pl022);
-				goto out;
-			}
-			cpu_relax();
-		}
-
-		/* Update total byte transferred */
-		message->actual_length += pl022->cur_transfer->len;
-		/* Move to next transfer */
-		message->state = next_transfer(pl022);
-		if (message->state != STATE_DONE
-		    && pl022->cur_transfer->cs_change)
-			pl022_cs_control(pl022, SSP_CHIP_DESELECT);
-	}
-out:
-	/* Handle end of message */
-	if (message->state == STATE_DONE)
-		message->status = 0;
-	else if (message->state == STATE_TIMEOUT)
-		message->status = -EAGAIN;
-	else
-		message->status = -EIO;
-
-	giveback(pl022);
-	return;
+	return 0;
 }

-static int pl022_transfer_one_message(struct spi_controller *host,
-				      struct spi_message *msg)
+static int pl022_transfer_one(struct spi_controller *host, struct spi_device *spi,
+			      struct spi_transfer *transfer)
 {
 	struct pl022 *pl022 = spi_controller_get_devdata(host);

-	/* Initial message state */
-	pl022->cur_msg = msg;
-	msg->state = STATE_START;
-	pl022->cur_transfer = list_entry(msg->transfers.next,
-					 struct spi_transfer, transfer_list);
+	pl022->cur_transfer = transfer;

 	/* Setup the SPI using the per chip configuration */
-	pl022->cur_chip = spi_get_ctldata(msg->spi);
-	pl022->cur_cs = spi_get_chipselect(msg->spi, 0);
+	pl022->cur_chip = spi_get_ctldata(spi);
+	pl022->cur_cs = spi_get_chipselect(spi, 0);
 	/* This is always available but may be set to -ENOENT */
-	pl022->cur_gpiod = spi_get_csgpiod(msg->spi, 0);
+	pl022->cur_gpiod = spi_get_csgpiod(spi, 0);

 	restore_state(pl022);
 	flush(pl022);

 	if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
-		do_polling_transfer(pl022);
+		return do_polling_transfer(pl022);
 	else
-		do_interrupt_dma_transfer(pl022);
+		return do_interrupt_dma_transfer(pl022);
+}

-	return 0;
+static void pl022_handle_err(struct spi_controller *ctlr, struct spi_message *message)
+{
+	struct pl022 *pl022 = spi_controller_get_devdata(ctlr);
+
+	terminate_dma(pl022);
+	writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
+	writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
 }

 static int pl022_unprepare_transfer_hardware(struct spi_controller *host)
@@ -2138,7 +1901,9 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
 	host->cleanup = pl022_cleanup;
 	host->setup = pl022_setup;
 	host->auto_runtime_pm = true;
-	host->transfer_one_message = pl022_transfer_one_message;
+	host->transfer_one = pl022_transfer_one;
+	host->set_cs = pl022_cs_control;
+	host->handle_err = pl022_handle_err;
 	host->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware;
 	host->rt = platform_info->rt;
 	host->dev.of_node = dev->of_node;
@@ -2175,10 +1940,6 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
 		goto err_no_clk;
 	}

-	/* Initialize transfer pump */
-	tasklet_init(&pl022->pump_transfers, pump_transfers,
-		     (unsigned long)pl022);
-
 	/* Disable SSP */
 	writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
 	       SSP_CR1(pl022->virtbase));
@@ -2261,7 +2022,6 @@ pl022_remove(struct amba_device *adev)

 	pl022_dma_remove(pl022);
 	amba_release_regions(adev);
-	tasklet_disable(&pl022->pump_transfers);
 }

 #ifdef CONFIG_PM_SLEEP
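
For anyone adapting another driver the same way: the return values that
do_polling_transfer() (0 or a negative errno) and do_interrupt_dma_transfer()
(1, with completion signalled later from the IRQ/DMA callbacks) follow above
are the SPI core's transfer_one() contract. A hypothetical skeleton, not taken
from this patch:

#include <linux/spi/spi.h>

/* Hypothetical driver state, only for the skeleton below. */
struct example_priv {
	bool use_polling;
};

/*
 * transfer_one() return-value contract:
 *   < 0 -> error; the core aborts the message and calls ->handle_err()
 *   0   -> transfer finished synchronously (the polling case)
 *   > 0 -> transfer still in flight; the driver later calls
 *          spi_finalize_current_transfer() from its IRQ/DMA completion path
 */
static int example_transfer_one(struct spi_controller *ctlr,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct example_priv *priv = spi_controller_get_devdata(ctlr);

	if (priv->use_polling) {
		/* ... busy-wait on the FIFOs here ... */
		return 0;	/* done: the core runs the delay and CS change */
	}

	/* ... program the hardware and enable IRQ/DMA here ... */
	return 1;		/* async: the core waits for the completion */
}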