mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-11-01 17:08:10 +00:00
ad4fe1264b
If at least one of the requested DMA engine channels doesn't support hardware-accelerated traversal of SG list entries, the DMA driver will most likely work around that by resubmitting the SG list entries from its IRQ handler. That will cause a problem if the DMA Tx channel is recharged and re-executed before the Rx DMA channel: due to the non-deterministic IRQ-handler execution latency, the DMA Tx channel can start pushing data to the SPI bus before the Rx DMA channel has even been reinitialized with the next inbound SG list entry. In doing so the DMA Tx channel implicitly starts filling the DW APB SSI Rx FIFO, which will eventually overflow while the DMA Rx channel is still being recharged and re-executed.

To solve the problem, feed the DMA engine with SG list entries one by one. That keeps the DW APB SSI Tx and Rx FIFOs synchronized and prevents the Rx FIFO overflow. Since in general the SPI tx_sg and rx_sg lists may have different numbers of entries with different lengths (though the total lengths must match), the SG lists are virtually split into a set of DMA transfers whose length is the minimum of the remaining lengths of the current ordered SG entries.

The solution described above is only executed if a full-duplex SPI transfer is requested and the DMA engine hasn't provided channels with the hardware-accelerated SG list traversal capability to handle both SG lists at once.

Signed-off-by: Serge Semin <Sergey.Semin@baikalelectronics.ru>
Suggested-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Link: https://lore.kernel.org/r/20200920112322.24585-12-Sergey.Semin@baikalelectronics.ru
Signed-off-by: Mark Brown <broonie@kernel.org>
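In code, the splitting reads roughly as the loop below. This is a condensed illustrative sketch of the approach, not the driver source verbatim: the helpers dw_spi_dma_submit_tx(), dw_spi_dma_submit_rx() and dw_spi_dma_wait() are assumed stand-ins for the driver's internal submit/issue and wait primitives.

/*
 * Illustrative sketch only: walk tx_sg/rx_sg in lock-step, submitting
 * min-length chunks so the Tx and Rx FIFOs stay synchronized. The
 * dw_spi_dma_submit_*() and dw_spi_dma_wait() helpers are hypothetical
 * stand-ins assumed to issue the descriptors and wait for completion.
 */
static int dw_spi_dma_transfer_one(struct dw_spi *dws,
                                   struct spi_transfer *xfer)
{
        struct scatterlist *tx_sg = NULL, *rx_sg = NULL, tx_tmp, rx_tmp;
        unsigned int tx_len = 0, rx_len = 0;
        unsigned int base, len;
        int ret = 0;

        sg_init_table(&tx_tmp, 1);
        sg_init_table(&rx_tmp, 1);

        for (base = 0, len = 0; base < xfer->len; base += len) {
                /* Fetch the next Tx SG entry once the current one is drained */
                if (!tx_len) {
                        tx_sg = !tx_sg ? &xfer->tx_sg.sgl[0] : sg_next(tx_sg);
                        sg_dma_address(&tx_tmp) = sg_dma_address(tx_sg);
                        tx_len = sg_dma_len(tx_sg);
                }

                /* Likewise for the next Rx SG entry */
                if (!rx_len) {
                        rx_sg = !rx_sg ? &xfer->rx_sg.sgl[0] : sg_next(rx_sg);
                        sg_dma_address(&rx_tmp) = sg_dma_address(rx_sg);
                        rx_len = sg_dma_len(rx_sg);
                }

                /* Chunk length is the minimum of the two entries' remainders */
                len = min(tx_len, rx_len);

                sg_dma_len(&tx_tmp) = len;
                sg_dma_len(&rx_tmp) = len;

                /* Rx is submitted first so it is ready to drain the FIFO */
                ret = dw_spi_dma_submit_rx(dws, &rx_tmp);
                if (!ret)
                        ret = dw_spi_dma_submit_tx(dws, &tx_tmp);
                if (ret)
                        break;

                /* Wait for this chunk so the FIFOs stay in sync */
                ret = dw_spi_dma_wait(dws, len, xfer->effective_speed_hz);
                if (ret)
                        break;

                sg_dma_address(&tx_tmp) += len;
                sg_dma_address(&rx_tmp) += len;
                tx_len -= len;
                rx_len -= len;
        }

        return ret;
}

Issuing the Rx descriptor before the Tx one matters here: the Rx channel must be ready to drain the Rx FIFO before the Tx channel starts clocking data onto the bus.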
278 lines
7.1 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DW_SPI_HEADER_H
#define DW_SPI_HEADER_H

#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/irqreturn.h>
#include <linux/io.h>
#include <linux/scatterlist.h>

/* Register offsets */
#define DW_SPI_CTRLR0                   0x00
#define DW_SPI_CTRLR1                   0x04
#define DW_SPI_SSIENR                   0x08
#define DW_SPI_MWCR                     0x0c
#define DW_SPI_SER                      0x10
#define DW_SPI_BAUDR                    0x14
#define DW_SPI_TXFTLR                   0x18
#define DW_SPI_RXFTLR                   0x1c
#define DW_SPI_TXFLR                    0x20
#define DW_SPI_RXFLR                    0x24
#define DW_SPI_SR                       0x28
#define DW_SPI_IMR                      0x2c
#define DW_SPI_ISR                      0x30
#define DW_SPI_RISR                     0x34
#define DW_SPI_TXOICR                   0x38
#define DW_SPI_RXOICR                   0x3c
#define DW_SPI_RXUICR                   0x40
#define DW_SPI_MSTICR                   0x44
#define DW_SPI_ICR                      0x48
#define DW_SPI_DMACR                    0x4c
#define DW_SPI_DMATDLR                  0x50
#define DW_SPI_DMARDLR                  0x54
#define DW_SPI_IDR                      0x58
#define DW_SPI_VERSION                  0x5c
#define DW_SPI_DR                       0x60
#define DW_SPI_RX_SAMPLE_DLY            0xf0
#define DW_SPI_CS_OVERRIDE              0xf4

/* Bit fields in CTRLR0 */
#define SPI_DFS_OFFSET                  0

#define SPI_FRF_OFFSET                  4
#define SPI_FRF_SPI                     0x0
#define SPI_FRF_SSP                     0x1
#define SPI_FRF_MICROWIRE               0x2
#define SPI_FRF_RESV                    0x3

#define SPI_MODE_OFFSET                 6
#define SPI_SCPH_OFFSET                 6
#define SPI_SCOL_OFFSET                 7

#define SPI_TMOD_OFFSET                 8
#define SPI_TMOD_MASK                   (0x3 << SPI_TMOD_OFFSET)
#define SPI_TMOD_TR                     0x0     /* xmit & recv */
#define SPI_TMOD_TO                     0x1     /* xmit only */
#define SPI_TMOD_RO                     0x2     /* recv only */
#define SPI_TMOD_EPROMREAD              0x3     /* eeprom read mode */

#define SPI_SLVOE_OFFSET                10
#define SPI_SRL_OFFSET                  11
#define SPI_CFS_OFFSET                  12

/* Bit fields in CTRLR0 based on DWC_ssi_databook.pdf v1.01a */
#define DWC_SSI_CTRLR0_SRL_OFFSET       13
#define DWC_SSI_CTRLR0_TMOD_OFFSET      10
#define DWC_SSI_CTRLR0_TMOD_MASK        GENMASK(11, 10)
#define DWC_SSI_CTRLR0_SCPOL_OFFSET     9
#define DWC_SSI_CTRLR0_SCPH_OFFSET      8
#define DWC_SSI_CTRLR0_FRF_OFFSET       6
#define DWC_SSI_CTRLR0_DFS_OFFSET       0

/* Bit fields in SR, 7 bits */
#define SR_MASK                         0x7f    /* cover 7 bits */
#define SR_BUSY                         (1 << 0)
#define SR_TF_NOT_FULL                  (1 << 1)
#define SR_TF_EMPT                      (1 << 2)
#define SR_RF_NOT_EMPT                  (1 << 3)
#define SR_RF_FULL                      (1 << 4)
#define SR_TX_ERR                       (1 << 5)
#define SR_DCOL                         (1 << 6)

/* Bit fields in ISR, IMR, RISR, 7 bits */
#define SPI_INT_TXEI                    (1 << 0)
#define SPI_INT_TXOI                    (1 << 1)
#define SPI_INT_RXUI                    (1 << 2)
#define SPI_INT_RXOI                    (1 << 3)
#define SPI_INT_RXFI                    (1 << 4)
#define SPI_INT_MSTI                    (1 << 5)

/* Bit fields in DMACR */
#define SPI_DMA_RDMAE                   (1 << 0)
#define SPI_DMA_TDMAE                   (1 << 1)

/* TX/RX interrupt level threshold, max can be 256 */
#define SPI_INT_THRESHOLD               32

enum dw_ssi_type {
        SSI_MOTO_SPI = 0,
        SSI_TI_SSP,
        SSI_NS_MICROWIRE,
};

struct dw_spi;
struct dw_spi_dma_ops {
        int (*dma_init)(struct device *dev, struct dw_spi *dws);
        void (*dma_exit)(struct dw_spi *dws);
        int (*dma_setup)(struct dw_spi *dws, struct spi_transfer *xfer);
        bool (*can_dma)(struct spi_controller *master, struct spi_device *spi,
                        struct spi_transfer *xfer);
        int (*dma_transfer)(struct dw_spi *dws, struct spi_transfer *xfer);
        void (*dma_stop)(struct dw_spi *dws);
};

struct dw_spi {
        struct spi_controller *master;
        enum dw_ssi_type type;

        void __iomem *regs;
        unsigned long paddr;
        int irq;
        u32 fifo_len;           /* depth of the FIFO buffer */
        u32 max_freq;           /* max bus freq supported */

        int cs_override;
        u32 reg_io_width;       /* DR I/O width in bytes */
        u16 bus_num;
        u16 num_cs;             /* number of supported chip selects */
        void (*set_cs)(struct spi_device *spi, bool enable);
        u32 (*update_cr0)(struct spi_controller *master, struct spi_device *spi,
                          struct spi_transfer *transfer);

        /* Current message transfer state info */
        size_t len;
        void *tx;
        void *tx_end;
        spinlock_t buf_lock;
        void *rx;
        void *rx_end;
        int dma_mapped;
        u8 n_bytes;             /* current transfer uses 1 or 2 bytes per word */
        irqreturn_t (*transfer_handler)(struct dw_spi *dws);
        u32 current_freq;       /* frequency in Hz */
        u32 cur_rx_sample_dly;
        u32 def_rx_sample_dly_ns;

        /* DMA info */
        struct dma_chan *txchan;
        u32 txburst;
        struct dma_chan *rxchan;
        u32 rxburst;
        u32 dma_sg_burst;
        unsigned long dma_chan_busy;
        dma_addr_t dma_addr;    /* physical address of the Data register */
        const struct dw_spi_dma_ops *dma_ops;
        struct completion dma_completion;

#ifdef CONFIG_DEBUG_FS
        struct dentry *debugfs;
        struct debugfs_regset32 regset;
#endif
};

static inline u32 dw_readl(struct dw_spi *dws, u32 offset)
{
        return __raw_readl(dws->regs + offset);
}

static inline u16 dw_readw(struct dw_spi *dws, u32 offset)
{
        return __raw_readw(dws->regs + offset);
}

static inline void dw_writel(struct dw_spi *dws, u32 offset, u32 val)
{
        __raw_writel(val, dws->regs + offset);
}

static inline void dw_writew(struct dw_spi *dws, u32 offset, u16 val)
{
        __raw_writew(val, dws->regs + offset);
}

static inline u32 dw_read_io_reg(struct dw_spi *dws, u32 offset)
{
        switch (dws->reg_io_width) {
        case 2:
                return dw_readw(dws, offset);
        case 4:
        default:
                return dw_readl(dws, offset);
        }
}

static inline void dw_write_io_reg(struct dw_spi *dws, u32 offset, u32 val)
{
        switch (dws->reg_io_width) {
        case 2:
                dw_writew(dws, offset, val);
                break;
        case 4:
        default:
                dw_writel(dws, offset, val);
                break;
        }
}

static inline void spi_enable_chip(struct dw_spi *dws, int enable)
{
        dw_writel(dws, DW_SPI_SSIENR, (enable ? 1 : 0));
}

static inline void spi_set_clk(struct dw_spi *dws, u16 div)
{
        dw_writel(dws, DW_SPI_BAUDR, div);
}

/* Disable IRQ bits */
static inline void spi_mask_intr(struct dw_spi *dws, u32 mask)
{
        u32 new_mask;

        new_mask = dw_readl(dws, DW_SPI_IMR) & ~mask;
        dw_writel(dws, DW_SPI_IMR, new_mask);
}

/* Enable IRQ bits */
static inline void spi_umask_intr(struct dw_spi *dws, u32 mask)
{
        u32 new_mask;

        new_mask = dw_readl(dws, DW_SPI_IMR) | mask;
        dw_writel(dws, DW_SPI_IMR, new_mask);
}

/*
 * Disable the SPI controller, mask all interrupts and re-enable the
 * controller. The transmit and receive FIFO buffers are cleared when the
 * device is disabled.
 */
static inline void spi_reset_chip(struct dw_spi *dws)
{
        spi_enable_chip(dws, 0);
        spi_mask_intr(dws, 0xff);
        spi_enable_chip(dws, 1);
}

static inline void spi_shutdown_chip(struct dw_spi *dws)
{
        spi_enable_chip(dws, 0);
        spi_set_clk(dws, 0);
}

extern void dw_spi_set_cs(struct spi_device *spi, bool enable);
extern int dw_spi_add_host(struct device *dev, struct dw_spi *dws);
extern void dw_spi_remove_host(struct dw_spi *dws);
extern int dw_spi_suspend_host(struct dw_spi *dws);
extern int dw_spi_resume_host(struct dw_spi *dws);
extern u32 dw_spi_update_cr0(struct spi_controller *master,
                             struct spi_device *spi,
                             struct spi_transfer *transfer);
extern u32 dw_spi_update_cr0_v1_01a(struct spi_controller *master,
                                    struct spi_device *spi,
                                    struct spi_transfer *transfer);

#ifdef CONFIG_SPI_DW_DMA

extern void dw_spi_dma_setup_mfld(struct dw_spi *dws);
extern void dw_spi_dma_setup_generic(struct dw_spi *dws);

#else

static inline void dw_spi_dma_setup_mfld(struct dw_spi *dws) {}
static inline void dw_spi_dma_setup_generic(struct dw_spi *dws) {}

#endif /* !CONFIG_SPI_DW_DMA */

#endif /* DW_SPI_HEADER_H */