spi: rspi: Replace spi_master by spi_controller

As of commit 8caab75fd2 ('spi: Generalize SPI "master" to
"controller"'), the old master-centric names are compatibility wrappers
for the new controller-centric names.

Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Mark Brown <broonie@kernel.org>
commit 9428a073eb (parent 2e5c888873)
Author: Geert Uytterhoeven
Date:   2019-02-08 10:09:07 +01:00
Committer: Mark Brown

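The conversion below is mechanical because of that compatibility layer: every master-centric name this driver uses is an alias for its controller-centric counterpart. A rough sketch of the relevant wrappers, simplified and not the verbatim contents of <linux/spi/spi.h>, covering only the names touched here:

/* Illustrative only -- see include/linux/spi/spi.h for the real definitions. */
#define spi_master				spi_controller

#define SPI_MASTER_MUST_RX			SPI_CONTROLLER_MUST_RX
#define SPI_MASTER_MUST_TX			SPI_CONTROLLER_MUST_TX

#define spi_master_get_devdata(_ctlr)		spi_controller_get_devdata(_ctlr)
#define spi_master_put(_ctlr)			spi_controller_put(_ctlr)
#define spi_master_suspend(_ctlr)		spi_controller_suspend(_ctlr)
#define spi_master_resume(_ctlr)		spi_controller_resume(_ctlr)
#define devm_spi_register_master(_dev, _ctlr)	devm_spi_register_controller(_dev, _ctlr)

Since the wrappers expand to the same symbols, the rename is not intended to change behaviour; it only moves the driver off the deprecated spelling.
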
@@ -180,7 +180,7 @@
struct rspi_data {
void __iomem *addr;
u32 max_speed_hz;
-struct spi_master *master;
+struct spi_controller *ctlr;
wait_queue_head_t wait;
struct clk *clk;
u16 spcmd;
@@ -237,8 +237,8 @@ static u16 rspi_read_data(const struct rspi_data *rspi)
/* optional functions */
struct spi_ops {
int (*set_config_register)(struct rspi_data *rspi, int access_size);
-int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
-struct spi_transfer *xfer);
+int (*transfer_one)(struct spi_controller *ctlr,
+struct spi_device *spi, struct spi_transfer *xfer);
u16 mode_bits;
u16 flags;
u16 fifo_size;
@@ -466,7 +466,7 @@ static int rspi_data_out(struct rspi_data *rspi, u8 data)
{
int error = rspi_wait_for_tx_empty(rspi);
if (error < 0) {
dev_err(&rspi->master->dev, "transmit timeout\n");
dev_err(&rspi->ctlr->dev, "transmit timeout\n");
return error;
}
rspi_write_data(rspi, data);
@@ -480,7 +480,7 @@ static int rspi_data_in(struct rspi_data *rspi)
error = rspi_wait_for_rx_full(rspi);
if (error < 0) {
dev_err(&rspi->master->dev, "receive timeout\n");
dev_err(&rspi->ctlr->dev, "receive timeout\n");
return error;
}
data = rspi_read_data(rspi);
@@ -526,8 +526,8 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
/* First prepare and submit the DMA request(s), as this may fail */
if (rx) {
-desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx,
-rx->sgl, rx->nents, DMA_DEV_TO_MEM,
+desc_rx = dmaengine_prep_slave_sg(rspi->ctlr->dma_rx, rx->sgl,
+rx->nents, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx) {
ret = -EAGAIN;
@@ -546,8 +546,8 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
}
if (tx) {
-desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx,
-tx->sgl, tx->nents, DMA_MEM_TO_DEV,
+desc_tx = dmaengine_prep_slave_sg(rspi->ctlr->dma_tx, tx->sgl,
+tx->nents, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
ret = -EAGAIN;
@@ -584,9 +584,9 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
/* Now start DMA */
if (rx)
-dma_async_issue_pending(rspi->master->dma_rx);
+dma_async_issue_pending(rspi->ctlr->dma_rx);
if (tx)
-dma_async_issue_pending(rspi->master->dma_tx);
+dma_async_issue_pending(rspi->ctlr->dma_tx);
ret = wait_event_interruptible_timeout(rspi->wait,
rspi->dma_callbacked, HZ);
@@ -594,13 +594,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
ret = 0;
} else {
if (!ret) {
dev_err(&rspi->master->dev, "DMA timeout\n");
dev_err(&rspi->ctlr->dev, "DMA timeout\n");
ret = -ETIMEDOUT;
}
if (tx)
-dmaengine_terminate_all(rspi->master->dma_tx);
+dmaengine_terminate_all(rspi->ctlr->dma_tx);
if (rx)
-dmaengine_terminate_all(rspi->master->dma_rx);
+dmaengine_terminate_all(rspi->ctlr->dma_rx);
}
rspi_disable_irq(rspi, irq_mask);
@@ -614,12 +614,12 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
no_dma_tx:
if (rx)
-dmaengine_terminate_all(rspi->master->dma_rx);
+dmaengine_terminate_all(rspi->ctlr->dma_rx);
no_dma_rx:
if (ret == -EAGAIN) {
pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
-dev_driver_string(&rspi->master->dev),
-dev_name(&rspi->master->dev));
+dev_driver_string(&rspi->ctlr->dev),
+dev_name(&rspi->ctlr->dev));
}
return ret;
}
@@ -660,10 +660,10 @@ static bool __rspi_can_dma(const struct rspi_data *rspi,
return xfer->len > rspi->ops->fifo_size;
}
-static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi,
+static bool rspi_can_dma(struct spi_controller *ctlr, struct spi_device *spi,
struct spi_transfer *xfer)
{
-struct rspi_data *rspi = spi_master_get_devdata(master);
+struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
return __rspi_can_dma(rspi, xfer);
}
@@ -671,7 +671,7 @@ static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi,
static int rspi_dma_check_then_transfer(struct rspi_data *rspi,
struct spi_transfer *xfer)
{
-if (!rspi->master->can_dma || !__rspi_can_dma(rspi, xfer))
+if (!rspi->ctlr->can_dma || !__rspi_can_dma(rspi, xfer))
return -EAGAIN;
/* rx_buf can be NULL on RSPI on SH in TX-only Mode */
@@ -698,10 +698,10 @@ static int rspi_common_transfer(struct rspi_data *rspi,
return 0;
}
-static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
-struct spi_transfer *xfer)
+static int rspi_transfer_one(struct spi_controller *ctlr,
+struct spi_device *spi, struct spi_transfer *xfer)
{
-struct rspi_data *rspi = spi_master_get_devdata(master);
+struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
u8 spcr;
spcr = rspi_read8(rspi, RSPI_SPCR);
@@ -716,11 +716,11 @@ static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
return rspi_common_transfer(rspi, xfer);
}
-static int rspi_rz_transfer_one(struct spi_master *master,
+static int rspi_rz_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer)
{
-struct rspi_data *rspi = spi_master_get_devdata(master);
+struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
rspi_rz_receive_init(rspi);
@@ -739,7 +739,7 @@ static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
if (n == QSPI_BUFFER_SIZE) {
ret = rspi_wait_for_tx_empty(rspi);
if (ret < 0) {
dev_err(&rspi->master->dev, "transmit timeout\n");
dev_err(&rspi->ctlr->dev, "transmit timeout\n");
return ret;
}
for (i = 0; i < n; i++)
@@ -747,7 +747,7 @@
ret = rspi_wait_for_rx_full(rspi);
if (ret < 0) {
dev_err(&rspi->master->dev, "receive timeout\n");
dev_err(&rspi->ctlr->dev, "receive timeout\n");
return ret;
}
for (i = 0; i < n; i++)
@@ -785,7 +785,7 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
unsigned int i, len;
int ret;
-if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
+if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
ret = rspi_dma_transfer(rspi, &xfer->tx_sg, NULL);
if (ret != -EAGAIN)
return ret;
@@ -796,7 +796,7 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
if (len == QSPI_BUFFER_SIZE) {
ret = rspi_wait_for_tx_empty(rspi);
if (ret < 0) {
dev_err(&rspi->master->dev, "transmit timeout\n");
dev_err(&rspi->ctlr->dev, "transmit timeout\n");
return ret;
}
for (i = 0; i < len; i++)
@@ -822,7 +822,7 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
unsigned int i, len;
int ret;
-if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
+if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
int ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
if (ret != -EAGAIN)
return ret;
@@ -833,7 +833,7 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
if (len == QSPI_BUFFER_SIZE) {
ret = rspi_wait_for_rx_full(rspi);
if (ret < 0) {
dev_err(&rspi->master->dev, "receive timeout\n");
dev_err(&rspi->ctlr->dev, "receive timeout\n");
return ret;
}
for (i = 0; i < len; i++)
@@ -849,10 +849,10 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
return 0;
}
-static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
-struct spi_transfer *xfer)
+static int qspi_transfer_one(struct spi_controller *ctlr,
+struct spi_device *spi, struct spi_transfer *xfer)
{
-struct rspi_data *rspi = spi_master_get_devdata(master);
+struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
if (spi->mode & SPI_LOOP) {
return qspi_transfer_out_in(rspi, xfer);
@@ -870,7 +870,7 @@ static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
static int rspi_setup(struct spi_device *spi)
{
-struct rspi_data *rspi = spi_master_get_devdata(spi->master);
+struct rspi_data *rspi = spi_controller_get_devdata(spi->controller);
rspi->max_speed_hz = spi->max_speed_hz;
@@ -955,10 +955,10 @@ static int qspi_setup_sequencer(struct rspi_data *rspi,
return 0;
}
-static int rspi_prepare_message(struct spi_master *master,
+static int rspi_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
-struct rspi_data *rspi = spi_master_get_devdata(master);
+struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
int ret;
if (msg->spi->mode &
@@ -974,10 +974,10 @@ static int rspi_prepare_message(struct spi_master *master,
return 0;
}
-static int rspi_unprepare_message(struct spi_master *master,
+static int rspi_unprepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
-struct rspi_data *rspi = spi_master_get_devdata(master);
+struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
/* Disable SPI function */
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
@@ -1081,7 +1081,7 @@ static struct dma_chan *rspi_request_dma_chan(struct device *dev,
return chan;
}
-static int rspi_request_dma(struct device *dev, struct spi_master *master,
+static int rspi_request_dma(struct device *dev, struct spi_controller *ctlr,
const struct resource *res)
{
const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev);
@@ -1099,37 +1099,37 @@ static int rspi_request_dma(struct device *dev, struct spi_master *master,
return 0;
}
-master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
-res->start + RSPI_SPDR);
-if (!master->dma_tx)
+ctlr->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
+res->start + RSPI_SPDR);
+if (!ctlr->dma_tx)
return -ENODEV;
-master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
-res->start + RSPI_SPDR);
-if (!master->dma_rx) {
-dma_release_channel(master->dma_tx);
-master->dma_tx = NULL;
+ctlr->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
+res->start + RSPI_SPDR);
+if (!ctlr->dma_rx) {
+dma_release_channel(ctlr->dma_tx);
+ctlr->dma_tx = NULL;
return -ENODEV;
}
-master->can_dma = rspi_can_dma;
+ctlr->can_dma = rspi_can_dma;
dev_info(dev, "DMA available");
return 0;
}
-static void rspi_release_dma(struct spi_master *master)
+static void rspi_release_dma(struct spi_controller *ctlr)
{
-if (master->dma_tx)
-dma_release_channel(master->dma_tx);
-if (master->dma_rx)
-dma_release_channel(master->dma_rx);
+if (ctlr->dma_tx)
+dma_release_channel(ctlr->dma_tx);
+if (ctlr->dma_rx)
+dma_release_channel(ctlr->dma_rx);
}
static int rspi_remove(struct platform_device *pdev)
{
struct rspi_data *rspi = platform_get_drvdata(pdev);
-rspi_release_dma(rspi->master);
+rspi_release_dma(rspi->ctlr);
pm_runtime_disable(&pdev->dev);
return 0;
@@ -1139,7 +1139,7 @@ static const struct spi_ops rspi_ops = {
.set_config_register = rspi_set_config_register,
.transfer_one = rspi_transfer_one,
.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
-.flags = SPI_MASTER_MUST_TX,
+.flags = SPI_CONTROLLER_MUST_TX,
.fifo_size = 8,
};
@@ -1147,7 +1147,7 @@ static const struct spi_ops rspi_rz_ops = {
.set_config_register = rspi_rz_set_config_register,
.transfer_one = rspi_rz_transfer_one,
.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
-.flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
+.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 8, /* 8 for TX, 32 for RX */
};
@@ -1157,7 +1157,7 @@ static const struct spi_ops qspi_ops = {
.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP |
SPI_TX_DUAL | SPI_TX_QUAD |
SPI_RX_DUAL | SPI_RX_QUAD,
-.flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
+.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 32,
};
@@ -1174,7 +1174,7 @@ static const struct of_device_id rspi_of_match[] = {
MODULE_DEVICE_TABLE(of, rspi_of_match);
-static int rspi_parse_dt(struct device *dev, struct spi_master *master)
+static int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
{
u32 num_cs;
int error;
@@ -1186,12 +1186,12 @@ static int rspi_parse_dt(struct device *dev, struct spi_master *master)
return error;
}
-master->num_chipselect = num_cs;
+ctlr->num_chipselect = num_cs;
return 0;
}
#else
#define rspi_of_match NULL
-static inline int rspi_parse_dt(struct device *dev, struct spi_master *master)
+static inline int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
{
return -EINVAL;
}
@@ -1212,28 +1212,28 @@ static int rspi_request_irq(struct device *dev, unsigned int irq,
static int rspi_probe(struct platform_device *pdev)
{
struct resource *res;
-struct spi_master *master;
+struct spi_controller *ctlr;
struct rspi_data *rspi;
int ret;
const struct rspi_plat_data *rspi_pd;
const struct spi_ops *ops;
-master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
-if (master == NULL)
+ctlr = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
+if (ctlr == NULL)
return -ENOMEM;
ops = of_device_get_match_data(&pdev->dev);
if (ops) {
-ret = rspi_parse_dt(&pdev->dev, master);
+ret = rspi_parse_dt(&pdev->dev, ctlr);
if (ret)
goto error1;
} else {
ops = (struct spi_ops *)pdev->id_entry->driver_data;
rspi_pd = dev_get_platdata(&pdev->dev);
if (rspi_pd && rspi_pd->num_chipselect)
-master->num_chipselect = rspi_pd->num_chipselect;
+ctlr->num_chipselect = rspi_pd->num_chipselect;
else
-master->num_chipselect = 2; /* default */
+ctlr->num_chipselect = 2; /* default */
}
/* ops parameter check */
@@ -1243,10 +1243,10 @@ static int rspi_probe(struct platform_device *pdev)
goto error1;
}
-rspi = spi_master_get_devdata(master);
+rspi = spi_controller_get_devdata(ctlr);
platform_set_drvdata(pdev, rspi);
rspi->ops = ops;
-rspi->master = master;
+rspi->ctlr = ctlr;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rspi->addr = devm_ioremap_resource(&pdev->dev, res);
@@ -1266,15 +1266,15 @@ static int rspi_probe(struct platform_device *pdev)
init_waitqueue_head(&rspi->wait);
-master->bus_num = pdev->id;
-master->setup = rspi_setup;
-master->auto_runtime_pm = true;
-master->transfer_one = ops->transfer_one;
-master->prepare_message = rspi_prepare_message;
-master->unprepare_message = rspi_unprepare_message;
-master->mode_bits = ops->mode_bits;
-master->flags = ops->flags;
-master->dev.of_node = pdev->dev.of_node;
+ctlr->bus_num = pdev->id;
+ctlr->setup = rspi_setup;
+ctlr->auto_runtime_pm = true;
+ctlr->transfer_one = ops->transfer_one;
+ctlr->prepare_message = rspi_prepare_message;
+ctlr->unprepare_message = rspi_unprepare_message;
+ctlr->mode_bits = ops->mode_bits;
+ctlr->flags = ops->flags;
+ctlr->dev.of_node = pdev->dev.of_node;
ret = platform_get_irq_byname(pdev, "rx");
if (ret < 0) {
@@ -1311,13 +1311,13 @@ static int rspi_probe(struct platform_device *pdev)
goto error2;
}
-ret = rspi_request_dma(&pdev->dev, master, res);
+ret = rspi_request_dma(&pdev->dev, ctlr, res);
if (ret < 0)
dev_warn(&pdev->dev, "DMA not available, using PIO\n");
-ret = devm_spi_register_master(&pdev->dev, master);
+ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret < 0) {
dev_err(&pdev->dev, "spi_register_master error.\n");
dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
goto error3;
}
@@ -1326,11 +1326,11 @@ static int rspi_probe(struct platform_device *pdev)
return 0;
error3:
-rspi_release_dma(master);
+rspi_release_dma(ctlr);
error2:
pm_runtime_disable(&pdev->dev);
error1:
-spi_master_put(master);
+spi_controller_put(ctlr);
return ret;
}
@@ -1349,14 +1349,14 @@ static int rspi_suspend(struct device *dev)
{
struct rspi_data *rspi = dev_get_drvdata(dev);
-return spi_master_suspend(rspi->master);
+return spi_controller_suspend(rspi->ctlr);
}
static int rspi_resume(struct device *dev)
{
struct rspi_data *rspi = dev_get_drvdata(dev);
-return spi_master_resume(rspi->master);
+return spi_controller_resume(rspi->ctlr);
}
static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);