dmaengine updates for v6.5

New support:
  - TI J721S2 CSI BCDMA support
 
  Updates:
  - Native HDMA support for dw edma driver
  - ste dma40 updates for supporting proper SRAM handle in DT
  - removal of dma device chancnt setting in drivers
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEE+vs47OPLdNbVcHzyfBQHDyUjg0cFAmSmXBgACgkQfBQHDyUj
 g0cI0Q/8DcHi1Hn6rRIzeKyZIhttG/fDW7DcroTeHoJROCkjMi1vV/B7gkSqQxgD
 KxciVPtaOeMPSSf6rrWAmVmE8DiPhODHisaUt4vKaNAjyRlfv2lyqRnCcZ9F5Y+C
 V/spkSEkaC0AZ4FcXM8GMYFc4erS23J6fi56vH39jJvR6LrIP6g7RpOck50Ep2WX
 wqEXuMSo7tRwa7sPKDqh4c03NLQRMf9HSzH7g1bAOrtGRmdmA4go1Mi7o3KpnjMO
 epnTraE2UHoLFdEz/qWOly6r7hn0W1+0B21NmZLMR+AWGFNXRJKltkA7EsiYydE6
 v6XeSHvdlSK7FUEEBfkzp/sjsok7H9xXPMnJZfsiZ899fKBwZ5uuvf+Fp5ashYj5
 xLPbWH5UHUXMzLTvLotySpxkLBThE8jlnSLM7CzThnrDOJMlYTpCtCW37rLugUGy
 fB5HfPAx74nftheV39dxfTPJvMjWf5CYVnaQiiGiinZgUbuMvTyxXguKsBQeMTFc
 ExmE4KuAUcjmPX6sG9rVcNsC+aBmKEM9rDoFHEH/sz7sY3Wth1qKihwzkkfmT7I2
 /O8tyJ6NCt2cWEiJDUG8jElv40y7rPoHLe74EbcomBug8hYlG9862/jd0hIbX1op
 MWj3i1pJ9P7l5vWDRjmfNcQibwaa2F/iRZJJobsmqf7mrSB5cxY=
 =pGmA
 -----END PGP SIGNATURE-----

Merge tag 'dmaengine-6.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:
 "New support:
   - TI J721S2 CSI BCDMA support

  Updates:
   - Native HDMA support for dw edma driver
   - ste dma40 updates for supporting proper SRAM handle in DT
   - removal of dma device chancnt setting in drivers"

* tag 'dmaengine-6.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (28 commits)
  dmaengine: sprd: Don't set chancnt
  dmaengine: hidma: Don't set chancnt
  dmaengine: plx_dma: Don't set chancnt
  dmaengine: axi-dmac: Don't set chancnt
  dmaengine: dw-axi-dmac: Don't set chancnt
  dmaengine: qcom: bam_dma: allow omitting num-{channels,ees}
  dmaengine: dw-edma: Add HDMA DebugFS support
  dmaengine: dw-edma: Add support for native HDMA
  dmaengine: dw-edma: Create a new dw_edma_core_ops structure to abstract controller operation
  dmaengine: dw-edma: Rename dw_edma_core_ops structure to dw_edma_plat_ops
  dmaengine: ste_dma40: use proper format string for resource_size_t
  dmaengine: make QCOM_HIDMA depend on HAS_IOMEM
  dmaengine: ste_dma40: fix typo in enum documentation
  dmaengine: ste_dma40: use correct print specifier for resource_size_t
  MAINTAINERS: Add myself as the DW eDMA driver reviewer
  MAINTAINERS: Add Manivannan to DW eDMA driver maintainers list
  MAINTAINERS: Demote Gustavo Pimentel to DW EDMA driver reviewer
  dmaengine: ti: k3-udma: Add support for J721S2 CSI BCDMA instance
  dt-bindings: dma: ti: Add J721S2 BCDMA
  dmaengine: ti: k3-psil-j721s2: Add PSI-L thread map for main CPSW2G
  ...
Linus Torvalds, 2023-07-06 09:48:31 -07:00
commit 0b90730502
 30 files changed, 1074 insertions(+), 401 deletions(-)

Documentation/devicetree/bindings/dma/stericsson,dma40.yaml

@ -112,14 +112,23 @@ properties:
- const: stericsson,dma40
reg:
items:
- description: DMA40 memory base
- description: LCPA memory base
oneOf:
- items:
- description: DMA40 memory base
- items:
- description: DMA40 memory base
- description: LCPA memory base, deprecated, use eSRAM pool instead
deprecated: true
reg-names:
items:
- const: base
- const: lcpa
oneOf:
- items:
- const: base
- items:
- const: base
- const: lcpa
deprecated: true
interrupts:
maxItems: 1
@ -127,6 +136,15 @@ properties:
clocks:
maxItems: 1
sram:
$ref: /schemas/types.yaml#/definitions/phandle-array
description: A phandle array with inner size 1 (no arg cells).
First phandle is the LCPA (Logical Channel Parameter Address) memory.
Second phandle is the LCLA (Logical Channel Link base Address) memory.
maxItems: 2
items:
maxItems: 1
memcpy-channels:
$ref: /schemas/types.yaml#/definitions/uint32-array
description: Array of u32 elements indicating which channels on the DMA
@ -138,6 +156,7 @@ required:
- reg
- interrupts
- clocks
- sram
- memcpy-channels
additionalProperties: false
@ -149,8 +168,9 @@ examples:
#include <dt-bindings/mfd/dbx500-prcmu.h>
dma-controller@801c0000 {
compatible = "stericsson,db8500-dma40", "stericsson,dma40";
reg = <0x801c0000 0x1000>, <0x40010000 0x800>;
reg-names = "base", "lcpa";
reg = <0x801c0000 0x1000>;
reg-names = "base";
sram = <&lcpa>, <&lcla>;
interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <3>;
memcpy-channels = <56 57 58 59 60>;

Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml

@ -33,6 +33,7 @@ properties:
enum:
- ti,am62a-dmss-bcdma-csirx
- ti,am64-dmss-bcdma
- ti,j721s2-dmss-bcdma-csi
reg:
minItems: 3
@ -151,7 +152,12 @@ allOf:
required:
- power-domains
else:
- if:
properties:
compatible:
contains:
const: ti,am64-dmss-bcdma
then:
properties:
reg:
minItems: 5
@ -168,6 +174,28 @@ allOf:
- ti,sci-rm-range-bchan
- ti,sci-rm-range-tchan
- if:
properties:
compatible:
contains:
const: ti,j721s2-dmss-bcdma-csi
then:
properties:
ti,sci-rm-range-bchan: false
reg:
maxItems: 4
reg-names:
items:
- const: gcfg
- const: rchanrt
- const: tchanrt
- const: ringrt
required:
- ti,sci-rm-range-tchan
unevaluatedProperties: false
examples:

Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dpdma.yaml

@ -41,6 +41,9 @@ properties:
clock-names:
const: axi_clk
power-domains:
maxItems: 1
required:
- "#dma-cells"
- compatible
@ -48,12 +51,14 @@ required:
- interrupts
- clocks
- clock-names
- power-domains
additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/xlnx-zynqmp-power.h>
dma: dma-controller@fd4c0000 {
compatible = "xlnx,zynqmp-dpdma";
@ -63,6 +68,7 @@ examples:
clocks = <&dpdma_clk>;
clock-names = "axi_clk";
#dma-cells = <1>;
power-domains = <&zynqmp_firmware PD_DP>;
};
...

MAINTAINERS

@ -5932,7 +5932,9 @@ S: Orphan
F: drivers/mtd/nand/raw/denali*
DESIGNWARE EDMA CORE IP DRIVER
M: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
R: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
R: Serge Semin <fancer.lancer@gmail.com>
L: dmaengine@vger.kernel.org
S: Maintained
F: drivers/dma/dw-edma/

drivers/dma/Kconfig

@ -553,6 +553,7 @@ config STE_DMA40
bool "ST-Ericsson DMA40 support"
depends on ARCH_U8500
select DMA_ENGINE
select SRAM
help
Support for ST-Ericsson DMA40 controller

drivers/dma/dma-axi-dmac.c

@ -963,7 +963,6 @@ static int axi_dmac_probe(struct platform_device *pdev)
dma_dev->device_terminate_all = axi_dmac_terminate_all;
dma_dev->device_synchronize = axi_dmac_synchronize;
dma_dev->dev = &pdev->dev;
dma_dev->chancnt = 1;
dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
dma_dev->directions = BIT(dmac->chan.direction);

drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c

@ -1466,7 +1466,6 @@ static int dw_probe(struct platform_device *pdev)
dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask);
/* DMA capabilities */
dw->dma.chancnt = hdata->nr_channels;
dw->dma.max_burst = hdata->axi_rw_burst_len;
dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;

drivers/dma/dw-edma/Makefile

@ -1,7 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DW_EDMA) += dw-edma.o
dw-edma-$(CONFIG_DEBUG_FS) := dw-edma-v0-debugfs.o
dw-edma-objs := dw-edma-core.o \
dw-edma-v0-core.o $(dw-edma-y)
dw-edma-$(CONFIG_DEBUG_FS) := dw-edma-v0-debugfs.o \
dw-hdma-v0-debugfs.o
dw-edma-objs := dw-edma-core.o \
dw-edma-v0-core.o \
dw-hdma-v0-core.o $(dw-edma-y)
obj-$(CONFIG_DW_EDMA_PCIE) += dw-edma-pcie.o

drivers/dma/dw-edma/dw-edma-core.c

@ -18,6 +18,7 @@
#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "dw-hdma-v0-core.h"
#include "../dmaengine.h"
#include "../virt-dma.h"
@ -183,6 +184,7 @@ static void vchan_free_desc(struct virt_dma_desc *vdesc)
static int dw_edma_start_transfer(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->dw;
struct dw_edma_chunk *child;
struct dw_edma_desc *desc;
struct virt_dma_desc *vd;
@ -200,7 +202,7 @@ static int dw_edma_start_transfer(struct dw_edma_chan *chan)
if (!child)
return 0;
dw_edma_v0_core_start(child, !desc->xfer_sz);
dw_edma_core_start(dw, child, !desc->xfer_sz);
desc->xfer_sz += child->ll_region.sz;
dw_edma_free_burst(child);
list_del(&child->list);
@ -287,7 +289,7 @@ static int dw_edma_device_terminate_all(struct dma_chan *dchan)
chan->configured = false;
} else if (chan->status == EDMA_ST_IDLE) {
chan->configured = false;
} else if (dw_edma_v0_core_ch_status(chan) == DMA_COMPLETE) {
} else if (dw_edma_core_ch_status(chan) == DMA_COMPLETE) {
/*
* The channel is in a false BUSY state, probably didn't
* receive or lost an interrupt
@ -599,8 +601,6 @@ static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
struct virt_dma_desc *vd;
unsigned long flags;
dw_edma_v0_core_clear_done_int(chan);
spin_lock_irqsave(&chan->vc.lock, flags);
vd = vchan_next_desc(&chan->vc);
if (vd) {
@ -641,8 +641,6 @@ static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
struct virt_dma_desc *vd;
unsigned long flags;
dw_edma_v0_core_clear_abort_int(chan);
spin_lock_irqsave(&chan->vc.lock, flags);
vd = vchan_next_desc(&chan->vc);
if (vd) {
@ -654,63 +652,32 @@ static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
chan->status = EDMA_ST_IDLE;
}
static irqreturn_t dw_edma_interrupt(int irq, void *data, bool write)
{
struct dw_edma_irq *dw_irq = data;
struct dw_edma *dw = dw_irq->dw;
unsigned long total, pos, val;
unsigned long off;
u32 mask;
if (write) {
total = dw->wr_ch_cnt;
off = 0;
mask = dw_irq->wr_mask;
} else {
total = dw->rd_ch_cnt;
off = dw->wr_ch_cnt;
mask = dw_irq->rd_mask;
}
val = dw_edma_v0_core_status_done_int(dw, write ?
EDMA_DIR_WRITE :
EDMA_DIR_READ);
val &= mask;
for_each_set_bit(pos, &val, total) {
struct dw_edma_chan *chan = &dw->chan[pos + off];
dw_edma_done_interrupt(chan);
}
val = dw_edma_v0_core_status_abort_int(dw, write ?
EDMA_DIR_WRITE :
EDMA_DIR_READ);
val &= mask;
for_each_set_bit(pos, &val, total) {
struct dw_edma_chan *chan = &dw->chan[pos + off];
dw_edma_abort_interrupt(chan);
}
return IRQ_HANDLED;
}
static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
{
return dw_edma_interrupt(irq, data, true);
struct dw_edma_irq *dw_irq = data;
return dw_edma_core_handle_int(dw_irq, EDMA_DIR_WRITE,
dw_edma_done_interrupt,
dw_edma_abort_interrupt);
}
static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
{
return dw_edma_interrupt(irq, data, false);
struct dw_edma_irq *dw_irq = data;
return dw_edma_core_handle_int(dw_irq, EDMA_DIR_READ,
dw_edma_done_interrupt,
dw_edma_abort_interrupt);
}
static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
{
dw_edma_interrupt(irq, data, true);
dw_edma_interrupt(irq, data, false);
irqreturn_t ret = IRQ_NONE;
return IRQ_HANDLED;
ret |= dw_edma_interrupt_write(irq, data);
ret |= dw_edma_interrupt_read(irq, data);
return ret;
}
static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
@ -811,7 +778,7 @@ static int dw_edma_channel_setup(struct dw_edma *dw, u32 wr_alloc, u32 rd_alloc)
vchan_init(&chan->vc, dma);
dw_edma_v0_core_device_config(chan);
dw_edma_core_ch_config(chan);
}
/* Set DMA channel capabilities */
@ -956,14 +923,19 @@ int dw_edma_probe(struct dw_edma_chip *chip)
dw->chip = chip;
if (dw->chip->mf == EDMA_MF_HDMA_NATIVE)
dw_hdma_v0_core_register(dw);
else
dw_edma_v0_core_register(dw);
raw_spin_lock_init(&dw->lock);
dw->wr_ch_cnt = min_t(u16, chip->ll_wr_cnt,
dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE));
dw_edma_core_ch_count(dw, EDMA_DIR_WRITE));
dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH);
dw->rd_ch_cnt = min_t(u16, chip->ll_rd_cnt,
dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ));
dw_edma_core_ch_count(dw, EDMA_DIR_READ));
dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH);
if (!dw->wr_ch_cnt && !dw->rd_ch_cnt)
@ -982,7 +954,7 @@ int dw_edma_probe(struct dw_edma_chip *chip)
dev_name(chip->dev));
/* Disable eDMA, only to establish the ideal initial conditions */
dw_edma_v0_core_off(dw);
dw_edma_core_off(dw);
/* Request IRQs */
err = dw_edma_irq_request(dw, &wr_alloc, &rd_alloc);
@ -995,7 +967,7 @@ int dw_edma_probe(struct dw_edma_chip *chip)
goto err_irq_free;
/* Turn debugfs on */
dw_edma_v0_core_debugfs_on(dw);
dw_edma_core_debugfs_on(dw);
chip->dw = dw;
@ -1021,7 +993,7 @@ int dw_edma_remove(struct dw_edma_chip *chip)
return -ENODEV;
/* Disable eDMA */
dw_edma_v0_core_off(dw);
dw_edma_core_off(dw);
/* Free irqs */
for (i = (dw->nr_irqs - 1); i >= 0; i--)

drivers/dma/dw-edma/dw-edma-core.h

@ -111,6 +111,21 @@ struct dw_edma {
raw_spinlock_t lock; /* Only for legacy */
struct dw_edma_chip *chip;
const struct dw_edma_core_ops *core;
};
typedef void (*dw_edma_handler_t)(struct dw_edma_chan *);
struct dw_edma_core_ops {
void (*off)(struct dw_edma *dw);
u16 (*ch_count)(struct dw_edma *dw, enum dw_edma_dir dir);
enum dma_status (*ch_status)(struct dw_edma_chan *chan);
irqreturn_t (*handle_int)(struct dw_edma_irq *dw_irq, enum dw_edma_dir dir,
dw_edma_handler_t done, dw_edma_handler_t abort);
void (*start)(struct dw_edma_chunk *chunk, bool first);
void (*ch_config)(struct dw_edma_chan *chan);
void (*debugfs_on)(struct dw_edma *dw);
};
struct dw_edma_sg {
@ -148,4 +163,47 @@ struct dw_edma_chan *dchan2dw_edma_chan(struct dma_chan *dchan)
return vc2dw_edma_chan(to_virt_chan(dchan));
}
static inline
void dw_edma_core_off(struct dw_edma *dw)
{
dw->core->off(dw);
}
static inline
u16 dw_edma_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
{
return dw->core->ch_count(dw, dir);
}
static inline
enum dma_status dw_edma_core_ch_status(struct dw_edma_chan *chan)
{
return chan->dw->core->ch_status(chan);
}
static inline irqreturn_t
dw_edma_core_handle_int(struct dw_edma_irq *dw_irq, enum dw_edma_dir dir,
dw_edma_handler_t done, dw_edma_handler_t abort)
{
return dw_irq->dw->core->handle_int(dw_irq, dir, done, abort);
}
static inline
void dw_edma_core_start(struct dw_edma *dw, struct dw_edma_chunk *chunk, bool first)
{
dw->core->start(chunk, first);
}
static inline
void dw_edma_core_ch_config(struct dw_edma_chan *chan)
{
chan->dw->core->ch_config(chan);
}
static inline
void dw_edma_core_debugfs_on(struct dw_edma *dw)
{
dw->core->debugfs_on(dw);
}
#endif /* _DW_EDMA_CORE_H */
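
The inline wrappers above turn the core layer into a pluggable abstraction: a controller flavor only has to fill in a dw_edma_core_ops table and point dw->core at it before the generic code runs. A minimal sketch of how a hypothetical additional flavor would hook in; every my_* name below is an illustrative placeholder, not driver code:

/* Hypothetical flavor registration, sketched against the ops table above.
 * All my_* symbols are placeholders; stub bodies stand in for real ones.
 */
static void my_core_off(struct dw_edma *dw) {}
static u16 my_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir) { return 8; }
static enum dma_status my_core_ch_status(struct dw_edma_chan *chan) { return DMA_COMPLETE; }
static irqreturn_t my_core_handle_int(struct dw_edma_irq *dw_irq, enum dw_edma_dir dir,
				      dw_edma_handler_t done, dw_edma_handler_t abort)
{
	return IRQ_NONE;
}
static void my_core_start(struct dw_edma_chunk *chunk, bool first) {}
static void my_core_ch_config(struct dw_edma_chan *chan) {}
static void my_core_debugfs_on(struct dw_edma *dw) {}

static const struct dw_edma_core_ops my_core_ops = {
	.off		= my_core_off,
	.ch_count	= my_core_ch_count,
	.ch_status	= my_core_ch_status,
	.handle_int	= my_core_handle_int,
	.start		= my_core_start,
	.ch_config	= my_core_ch_config,
	.debugfs_on	= my_core_debugfs_on,
};

void my_core_register(struct dw_edma *dw)
{
	dw->core = &my_core_ops;	/* all dw_edma_core_*() wrappers dispatch here */
}

Since dw_edma_probe() and the interrupt paths only ever call the dw_edma_core_*() wrappers, adding a flavor leaves the generic code untouched; only the registration branch in probe grows by one case.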

drivers/dma/dw-edma/dw-edma-pcie.c

@ -109,7 +109,7 @@ static u64 dw_edma_pcie_address(struct device *dev, phys_addr_t cpu_addr)
return region.start;
}
static const struct dw_edma_core_ops dw_edma_pcie_core_ops = {
static const struct dw_edma_plat_ops dw_edma_pcie_plat_ops = {
.irq_vector = dw_edma_pcie_irq_vector,
.pci_address = dw_edma_pcie_address,
};
@ -225,7 +225,7 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
chip->mf = vsec_data.mf;
chip->nr_irqs = nr_irqs;
chip->ops = &dw_edma_pcie_core_ops;
chip->ops = &dw_edma_pcie_plat_ops;
chip->ll_wr_cnt = vsec_data.wr_ch_cnt;
chip->ll_rd_cnt = vsec_data.rd_ch_cnt;

drivers/dma/dw-edma/dw-edma-v0-core.c

@ -7,7 +7,7 @@
*/
#include <linux/bitfield.h>
#include <linux/irqreturn.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "dw-edma-core.h"
@ -160,7 +160,7 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
/* eDMA management callbacks */
void dw_edma_v0_core_off(struct dw_edma *dw)
static void dw_edma_v0_core_off(struct dw_edma *dw)
{
SET_BOTH_32(dw, int_mask,
EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
@ -169,7 +169,7 @@ void dw_edma_v0_core_off(struct dw_edma *dw)
SET_BOTH_32(dw, engine_en, 0);
}
u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
static u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
{
u32 num_ch;
@ -186,7 +186,7 @@ u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
return (u16)num_ch;
}
enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
static enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->dw;
u32 tmp;
@ -202,7 +202,7 @@ enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
return DMA_ERROR;
}
void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
static void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->dw;
@ -210,7 +210,7 @@ void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
}
void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
static void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->dw;
@ -218,18 +218,64 @@ void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
}
u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir)
static u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
return FIELD_GET(EDMA_V0_DONE_INT_MASK,
GET_RW_32(dw, dir, int_status));
}
u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
static u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
return FIELD_GET(EDMA_V0_ABORT_INT_MASK,
GET_RW_32(dw, dir, int_status));
}
static irqreturn_t
dw_edma_v0_core_handle_int(struct dw_edma_irq *dw_irq, enum dw_edma_dir dir,
dw_edma_handler_t done, dw_edma_handler_t abort)
{
struct dw_edma *dw = dw_irq->dw;
unsigned long total, pos, val;
irqreturn_t ret = IRQ_NONE;
struct dw_edma_chan *chan;
unsigned long off;
u32 mask;
if (dir == EDMA_DIR_WRITE) {
total = dw->wr_ch_cnt;
off = 0;
mask = dw_irq->wr_mask;
} else {
total = dw->rd_ch_cnt;
off = dw->wr_ch_cnt;
mask = dw_irq->rd_mask;
}
val = dw_edma_v0_core_status_done_int(dw, dir);
val &= mask;
for_each_set_bit(pos, &val, total) {
chan = &dw->chan[pos + off];
dw_edma_v0_core_clear_done_int(chan);
done(chan);
ret = IRQ_HANDLED;
}
val = dw_edma_v0_core_status_abort_int(dw, dir);
val &= mask;
for_each_set_bit(pos, &val, total) {
chan = &dw->chan[pos + off];
dw_edma_v0_core_clear_abort_int(chan);
abort(chan);
ret = IRQ_HANDLED;
}
return ret;
}
static void dw_edma_v0_write_ll_data(struct dw_edma_chunk *chunk, int i,
u32 control, u32 size, u64 sar, u64 dar)
{
@ -300,7 +346,7 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
dw_edma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
}
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
struct dw_edma_chan *chan = chunk->chan;
struct dw_edma *dw = chan->dw;
@ -371,7 +417,7 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
}
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
static void dw_edma_v0_core_ch_config(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->dw;
u32 tmp = 0;
@ -438,12 +484,25 @@ int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
SET_RW_32(dw, chan->dir, ch67_imwr_data, tmp);
break;
}
return 0;
}
/* eDMA debugfs callbacks */
void dw_edma_v0_core_debugfs_on(struct dw_edma *dw)
static void dw_edma_v0_core_debugfs_on(struct dw_edma *dw)
{
dw_edma_v0_debugfs_on(dw);
}
static const struct dw_edma_core_ops dw_edma_v0_core = {
.off = dw_edma_v0_core_off,
.ch_count = dw_edma_v0_core_ch_count,
.ch_status = dw_edma_v0_core_ch_status,
.handle_int = dw_edma_v0_core_handle_int,
.start = dw_edma_v0_core_start,
.ch_config = dw_edma_v0_core_ch_config,
.debugfs_on = dw_edma_v0_core_debugfs_on,
};
void dw_edma_v0_core_register(struct dw_edma *dw)
{
dw->core = &dw_edma_v0_core;
}

drivers/dma/dw-edma/dw-edma-v0-core.h

@ -11,17 +11,7 @@
#include <linux/dma/edma.h>
/* eDMA management callbacks */
void dw_edma_v0_core_off(struct dw_edma *chan);
u16 dw_edma_v0_core_ch_count(struct dw_edma *chan, enum dw_edma_dir dir);
enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan);
void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan);
void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan);
u32 dw_edma_v0_core_status_done_int(struct dw_edma *chan, enum dw_edma_dir dir);
u32 dw_edma_v0_core_status_abort_int(struct dw_edma *chan, enum dw_edma_dir dir);
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first);
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan);
/* eDMA debug fs callbacks */
void dw_edma_v0_core_debugfs_on(struct dw_edma *dw);
/* eDMA core register */
void dw_edma_v0_core_register(struct dw_edma *dw);
#endif /* _DW_EDMA_V0_CORE_H */

drivers/dma/dw-edma/dw-hdma-v0-core.c

@ -0,0 +1,296 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2023 Cai Huoqing
* Synopsys DesignWare HDMA v0 core
*/
#include <linux/bitfield.h>
#include <linux/irqreturn.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "dw-edma-core.h"
#include "dw-hdma-v0-core.h"
#include "dw-hdma-v0-regs.h"
#include "dw-hdma-v0-debugfs.h"
enum dw_hdma_control {
DW_HDMA_V0_CB = BIT(0),
DW_HDMA_V0_TCB = BIT(1),
DW_HDMA_V0_LLP = BIT(2),
DW_HDMA_V0_LIE = BIT(3),
DW_HDMA_V0_RIE = BIT(4),
DW_HDMA_V0_CCS = BIT(8),
DW_HDMA_V0_LLE = BIT(9),
};
static inline struct dw_hdma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
{
return dw->chip->reg_base;
}
static inline struct dw_hdma_v0_ch_regs __iomem *
__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
{
if (dir == EDMA_DIR_WRITE)
return &(__dw_regs(dw)->ch[ch].wr);
else
return &(__dw_regs(dw)->ch[ch].rd);
}
#define SET_CH_32(dw, dir, ch, name, value) \
writel(value, &(__dw_ch_regs(dw, dir, ch)->name))
#define GET_CH_32(dw, dir, ch, name) \
readl(&(__dw_ch_regs(dw, dir, ch)->name))
#define SET_BOTH_CH_32(dw, ch, name, value) \
do { \
writel(value, &(__dw_ch_regs(dw, EDMA_DIR_WRITE, ch)->name)); \
writel(value, &(__dw_ch_regs(dw, EDMA_DIR_READ, ch)->name)); \
} while (0)
/* HDMA management callbacks */
static void dw_hdma_v0_core_off(struct dw_edma *dw)
{
int id;
for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) {
SET_BOTH_CH_32(dw, id, int_setup,
HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK);
SET_BOTH_CH_32(dw, id, int_clear,
HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK);
SET_BOTH_CH_32(dw, id, ch_en, 0);
}
}
static u16 dw_hdma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
{
u32 num_ch = 0;
int id;
for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) {
if (GET_CH_32(dw, dir, id, ch_en) & BIT(0))
num_ch++;
}
if (num_ch > HDMA_V0_MAX_NR_CH)
num_ch = HDMA_V0_MAX_NR_CH;
return (u16)num_ch;
}
static enum dma_status dw_hdma_v0_core_ch_status(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->dw;
u32 tmp;
tmp = FIELD_GET(HDMA_V0_CH_STATUS_MASK,
GET_CH_32(dw, chan->id, chan->dir, ch_stat));
if (tmp == 1)
return DMA_IN_PROGRESS;
else if (tmp == 3)
return DMA_COMPLETE;
else
return DMA_ERROR;
}
static void dw_hdma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->dw;
SET_CH_32(dw, chan->dir, chan->id, int_clear, HDMA_V0_STOP_INT_MASK);
}
static void dw_hdma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->dw;
SET_CH_32(dw, chan->dir, chan->id, int_clear, HDMA_V0_ABORT_INT_MASK);
}
static u32 dw_hdma_v0_core_status_int(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->dw;
return GET_CH_32(dw, chan->dir, chan->id, int_stat);
}
static irqreturn_t
dw_hdma_v0_core_handle_int(struct dw_edma_irq *dw_irq, enum dw_edma_dir dir,
dw_edma_handler_t done, dw_edma_handler_t abort)
{
struct dw_edma *dw = dw_irq->dw;
unsigned long total, pos, val;
irqreturn_t ret = IRQ_NONE;
struct dw_edma_chan *chan;
unsigned long off, mask;
if (dir == EDMA_DIR_WRITE) {
total = dw->wr_ch_cnt;
off = 0;
mask = dw_irq->wr_mask;
} else {
total = dw->rd_ch_cnt;
off = dw->wr_ch_cnt;
mask = dw_irq->rd_mask;
}
for_each_set_bit(pos, &mask, total) {
chan = &dw->chan[pos + off];
val = dw_hdma_v0_core_status_int(chan);
if (FIELD_GET(HDMA_V0_STOP_INT_MASK, val)) {
dw_hdma_v0_core_clear_done_int(chan);
done(chan);
ret = IRQ_HANDLED;
}
if (FIELD_GET(HDMA_V0_ABORT_INT_MASK, val)) {
dw_hdma_v0_core_clear_abort_int(chan);
abort(chan);
ret = IRQ_HANDLED;
}
}
return ret;
}
static void dw_hdma_v0_write_ll_data(struct dw_edma_chunk *chunk, int i,
u32 control, u32 size, u64 sar, u64 dar)
{
ptrdiff_t ofs = i * sizeof(struct dw_hdma_v0_lli);
if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
struct dw_hdma_v0_lli *lli = chunk->ll_region.vaddr.mem + ofs;
lli->control = control;
lli->transfer_size = size;
lli->sar.reg = sar;
lli->dar.reg = dar;
} else {
struct dw_hdma_v0_lli __iomem *lli = chunk->ll_region.vaddr.io + ofs;
writel(control, &lli->control);
writel(size, &lli->transfer_size);
writeq(sar, &lli->sar.reg);
writeq(dar, &lli->dar.reg);
}
}
static void dw_hdma_v0_write_ll_link(struct dw_edma_chunk *chunk,
int i, u32 control, u64 pointer)
{
ptrdiff_t ofs = i * sizeof(struct dw_hdma_v0_lli);
if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
struct dw_hdma_v0_llp *llp = chunk->ll_region.vaddr.mem + ofs;
llp->control = control;
llp->llp.reg = pointer;
} else {
struct dw_hdma_v0_llp __iomem *llp = chunk->ll_region.vaddr.io + ofs;
writel(control, &llp->control);
writeq(pointer, &llp->llp.reg);
}
}
static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *child;
struct dw_edma_chan *chan = chunk->chan;
u32 control = 0, i = 0;
int j;
if (chunk->cb)
control = DW_HDMA_V0_CB;
j = chunk->bursts_alloc;
list_for_each_entry(child, &chunk->burst->list, list) {
j--;
if (!j) {
control |= DW_HDMA_V0_LIE;
if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
control |= DW_HDMA_V0_RIE;
}
dw_hdma_v0_write_ll_data(chunk, i++, control, child->sz,
child->sar, child->dar);
}
control = DW_HDMA_V0_LLP | DW_HDMA_V0_TCB;
if (!chunk->cb)
control |= DW_HDMA_V0_CB;
dw_hdma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
}
static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
struct dw_edma_chan *chan = chunk->chan;
struct dw_edma *dw = chan->dw;
u32 tmp;
dw_hdma_v0_core_write_chunk(chunk);
if (first) {
/* Enable engine */
SET_CH_32(dw, chan->dir, chan->id, ch_en, BIT(0));
/* Interrupt enable & unmask - done, abort */
tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup) |
HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK |
HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp);
/* Channel control */
SET_CH_32(dw, chan->dir, chan->id, control1, HDMA_V0_LINKLIST_EN);
/* Linked list */
/* llp is not aligned on 64bit -> keep 32bit accesses */
SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
lower_32_bits(chunk->ll_region.paddr));
SET_CH_32(dw, chan->dir, chan->id, llp.msb,
upper_32_bits(chunk->ll_region.paddr));
}
/* Set consumer cycle */
SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
/* Doorbell */
SET_CH_32(dw, chan->dir, chan->id, doorbell, HDMA_V0_DOORBELL_START);
}
static void dw_hdma_v0_core_ch_config(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->dw;
/* MSI done addr - low, high */
SET_CH_32(dw, chan->dir, chan->id, msi_stop.lsb, chan->msi.address_lo);
SET_CH_32(dw, chan->dir, chan->id, msi_stop.msb, chan->msi.address_hi);
/* MSI abort addr - low, high */
SET_CH_32(dw, chan->dir, chan->id, msi_abort.lsb, chan->msi.address_lo);
SET_CH_32(dw, chan->dir, chan->id, msi_abort.msb, chan->msi.address_hi);
/* config MSI data */
SET_CH_32(dw, chan->dir, chan->id, msi_msgdata, chan->msi.data);
}
/* HDMA debugfs callbacks */
static void dw_hdma_v0_core_debugfs_on(struct dw_edma *dw)
{
dw_hdma_v0_debugfs_on(dw);
}
static const struct dw_edma_core_ops dw_hdma_v0_core = {
.off = dw_hdma_v0_core_off,
.ch_count = dw_hdma_v0_core_ch_count,
.ch_status = dw_hdma_v0_core_ch_status,
.handle_int = dw_hdma_v0_core_handle_int,
.start = dw_hdma_v0_core_start,
.ch_config = dw_hdma_v0_core_ch_config,
.debugfs_on = dw_hdma_v0_core_debugfs_on,
};
void dw_hdma_v0_core_register(struct dw_edma *dw)
{
dw->core = &dw_hdma_v0_core;
}
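
Backend selection is driven purely by the chip's map format: as the dw_edma_probe() hunk earlier shows, chip->mf == EDMA_MF_HDMA_NATIVE routes registration to dw_hdma_v0_core_register() above. A rough sketch of the glue a platform driver could supply to land on this path; the my_* names, the channel budgets, and the omitted linked-list region setup are all illustrative assumptions, not an in-tree user:

#include <linux/dma/edma.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int my_hdma_irq_vector(struct device *dev, unsigned int nr)
{
	return platform_get_irq(to_platform_device(dev), nr);
}

static const struct dw_edma_plat_ops my_hdma_plat_ops = {
	.irq_vector = my_hdma_irq_vector,
};

static int my_hdma_probe(struct platform_device *pdev)
{
	struct dw_edma_chip *chip;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = &pdev->dev;
	chip->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(chip->reg_base))
		return PTR_ERR(chip->reg_base);

	chip->mf = EDMA_MF_HDMA_NATIVE;	/* selects dw_hdma_v0_core_register() */
	chip->nr_irqs = 1;
	chip->ops = &my_hdma_plat_ops;
	chip->ll_wr_cnt = 4;		/* made-up channel budgets */
	chip->ll_rd_cnt = 4;
	/* ll/dt region setup elided; see dw-edma-pcie.c for a complete user */

	return dw_edma_probe(chip);
}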

drivers/dma/dw-edma/dw-hdma-v0-core.h

@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2023 Cai Huoqing
* Synopsys DesignWare HDMA v0 core
*
* Author: Cai Huoqing <cai.huoqing@linux.dev>
*/
#ifndef _DW_HDMA_V0_CORE_H
#define _DW_HDMA_V0_CORE_H
#include <linux/dma/edma.h>
/* HDMA core register */
void dw_hdma_v0_core_register(struct dw_edma *dw);
#endif /* _DW_HDMA_V0_CORE_H */

drivers/dma/dw-edma/dw-hdma-v0-debugfs.c

@ -0,0 +1,170 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2023 Cai Huoqing
* Synopsys DesignWare HDMA v0 debugfs
*
* Author: Cai Huoqing <cai.huoqing@linux.dev>
*/
#include <linux/debugfs.h>
#include <linux/bitfield.h>
#include "dw-hdma-v0-debugfs.h"
#include "dw-hdma-v0-regs.h"
#include "dw-edma-core.h"
#define REGS_ADDR(dw, name) \
({ \
struct dw_hdma_v0_regs __iomem *__regs = (dw)->chip->reg_base; \
\
(void __iomem *)&__regs->name; \
})
#define REGS_CH_ADDR(dw, name, _dir, _ch) \
({ \
struct dw_hdma_v0_ch_regs __iomem *__ch_regs; \
\
if (_dir == EDMA_DIR_READ) \
__ch_regs = REGS_ADDR(dw, ch[_ch].rd); \
else \
__ch_regs = REGS_ADDR(dw, ch[_ch].wr); \
\
(void __iomem *)&__ch_regs->name; \
})
#define CTX_REGISTER(dw, name, dir, ch) \
{#name, REGS_CH_ADDR(dw, name, dir, ch)}
#define WRITE_STR "write"
#define READ_STR "read"
#define CHANNEL_STR "channel"
#define REGISTERS_STR "registers"
struct dw_hdma_debugfs_entry {
const char *name;
void __iomem *reg;
};
static int dw_hdma_debugfs_u32_get(void *data, u64 *val)
{
struct dw_hdma_debugfs_entry *entry = data;
void __iomem *reg = entry->reg;
*val = readl(reg);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_hdma_debugfs_u32_get, NULL, "0x%08llx\n");
static void dw_hdma_debugfs_create_x32(struct dw_edma *dw,
const struct dw_hdma_debugfs_entry ini[],
int nr_entries, struct dentry *dent)
{
struct dw_hdma_debugfs_entry *entries;
int i;
entries = devm_kcalloc(dw->chip->dev, nr_entries, sizeof(*entries),
GFP_KERNEL);
if (!entries)
return;
for (i = 0; i < nr_entries; i++) {
entries[i] = ini[i];
debugfs_create_file_unsafe(entries[i].name, 0444, dent,
&entries[i], &fops_x32);
}
}
static void dw_hdma_debugfs_regs_ch(struct dw_edma *dw, enum dw_edma_dir dir,
u16 ch, struct dentry *dent)
{
const struct dw_hdma_debugfs_entry debugfs_regs[] = {
CTX_REGISTER(dw, ch_en, dir, ch),
CTX_REGISTER(dw, doorbell, dir, ch),
CTX_REGISTER(dw, prefetch, dir, ch),
CTX_REGISTER(dw, handshake, dir, ch),
CTX_REGISTER(dw, llp.lsb, dir, ch),
CTX_REGISTER(dw, llp.msb, dir, ch),
CTX_REGISTER(dw, cycle_sync, dir, ch),
CTX_REGISTER(dw, transfer_size, dir, ch),
CTX_REGISTER(dw, sar.lsb, dir, ch),
CTX_REGISTER(dw, sar.msb, dir, ch),
CTX_REGISTER(dw, dar.lsb, dir, ch),
CTX_REGISTER(dw, dar.msb, dir, ch),
CTX_REGISTER(dw, watermark_en, dir, ch),
CTX_REGISTER(dw, control1, dir, ch),
CTX_REGISTER(dw, func_num, dir, ch),
CTX_REGISTER(dw, qos, dir, ch),
CTX_REGISTER(dw, ch_stat, dir, ch),
CTX_REGISTER(dw, int_stat, dir, ch),
CTX_REGISTER(dw, int_setup, dir, ch),
CTX_REGISTER(dw, int_clear, dir, ch),
CTX_REGISTER(dw, msi_stop.lsb, dir, ch),
CTX_REGISTER(dw, msi_stop.msb, dir, ch),
CTX_REGISTER(dw, msi_watermark.lsb, dir, ch),
CTX_REGISTER(dw, msi_watermark.msb, dir, ch),
CTX_REGISTER(dw, msi_abort.lsb, dir, ch),
CTX_REGISTER(dw, msi_abort.msb, dir, ch),
CTX_REGISTER(dw, msi_msgdata, dir, ch),
};
int nr_entries = ARRAY_SIZE(debugfs_regs);
dw_hdma_debugfs_create_x32(dw, debugfs_regs, nr_entries, dent);
}
static void dw_hdma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent)
{
struct dentry *regs_dent, *ch_dent;
char name[16];
int i;
regs_dent = debugfs_create_dir(WRITE_STR, dent);
for (i = 0; i < dw->wr_ch_cnt; i++) {
snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);
ch_dent = debugfs_create_dir(name, regs_dent);
dw_hdma_debugfs_regs_ch(dw, EDMA_DIR_WRITE, i, ch_dent);
}
}
static void dw_hdma_debugfs_regs_rd(struct dw_edma *dw, struct dentry *dent)
{
struct dentry *regs_dent, *ch_dent;
char name[16];
int i;
regs_dent = debugfs_create_dir(READ_STR, dent);
for (i = 0; i < dw->rd_ch_cnt; i++) {
snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);
ch_dent = debugfs_create_dir(name, regs_dent);
dw_hdma_debugfs_regs_ch(dw, EDMA_DIR_READ, i, ch_dent);
}
}
static void dw_hdma_debugfs_regs(struct dw_edma *dw)
{
struct dentry *regs_dent;
regs_dent = debugfs_create_dir(REGISTERS_STR, dw->dma.dbg_dev_root);
dw_hdma_debugfs_regs_wr(dw, regs_dent);
dw_hdma_debugfs_regs_rd(dw, regs_dent);
}
void dw_hdma_v0_debugfs_on(struct dw_edma *dw)
{
if (!debugfs_initialized())
return;
debugfs_create_u32("mf", 0444, dw->dma.dbg_dev_root, &dw->chip->mf);
debugfs_create_u16("wr_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->wr_ch_cnt);
debugfs_create_u16("rd_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->rd_ch_cnt);
dw_hdma_debugfs_regs(dw);
}
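
For orientation, the code above exposes one directory per direction, one per channel, and one file per register, plus the three top-level attributes. Assuming two channels each way and the usual debugfs mount point (the exact root comes from dw->dma.dbg_dev_root, which the dmaengine core places under its own debugfs directory), the resulting tree looks roughly like:

/sys/kernel/debug/dmaengine/<device>/
    mf
    wr_ch_cnt
    rd_ch_cnt
    registers/
        write/
            channel:0/    (ch_en, doorbell, llp.lsb, llp.msb, ch_stat, ...)
            channel:1/
        read/
            channel:0/
            channel:1/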

drivers/dma/dw-edma/dw-hdma-v0-debugfs.h

@ -0,0 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2023 Cai Huoqing
* Synopsys DesignWare HDMA v0 debugfs
*
* Author: Cai Huoqing <cai.huoqing@linux.dev>
*/
#ifndef _DW_HDMA_V0_DEBUG_FS_H
#define _DW_HDMA_V0_DEBUG_FS_H
#include <linux/dma/edma.h>
#ifdef CONFIG_DEBUG_FS
void dw_hdma_v0_debugfs_on(struct dw_edma *dw);
#else
static inline void dw_hdma_v0_debugfs_on(struct dw_edma *dw)
{
}
#endif /* CONFIG_DEBUG_FS */
#endif /* _DW_HDMA_V0_DEBUG_FS_H */

drivers/dma/dw-edma/dw-hdma-v0-regs.h

@ -0,0 +1,129 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2023 Cai Huoqing
* Synopsys DesignWare HDMA v0 reg
*
* Author: Cai Huoqing <cai.huoqing@linux.dev>
*/
#ifndef _DW_HDMA_V0_REGS_H
#define _DW_HDMA_V0_REGS_H
#include <linux/dmaengine.h>
#define HDMA_V0_MAX_NR_CH 8
#define HDMA_V0_LOCAL_ABORT_INT_EN BIT(6)
#define HDMA_V0_REMOTE_ABORT_INT_EN BIT(5)
#define HDMA_V0_LOCAL_STOP_INT_EN BIT(4)
#define HDMA_V0_REMOTEL_STOP_INT_EN BIT(3)
#define HDMA_V0_ABORT_INT_MASK BIT(2)
#define HDMA_V0_STOP_INT_MASK BIT(0)
#define HDMA_V0_LINKLIST_EN BIT(0)
#define HDMA_V0_CONSUMER_CYCLE_STAT BIT(1)
#define HDMA_V0_CONSUMER_CYCLE_BIT BIT(0)
#define HDMA_V0_DOORBELL_START BIT(0)
#define HDMA_V0_CH_STATUS_MASK GENMASK(1, 0)
struct dw_hdma_v0_ch_regs {
u32 ch_en; /* 0x0000 */
u32 doorbell; /* 0x0004 */
u32 prefetch; /* 0x0008 */
u32 handshake; /* 0x000c */
union {
u64 reg; /* 0x0010..0x0014 */
struct {
u32 lsb; /* 0x0010 */
u32 msb; /* 0x0014 */
};
} llp;
u32 cycle_sync; /* 0x0018 */
u32 transfer_size; /* 0x001c */
union {
u64 reg; /* 0x0020..0x0024 */
struct {
u32 lsb; /* 0x0020 */
u32 msb; /* 0x0024 */
};
} sar;
union {
u64 reg; /* 0x0028..0x002c */
struct {
u32 lsb; /* 0x0028 */
u32 msb; /* 0x002c */
};
} dar;
u32 watermark_en; /* 0x0030 */
u32 control1; /* 0x0034 */
u32 func_num; /* 0x0038 */
u32 qos; /* 0x003c */
u32 padding_1[16]; /* 0x0040..0x007c */
u32 ch_stat; /* 0x0080 */
u32 int_stat; /* 0x0084 */
u32 int_setup; /* 0x0088 */
u32 int_clear; /* 0x008c */
union {
u64 reg; /* 0x0090..0x0094 */
struct {
u32 lsb; /* 0x0090 */
u32 msb; /* 0x0094 */
};
} msi_stop;
union {
u64 reg; /* 0x0098..0x009c */
struct {
u32 lsb; /* 0x0098 */
u32 msb; /* 0x009c */
};
} msi_watermark;
union {
u64 reg; /* 0x00a0..0x00a4 */
struct {
u32 lsb; /* 0x00a0 */
u32 msb; /* 0x00a4 */
};
} msi_abort;
u32 msi_msgdata; /* 0x00a8 */
u32 padding_2[21]; /* 0x00ac..0x00fc */
} __packed;
struct dw_hdma_v0_ch {
struct dw_hdma_v0_ch_regs wr; /* 0x0000 */
struct dw_hdma_v0_ch_regs rd; /* 0x0100 */
} __packed;
struct dw_hdma_v0_regs {
struct dw_hdma_v0_ch ch[HDMA_V0_MAX_NR_CH]; /* 0x0000..0x0fa8 */
} __packed;
struct dw_hdma_v0_lli {
u32 control;
u32 transfer_size;
union {
u64 reg;
struct {
u32 lsb;
u32 msb;
};
} sar;
union {
u64 reg;
struct {
u32 lsb;
u32 msb;
};
} dar;
} __packed;
struct dw_hdma_v0_llp {
u32 control;
u32 reserved;
union {
u64 reg;
struct {
u32 lsb;
u32 msb;
};
} llp;
} __packed;
#endif /* _DW_HDMA_V0_REGS_H */

drivers/dma/plx_dma.c

@ -517,7 +517,6 @@ static int plx_dma_create(struct pci_dev *pdev)
plxdev->bar = pcim_iomap_table(pdev)[0];
dma = &plxdev->dma_dev;
dma->chancnt = 1;
INIT_LIST_HEAD(&dma->channels);
dma_cap_set(DMA_MEMCPY, dma->cap_mask);
dma->copy_align = DMAENGINE_ALIGN_1_BYTE;

drivers/dma/qcom/Kconfig

@ -45,6 +45,7 @@ config QCOM_HIDMA_MGMT
config QCOM_HIDMA
tristate "Qualcomm Technologies HIDMA Channel support"
depends on HAS_IOMEM
select DMA_ENGINE
help
Enable support for the Qualcomm Technologies HIDMA controller.

drivers/dma/qcom/bam_dma.c

@ -1272,7 +1272,15 @@ static int bam_dma_probe(struct platform_device *pdev)
bdev->powered_remotely = of_property_read_bool(pdev->dev.of_node,
"qcom,powered-remotely");
if (bdev->controlled_remotely || bdev->powered_remotely) {
if (bdev->controlled_remotely || bdev->powered_remotely)
bdev->bamclk = devm_clk_get_optional(bdev->dev, "bam_clk");
else
bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
if (IS_ERR(bdev->bamclk))
return PTR_ERR(bdev->bamclk);
if (!bdev->bamclk) {
ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
&bdev->num_channels);
if (ret)
@ -1284,14 +1292,6 @@ static int bam_dma_probe(struct platform_device *pdev)
dev_err(bdev->dev, "num-ees unspecified in dt\n");
}
if (bdev->controlled_remotely || bdev->powered_remotely)
bdev->bamclk = devm_clk_get_optional(bdev->dev, "bam_clk");
else
bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
if (IS_ERR(bdev->bamclk))
return PTR_ERR(bdev->bamclk);
ret = clk_prepare_enable(bdev->bamclk);
if (ret) {
dev_err(bdev->dev, "failed to prepare/enable clock\n");
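
Because the hunks above both move and reword the clock lookup, the net probe flow is easier to read straight through. A condensed sketch of the logic after this patch (error handling trimmed): the clock stays optional for remotely controlled or powered BAMs, and only when no clock is present at all do the counts have to come from DT, since with a clock the driver can enable it and read them from the hardware instead:

if (bdev->controlled_remotely || bdev->powered_remotely)
	bdev->bamclk = devm_clk_get_optional(bdev->dev, "bam_clk");
else
	bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
if (IS_ERR(bdev->bamclk))
	return PTR_ERR(bdev->bamclk);

if (!bdev->bamclk) {
	/* No clock to turn on, so the identification registers cannot be
	 * trusted to be readable: num-channels and num-ees must be in DT.
	 */
	of_property_read_u32(pdev->dev.of_node, "num-channels",
			     &bdev->num_channels);
	of_property_read_u32(pdev->dev.of_node, "num-ees", &bdev->num_ees);
}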

drivers/dma/qcom/hidma.c

@ -214,7 +214,6 @@ static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
spin_lock_init(&mchan->lock);
list_add_tail(&mchan->chan.device_node, &ddev->channels);
dmadev->ddev.chancnt++;
return 0;
}

drivers/dma/sprd-dma.c

@ -1169,7 +1169,6 @@ static int sprd_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
sdev->total_chns = chn_count;
sdev->dma_dev.chancnt = chn_count;
INIT_LIST_HEAD(&sdev->dma_dev.channels);
INIT_LIST_HEAD(&sdev->dma_dev.global_node);
sdev->dma_dev.dev = &pdev->dev;
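
This and the preceding chancnt removals (plx_dma, hidma, axi-dmac, dw-axi-dmac and sprd above) all rest on the same framework behavior: dma_async_device_register() increments chancnt once per entry on the dma_device channel list, so any value a driver pre-loads ends up double-counted. A minimal sketch of the registration pattern the drivers are left with, assuming an array of already-initialized channels:

/* Populate the channel list only; the dmaengine core derives chancnt
 * from this list while registering the device.
 */
INIT_LIST_HEAD(&dma_dev->channels);
for (i = 0; i < nr_chans; i++)
	list_add_tail(&chans[i].device_node, &dma_dev->channels);

/* no "dma_dev->chancnt = nr_chans;" here */
ret = dma_async_device_register(dma_dev);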

drivers/dma/ste_dma40.c

@ -19,14 +19,43 @@
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/dma-ste-dma40.h>
#include "dmaengine.h"
#include "ste_dma40.h"
#include "ste_dma40_ll.h"
/**
* struct stedma40_platform_data - Configuration struct for the dma device.
*
* @dev_tx: mapping between destination event line and io address
* @dev_rx: mapping between source event line and io address
* @disabled_channels: A vector, ending with -1, that marks physical channels
* that are for different reasons not available for the driver.
* @soft_lli_chans: A vector that marks the physical channels that will use
* LLI by SW, which avoids a HW bug that exists in some versions of the
* controller. SoftLLI introduces relink overhead that could impact
* performance for certain use cases.
* @num_of_soft_lli_chans: The number of channels that need to be configured
* to use SoftLLI.
* @use_esram_lcla: flag for mapping the lcla into esram region
* @num_of_memcpy_chans: The number of channels reserved for memcpy.
* @num_of_phy_chans: The number of physical channels implemented in HW.
* 0 means reading the number of channels from DMA HW but this is only valid
* for 'multiple of 4' channels, like 8.
*/
struct stedma40_platform_data {
int disabled_channels[STEDMA40_MAX_PHYS];
int *soft_lli_chans;
int num_of_soft_lli_chans;
bool use_esram_lcla;
int num_of_memcpy_chans;
int num_of_phy_chans;
};
#define D40_NAME "dma40"
#define D40_PHY_CHAN -1
@ -107,7 +136,7 @@ static const struct stedma40_chan_cfg dma40_memcpy_conf_log = {
};
/**
* enum 40_command - The different commands and/or statuses.
* enum d40_command - The different commands and/or statuses.
*
* @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
* @D40_DMA_RUN: The DMA channel is RUNNING of the command RUN.
@ -525,8 +554,6 @@ struct d40_gen_dmac {
* @virtbase: The virtual base address of the DMA's register.
* @rev: silicon revision detected.
* @clk: Pointer to the DMA clock structure.
* @phy_start: Physical memory start of the DMA registers.
* @phy_size: Size of the DMA register map.
* @irq: The IRQ number.
* @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem
* transfers).
@ -570,8 +597,6 @@ struct d40_base {
void __iomem *virtbase;
u8 rev:4;
struct clk *clk;
phys_addr_t phy_start;
resource_size_t phy_size;
int irq;
int num_memcpy_chans;
int num_phy_chans;
@ -2268,7 +2293,7 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
return NULL;
}
bool stedma40_filter(struct dma_chan *chan, void *data)
static bool stedma40_filter(struct dma_chan *chan, void *data)
{
struct stedma40_chan_cfg *info = data;
struct d40_chan *d40c =
@ -2287,7 +2312,6 @@ bool stedma40_filter(struct dma_chan *chan, void *data)
return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);
static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
{
@ -3100,64 +3124,57 @@ static int __init d40_phy_res_init(struct d40_base *base)
return num_phy_chans_avail;
}
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
/* Called from the registered devm action */
static void d40_drop_kmem_cache_action(void *d)
{
struct kmem_cache *desc_slab = d;
kmem_cache_destroy(desc_slab);
}
static int __init d40_hw_detect_init(struct platform_device *pdev,
struct d40_base **retbase)
{
struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
struct device *dev = &pdev->dev;
struct clk *clk;
void __iomem *virtbase;
struct resource *res;
struct d40_base *base;
int num_log_chans;
int num_phy_chans;
int num_memcpy_chans;
int clk_ret = -EINVAL;
int i;
u32 pid;
u32 cid;
u8 rev;
int ret;
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
d40_err(&pdev->dev, "No matching clock found\n");
goto check_prepare_enabled;
}
clk_ret = clk_prepare_enable(clk);
if (clk_ret) {
d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
goto disable_unprepare;
}
clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
/* Get IO for DMAC base address */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
if (!res)
goto disable_unprepare;
if (request_mem_region(res->start, resource_size(res),
D40_NAME " I/O base") == NULL)
goto release_region;
virtbase = ioremap(res->start, resource_size(res));
if (!virtbase)
goto release_region;
virtbase = devm_platform_ioremap_resource_byname(pdev, "base");
if (IS_ERR(virtbase))
return PTR_ERR(virtbase);
/* This is just a regular AMBA PrimeCell ID actually */
for (pid = 0, i = 0; i < 4; i++)
pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
pid |= (readl(virtbase + SZ_4K - 0x20 + 4 * i)
& 255) << (i * 8);
for (cid = 0, i = 0; i < 4; i++)
cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
cid |= (readl(virtbase + SZ_4K - 0x10 + 4 * i)
& 255) << (i * 8);
if (cid != AMBA_CID) {
d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
goto unmap_io;
d40_err(dev, "Unknown hardware! No PrimeCell ID\n");
return -EINVAL;
}
if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
d40_err(dev, "Unknown designer! Got %x wanted %x\n",
AMBA_MANF_BITS(pid),
AMBA_VENDOR_ST);
goto unmap_io;
return -EINVAL;
}
/*
* HW revision:
@ -3170,8 +3187,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
*/
rev = AMBA_REV_BITS(pid);
if (rev < 2) {
d40_err(&pdev->dev, "hardware revision: %d is not supported", rev);
goto unmap_io;
d40_err(dev, "hardware revision: %d is not supported", rev);
return -EINVAL;
}
/* The number of physical channels on this HW */
@ -3188,27 +3205,26 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
dev_info(&pdev->dev,
"hardware rev: %d @ %pa with %d physical and %d logical channels\n",
rev, &res->start, num_phy_chans, num_log_chans);
dev_info(dev,
"hardware rev: %d with %d physical and %d logical channels\n",
rev, num_phy_chans, num_log_chans);
base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
(num_phy_chans + num_log_chans + num_memcpy_chans) *
sizeof(struct d40_chan), GFP_KERNEL);
base = devm_kzalloc(dev,
ALIGN(sizeof(struct d40_base), 4) +
(num_phy_chans + num_log_chans + num_memcpy_chans) *
sizeof(struct d40_chan), GFP_KERNEL);
if (base == NULL)
goto unmap_io;
if (!base)
return -ENOMEM;
base->rev = rev;
base->clk = clk;
base->num_memcpy_chans = num_memcpy_chans;
base->num_phy_chans = num_phy_chans;
base->num_log_chans = num_log_chans;
base->phy_start = res->start;
base->phy_size = resource_size(res);
base->virtbase = virtbase;
base->plat_data = plat_data;
base->dev = &pdev->dev;
base->dev = dev;
base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
base->log_chans = &base->phy_chans[num_phy_chans];
@ -3242,76 +3258,57 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
}
base->phy_res = kcalloc(num_phy_chans,
sizeof(*base->phy_res),
GFP_KERNEL);
base->phy_res = devm_kcalloc(dev, num_phy_chans,
sizeof(*base->phy_res),
GFP_KERNEL);
if (!base->phy_res)
goto free_base;
return -ENOMEM;
base->lookup_phy_chans = kcalloc(num_phy_chans,
sizeof(*base->lookup_phy_chans),
GFP_KERNEL);
base->lookup_phy_chans = devm_kcalloc(dev, num_phy_chans,
sizeof(*base->lookup_phy_chans),
GFP_KERNEL);
if (!base->lookup_phy_chans)
goto free_phy_res;
return -ENOMEM;
base->lookup_log_chans = kcalloc(num_log_chans,
sizeof(*base->lookup_log_chans),
GFP_KERNEL);
base->lookup_log_chans = devm_kcalloc(dev, num_log_chans,
sizeof(*base->lookup_log_chans),
GFP_KERNEL);
if (!base->lookup_log_chans)
goto free_phy_chans;
return -ENOMEM;
base->reg_val_backup_chan = kmalloc_array(base->num_phy_chans,
base->reg_val_backup_chan = devm_kmalloc_array(dev, base->num_phy_chans,
sizeof(d40_backup_regs_chan),
GFP_KERNEL);
if (!base->reg_val_backup_chan)
goto free_log_chans;
return -ENOMEM;
base->lcla_pool.alloc_map = kcalloc(num_phy_chans
base->lcla_pool.alloc_map = devm_kcalloc(dev, num_phy_chans
* D40_LCLA_LINK_PER_EVENT_GRP,
sizeof(*base->lcla_pool.alloc_map),
GFP_KERNEL);
if (!base->lcla_pool.alloc_map)
goto free_backup_chan;
return -ENOMEM;
base->regs_interrupt = kmalloc_array(base->gen_dmac.il_size,
base->regs_interrupt = devm_kmalloc_array(dev, base->gen_dmac.il_size,
sizeof(*base->regs_interrupt),
GFP_KERNEL);
if (!base->regs_interrupt)
goto free_map;
return -ENOMEM;
base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (base->desc_slab == NULL)
goto free_regs;
if (!base->desc_slab)
return -ENOMEM;
ret = devm_add_action_or_reset(dev, d40_drop_kmem_cache_action,
base->desc_slab);
if (ret)
return ret;
return base;
free_regs:
kfree(base->regs_interrupt);
free_map:
kfree(base->lcla_pool.alloc_map);
free_backup_chan:
kfree(base->reg_val_backup_chan);
free_log_chans:
kfree(base->lookup_log_chans);
free_phy_chans:
kfree(base->lookup_phy_chans);
free_phy_res:
kfree(base->phy_res);
free_base:
kfree(base);
unmap_io:
iounmap(virtbase);
release_region:
release_mem_region(res->start, resource_size(res));
check_prepare_enabled:
if (!clk_ret)
disable_unprepare:
clk_disable_unprepare(clk);
if (!IS_ERR(clk))
clk_put(clk);
return NULL;
*retbase = base;
return 0;
}
static void __init d40_hw_init(struct d40_base *base)
@ -3451,14 +3448,14 @@ static int __init d40_lcla_allocate(struct d40_base *base)
return ret;
}
static int __init d40_of_probe(struct platform_device *pdev,
static int __init d40_of_probe(struct device *dev,
struct device_node *np)
{
struct stedma40_platform_data *pdata;
int num_phy = 0, num_memcpy = 0, num_disabled = 0;
const __be32 *list;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
@ -3471,7 +3468,7 @@ static int __init d40_of_probe(struct platform_device *pdev,
num_memcpy /= sizeof(*list);
if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
d40_err(&pdev->dev,
d40_err(dev,
"Invalid number of memcpy channels specified (%d)\n",
num_memcpy);
return -EINVAL;
@ -3486,7 +3483,7 @@ static int __init d40_of_probe(struct platform_device *pdev,
num_disabled /= sizeof(*list);
if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
d40_err(&pdev->dev,
d40_err(dev,
"Invalid number of disabled channels specified (%d)\n",
num_disabled);
return -EINVAL;
@ -3497,35 +3494,30 @@ static int __init d40_of_probe(struct platform_device *pdev,
num_disabled);
pdata->disabled_channels[num_disabled] = -1;
pdev->dev.platform_data = pdata;
dev->platform_data = pdata;
return 0;
}
static int __init d40_probe(struct platform_device *pdev)
{
struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
int ret = -ENOENT;
struct device_node *np_lcpa;
struct d40_base *base;
struct resource *res;
struct resource res_lcpa;
int num_reserved_chans;
u32 val;
int ret;
if (!plat_data) {
if (np) {
if (d40_of_probe(pdev, np)) {
ret = -ENOMEM;
goto report_failure;
}
} else {
d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
goto report_failure;
}
if (d40_of_probe(dev, np)) {
ret = -ENOMEM;
goto report_failure;
}
base = d40_hw_detect_init(pdev);
if (!base)
ret = d40_hw_detect_init(pdev, &base);
if (ret)
goto report_failure;
num_reserved_chans = d40_phy_res_init(base);
@ -3535,37 +3527,38 @@ static int __init d40_probe(struct platform_device *pdev)
spin_lock_init(&base->interrupt_lock);
spin_lock_init(&base->execmd_lock);
/* Get IO for logical channel parameter address */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
if (!res) {
ret = -ENOENT;
d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
goto destroy_cache;
/* Get IO for logical channel parameter address (LCPA) */
np_lcpa = of_parse_phandle(np, "sram", 0);
if (!np_lcpa) {
dev_err(dev, "no LCPA SRAM node\n");
ret = -EINVAL;
goto report_failure;
}
base->lcpa_size = resource_size(res);
base->phy_lcpa = res->start;
if (request_mem_region(res->start, resource_size(res),
D40_NAME " I/O lcpa") == NULL) {
ret = -EBUSY;
d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
goto destroy_cache;
/* This is no device so read the address directly from the node */
ret = of_address_to_resource(np_lcpa, 0, &res_lcpa);
if (ret) {
dev_err(dev, "no LCPA SRAM resource\n");
goto report_failure;
}
base->lcpa_size = resource_size(&res_lcpa);
base->phy_lcpa = res_lcpa.start;
dev_info(dev, "found LCPA SRAM at %pad, size %pa\n",
&base->phy_lcpa, &base->lcpa_size);
/* We make use of ESRAM memory for this. */
val = readl(base->virtbase + D40_DREG_LCPA);
if (res->start != val && val != 0) {
dev_warn(&pdev->dev,
"[%s] Mismatch LCPA dma 0x%x, def %pa\n",
__func__, val, &res->start);
if (base->phy_lcpa != val && val != 0) {
dev_warn(dev,
"[%s] Mismatch LCPA dma 0x%x, def %08x\n",
__func__, val, (u32)base->phy_lcpa);
} else
writel(res->start, base->virtbase + D40_DREG_LCPA);
writel(base->phy_lcpa, base->virtbase + D40_DREG_LCPA);
base->lcpa_base = ioremap(res->start, resource_size(res));
base->lcpa_base = devm_ioremap(dev, base->phy_lcpa, base->lcpa_size);
if (!base->lcpa_base) {
ret = -ENOMEM;
d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
goto destroy_cache;
d40_err(dev, "Failed to ioremap LCPA region\n");
goto report_failure;
}
/* If lcla has to be located in ESRAM we don't need to allocate */
if (base->plat_data->use_esram_lcla) {
@ -3573,23 +3566,23 @@ static int __init d40_probe(struct platform_device *pdev)
"lcla_esram");
if (!res) {
ret = -ENOENT;
d40_err(&pdev->dev,
d40_err(dev,
"No \"lcla_esram\" memory resource\n");
goto destroy_cache;
goto report_failure;
}
base->lcla_pool.base = ioremap(res->start,
resource_size(res));
base->lcla_pool.base = devm_ioremap(dev, res->start,
resource_size(res));
if (!base->lcla_pool.base) {
ret = -ENOMEM;
d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
goto destroy_cache;
d40_err(dev, "Failed to ioremap LCLA region\n");
goto report_failure;
}
writel(res->start, base->virtbase + D40_DREG_LCLA);
} else {
ret = d40_lcla_allocate(base);
if (ret) {
d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
d40_err(dev, "Failed to allocate LCLA area\n");
goto destroy_cache;
}
}
@ -3600,7 +3593,7 @@ static int __init d40_probe(struct platform_device *pdev)
ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
if (ret) {
d40_err(&pdev->dev, "No IRQ defined\n");
d40_err(dev, "No IRQ defined\n");
goto destroy_cache;
}
@ -3608,7 +3601,7 @@ static int __init d40_probe(struct platform_device *pdev)
base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
if (IS_ERR(base->lcpa_regulator)) {
d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
d40_err(dev, "Failed to get lcpa_regulator\n");
ret = PTR_ERR(base->lcpa_regulator);
base->lcpa_regulator = NULL;
goto destroy_cache;
@ -3616,7 +3609,7 @@ static int __init d40_probe(struct platform_device *pdev)
ret = regulator_enable(base->lcpa_regulator);
if (ret) {
d40_err(&pdev->dev,
d40_err(dev,
"Failed to enable lcpa_regulator\n");
regulator_put(base->lcpa_regulator);
base->lcpa_regulator = NULL;
@ -3639,31 +3632,23 @@ static int __init d40_probe(struct platform_device *pdev)
ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
if (ret) {
d40_err(&pdev->dev, "Failed to set dma max seg size\n");
d40_err(dev, "Failed to set dma max seg size\n");
goto destroy_cache;
}
d40_hw_init(base);
if (np) {
ret = of_dma_controller_register(np, d40_xlate, NULL);
if (ret)
dev_err(&pdev->dev,
"could not register of_dma_controller\n");
ret = of_dma_controller_register(np, d40_xlate, NULL);
if (ret) {
dev_err(dev,
"could not register of_dma_controller\n");
goto destroy_cache;
}
dev_info(base->dev, "initialized\n");
return 0;
destroy_cache:
kmem_cache_destroy(base->desc_slab);
if (base->virtbase)
iounmap(base->virtbase);
if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
iounmap(base->lcla_pool.base);
base->lcla_pool.base = NULL;
}
if (base->lcla_pool.dma_addr)
dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
SZ_1K * base->num_phy_chans,
@ -3675,32 +3660,13 @@ static int __init d40_probe(struct platform_device *pdev)
kfree(base->lcla_pool.base_unaligned);
if (base->lcpa_base)
iounmap(base->lcpa_base);
if (base->phy_lcpa)
release_mem_region(base->phy_lcpa,
base->lcpa_size);
if (base->phy_start)
release_mem_region(base->phy_start,
base->phy_size);
if (base->clk) {
clk_disable_unprepare(base->clk);
clk_put(base->clk);
}
if (base->lcpa_regulator) {
regulator_disable(base->lcpa_regulator);
regulator_put(base->lcpa_regulator);
}
kfree(base->lcla_pool.alloc_map);
kfree(base->lookup_log_chans);
kfree(base->lookup_phy_chans);
kfree(base->phy_res);
kfree(base);
report_failure:
d40_err(&pdev->dev, "probe failed\n");
d40_err(dev, "probe failed\n");
return ret;
}
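
Most of the deleted error-path labels in this file fall out of switching allocations and mappings to devm_* equivalents; the descriptor kmem_cache has no managed helper, so the patch registers a release action instead. The same pattern works for any resource without a devm variant; a small self-contained sketch (my_* names are placeholders):

/* The devm action pattern used above, reduced to its essentials: wrap the
 * destructor of a resource that has no devm_* helper so it runs on probe
 * failure and on device removal.
 */
struct my_desc { int payload; };

static void my_cache_release(void *cache)
{
	kmem_cache_destroy(cache);
}

static int my_probe_fragment(struct device *dev)
{
	struct kmem_cache *cache;
	int ret;

	cache = kmem_cache_create("my_cache", sizeof(struct my_desc), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cache)
		return -ENOMEM;

	ret = devm_add_action_or_reset(dev, my_cache_release, cache);
	if (ret)
		return ret;	/* action already ran; cache is destroyed */

	return 0;
}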

drivers/dma/ste_dma40.h

@ -1,19 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) ST-Ericsson SA 2007-2010
* Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
* Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
*/
#ifndef STE_DMA40_H
#define STE_DMA40_H
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
/*
* Maxium size for a single dma descriptor
* Size is limited to 16 bits.
@ -118,92 +107,4 @@ struct stedma40_chan_cfg {
int phy_channel;
};
/**
* struct stedma40_platform_data - Configuration struct for the dma device.
*
* @dev_tx: mapping between destination event line and io address
* @dev_rx: mapping between source event line and io address
* @disabled_channels: A vector, ending with -1, that marks physical channels
* that are for different reasons not available for the driver.
* @soft_lli_chans: A vector, that marks physical channels will use LLI by SW
* which avoids HW bug that exists in some versions of the controller.
* SoftLLI introduces relink overhead that could impact performace for
* certain use cases.
* @num_of_soft_lli_chans: The number of channels that needs to be configured
* to use SoftLLI.
* @use_esram_lcla: flag for mapping the lcla into esram region
* @num_of_memcpy_chans: The number of channels reserved for memcpy.
* @num_of_phy_chans: The number of physical channels implemented in HW.
* 0 means reading the number of channels from DMA HW but this is only valid
* for 'multiple of 4' channels, like 8.
*/
struct stedma40_platform_data {
int disabled_channels[STEDMA40_MAX_PHYS];
int *soft_lli_chans;
int num_of_soft_lli_chans;
bool use_esram_lcla;
int num_of_memcpy_chans;
int num_of_phy_chans;
};
#ifdef CONFIG_STE_DMA40
/**
* stedma40_filter() - Provides stedma40_chan_cfg to the
* ste_dma40 dma driver via the dmaengine framework.
* does some checking of what's provided.
*
* Never directly called by client. It used by dmaengine.
* @chan: dmaengine handle.
* @data: Must be of type: struct stedma40_chan_cfg and is
* the configuration of the framework.
*
*
*/
bool stedma40_filter(struct dma_chan *chan, void *data);
/**
* stedma40_slave_mem() - Transfers a raw data buffer to or from a slave
* (=device)
*
* @chan: dmaengine handle
* @addr: source or destination physicall address.
* @size: bytes to transfer
* @direction: direction of transfer
* @flags: is actually enum dma_ctrl_flags. See dmaengine.h
*/
static inline struct
dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
dma_addr_t addr,
unsigned int size,
enum dma_transfer_direction direction,
unsigned long flags)
{
struct scatterlist sg;
sg_init_table(&sg, 1);
sg.dma_address = addr;
sg.length = size;
return dmaengine_prep_slave_sg(chan, &sg, 1, direction, flags);
}
#else
static inline bool stedma40_filter(struct dma_chan *chan, void *data)
{
return false;
}
static inline struct
dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
dma_addr_t addr,
unsigned int size,
enum dma_transfer_direction direction,
unsigned long flags)
{
return NULL;
}
#endif
#endif
#endif /* STE_DMA40_H */

drivers/dma/ste_dma40_ll.c

@ -6,8 +6,9 @@
*/
#include <linux/kernel.h>
#include <linux/platform_data/dma-ste-dma40.h>
#include <linux/dmaengine.h>
#include "ste_dma40.h"
#include "ste_dma40_ll.h"
static u8 d40_width_to_bits(enum dma_slave_buswidth width)

drivers/dma/ti/k3-psil-j721s2.c

@ -99,6 +99,8 @@ static struct psil_ep j721s2_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x461d),
PSIL_PDMA_XY_PKT(0x461e),
PSIL_PDMA_XY_PKT(0x461f),
/* MAIN_CPSW2G */
PSIL_ETHERNET(0x4640),
/* PDMA_USART_G0 - UART0-1 */
PSIL_PDMA_XY_PKT(0x4700),
PSIL_PDMA_XY_PKT(0x4701),
@ -161,6 +163,15 @@ static struct psil_ep j721s2_dst_ep_map[] = {
PSIL_ETHERNET(0xf005),
PSIL_ETHERNET(0xf006),
PSIL_ETHERNET(0xf007),
/* MAIN_CPSW2G */
PSIL_ETHERNET(0xc640),
PSIL_ETHERNET(0xc641),
PSIL_ETHERNET(0xc642),
PSIL_ETHERNET(0xc643),
PSIL_ETHERNET(0xc644),
PSIL_ETHERNET(0xc645),
PSIL_ETHERNET(0xc646),
PSIL_ETHERNET(0xc647),
/* SA2UL */
PSIL_SA2UL(0xf500, 1),
PSIL_SA2UL(0xf501, 1),

drivers/dma/ti/k3-udma.c

@ -4308,6 +4308,15 @@ static struct udma_soc_data am62a_dmss_csi_soc_data = {
},
};
static struct udma_soc_data j721s2_bcdma_csi_soc_data = {
.oes = {
.bcdma_tchan_data = 0x800,
.bcdma_tchan_ring = 0xa00,
.bcdma_rchan_data = 0xe00,
.bcdma_rchan_ring = 0x1000,
},
};
static struct udma_match_data am62a_bcdma_csirx_data = {
.type = DMA_TYPE_BCDMA,
.psil_base = 0x3100,
@ -4346,6 +4355,18 @@ static struct udma_match_data am64_pktdma_data = {
},
};
static struct udma_match_data j721s2_bcdma_csi_data = {
.type = DMA_TYPE_BCDMA,
.psil_base = 0x2000,
.enable_memcpy_support = false,
.burst_size = {
TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
0, /* No H Channels */
0, /* No UH Channels */
},
.soc_data = &j721s2_bcdma_csi_soc_data,
};
static const struct of_device_id udma_of_match[] = {
{
.compatible = "ti,am654-navss-main-udmap",
@ -4373,6 +4394,10 @@ static const struct of_device_id udma_of_match[] = {
.compatible = "ti,am62a-dmss-bcdma-csirx",
.data = &am62a_bcdma_csirx_data,
},
{
.compatible = "ti,j721s2-dmss-bcdma-csi",
.data = &j721s2_bcdma_csi_data,
},
{ /* Sentinel */ },
};
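
The new compatible carries all of its instance-specific configuration through the of_device_id .data pointer; probe retrieves it with the standard match-data lookup, and from there the J721S2 PSI-L base, burst sizes, and oes ring/data offsets flow into channel setup. A condensed, illustrative sketch of that lookup (k3-udma's real probe does considerably more):

#include <linux/of_device.h>
#include <linux/platform_device.h>

static int example_udma_probe(struct platform_device *pdev)
{
	const struct udma_match_data *match_data;

	match_data = of_device_get_match_data(&pdev->dev);
	if (!match_data)
		return -ENODEV;

	/* For "ti,j721s2-dmss-bcdma-csi" this is j721s2_bcdma_csi_data, so
	 * psil_base and soc_data->oes above are what channel setup consumes.
	 */
	return 0;
}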

drivers/pci/controller/dwc/pcie-designware.c

@ -834,7 +834,7 @@ static int dw_pcie_edma_irq_vector(struct device *dev, unsigned int nr)
return platform_get_irq_byname_optional(pdev, name);
}
static struct dw_edma_core_ops dw_pcie_edma_ops = {
static struct dw_edma_plat_ops dw_pcie_edma_ops = {
.irq_vector = dw_pcie_edma_irq_vector,
};

include/linux/dma/edma.h

@ -40,7 +40,7 @@ struct dw_edma_region {
* iATU windows. That will be done by the controller
* automatically.
*/
struct dw_edma_core_ops {
struct dw_edma_plat_ops {
int (*irq_vector)(struct device *dev, unsigned int nr);
u64 (*pci_address)(struct device *dev, phys_addr_t cpu_addr);
};
@ -48,7 +48,8 @@ struct dw_edma_core_ops {
enum dw_edma_map_format {
EDMA_MF_EDMA_LEGACY = 0x0,
EDMA_MF_EDMA_UNROLL = 0x1,
EDMA_MF_HDMA_COMPAT = 0x5
EDMA_MF_HDMA_COMPAT = 0x5,
EDMA_MF_HDMA_NATIVE = 0x7,
};
/**
@ -80,7 +81,7 @@ enum dw_edma_chip_flags {
struct dw_edma_chip {
struct device *dev;
int nr_irqs;
const struct dw_edma_core_ops *ops;
const struct dw_edma_plat_ops *ops;
u32 flags;
void __iomem *reg_base;