Merge tag 'dmaengine-6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:
 "New hardware support:
   - Allwinner H616 DMA support
   - Renesas r8a779h0 DMA controller support
   - TI CSI2RX DMA support

  Updates:
   - Freescale edma driver updates for TCD64 support on i.MX95
   - constification of pointers and arguments
   - YAML conversion of the MediaTek High-Speed controller binding
   - TI k3 udma support for requesting TX/RX DMA channels by thread ID"

* tag 'dmaengine-6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (25 commits)
  dmaengine: of: constify of_phandle_args in of_dma_find_controller()
  dmaengine: pl08x: constify pointer to char in filter function
  MAINTAINERS: change in AMD ptdma maintainer
  MAINTAINERS: adjust file entry in MEDIATEK DMA DRIVER
  dmaengine: idxd: constify the struct device_type usage
  dt-bindings: renesas,rcar-dmac: Add r8a779h0 support
  dt-bindings: dma: convert MediaTek High-Speed controller to the json-schema
  dmaengine: idxd: make dsa_bus_type const
  dmaengine: fsl-edma: integrate TCD64 support for i.MX95
  dt-bindings: fsl-dma: fsl-edma: add fsl,imx95-edma5 compatible string
  dmaengine: mcf-edma: utilize edma_write_tcdreg() macro for TCD Access
  dmaengine: fsl-edma: add address for channel mux register in fsl_edma_chan
  dmaengine: fsl-edma: fix spare build warning
  dmaengine: fsl-edma: involve help macro fsl_edma_set(get)_tcd()
  dt-bindings: mmp-dma: convert to YAML
  dmaengine: ti: k3-psil-j721s2: Add entry for CSI2RX
  dmaengine: ti: k3-udma-glue: Add function to request RX chan for thread ID
  dmaengine: ti: k3-udma-glue: Add function to request TX chan for thread ID
  dmaengine: ti: k3-udma-glue: Update name for remote RX channel device
  dmaengine: ti: k3-udma-glue: Add function to parse channel by ID
  ...
Committed by Linus Torvalds on 2024-03-15 12:25:13 -07:00 in commit 2b3a4192dd
25 changed files with 705 additions and 301 deletions


@ -28,6 +28,9 @@ properties:
- items:
- const: allwinner,sun8i-r40-dma
- const: allwinner,sun50i-a64-dma
- items:
- const: allwinner,sun50i-h616-dma
- const: allwinner,sun50i-a100-dma
reg:
maxItems: 1
@ -59,10 +62,11 @@ required:
if:
properties:
compatible:
enum:
- allwinner,sun20i-d1-dma
- allwinner,sun50i-a100-dma
- allwinner,sun50i-h6-dma
contains:
enum:
- allwinner,sun20i-d1-dma
- allwinner,sun50i-a100-dma
- allwinner,sun50i-h6-dma
then:
properties:


@ -25,6 +25,7 @@ properties:
- fsl,imx8qm-edma
- fsl,imx93-edma3
- fsl,imx93-edma4
- fsl,imx95-edma5
- items:
- const: fsl,ls1028a-edma
- const: fsl,vf610-edma
@ -83,6 +84,7 @@ allOf:
- fsl,imx8qm-edma
- fsl,imx93-edma3
- fsl,imx93-edma4
- fsl,imx95-edma5
then:
properties:
"#dma-cells":


@ -0,0 +1,72 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/marvell,mmp-dma.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Marvell MMP DMA controller
maintainers:
- Duje Mihanović <duje.mihanovic@skole.hr>
description:
Marvell MMP SoCs may have two types of DMA controllers, peripheral and audio.
properties:
compatible:
enum:
- marvell,pdma-1.0
- marvell,adma-1.0
- marvell,pxa910-squ
reg:
maxItems: 1
interrupts:
description:
Interrupt lines for the controller, may be shared or one per DMA channel
minItems: 1
asram:
description:
A phandle to the SRAM pool
$ref: /schemas/types.yaml#/definitions/phandle
'#dma-channels':
deprecated: true
'#dma-requests':
deprecated: true
required:
- compatible
- reg
- interrupts
- '#dma-cells'
allOf:
- $ref: dma-controller.yaml#
- if:
properties:
compatible:
contains:
enum:
- marvell,pdma-1.0
then:
properties:
asram: false
else:
required:
- asram
unevaluatedProperties: false
examples:
- |
dma-controller@d4000000 {
compatible = "marvell,pdma-1.0";
reg = <0xd4000000 0x10000>;
interrupts = <47>;
#dma-cells = <2>;
dma-channels = <16>;
};


@ -0,0 +1,63 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/mediatek,mt7622-hsdma.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek High-Speed DMA Controller
maintainers:
- Sean Wang <sean.wang@mediatek.com>
allOf:
- $ref: dma-controller.yaml#
properties:
compatible:
enum:
- mediatek,mt7622-hsdma
- mediatek,mt7623-hsdma
reg:
maxItems: 1
interrupts:
maxItems: 1
clocks:
maxItems: 1
clock-names:
const: hsdma
power-domains:
maxItems: 1
"#dma-cells":
description: Channel number
const: 1
required:
- reg
- interrupts
- clocks
- clock-names
- power-domains
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/mt2701-clk.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/mt2701-power.h>
dma-controller@1b007000 {
compatible = "mediatek,mt7623-hsdma";
reg = <0x1b007000 0x1000>;
interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_LOW>;
clocks = <&ethsys CLK_ETHSYS_HSDMA>;
clock-names = "hsdma";
power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>;
#dma-cells = <1>;
};


@ -1,81 +0,0 @@
* MARVELL MMP DMA controller
Marvell Peripheral DMA Controller
Used platforms: pxa688, pxa910, pxa3xx, etc
Required properties:
- compatible: Should be "marvell,pdma-1.0"
- reg: Should contain DMA registers location and length.
- interrupts: Either contain all of the per-channel DMA interrupts
or one irq for pdma device
Optional properties:
- dma-channels: Number of DMA channels supported by the controller (defaults
to 32 when not specified)
- #dma-channels: deprecated
- dma-requests: Number of DMA requestor lines supported by the controller
(defaults to 32 when not specified)
- #dma-requests: deprecated
"marvell,pdma-1.0"
Used platforms: pxa25x, pxa27x, pxa3xx, pxa93x, pxa168, pxa910, pxa688.
Examples:
/*
* Each channel has specific irq
* ICU parse out irq channel from ICU register,
* while DMA controller may not able to distinguish the irq channel
* Using this method, interrupt-parent is required as demuxer
* For example, pxa688 icu register 0x128, bit 0~15 is PDMA channel irq,
* 18~21 is ADMA irq
*/
pdma: dma-controller@d4000000 {
compatible = "marvell,pdma-1.0";
reg = <0xd4000000 0x10000>;
interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15>;
interrupt-parent = <&intcmux32>;
dma-channels = <16>;
};
/*
* One irq for all channels
* Dmaengine driver (DMA controller) distinguish irq channel via
* parsing internal register
*/
pdma: dma-controller@d4000000 {
compatible = "marvell,pdma-1.0";
reg = <0xd4000000 0x10000>;
interrupts = <47>;
dma-channels = <16>;
};
Marvell Two Channel DMA Controller used specifically for audio
Used platforms: pxa688, pxa910
Required properties:
- compatible: Should be "marvell,adma-1.0" or "marvell,pxa910-squ"
- reg: Should contain DMA registers location and length.
- interrupts: Either contain all of the per-channel DMA interrupts
or one irq for dma device
"marvell,adma-1.0" used on pxa688
"marvell,pxa910-squ" used on pxa910
Examples:
/* each channel has specific irq */
adma0: dma-controller@d42a0800 {
compatible = "marvell,adma-1.0";
reg = <0xd42a0800 0x100>;
interrupts = <18 19>;
interrupt-parent = <&intcmux32>;
};
/* One irq for all channels */
squ: dma-controller@d42a0800 {
compatible = "marvell,pxa910-squ";
reg = <0xd42a0800 0x100>;
interrupts = <46>;
};


@ -1,33 +0,0 @@
MediaTek High-Speed DMA Controller
==================================
This device follows the generic DMA bindings defined in dma/dma.txt.
Required properties:
- compatible: Must be one of
"mediatek,mt7622-hsdma": for MT7622 SoC
"mediatek,mt7623-hsdma": for MT7623 SoC
- reg: Should contain the register's base address and length.
- interrupts: Should contain a reference to the interrupt used by this
device.
- clocks: Should be the clock specifiers corresponding to the entry in
clock-names property.
- clock-names: Should contain "hsdma" entries.
- power-domains: Phandle to the power domain that the device is part of
- #dma-cells: The length of the DMA specifier, must be <1>. This one cell
in dmas property of a client device represents the channel
number.
Example:
hsdma: dma-controller@1b007000 {
compatible = "mediatek,mt7623-hsdma";
reg = <0 0x1b007000 0 0x1000>;
interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_LOW>;
clocks = <&ethsys CLK_ETHSYS_HSDMA>;
clock-names = "hsdma";
power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>;
#dma-cells = <1>;
};
DMA clients must use the format described in dma/dma.txt file.


@ -46,6 +46,7 @@ properties:
- renesas,dmac-r8a779a0 # R-Car V3U
- renesas,dmac-r8a779f0 # R-Car S4-8
- renesas,dmac-r8a779g0 # R-Car V4H
- renesas,dmac-r8a779h0 # R-Car V4M
- const: renesas,rcar-gen4-dmac # R-Car Gen4
reg: true


@ -1064,7 +1064,7 @@ F: include/linux/amd-pstate.h
F: tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py
AMD PTDMA DRIVER
M: Sanjay R Mehta <sanju.mehta@amd.com>
M: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
L: dmaengine@vger.kernel.org
S: Maintained
F: drivers/dma/ptdma/
@ -13719,7 +13719,7 @@ L: dmaengine@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/dma/mtk-*
F: Documentation/devicetree/bindings/dma/mediatek,*
F: drivers/dma/mediatek/
MEDIATEK ETHERNET DRIVER


@ -643,16 +643,16 @@ config TEGRA20_APB_DMA
config TEGRA210_ADMA
tristate "NVIDIA Tegra210 ADMA support"
depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST)
depends on (ARCH_TEGRA || COMPILE_TEST)
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
Support for the NVIDIA Tegra210 ADMA controller driver. The
DMA controller has multiple DMA channels and is used to service
various audio clients in the Tegra210 audio processing engine
(APE). This DMA controller transfers data from memory to
peripheral and vice versa. It does not support memory to
memory data transfer.
Support for the NVIDIA Tegra210/Tegra186/Tegra194/Tegra234 ADMA
controller driver. The DMA controller has multiple DMA channels
and is used to service various audio clients in the Tegra210
audio processing engine (APE). This DMA controller transfers
data from memory to peripheral and vice versa. It does not
support memory to memory data transfer.
config TIMB_DMA
tristate "Timberdale FPGA DMA support"


@ -2239,7 +2239,7 @@ static int pl08x_resume(struct dma_chan *chan)
bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
struct pl08x_dma_chan *plchan;
char *name = chan_id;
const char *name = chan_id;
/* Reject channels for devices not bound to this driver */
if (chan->device->dev->driver != &pl08x_amba_driver.drv)


@ -90,13 +90,8 @@ int bcom_sram_init(struct device_node *sram_node, char *owner)
bcom_sram->rh = rh_create(4);
/* Attach the free zones */
#if 0
/* Currently disabled ... for future use only */
reg_addr_p = of_get_property(sram_node, "available", &psize);
#else
regaddr_p = NULL;
psize = 0;
#endif
if (!regaddr_p || !psize) {
/* Attach the whole zone */


@ -97,8 +97,8 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
* ch_mux: With the exception of 0, attempts to write a value
* already in use will be forced to 0.
*/
if (!edma_readl_chreg(fsl_chan, ch_mux))
edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
if (!edma_readl(fsl_chan->edma, fsl_chan->mux_addr))
edma_writel(fsl_chan->edma, fsl_chan->srcid, fsl_chan->mux_addr);
}
val = edma_readl_chreg(fsl_chan, ch_csr);
@ -134,7 +134,7 @@ static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
flags = fsl_edma_drvflags(fsl_chan);
if (flags & FSL_EDMA_DRV_HAS_CHMUX)
edma_writel_chreg(fsl_chan, 0, ch_mux);
edma_writel(fsl_chan->edma, 0, fsl_chan->mux_addr);
val &= ~EDMA_V3_CH_CSR_ERQ;
edma_writel_chreg(fsl_chan, val, ch_csr);
@ -351,39 +351,45 @@ static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
{
struct fsl_edma_desc *edesc = fsl_chan->edesc;
enum dma_transfer_direction dir = edesc->dirn;
dma_addr_t cur_addr, dma_addr;
dma_addr_t cur_addr, dma_addr, old_addr;
size_t len, size;
u32 nbytes = 0;
int i;
/* calculate the total size in this desc */
for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) {
nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
nbytes = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, nbytes);
if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
len += nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
len += nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter);
}
if (!in_progress)
return len;
if (dir == DMA_MEM_TO_DEV)
cur_addr = edma_read_tcdreg(fsl_chan, saddr);
else
cur_addr = edma_read_tcdreg(fsl_chan, daddr);
/* 64bit read is not atomic, need read retry when high 32bit changed */
do {
if (dir == DMA_MEM_TO_DEV) {
old_addr = edma_read_tcdreg(fsl_chan, saddr);
cur_addr = edma_read_tcdreg(fsl_chan, saddr);
} else {
old_addr = edma_read_tcdreg(fsl_chan, daddr);
cur_addr = edma_read_tcdreg(fsl_chan, daddr);
}
} while (upper_32_bits(cur_addr) != upper_32_bits(old_addr));
/* figure out the finished and calculate the residue */
for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
nbytes = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, nbytes);
if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
size = nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
size = nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter);
if (dir == DMA_MEM_TO_DEV)
dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
dma_addr = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, saddr);
else
dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
dma_addr = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, daddr);
len -= size;
if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
@ -426,8 +432,7 @@ enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
return fsl_chan->status;
}
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
struct fsl_edma_hw_tcd *tcd)
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, void *tcd)
{
u16 csr = 0;
@ -439,26 +444,26 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
*/
edma_write_tcdreg(fsl_chan, 0, csr);
edma_write_tcdreg(fsl_chan, tcd->saddr, saddr);
edma_write_tcdreg(fsl_chan, tcd->daddr, daddr);
edma_cp_tcd_to_reg(fsl_chan, tcd, saddr);
edma_cp_tcd_to_reg(fsl_chan, tcd, daddr);
edma_write_tcdreg(fsl_chan, tcd->attr, attr);
edma_write_tcdreg(fsl_chan, tcd->soff, soff);
edma_cp_tcd_to_reg(fsl_chan, tcd, attr);
edma_cp_tcd_to_reg(fsl_chan, tcd, soff);
edma_write_tcdreg(fsl_chan, tcd->nbytes, nbytes);
edma_write_tcdreg(fsl_chan, tcd->slast, slast);
edma_cp_tcd_to_reg(fsl_chan, tcd, nbytes);
edma_cp_tcd_to_reg(fsl_chan, tcd, slast);
edma_write_tcdreg(fsl_chan, tcd->citer, citer);
edma_write_tcdreg(fsl_chan, tcd->biter, biter);
edma_write_tcdreg(fsl_chan, tcd->doff, doff);
edma_cp_tcd_to_reg(fsl_chan, tcd, citer);
edma_cp_tcd_to_reg(fsl_chan, tcd, biter);
edma_cp_tcd_to_reg(fsl_chan, tcd, doff);
edma_write_tcdreg(fsl_chan, tcd->dlast_sga, dlast_sga);
edma_cp_tcd_to_reg(fsl_chan, tcd, dlast_sga);
csr = le16_to_cpu(tcd->csr);
csr = fsl_edma_get_tcd_to_cpu(fsl_chan, tcd, csr);
if (fsl_chan->is_sw) {
csr |= EDMA_TCD_CSR_START;
tcd->csr = cpu_to_le16(csr);
fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr);
}
/*
@ -473,14 +478,14 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
edma_writel_chreg(fsl_chan, edma_readl_chreg(fsl_chan, ch_csr), ch_csr);
edma_write_tcdreg(fsl_chan, tcd->csr, csr);
edma_cp_tcd_to_reg(fsl_chan, tcd, csr);
}
static inline
void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
u16 biter, u16 doff, u32 dlast_sga, bool major_int,
struct fsl_edma_hw_tcd *tcd, dma_addr_t src, dma_addr_t dst,
u16 attr, u16 soff, u32 nbytes, dma_addr_t slast, u16 citer,
u16 biter, u16 doff, dma_addr_t dlast_sga, bool major_int,
bool disable_req, bool enable_sg)
{
struct dma_slave_config *cfg = &fsl_chan->cfg;
@ -493,12 +498,12 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
* So we put the value in little endian in memory, waiting
* for fsl_edma_set_tcd_regs doing the swap.
*/
tcd->saddr = cpu_to_le32(src);
tcd->daddr = cpu_to_le32(dst);
fsl_edma_set_tcd_to_le(fsl_chan, tcd, src, saddr);
fsl_edma_set_tcd_to_le(fsl_chan, tcd, dst, daddr);
tcd->attr = cpu_to_le16(attr);
fsl_edma_set_tcd_to_le(fsl_chan, tcd, attr, attr);
tcd->soff = cpu_to_le16(soff);
fsl_edma_set_tcd_to_le(fsl_chan, tcd, soff, soff);
if (fsl_chan->is_multi_fifo) {
/* set mloff to support multiple fifo */
@ -515,15 +520,16 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
}
}
tcd->nbytes = cpu_to_le32(nbytes);
tcd->slast = cpu_to_le32(slast);
fsl_edma_set_tcd_to_le(fsl_chan, tcd, nbytes, nbytes);
fsl_edma_set_tcd_to_le(fsl_chan, tcd, slast, slast);
tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
tcd->doff = cpu_to_le16(doff);
fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_CITER_CITER(citer), citer);
fsl_edma_set_tcd_to_le(fsl_chan, tcd, doff, doff);
tcd->dlast_sga = cpu_to_le32(dlast_sga);
fsl_edma_set_tcd_to_le(fsl_chan, tcd, dlast_sga, dlast_sga);
fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_BITER_BITER(biter), biter);
tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
if (major_int)
csr |= EDMA_TCD_CSR_INT_MAJOR;
@ -539,7 +545,7 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
if (fsl_chan->is_sw)
csr |= EDMA_TCD_CSR_START;
tcd->csr = cpu_to_le16(csr);
fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr);
}
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
@ -580,8 +586,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
dma_addr_t dma_buf_next;
bool major_int = true;
int sg_len, i;
u32 src_addr, dst_addr, last_sg, nbytes;
dma_addr_t src_addr, dst_addr, last_sg;
u16 soff, doff, iter;
u32 nbytes;
if (!is_slave_direction(direction))
return NULL;
@ -653,8 +660,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
struct fsl_edma_desc *fsl_desc;
struct scatterlist *sg;
u32 src_addr, dst_addr, last_sg, nbytes;
dma_addr_t src_addr, dst_addr, last_sg;
u16 soff, doff, iter;
u32 nbytes;
int i;
if (!is_slave_direction(direction))
@ -803,7 +811,8 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
sizeof(struct fsl_edma_hw_tcd),
fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_TCD64 ?
sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd),
32, 0);
return 0;
}
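The residue path above samples a 64-bit TCD address through two 32-bit MMIO reads, so the value can tear when a transfer crosses a 4 GiB boundary between the halves. A minimal standalone sketch of the retry pattern the driver uses (illustrative C, not kernel code; read64_once stands in for a paired edma_read_tcdreg() sample):

#include <stdint.h>

/* Sample the register twice and retry until the upper halves agree;
 * a torn read shows up as a change in the high 32 bits between the
 * two samples, mirroring the upper_32_bits() check above. */
static uint64_t read64_stable(uint64_t (*read64_once)(void))
{
        uint64_t old, cur;

        do {
                old = read64_once();    /* first sample */
                cur = read64_once();    /* second sample */
        } while ((old >> 32) != (cur >> 32));

        return cur;
}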


@ -88,6 +88,20 @@ struct fsl_edma_hw_tcd {
__le16 biter;
};
struct fsl_edma_hw_tcd64 {
__le64 saddr;
__le16 soff;
__le16 attr;
__le32 nbytes;
__le64 slast;
__le64 daddr;
__le64 dlast_sga;
__le16 doff;
__le16 citer;
__le16 csr;
__le16 biter;
} __packed;
struct fsl_edma3_ch_reg {
__le32 ch_csr;
__le32 ch_es;
@ -97,7 +111,10 @@ struct fsl_edma3_ch_reg {
__le32 ch_mux;
__le32 ch_mattr; /* edma4, reserved for edma3 */
__le32 ch_reserved;
struct fsl_edma_hw_tcd tcd;
union {
struct fsl_edma_hw_tcd tcd;
struct fsl_edma_hw_tcd64 tcd64;
};
} __packed;
/*
@ -126,7 +143,7 @@ struct edma_regs {
struct fsl_edma_sw_tcd {
dma_addr_t ptcd;
struct fsl_edma_hw_tcd *vtcd;
void *vtcd;
};
struct fsl_edma_chan {
@ -145,7 +162,8 @@ struct fsl_edma_chan {
u32 dma_dev_size;
enum dma_data_direction dma_dir;
char chan_name[32];
struct fsl_edma_hw_tcd __iomem *tcd;
void __iomem *tcd;
void __iomem *mux_addr;
u32 real_count;
struct work_struct issue_worker;
struct platform_device *pdev;
@ -188,6 +206,7 @@ struct fsl_edma_desc {
#define FSL_EDMA_DRV_CLEAR_DONE_E_SG BIT(13)
/* Need clean CHn_CSR DONE before enable TCD's MAJORELINK */
#define FSL_EDMA_DRV_CLEAR_DONE_E_LINK BIT(14)
#define FSL_EDMA_DRV_TCD64 BIT(15)
#define FSL_EDMA_DRV_EDMA3 (FSL_EDMA_DRV_SPLIT_REG | \
FSL_EDMA_DRV_BUS_8BYTE | \
@ -207,6 +226,8 @@ struct fsl_edma_drvdata {
u32 chreg_off;
u32 chreg_space_sz;
u32 flags;
u32 mux_off; /* channel mux register offset */
u32 mux_skip; /* how much skip for each channel */
int (*setup_irq)(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma);
};
@ -229,23 +250,108 @@ struct fsl_edma_engine {
struct fsl_edma_chan chans[] __counted_by(n_chans);
};
#define edma_read_tcdreg(chan, __name) \
(sizeof(chan->tcd->__name) == sizeof(u32) ? \
edma_readl(chan->edma, &chan->tcd->__name) : \
edma_readw(chan->edma, &chan->tcd->__name))
#define edma_read_tcdreg_c(chan, _tcd, __name) \
(sizeof((_tcd)->__name) == sizeof(u64) ? \
edma_readq(chan->edma, &(_tcd)->__name) : \
((sizeof((_tcd)->__name) == sizeof(u32)) ? \
edma_readl(chan->edma, &(_tcd)->__name) : \
edma_readw(chan->edma, &(_tcd)->__name) \
))
#define edma_write_tcdreg(chan, val, __name) \
(sizeof(chan->tcd->__name) == sizeof(u32) ? \
edma_writel(chan->edma, (u32 __force)val, &chan->tcd->__name) : \
edma_writew(chan->edma, (u16 __force)val, &chan->tcd->__name))
#define edma_read_tcdreg(chan, __name) \
((fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) ? \
edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd64 __iomem *)chan->tcd), __name) : \
edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd __iomem *)chan->tcd), __name) \
)
#define edma_write_tcdreg_c(chan, _tcd, _val, __name) \
do { \
switch (sizeof(_tcd->__name)) { \
case sizeof(u64): \
edma_writeq(chan->edma, (u64 __force)_val, &_tcd->__name); \
break; \
case sizeof(u32): \
edma_writel(chan->edma, (u32 __force)_val, &_tcd->__name); \
break; \
case sizeof(u16): \
edma_writew(chan->edma, (u16 __force)_val, &_tcd->__name); \
break; \
case sizeof(u8): \
edma_writeb(chan->edma, (u8 __force)_val, &_tcd->__name); \
break; \
} \
} while (0)
#define edma_write_tcdreg(chan, val, __name) \
do { \
struct fsl_edma_hw_tcd64 __iomem *tcd64_r = (struct fsl_edma_hw_tcd64 __iomem *)chan->tcd; \
struct fsl_edma_hw_tcd __iomem *tcd_r = (struct fsl_edma_hw_tcd __iomem *)chan->tcd; \
\
if (fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) \
edma_write_tcdreg_c(chan, tcd64_r, val, __name); \
else \
edma_write_tcdreg_c(chan, tcd_r, val, __name); \
} while (0)
#define edma_cp_tcd_to_reg(chan, __tcd, __name) \
do { \
struct fsl_edma_hw_tcd64 __iomem *tcd64_r = (struct fsl_edma_hw_tcd64 __iomem *)chan->tcd; \
struct fsl_edma_hw_tcd __iomem *tcd_r = (struct fsl_edma_hw_tcd __iomem *)chan->tcd; \
struct fsl_edma_hw_tcd64 *tcd64_m = (struct fsl_edma_hw_tcd64 *)__tcd; \
struct fsl_edma_hw_tcd *tcd_m = (struct fsl_edma_hw_tcd *)__tcd; \
\
if (fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) \
edma_write_tcdreg_c(chan, tcd64_r, tcd64_m->__name, __name); \
else \
edma_write_tcdreg_c(chan, tcd_r, tcd_m->__name, __name); \
} while (0)
#define edma_readl_chreg(chan, __name) \
edma_readl(chan->edma, \
(void __iomem *)&(container_of(chan->tcd, struct fsl_edma3_ch_reg, tcd)->__name))
(void __iomem *)&(container_of(((__force void *)chan->tcd),\
struct fsl_edma3_ch_reg, tcd)->__name))
#define edma_writel_chreg(chan, val, __name) \
edma_writel(chan->edma, val, \
(void __iomem *)&(container_of(chan->tcd, struct fsl_edma3_ch_reg, tcd)->__name))
(void __iomem *)&(container_of(((__force void *)chan->tcd),\
struct fsl_edma3_ch_reg, tcd)->__name))
#define fsl_edma_get_tcd(_chan, _tcd, _field) \
(fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ? (((struct fsl_edma_hw_tcd64 *)_tcd)->_field) : \
(((struct fsl_edma_hw_tcd *)_tcd)->_field))
#define fsl_edma_le_to_cpu(x) \
(sizeof(x) == sizeof(u64) ? le64_to_cpu((__force __le64)(x)) : \
(sizeof(x) == sizeof(u32) ? le32_to_cpu((__force __le32)(x)) : \
le16_to_cpu((__force __le16)(x))))
#define fsl_edma_get_tcd_to_cpu(_chan, _tcd, _field) \
(fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ? \
fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd64 *)_tcd)->_field) : \
fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd *)_tcd)->_field))
#define fsl_edma_set_tcd_to_le_c(_tcd, _val, _field) \
do { \
switch (sizeof((_tcd)->_field)) { \
case sizeof(u64): \
*(__force __le64 *)(&((_tcd)->_field)) = cpu_to_le64(_val); \
break; \
case sizeof(u32): \
*(__force __le32 *)(&((_tcd)->_field)) = cpu_to_le32(_val); \
break; \
case sizeof(u16): \
*(__force __le16 *)(&((_tcd)->_field)) = cpu_to_le16(_val); \
break; \
} \
} while (0)
#define fsl_edma_set_tcd_to_le(_chan, _tcd, _val, _field) \
do { \
if (fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64) \
fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd64 *)_tcd, _val, _field); \
else \
fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd *)_tcd, _val, _field); \
} while (0)
/*
* R/W functions for big- or little-endian registers:
@ -253,6 +359,21 @@ struct fsl_edma_engine {
* For the big-endian IP module, the offset for 8-bit or 16-bit registers
* should also be swapped opposite to that in little-endian IP.
*/
static inline u64 edma_readq(struct fsl_edma_engine *edma, void __iomem *addr)
{
u64 l, h;
if (edma->big_endian) {
l = ioread32be(addr);
h = ioread32be(addr + 4);
} else {
l = ioread32(addr);
h = ioread32(addr + 4);
}
return (h << 32) | l;
}
static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
{
if (edma->big_endian)
@ -298,6 +419,18 @@ static inline void edma_writel(struct fsl_edma_engine *edma,
iowrite32(val, addr);
}
static inline void edma_writeq(struct fsl_edma_engine *edma,
u64 val, void __iomem *addr)
{
if (edma->big_endian) {
iowrite32be(val & 0xFFFFFFFF, addr);
iowrite32be(val >> 32, addr + 4);
} else {
iowrite32(val & 0xFFFFFFFF, addr);
iowrite32(val >> 32, addr + 4);
}
}
static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
{
return container_of(chan, struct fsl_edma_chan, vchan.chan);
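Because sizeof() of a TCD field is a compile-time constant, the switch in edma_write_tcdreg_c() collapses to a single branch per call site, letting one macro body drive 16-, 32- and 64-bit accessors for both TCD layouts. A self-contained sketch of that dispatch idea (plain C, illustrative only):

#include <stdint.h>
#include <stdio.h>

struct tcd32 { uint32_t saddr; uint16_t soff; };
struct tcd64 { uint64_t saddr; uint16_t soff; };

/* sizeof((tcd)->name) is resolved at compile time, so only one case
 * survives optimization; printf stands in for edma_write{w,l,q}(). */
#define write_field(tcd, val, name)                             \
        do {                                                    \
                switch (sizeof((tcd)->name)) {                  \
                case 8: printf("64-bit write\n"); break;        \
                case 4: printf("32-bit write\n"); break;        \
                case 2: printf("16-bit write\n"); break;        \
                }                                               \
                (tcd)->name = (val);                            \
        } while (0)

int main(void)
{
        struct tcd32 a = { 0 };
        struct tcd64 b = { 0 };

        write_field(&a, 0x1000u, saddr);        /* 32-bit path */
        write_field(&b, 0x200000000ull, saddr); /* 64-bit path */
        write_field(&a, 4, soff);               /* 16-bit path */
        return 0;
}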


@ -360,6 +360,18 @@ static struct fsl_edma_drvdata imx93_data4 = {
.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4,
.chreg_space_sz = 0x8000,
.chreg_off = 0x10000,
.mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux),
.mux_skip = 0x8000,
.setup_irq = fsl_edma3_irq_init,
};
static struct fsl_edma_drvdata imx95_data5 = {
.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4 |
FSL_EDMA_DRV_TCD64,
.chreg_space_sz = 0x8000,
.chreg_off = 0x10000,
.mux_off = 0x200,
.mux_skip = sizeof(u32),
.setup_irq = fsl_edma3_irq_init,
};
@ -371,6 +383,7 @@ static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,imx8qm-adma", .data = &imx8qm_audio_data},
{ .compatible = "fsl,imx93-edma3", .data = &imx93_data3},
{ .compatible = "fsl,imx93-edma4", .data = &imx93_data4},
{ .compatible = "fsl,imx95-edma5", .data = &imx95_data5},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
@ -511,6 +524,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
return ret;
}
if (drvdata->flags & FSL_EDMA_DRV_TCD64)
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
for (i = 0; i < fsl_edma->n_chans; i++) {
struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
@ -533,11 +549,12 @@ static int fsl_edma_probe(struct platform_device *pdev)
offsetof(struct fsl_edma3_ch_reg, tcd) : 0;
fsl_chan->tcd = fsl_edma->membase
+ i * drvdata->chreg_space_sz + drvdata->chreg_off + len;
fsl_chan->mux_addr = fsl_edma->membase + drvdata->mux_off + i * drvdata->mux_skip;
fsl_chan->pdev = pdev;
vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
edma_write_tcdreg(fsl_chan, 0, csr);
edma_write_tcdreg(fsl_chan, cpu_to_le32(0), csr);
fsl_edma_chan_mux(fsl_chan, 0, false);
}
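The new mux_off/mux_skip drvdata fields let this one probe line serve both mux layouts: on i.MX93 the mux register lives inside each channel's 0x8000-byte register block, while on i.MX95 the muxes form a packed u32 array at offset 0x200 from the controller base. A worked sketch of the resulting addresses (illustrative C; the 0x44000000 base and the 0x14 ch_mux offset inside the channel block are assumptions for the example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint64_t membase = 0x44000000;    /* assumed MMIO base */
        const uint64_t ch_mux = 0x14;           /* assumed offsetof(ch_reg, ch_mux) */

        for (int i = 0; i < 3; i++) {
                /* imx93_data4: mux_off = 0x10000 + ch_mux, mux_skip = 0x8000 */
                uint64_t imx93 = membase + 0x10000 + ch_mux + i * 0x8000ULL;
                /* imx95_data5: mux_off = 0x200, mux_skip = sizeof(u32) */
                uint64_t imx95 = membase + 0x200 + i * 4ULL;

                printf("ch%d: imx93 mux %#llx, imx95 mux %#llx\n", i,
                       (unsigned long long)imx93, (unsigned long long)imx95);
        }
        return 0;
}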


@ -72,7 +72,7 @@ static int idxd_bus_uevent(const struct device *dev, struct kobj_uevent_env *env
return add_uevent_var(env, "MODALIAS=" IDXD_DEVICES_MODALIAS_FMT, 0);
}
struct bus_type dsa_bus_type = {
const struct bus_type dsa_bus_type = {
.name = "dsa",
.match = idxd_config_bus_match,
.probe = idxd_config_bus_probe,


@ -152,7 +152,7 @@ static void idxd_file_dev_release(struct device *dev)
mutex_unlock(&wq->wq_lock);
}
static struct device_type idxd_cdev_file_type = {
static const struct device_type idxd_cdev_file_type = {
.name = "idxd_file",
.release = idxd_file_dev_release,
.groups = cdev_file_attribute_groups,
@ -169,7 +169,7 @@ static void idxd_cdev_dev_release(struct device *dev)
kfree(idxd_cdev);
}
static struct device_type idxd_cdev_device_type = {
static const struct device_type idxd_cdev_device_type = {
.name = "idxd_cdev",
.release = idxd_cdev_dev_release,
};


@ -282,7 +282,7 @@ typedef int (*load_device_defaults_fn_t) (struct idxd_device *idxd);
struct idxd_driver_data {
const char *name_prefix;
enum idxd_type type;
struct device_type *dev_type;
const struct device_type *dev_type;
int compl_size;
int align;
int evl_cr_off;
@ -515,15 +515,15 @@ static inline void idxd_set_user_intr(struct idxd_device *idxd, bool enable)
iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
}
extern struct bus_type dsa_bus_type;
extern const struct bus_type dsa_bus_type;
extern bool support_enqcmd;
extern struct ida idxd_ida;
extern struct device_type dsa_device_type;
extern struct device_type iax_device_type;
extern struct device_type idxd_wq_device_type;
extern struct device_type idxd_engine_device_type;
extern struct device_type idxd_group_device_type;
extern const struct device_type dsa_device_type;
extern const struct device_type iax_device_type;
extern const struct device_type idxd_wq_device_type;
extern const struct device_type idxd_engine_device_type;
extern const struct device_type idxd_group_device_type;
static inline bool is_dsa_dev(struct idxd_dev *idxd_dev)
{


@ -91,7 +91,7 @@ static void idxd_conf_engine_release(struct device *dev)
kfree(engine);
}
struct device_type idxd_engine_device_type = {
const struct device_type idxd_engine_device_type = {
.name = "engine",
.release = idxd_conf_engine_release,
.groups = idxd_engine_attribute_groups,
@ -577,7 +577,7 @@ static void idxd_conf_group_release(struct device *dev)
kfree(group);
}
struct device_type idxd_group_device_type = {
const struct device_type idxd_group_device_type = {
.name = "group",
.release = idxd_conf_group_release,
.groups = idxd_group_attribute_groups,
@ -1369,7 +1369,7 @@ static void idxd_conf_wq_release(struct device *dev)
kfree(wq);
}
struct device_type idxd_wq_device_type = {
const struct device_type idxd_wq_device_type = {
.name = "wq",
.release = idxd_conf_wq_release,
.groups = idxd_wq_attribute_groups,
@ -1798,13 +1798,13 @@ static void idxd_conf_device_release(struct device *dev)
kfree(idxd);
}
struct device_type dsa_device_type = {
const struct device_type dsa_device_type = {
.name = "dsa",
.release = idxd_conf_device_release,
.groups = idxd_attribute_groups,
};
struct device_type iax_device_type = {
const struct device_type iax_device_type = {
.name = "iax",
.release = idxd_conf_device_release,
.groups = idxd_attribute_groups,


@ -202,7 +202,7 @@ static int mcf_edma_probe(struct platform_device *pdev)
vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
mcf_chan->tcd = mcf_edma->membase + EDMA_TCD
+ i * sizeof(struct fsl_edma_hw_tcd);
iowrite32(0x0, &mcf_chan->tcd->csr);
edma_write_tcdreg(mcf_chan, cpu_to_le32(0), csr);
}
iowrite32(~0, regs->inth);


@ -29,7 +29,7 @@ static DEFINE_MUTEX(of_dma_lock);
* to the DMA data stored is retuned. A NULL pointer is returned if no match is
* found.
*/
static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec)
static struct of_dma *of_dma_find_controller(const struct of_phandle_args *dma_spec)
{
struct of_dma *ofdma;


@ -2588,6 +2588,7 @@ static struct dma_pl330_desc *pluck_desc(struct list_head *pool,
desc->status = PREP;
desc->txd.callback = NULL;
desc->txd.callback_result = NULL;
}
spin_unlock_irqrestore(lock, flags);


@ -57,6 +57,14 @@
}, \
}
#define PSIL_CSI2RX(x) \
{ \
.thread_id = x, \
.ep_config = { \
.ep_type = PSIL_EP_NATIVE, \
}, \
}
/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
static struct psil_ep j721s2_src_ep_map[] = {
/* PDMA_MCASP - McASP0-4 */
@ -114,6 +122,71 @@ static struct psil_ep j721s2_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x4707),
PSIL_PDMA_XY_PKT(0x4708),
PSIL_PDMA_XY_PKT(0x4709),
/* CSI2RX */
PSIL_CSI2RX(0x4940),
PSIL_CSI2RX(0x4941),
PSIL_CSI2RX(0x4942),
PSIL_CSI2RX(0x4943),
PSIL_CSI2RX(0x4944),
PSIL_CSI2RX(0x4945),
PSIL_CSI2RX(0x4946),
PSIL_CSI2RX(0x4947),
PSIL_CSI2RX(0x4948),
PSIL_CSI2RX(0x4949),
PSIL_CSI2RX(0x494a),
PSIL_CSI2RX(0x494b),
PSIL_CSI2RX(0x494c),
PSIL_CSI2RX(0x494d),
PSIL_CSI2RX(0x494e),
PSIL_CSI2RX(0x494f),
PSIL_CSI2RX(0x4950),
PSIL_CSI2RX(0x4951),
PSIL_CSI2RX(0x4952),
PSIL_CSI2RX(0x4953),
PSIL_CSI2RX(0x4954),
PSIL_CSI2RX(0x4955),
PSIL_CSI2RX(0x4956),
PSIL_CSI2RX(0x4957),
PSIL_CSI2RX(0x4958),
PSIL_CSI2RX(0x4959),
PSIL_CSI2RX(0x495a),
PSIL_CSI2RX(0x495b),
PSIL_CSI2RX(0x495c),
PSIL_CSI2RX(0x495d),
PSIL_CSI2RX(0x495e),
PSIL_CSI2RX(0x495f),
PSIL_CSI2RX(0x4960),
PSIL_CSI2RX(0x4961),
PSIL_CSI2RX(0x4962),
PSIL_CSI2RX(0x4963),
PSIL_CSI2RX(0x4964),
PSIL_CSI2RX(0x4965),
PSIL_CSI2RX(0x4966),
PSIL_CSI2RX(0x4967),
PSIL_CSI2RX(0x4968),
PSIL_CSI2RX(0x4969),
PSIL_CSI2RX(0x496a),
PSIL_CSI2RX(0x496b),
PSIL_CSI2RX(0x496c),
PSIL_CSI2RX(0x496d),
PSIL_CSI2RX(0x496e),
PSIL_CSI2RX(0x496f),
PSIL_CSI2RX(0x4970),
PSIL_CSI2RX(0x4971),
PSIL_CSI2RX(0x4972),
PSIL_CSI2RX(0x4973),
PSIL_CSI2RX(0x4974),
PSIL_CSI2RX(0x4975),
PSIL_CSI2RX(0x4976),
PSIL_CSI2RX(0x4977),
PSIL_CSI2RX(0x4978),
PSIL_CSI2RX(0x4979),
PSIL_CSI2RX(0x497a),
PSIL_CSI2RX(0x497b),
PSIL_CSI2RX(0x497c),
PSIL_CSI2RX(0x497d),
PSIL_CSI2RX(0x497e),
PSIL_CSI2RX(0x497f),
/* MAIN SA2UL */
PSIL_SA2UL(0x4a40, 0),
PSIL_SA2UL(0x4a41, 0),


@ -111,6 +111,35 @@ static int of_k3_udma_glue_parse(struct device_node *udmax_np,
return 0;
}
static int of_k3_udma_glue_parse_chn_common(struct k3_udma_glue_common *common, u32 thread_id,
bool tx_chn)
{
if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET))
return -EINVAL;
if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET))
return -EINVAL;
/* get psil endpoint config */
common->ep_config = psil_get_ep_config(thread_id);
if (IS_ERR(common->ep_config)) {
dev_err(common->dev,
"No configuration for psi-l thread 0x%04x\n",
thread_id);
return PTR_ERR(common->ep_config);
}
common->epib = common->ep_config->needs_epib;
common->psdata_size = common->ep_config->psd_size;
if (tx_chn)
common->dst_thread = thread_id;
else
common->src_thread = thread_id;
return 0;
}
static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
const char *name, struct k3_udma_glue_common *common,
bool tx_chn)
@ -153,38 +182,32 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
common->atype_asel = dma_spec.args[1];
}
if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
ret = -EINVAL;
goto out_put_spec;
}
if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
ret = -EINVAL;
goto out_put_spec;
}
/* get psil endpoint config */
common->ep_config = psil_get_ep_config(thread_id);
if (IS_ERR(common->ep_config)) {
dev_err(common->dev,
"No configuration for psi-l thread 0x%04x\n",
thread_id);
ret = PTR_ERR(common->ep_config);
goto out_put_spec;
}
common->epib = common->ep_config->needs_epib;
common->psdata_size = common->ep_config->psd_size;
if (tx_chn)
common->dst_thread = thread_id;
else
common->src_thread = thread_id;
ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
out_put_spec:
of_node_put(dma_spec.np);
return ret;
};
}
static int
of_k3_udma_glue_parse_chn_by_id(struct device_node *udmax_np, struct k3_udma_glue_common *common,
bool tx_chn, u32 thread_id)
{
int ret = 0;
if (unlikely(!udmax_np))
return -EINVAL;
ret = of_k3_udma_glue_parse(udmax_np, common);
if (ret)
goto out_put_spec;
ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
out_put_spec:
of_node_put(udmax_np);
return ret;
}
static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
@ -251,29 +274,13 @@ static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}
struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
static int
k3_udma_glue_request_tx_chn_common(struct device *dev,
struct k3_udma_glue_tx_channel *tx_chn,
struct k3_udma_glue_tx_channel_cfg *cfg)
{
struct k3_udma_glue_tx_channel *tx_chn;
int ret;
tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
if (!tx_chn)
return ERR_PTR(-ENOMEM);
tx_chn->common.dev = dev;
tx_chn->common.swdata_size = cfg->swdata_size;
tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
/* parse of udmap channel */
ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
&tx_chn->common, true);
if (ret)
goto err;
tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
tx_chn->common.psdata_size,
tx_chn->common.swdata_size);
@ -289,7 +296,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
if (IS_ERR(tx_chn->udma_tchanx)) {
ret = PTR_ERR(tx_chn->udma_tchanx);
dev_err(dev, "UDMAX tchanx get err %d\n", ret);
goto err;
return ret;
}
tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);
@ -302,7 +309,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
dev_err(dev, "Channel Device registration failed %d\n", ret);
put_device(&tx_chn->common.chan_dev);
tx_chn->common.chan_dev.parent = NULL;
goto err;
return ret;
}
if (xudma_is_pktdma(tx_chn->common.udmax)) {
@ -326,7 +333,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
&tx_chn->ringtxcq);
if (ret) {
dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
goto err;
return ret;
}
/* Set the dma_dev for the rings to be configured */
@ -342,13 +349,13 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
if (ret) {
dev_err(dev, "Failed to cfg ringtx %d\n", ret);
goto err;
return ret;
}
ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
if (ret) {
dev_err(dev, "Failed to cfg ringtx %d\n", ret);
goto err;
return ret;
}
/* request and cfg psi-l */
@ -359,11 +366,42 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
ret = k3_udma_glue_cfg_tx_chn(tx_chn);
if (ret) {
dev_err(dev, "Failed to cfg tchan %d\n", ret);
goto err;
return ret;
}
k3_udma_glue_dump_tx_chn(tx_chn);
return 0;
}
struct k3_udma_glue_tx_channel *
k3_udma_glue_request_tx_chn(struct device *dev, const char *name,
struct k3_udma_glue_tx_channel_cfg *cfg)
{
struct k3_udma_glue_tx_channel *tx_chn;
int ret;
tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
if (!tx_chn)
return ERR_PTR(-ENOMEM);
tx_chn->common.dev = dev;
tx_chn->common.swdata_size = cfg->swdata_size;
tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
/* parse of udmap channel */
ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
&tx_chn->common, true);
if (ret)
goto err;
ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
if (ret)
goto err;
return tx_chn;
err:
@ -372,6 +410,41 @@ err:
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
struct k3_udma_glue_tx_channel *
k3_udma_glue_request_tx_chn_for_thread_id(struct device *dev,
struct k3_udma_glue_tx_channel_cfg *cfg,
struct device_node *udmax_np, u32 thread_id)
{
struct k3_udma_glue_tx_channel *tx_chn;
int ret;
tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
if (!tx_chn)
return ERR_PTR(-ENOMEM);
tx_chn->common.dev = dev;
tx_chn->common.swdata_size = cfg->swdata_size;
tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &tx_chn->common, true, thread_id);
if (ret)
goto err;
ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
if (ret)
goto err;
return tx_chn;
err:
k3_udma_glue_release_tx_chn(tx_chn);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn_for_thread_id);
void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
if (tx_chn->psil_paired) {
@ -1000,12 +1073,59 @@ err:
return ERR_PTR(ret);
}
static int
k3_udma_glue_request_remote_rx_chn_common(struct k3_udma_glue_rx_channel *rx_chn,
struct k3_udma_glue_rx_channel_cfg *cfg,
struct device *dev)
{
int ret, i;
rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
rx_chn->common.psdata_size,
rx_chn->common.swdata_size);
rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
sizeof(*rx_chn->flows), GFP_KERNEL);
if (!rx_chn->flows)
return -ENOMEM;
rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x-0x%02x",
rx_chn->common.src_thread, rx_chn->flow_id_base);
ret = device_register(&rx_chn->common.chan_dev);
if (ret) {
dev_err(dev, "Channel Device registration failed %d\n", ret);
put_device(&rx_chn->common.chan_dev);
rx_chn->common.chan_dev.parent = NULL;
return ret;
}
if (xudma_is_pktdma(rx_chn->common.udmax)) {
/* prepare the channel device as coherent */
rx_chn->common.chan_dev.dma_coherent = true;
dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
DMA_BIT_MASK(48));
}
ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
if (ret)
return ret;
for (i = 0; i < rx_chn->flow_num; i++)
rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
k3_udma_glue_dump_rx_chn(rx_chn);
return 0;
}
static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
struct k3_udma_glue_rx_channel_cfg *cfg)
{
struct k3_udma_glue_rx_channel *rx_chn;
int ret, i;
int ret;
if (cfg->flow_id_num <= 0 ||
cfg->flow_id_use_rxchan_id ||
@ -1036,45 +1156,10 @@ k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
if (ret)
goto err;
rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
rx_chn->common.psdata_size,
rx_chn->common.swdata_size);
rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
sizeof(*rx_chn->flows), GFP_KERNEL);
if (!rx_chn->flows) {
ret = -ENOMEM;
goto err;
}
rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x",
rx_chn->common.src_thread);
ret = device_register(&rx_chn->common.chan_dev);
if (ret) {
dev_err(dev, "Channel Device registration failed %d\n", ret);
put_device(&rx_chn->common.chan_dev);
rx_chn->common.chan_dev.parent = NULL;
goto err;
}
if (xudma_is_pktdma(rx_chn->common.udmax)) {
/* prepare the channel device as coherent */
rx_chn->common.chan_dev.dma_coherent = true;
dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
DMA_BIT_MASK(48));
}
ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
if (ret)
goto err;
for (i = 0; i < rx_chn->flow_num; i++)
rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
k3_udma_glue_dump_rx_chn(rx_chn);
return rx_chn;
err:
@ -1082,6 +1167,53 @@ err:
return ERR_PTR(ret);
}
struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn_for_thread_id(struct device *dev,
struct k3_udma_glue_rx_channel_cfg *cfg,
struct device_node *udmax_np, u32 thread_id)
{
struct k3_udma_glue_rx_channel *rx_chn;
int ret;
if (cfg->flow_id_num <= 0 ||
cfg->flow_id_use_rxchan_id ||
cfg->def_flow_cfg ||
cfg->flow_id_base < 0)
return ERR_PTR(-EINVAL);
/*
* Remote RX channel is under control of Remote CPU core, so
* Linux can only request and manipulate by dedicated RX flows
*/
rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
if (!rx_chn)
return ERR_PTR(-ENOMEM);
rx_chn->common.dev = dev;
rx_chn->common.swdata_size = cfg->swdata_size;
rx_chn->remote = true;
rx_chn->udma_rchan_id = -1;
rx_chn->flow_num = cfg->flow_id_num;
rx_chn->flow_id_base = cfg->flow_id_base;
rx_chn->psil_paired = false;
ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &rx_chn->common, false, thread_id);
if (ret)
goto err;
ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
if (ret)
goto err;
return rx_chn;
err:
k3_udma_glue_release_rx_chn(rx_chn);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_remote_rx_chn_for_thread_id);
struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
struct k3_udma_glue_rx_channel_cfg *cfg)


@ -112,7 +112,9 @@
/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE 0x0000
#define XILINX_DMA_VSIZE_MASK GENMASK(12, 0)
#define XILINX_DMA_REG_HSIZE 0x0004
#define XILINX_DMA_HSIZE_MASK GENMASK(15, 0)
#define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
@ -2050,6 +2052,10 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
if (!xt->numf || !xt->sgl[0].size)
return NULL;
if (xt->numf & ~XILINX_DMA_VSIZE_MASK ||
xt->sgl[0].size & ~XILINX_DMA_HSIZE_MASK)
return NULL;
if (xt->frame_size != 1)
return NULL;


@ -26,6 +26,11 @@ struct k3_udma_glue_tx_channel;
struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
const char *name, struct k3_udma_glue_tx_channel_cfg *cfg);
struct k3_udma_glue_tx_channel *
k3_udma_glue_request_tx_chn_for_thread_id(struct device *dev,
struct k3_udma_glue_tx_channel_cfg *cfg,
struct device_node *udmax_np, u32 thread_id);
void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
struct cppi5_host_desc_t *desc_tx,
@ -109,6 +114,11 @@ struct k3_udma_glue_rx_channel *k3_udma_glue_request_rx_chn(
const char *name,
struct k3_udma_glue_rx_channel_cfg *cfg);
struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn_for_thread_id(struct device *dev,
struct k3_udma_glue_rx_channel_cfg *cfg,
struct device_node *udmax_np, u32 thread_id);
void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
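
The two *_for_thread_id variants let a client that already knows its PSI-L thread number (the CSI2RX case in this series) request a channel directly against the DMA controller's device node rather than a named dmas entry. A hedged sketch of a caller follows; the "ti,dma-controller" property name, the 0x4940 thread ID, and the flow values are illustrative assumptions, while the glue calls match the declarations above:

#include <linux/dma/k3-udma-glue.h>
#include <linux/err.h>
#include <linux/of.h>

static int demo_request_csi_rx(struct device *dev)
{
        struct k3_udma_glue_rx_channel_cfg cfg = { };
        struct k3_udma_glue_rx_channel *rx_chn;
        struct device_node *udmax_np;

        /* phandle to the UDMA/BCDMA node; the property name is hypothetical */
        udmax_np = of_parse_phandle(dev->of_node, "ti,dma-controller", 0);
        if (!udmax_np)
                return -ENODEV;

        cfg.swdata_size = 0;
        cfg.flow_id_num = 1;    /* remote RX is driven through its flows */
        cfg.flow_id_base = 0;   /* example value; must be >= 0 */

        /* 0x4940 is the first J721S2 CSI2RX source thread (see the PSIL
         * map added above); the node reference is consumed by the call. */
        rx_chn = k3_udma_glue_request_remote_rx_chn_for_thread_id(dev, &cfg,
                                                                  udmax_np, 0x4940);
        if (IS_ERR(rx_chn))
                return PTR_ERR(rx_chn);

        /* per-flow ring setup and k3_udma_glue_enable_rx_chn() would
         * follow in a real driver */
        return 0;
}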