pci-v6.3-changes

-----BEGIN PGP SIGNATURE-----
 
 iQJIBAABCgAyFiEEgMe7l+5h9hnxdsnuWYigwDrT+vwFAmP2dbsUHGJoZWxnYWFz
 QGdvb2dsZS5jb20ACgkQWYigwDrT+vzBDg//aW2IeJYku5ENXwwnCQjBlyjBGOOZ
 456KGpFt/ky0N9Jp0ZS3nQSa5YN7q+L8XY48gu6I7s1hXly8iLZKLrJN++S//k55
 BadXu7mDUyVoY74LYvBe0nlXuwJul2qnq9IJLufRucrn1yoyqApAh39IRdCzi4U8
 mP+wad7sQA0Si4bpf80uwn6Yq8SrDoO0mtmO/dZSXJooM2t2SnDXEL/fxMwTNDA4
 XsVSP9FrbPmcTLo8mkDa8Dy7JKbL6KQJF9yDlmYzuA2spQpTf+YLLfsNnmE+850h
 WTtfCjVaYtlik7i9qTB+VcN1CsGVepYKK3H5the16Aeql2Fu+Ji5KSt74C220Yi9
 ZSDA93d/EfGc5egKyBdUUMFgqhe46srRUAoWcMrx2T4ARGuOm5EYCa9C8C7dFmO0
 j6f9MYL3j2Sw3FROEKViRVOFfbIfVW1TXIo3x0fE0ud3xkg73eKp/++X8QeTMjox
 2ArY2AWPNQpUI1oMlKxlSEd5XjFf7n/hHDtFqj9bIuJzt0/8wXQf0jCYTjhpGkRB
 pmO+lColK6lp+bg8aWRRkiwN73xGdQhKaeXLo0Iq4T6xr0Lb3XoskHZvt6NIGe/A
 ds5/uwtErq6kCf2G9YG1xfh+G1bimbjWwsHCNfSNXzTsWGDFTCb8tvqF90m+7+yl
 bllxTXA6PO312Tw=
 =/y4d
 -----END PGP SIGNATURE-----

Merge tag 'pci-v6.3-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci

Pull PCI updates from Bjorn Helgaas:
 "Enumeration:

   - Rework portdrv shutdown so it disables interrupts but doesn't
     disable bus mastering, since disabling bus mastering leads to
     hangs on Loongson LS7A

   - Add mechanism to prevent Max_Read_Request_Size (MRRS) increases,
     again to avoid hardware issues on Loongson LS7A (and likely other
     devices based on DesignWare IP)

   - Ignore devices with a firmware (DT or ACPI) node that says the
     device is disabled

  Resource management:

   - Distribute spare resources to unconfigured hotplug bridges at
     boot-time (not just when hot-adding such a bridge), which makes
     hot-adding devices to docks work better. Tried this in v6.1 but had
     to revert for regressions, so try again

   - Fix root bus issue that dropped resources that happened to end
     at 0, e.g., [bus 00]

  PCI device hotplug:

   - Remove device locking when marking device as disconnected so this
     doesn't have to wait for concurrent driver bind/unbind to complete

   - Quirk more Qualcomm bridges that don't fully implement the PCIe
     Slot Status 'Command Completed' bit

  Power management:

   - Account for _S0W of the target bridge in acpi_pci_bridge_d3() so we
     don't miss hot-add notifications for USB4 docks, Thunderbolt, etc

  Reset:

   - Observe delay after reset, e.g., resuming from system sleep,
     regardless of whether a bridge can suspend to D3cold at runtime

   - Wait for secondary bus to become ready after a bridge reset

  Virtualization:

   - Avoid FLR on some AMD FCH AHCI adapters where it doesn't work

   - Allow independent IOMMU groups for some Wangxun NICs that prevent
     peer-to-peer transactions but don't advertise an ACS Capability

  Error handling:

   - Configure End-to-End-CRC (ECRC) only if Linux owns the AER
     Capability

   - Remove redundant Device Control Error Reporting Enable in the AER
     service driver since this is already done for all devices during
     enumeration

  ASPM:

   - Add pci_enable_link_state() interface to allow drivers to enable
     ASPM link state

  Endpoint framework:

   - Move dra7xx and tegra194 linkup processing from hard IRQ to
     threaded IRQ handler

   - Add a separate lock for endpoint controller list of endpoint
     function drivers to prevent deadlock in callbacks

   - Pass events from endpoint controller to endpoint function drivers
     via callbacks instead of notifiers

  Synopsys DesignWare eDMA controller driver (acked by Vinod):

   - Fix CPU vs PCI address issues

   - Fix source vs destination address issues

   - Fix issues with interleaved transfer semantics

   - Fix channel count initialization issue (issue still exists in
     several other drivers)

   - Clean up and improve debugfs usage so it will work on platforms
     with several eDMA devices

  Baikal T-1 PCIe controller driver:

   - Set a 64-bit DMA mask

  Freescale i.MX6 PCIe controller driver:

   - Add i.MX8MM, i.MX8MQ, i.MX8MP endpoint mode DT binding and driver
     support

  Intel VMD host bridge driver:

   - Add quirk to configure PCIe ASPM and LTR. This is normally done by
     the BIOS, and will be again on future products

  Marvell MVEBU PCIe controller driver:

   - Mark this driver as broken in Kconfig since bugs prevent its
     day-to-day use

  MediaTek MT7621 PCIe controller driver:

   - Delay PHY port initialization to improve boot reliability for ZBT
     WE1326, ZBT WF3526-P, and some Netgear models

  Qualcomm PCIe controller driver:

   - Add MSM8998 DT compatible string

   - Unify MSM8996 and MSM8998 clock orderings

   - Add SM8350 DT binding and driver support

   - Add IPQ8074 Gen3 DT binding and driver support

   - Correct qcom,perst-regs in DT binding

   - Add qcom_pcie_host_deinit() so the PHY is powered off and
     regulators and clocks are disabled on late host-init errors

  Socionext UniPhier Pro5 controller driver:

   - Clean up uniphier-ep reg, clocks, resets, and their names in DT
     binding

  Synopsys DesignWare PCIe controller driver:

   - Restrict coherent DMA mask to 32 bits for MSI, but allow controller
     drivers to set 64-bit streaming DMA mask

   - Add eDMA engine support in both Root Port and Endpoint controllers

  Miscellaneous:

   - Remove MODULE_LICENSE from boolean (non-modular) drivers so they
     don't look like modules, which lets modprobe correctly complain
     about attempts to load them"
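
A minimal sketch of how a driver might use the pci_enable_link_state()
interface mentioned in the ASPM note above. The PCIE_LINK_STATE_ALL
mask and the helper name are assumptions here, not taken from this
pull; treat this as an illustration rather than canonical API usage:

#include <linux/pci.h>

/* Sketch: opt the link below this device into every ASPM state the
 * hardware supports (assumes an "all states" mask exists). */
static int example_enable_aspm(struct pci_dev *pdev)
{
	return pci_enable_link_state(pdev, PCIE_LINK_STATE_ALL);
}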

* tag 'pci-v6.3-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci: (86 commits)
  PCI: dwc: Add Root Port and Endpoint controller eDMA engine support
  PCI: bt1: Set 64-bit DMA mask
  PCI: dwc: Restrict only coherent DMA mask for MSI address allocation
  dmaengine: dw-edma: Prepare dw_edma_probe() for builtin callers
  dmaengine: dw-edma: Depend on DW_EDMA instead of selecting it
  dmaengine: dw-edma: Add mem-mapped LL-entries support
  PCI: Remove MODULE_LICENSE so boolean drivers don't look like modules
  PCI: hv: Drop duplicate PCI_MSI dependency
  PCI/P2PDMA: Annotate RCU dereference
  PCI/sysfs: Constify struct kobj_type pci_slot_ktype
  PCI: hotplug: Allow marking devices as disconnected during bind/unbind
  PCI: pciehp: Add Qualcomm quirk for Command Completed erratum
  PCI: qcom: Add IPQ8074 Gen3 port support
  dt-bindings: PCI: qcom: Add IPQ8074 Gen3 port
  dt-bindings: PCI: qcom: Sort compatibles alphabetically
  PCI: qcom: Fix host-init error handling
  PCI: qcom: Add SM8350 support
  dt-bindings: PCI: qcom: Add SM8350
  dt-bindings: PCI: qcom-ep: Correct qcom,perst-regs
  dt-bindings: PCI: qcom: Unify MSM8996 and MSM8998 clock order
  ...
Merged by Linus Torvalds on 2023-02-24 16:51:40 -08:00
commit 90ddb3f034
66 changed files with 1573 additions and 814 deletions


@ -4303,7 +4303,9 @@
specified, e.g., 12@pci:8086:9c22:103c:198f
for 4096-byte alignment.
ecrc= Enable/disable PCIe ECRC (transaction layer
end-to-end CRC checking).
end-to-end CRC checking). Only effective if
OS has native AER control (either granted by
ACPI _OSC or forced via "pcie_ports=native")
bios: Use BIOS/firmware settings. This is the
default.
off: Turn ECRC off
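
As a usage note (assuming ecrc= remains a sub-option of pci=, like the
resource_alignment example above), forcing ECRC on when firmware has
not granted AER control would combine both parameters on the kernel
command line:

	pcie_ports=native pci=ecrc=on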


@ -24,6 +24,9 @@ properties:
- fsl,imx8mq-pcie
- fsl,imx8mm-pcie
- fsl,imx8mp-pcie
- fsl,imx8mm-pcie-ep
- fsl,imx8mq-pcie-ep
- fsl,imx8mp-pcie-ep
reg:
items:


@ -47,8 +47,10 @@ properties:
enable registers
$ref: "/schemas/types.yaml#/definitions/phandle-array"
items:
minItems: 3
maxItems: 3
- items:
- description: Syscon to TCSR system registers
- description: Perst enable offset
- description: Perst separation enable offset
interrupts:
items:


@ -16,25 +16,31 @@ description: |
properties:
compatible:
enum:
- qcom,pcie-ipq8064
- qcom,pcie-ipq8064-v2
- qcom,pcie-apq8064
- qcom,pcie-apq8084
- qcom,pcie-msm8996
- qcom,pcie-ipq4019
- qcom,pcie-ipq8074
- qcom,pcie-qcs404
- qcom,pcie-sa8540p
- qcom,pcie-sc7280
- qcom,pcie-sc8180x
- qcom,pcie-sc8280xp
- qcom,pcie-sdm845
- qcom,pcie-sm8150
- qcom,pcie-sm8250
- qcom,pcie-sm8450-pcie0
- qcom,pcie-sm8450-pcie1
- qcom,pcie-ipq6018
oneOf:
- enum:
- qcom,pcie-apq8064
- qcom,pcie-apq8084
- qcom,pcie-ipq4019
- qcom,pcie-ipq6018
- qcom,pcie-ipq8064
- qcom,pcie-ipq8064-v2
- qcom,pcie-ipq8074
- qcom,pcie-ipq8074-gen3
- qcom,pcie-msm8996
- qcom,pcie-qcs404
- qcom,pcie-sa8540p
- qcom,pcie-sc7280
- qcom,pcie-sc8180x
- qcom,pcie-sc8280xp
- qcom,pcie-sdm845
- qcom,pcie-sm8150
- qcom,pcie-sm8250
- qcom,pcie-sm8350
- qcom,pcie-sm8450-pcie0
- qcom,pcie-sm8450-pcie1
- items:
- const: qcom,pcie-msm8998
- const: qcom,pcie-msm8996
reg:
minItems: 4
@ -153,6 +159,7 @@ allOf:
contains:
enum:
- qcom,pcie-ipq6018
- qcom,pcie-ipq8074-gen3
then:
properties:
reg:
@ -195,6 +202,7 @@ allOf:
- qcom,pcie-sc8180x
- qcom,pcie-sc8280xp
- qcom,pcie-sm8250
- qcom,pcie-sm8350
- qcom,pcie-sm8450-pcie0
- qcom,pcie-sm8450-pcie1
then:
@ -312,27 +320,17 @@ allOf:
enum:
- qcom,pcie-msm8996
then:
oneOf:
- properties:
clock-names:
items:
- const: pipe # Pipe Clock driving internal logic
- const: aux # Auxiliary (AUX) clock
- const: cfg # Configuration clock
- const: bus_master # Master AXI clock
- const: bus_slave # Slave AXI clock
- properties:
clock-names:
items:
- const: pipe # Pipe Clock driving internal logic
- const: bus_master # Master AXI clock
- const: bus_slave # Slave AXI clock
- const: cfg # Configuration clock
- const: aux # Auxiliary (AUX) clock
properties:
clocks:
minItems: 5
maxItems: 5
clock-names:
items:
- const: pipe # Pipe Clock driving internal logic
- const: aux # Auxiliary (AUX) clock
- const: cfg # Configuration clock
- const: bus_master # Master AXI clock
- const: bus_slave # Slave AXI clock
resets: false
reset-names: false
@ -373,6 +371,7 @@ allOf:
contains:
enum:
- qcom,pcie-ipq6018
- qcom,pcie-ipq8074-gen3
then:
properties:
clocks:
@ -550,6 +549,35 @@ allOf:
items:
- const: pci # PCIe core reset
- if:
properties:
compatible:
contains:
enum:
- qcom,pcie-sm8350
then:
properties:
clocks:
minItems: 8
maxItems: 9
clock-names:
minItems: 8
items:
- const: aux # Auxiliary clock
- const: cfg # Configuration clock
- const: bus_master # Master AXI clock
- const: bus_slave # Slave AXI clock
- const: slave_q2a # Slave Q2A clock
- const: tbu # PCIe TBU clock
- const: ddrss_sf_tbu # PCIe SF TBU clock
- const: aggre1 # Aggre NoC PCIe1 AXI clock
- const: aggre0 # Aggre NoC PCIe0 AXI clock
resets:
maxItems: 1
reset-names:
items:
- const: pci # PCIe core reset
- if:
properties:
compatible:
@ -664,6 +692,7 @@ allOf:
- qcom,pcie-ipq8064
- qcom,pcie-ipq8064v2
- qcom,pcie-ipq8074
- qcom,pcie-ipq8074-gen3
- qcom,pcie-qcs404
then:
required:
@ -692,6 +721,7 @@ allOf:
- qcom,pcie-sdm845
- qcom,pcie-sm8150
- qcom,pcie-sm8250
- qcom,pcie-sm8350
- qcom,pcie-sm8450-pcie0
- qcom,pcie-sm8450-pcie1
then:
@ -746,6 +776,7 @@ allOf:
- qcom,pcie-ipq8064
- qcom,pcie-ipq8064-v2
- qcom,pcie-ipq8074
- qcom,pcie-ipq8074-gen3
- qcom,pcie-qcs404
- qcom,pcie-sa8540p
then:


@ -15,9 +15,6 @@ description: |
maintainers:
- Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
allOf:
- $ref: /schemas/pci/snps,dw-pcie-ep.yaml#
properties:
compatible:
enum:
@ -29,40 +26,25 @@ properties:
maxItems: 5
reg-names:
oneOf:
- items:
- const: dbi
- const: dbi2
- const: link
- const: addr_space
- items:
- const: dbi
- const: dbi2
- const: link
- const: addr_space
- const: atu
minItems: 4
items:
- const: dbi
- const: dbi2
- const: link
- const: addr_space
- const: atu
clocks:
minItems: 1
maxItems: 2
clock-names:
oneOf:
- items: # for Pro5
- const: gio
- const: link
- const: link # for NX1
clock-names: true
resets:
minItems: 1
maxItems: 2
reset-names:
oneOf:
- items: # for Pro5
- const: gio
- const: link
- const: link # for NX1
reset-names: true
num-ib-windows:
const: 16
@ -78,6 +60,46 @@ properties:
phy-names:
const: pcie-phy
allOf:
- $ref: /schemas/pci/snps,dw-pcie-ep.yaml#
- if:
properties:
compatible:
contains:
const: socionext,uniphier-pro5-pcie-ep
then:
properties:
reg:
maxItems: 4
reg-names:
maxItems: 4
clocks:
minItems: 2
clock-names:
items:
- const: gio
- const: link
resets:
minItems: 2
reset-names:
items:
- const: gio
- const: link
else:
properties:
reg:
minItems: 5
reg-names:
minItems: 5
clocks:
maxItems: 1
clock-names:
const: link
resets:
maxItems: 1
reset-names:
const: link
required:
- compatible
- reg


@ -484,6 +484,25 @@ void acpi_dev_power_up_children_with_adr(struct acpi_device *adev)
acpi_dev_for_each_child(adev, acpi_power_up_if_adr_present, NULL);
}
/**
* acpi_dev_power_state_for_wake - Deepest power state for wakeup signaling
* @adev: ACPI companion of the target device.
*
* Evaluate _S0W for @adev and return the value produced by it or return
* ACPI_STATE_UNKNOWN on errors (including _S0W not present).
*/
u8 acpi_dev_power_state_for_wake(struct acpi_device *adev)
{
unsigned long long state;
acpi_status status;
status = acpi_evaluate_integer(adev->handle, "_S0W", NULL, &state);
if (ACPI_FAILURE(status))
return ACPI_STATE_UNKNOWN;
return state;
}
#ifdef CONFIG_PM
static DEFINE_MUTEX(acpi_pm_notifier_lock);
static DEFINE_MUTEX(acpi_pm_notifier_install_lock);
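
A sketch of how a PCI/ACPI caller such as acpi_pci_bridge_d3() might
consume this new helper. The helper name and the exact policy below
are illustrative assumptions, not the kernel's actual logic:

#include <linux/acpi.h>

/* Illustrative policy: only let the bridge enter D3 if _S0W says the
 * device can still signal wakeup (e.g. hot-add) from D3hot or deeper. */
static bool bridge_wake_allows_d3(struct acpi_device *adev)
{
	u8 state = acpi_dev_power_state_for_wake(adev);

	if (state == ACPI_STATE_UNKNOWN)
		return true;	/* no _S0W: assume no wakeup constraint */

	return state >= ACPI_STATE_D3_HOT;
}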


@ -9,11 +9,14 @@ config DW_EDMA
Support the Synopsys DesignWare eDMA controller, normally
implemented in endpoint SoCs.
if DW_EDMA
config DW_EDMA_PCIE
tristate "Synopsys DesignWare eDMA PCIe driver"
depends on PCI && PCI_MSI
select DW_EDMA
help
Provides glue logic between the Synopsys DesignWare
eDMA controller and a PCIe endpoint device. This also serves
as a reference design for anyone who wants to use this IP.
endif # DW_EDMA


@ -39,6 +39,17 @@ struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
return container_of(vd, struct dw_edma_desc, vd);
}
static inline
u64 dw_edma_get_pci_address(struct dw_edma_chan *chan, phys_addr_t cpu_addr)
{
struct dw_edma_chip *chip = chan->dw->chip;
if (chip->ops->pci_address)
return chip->ops->pci_address(chip->dev, cpu_addr);
return cpu_addr;
}
static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *burst;
@ -197,6 +208,24 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)
desc->chunks_alloc--;
}
static void dw_edma_device_caps(struct dma_chan *dchan,
struct dma_slave_caps *caps)
{
struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
if (chan->dir == EDMA_DIR_READ)
caps->directions = BIT(DMA_DEV_TO_MEM);
else
caps->directions = BIT(DMA_MEM_TO_DEV);
} else {
if (chan->dir == EDMA_DIR_WRITE)
caps->directions = BIT(DMA_DEV_TO_MEM);
else
caps->directions = BIT(DMA_MEM_TO_DEV);
}
}
static int dw_edma_device_config(struct dma_chan *dchan,
struct dma_slave_config *config)
{
@ -327,11 +356,12 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
enum dma_transfer_direction dir = xfer->direction;
phys_addr_t src_addr, dst_addr;
struct scatterlist *sg = NULL;
struct dw_edma_chunk *chunk;
struct dw_edma_burst *burst;
struct dw_edma_desc *desc;
u64 src_addr, dst_addr;
size_t fsz = 0;
u32 cnt = 0;
int i;
@ -381,9 +411,9 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
if (xfer->xfer.sg.len < 1)
return NULL;
} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
if (!xfer->xfer.il->numf)
if (!xfer->xfer.il->numf || xfer->xfer.il->frame_size < 1)
return NULL;
if (xfer->xfer.il->numf > 0 && xfer->xfer.il->frame_size > 0)
if (!xfer->xfer.il->src_inc || !xfer->xfer.il->dst_inc)
return NULL;
} else {
return NULL;
@ -405,16 +435,19 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
dst_addr = chan->config.dst_addr;
}
if (dir == DMA_DEV_TO_MEM)
src_addr = dw_edma_get_pci_address(chan, (phys_addr_t)src_addr);
else
dst_addr = dw_edma_get_pci_address(chan, (phys_addr_t)dst_addr);
if (xfer->type == EDMA_XFER_CYCLIC) {
cnt = xfer->xfer.cyclic.cnt;
} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
cnt = xfer->xfer.sg.len;
sg = xfer->xfer.sg.sgl;
} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
if (xfer->xfer.il->numf > 0)
cnt = xfer->xfer.il->numf;
else
cnt = xfer->xfer.il->frame_size;
cnt = xfer->xfer.il->numf * xfer->xfer.il->frame_size;
fsz = xfer->xfer.il->frame_size;
}
for (i = 0; i < cnt; i++) {
@ -436,7 +469,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
else if (xfer->type == EDMA_XFER_SCATTER_GATHER)
burst->sz = sg_dma_len(sg);
else if (xfer->type == EDMA_XFER_INTERLEAVED)
burst->sz = xfer->xfer.il->sgl[i].size;
burst->sz = xfer->xfer.il->sgl[i % fsz].size;
chunk->ll_region.sz += burst->sz;
desc->alloc_sz += burst->sz;
@ -455,6 +488,8 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
* and destination addresses are increased
* by the same portion (data length)
*/
} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
burst->dar = dst_addr;
}
} else {
burst->dar = dst_addr;
@ -470,25 +505,24 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
* and destination addresses are increased
* by the same portion (data length)
*/
} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
burst->sar = src_addr;
}
}
if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
sg = sg_next(sg);
} else if (xfer->type == EDMA_XFER_INTERLEAVED &&
xfer->xfer.il->frame_size > 0) {
} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
struct dma_interleaved_template *il = xfer->xfer.il;
struct data_chunk *dc = &il->sgl[i];
struct data_chunk *dc = &il->sgl[i % fsz];
if (il->src_sgl) {
src_addr += burst->sz;
src_addr += burst->sz;
if (il->src_sgl)
src_addr += dmaengine_get_src_icg(il, dc);
}
if (il->dst_sgl) {
dst_addr += burst->sz;
dst_addr += burst->sz;
if (il->dst_sgl)
dst_addr += dmaengine_get_dst_icg(il, dc);
}
}
}
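
The interleaved-transfer fix above boils down to this: the template's
sgl[] describes one frame of frame_size chunks repeated numf times, so
the driver now programs numf * frame_size bursts and indexes the chunk
table modulo the frame size. A standalone sketch of that iteration
(plain C, DMA specifics elided):

#include <stddef.h>
#include <stdio.h>

struct chunk_sketch { size_t size; };

static void walk_frames(const struct chunk_sketch *sgl,
			size_t frame_size, size_t numf)
{
	size_t cnt = numf * frame_size;	/* one burst per chunk per frame */
	size_t i;

	for (i = 0; i < cnt; i++) {
		const struct chunk_sketch *dc = &sgl[i % frame_size];

		printf("burst %zu: %zu bytes\n", i, dc->size);
	}
}

int main(void)
{
	struct chunk_sketch sgl[] = { { 64 }, { 128 } };

	walk_frames(sgl, 2, 3);	/* 3 frames x 2 chunks = 6 bursts */
	return 0;
}
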
@ -701,92 +735,76 @@ static void dw_edma_free_chan_resources(struct dma_chan *dchan)
}
}
static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
u32 wr_alloc, u32 rd_alloc)
static int dw_edma_channel_setup(struct dw_edma *dw, u32 wr_alloc, u32 rd_alloc)
{
struct dw_edma_chip *chip = dw->chip;
struct dw_edma_region *dt_region;
struct device *dev = chip->dev;
struct dw_edma_chan *chan;
struct dw_edma_irq *irq;
struct dma_device *dma;
u32 alloc, off_alloc;
u32 i, j, cnt;
int err = 0;
u32 i, ch_cnt;
u32 pos;
if (write) {
i = 0;
cnt = dw->wr_ch_cnt;
dma = &dw->wr_edma;
alloc = wr_alloc;
off_alloc = 0;
} else {
i = dw->wr_ch_cnt;
cnt = dw->rd_ch_cnt;
dma = &dw->rd_edma;
alloc = rd_alloc;
off_alloc = wr_alloc;
}
ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
dma = &dw->dma;
INIT_LIST_HEAD(&dma->channels);
for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) {
for (i = 0; i < ch_cnt; i++) {
chan = &dw->chan[i];
dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL);
if (!dt_region)
return -ENOMEM;
chan->vc.chan.private = dt_region;
chan->dw = dw;
chan->id = j;
chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
if (i < dw->wr_ch_cnt) {
chan->id = i;
chan->dir = EDMA_DIR_WRITE;
} else {
chan->id = i - dw->wr_ch_cnt;
chan->dir = EDMA_DIR_READ;
}
chan->configured = false;
chan->request = EDMA_REQ_NONE;
chan->status = EDMA_ST_IDLE;
if (write)
chan->ll_max = (chip->ll_region_wr[j].sz / EDMA_LL_SZ);
if (chan->dir == EDMA_DIR_WRITE)
chan->ll_max = (chip->ll_region_wr[chan->id].sz / EDMA_LL_SZ);
else
chan->ll_max = (chip->ll_region_rd[j].sz / EDMA_LL_SZ);
chan->ll_max = (chip->ll_region_rd[chan->id].sz / EDMA_LL_SZ);
chan->ll_max -= 1;
dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
write ? "write" : "read", j, chan->ll_max);
chan->dir == EDMA_DIR_WRITE ? "write" : "read",
chan->id, chan->ll_max);
if (dw->nr_irqs == 1)
pos = 0;
else if (chan->dir == EDMA_DIR_WRITE)
pos = chan->id % wr_alloc;
else
pos = off_alloc + (j % alloc);
pos = wr_alloc + chan->id % rd_alloc;
irq = &dw->irq[pos];
if (write)
irq->wr_mask |= BIT(j);
if (chan->dir == EDMA_DIR_WRITE)
irq->wr_mask |= BIT(chan->id);
else
irq->rd_mask |= BIT(j);
irq->rd_mask |= BIT(chan->id);
irq->dw = dw;
memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));
dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
write ? "write" : "read", j,
chan->dir == EDMA_DIR_WRITE ? "write" : "read", chan->id,
chan->msi.address_hi, chan->msi.address_lo,
chan->msi.data);
chan->vc.desc_free = vchan_free_desc;
vchan_init(&chan->vc, dma);
chan->vc.chan.private = chan->dir == EDMA_DIR_WRITE ?
&dw->chip->dt_region_wr[chan->id] :
&dw->chip->dt_region_rd[chan->id];
if (write) {
dt_region->paddr = chip->dt_region_wr[j].paddr;
dt_region->vaddr = chip->dt_region_wr[j].vaddr;
dt_region->sz = chip->dt_region_wr[j].sz;
} else {
dt_region->paddr = chip->dt_region_rd[j].paddr;
dt_region->vaddr = chip->dt_region_rd[j].vaddr;
dt_region->sz = chip->dt_region_rd[j].sz;
}
vchan_init(&chan->vc, dma);
dw_edma_v0_core_device_config(chan);
}
@ -797,16 +815,16 @@ static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
dma_cap_set(DMA_CYCLIC, dma->cap_mask);
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
dma->chancnt = cnt;
/* Set DMA channel callbacks */
dma->dev = chip->dev;
dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
dma->device_free_chan_resources = dw_edma_free_chan_resources;
dma->device_caps = dw_edma_device_caps;
dma->device_config = dw_edma_device_config;
dma->device_pause = dw_edma_device_pause;
dma->device_resume = dw_edma_device_resume;
@ -820,9 +838,7 @@ static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
dma_set_max_seg_size(dma->dev, U32_MAX);
/* Register DMA device */
err = dma_async_device_register(dma);
return err;
return dma_async_device_register(dma);
}
static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
@ -893,10 +909,8 @@ static int dw_edma_irq_request(struct dw_edma *dw,
dw_edma_interrupt_read,
IRQF_SHARED, dw->name,
&dw->irq[i]);
if (err) {
dw->nr_irqs = i;
return err;
}
if (err)
goto err_irq_free;
if (irq_get_msi_desc(irq))
get_cached_msi_msg(irq, &dw->irq[i].msi);
@ -905,6 +919,14 @@ static int dw_edma_irq_request(struct dw_edma *dw,
dw->nr_irqs = i;
}
return 0;
err_irq_free:
for (i--; i >= 0; i--) {
irq = chip->ops->irq_vector(dev, i);
free_irq(irq, &dw->irq[i]);
}
return err;
}
@ -951,7 +973,8 @@ int dw_edma_probe(struct dw_edma_chip *chip)
if (!dw->chan)
return -ENOMEM;
snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id);
snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%s",
dev_name(chip->dev));
/* Disable eDMA, only to establish the ideal initial conditions */
dw_edma_v0_core_off(dw);
@ -961,13 +984,8 @@ int dw_edma_probe(struct dw_edma_chip *chip)
if (err)
return err;
/* Setup write channels */
err = dw_edma_channel_setup(dw, true, wr_alloc, rd_alloc);
if (err)
goto err_irq_free;
/* Setup read channels */
err = dw_edma_channel_setup(dw, false, wr_alloc, rd_alloc);
/* Setup write/read channels */
err = dw_edma_channel_setup(dw, wr_alloc, rd_alloc);
if (err)
goto err_irq_free;
@ -993,6 +1011,10 @@ int dw_edma_remove(struct dw_edma_chip *chip)
struct dw_edma *dw = chip->dw;
int i;
/* Skip removal if no private data found */
if (!dw)
return -ENODEV;
/* Disable eDMA */
dw_edma_v0_core_off(dw);
@ -1001,23 +1023,13 @@ int dw_edma_remove(struct dw_edma_chip *chip)
free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);
/* Deregister eDMA device */
dma_async_device_unregister(&dw->wr_edma);
list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
dma_async_device_unregister(&dw->dma);
list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
vc.chan.device_node) {
tasklet_kill(&chan->vc.task);
list_del(&chan->vc.chan.device_node);
}
dma_async_device_unregister(&dw->rd_edma);
list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
vc.chan.device_node) {
tasklet_kill(&chan->vc.task);
list_del(&chan->vc.chan.device_node);
}
/* Turn debugfs off */
dw_edma_v0_core_debugfs_off(dw);
return 0;
}
EXPORT_SYMBOL_GPL(dw_edma_remove);


@ -96,12 +96,11 @@ struct dw_edma_irq {
};
struct dw_edma {
char name[20];
char name[32];
struct dma_device dma;
struct dma_device wr_edma;
u16 wr_ch_cnt;
struct dma_device rd_edma;
u16 rd_ch_cnt;
struct dw_edma_irq *irq;
@ -112,9 +111,6 @@ struct dw_edma {
raw_spinlock_t lock; /* Only for legacy */
struct dw_edma_chip *chip;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
#endif /* CONFIG_DEBUG_FS */
};
struct dw_edma_sg {


@ -95,8 +95,23 @@ static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr)
return pci_irq_vector(to_pci_dev(dev), nr);
}
static u64 dw_edma_pcie_address(struct device *dev, phys_addr_t cpu_addr)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct pci_bus_region region;
struct resource res = {
.flags = IORESOURCE_MEM,
.start = cpu_addr,
.end = cpu_addr,
};
pcibios_resource_to_bus(pdev->bus, &region, &res);
return region.start;
}
static const struct dw_edma_core_ops dw_edma_pcie_core_ops = {
.irq_vector = dw_edma_pcie_irq_vector,
.pci_address = dw_edma_pcie_address,
};
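
The pci_address callback above exists because the eDMA engine fetches
linked-list entries from the PCI side of the host bridge, so the
pointers it is handed must be bus addresses; on machines where the
bridge applies an offset these differ from CPU physical addresses. The
probe changes below apply the same correction to the BAR-derived paddr
values via pci_bus_address(). A one-line sketch of that idiom (helper
name is illustrative):

#include <linux/pci.h>

/* Bus-side address of an offset within a BAR (sketch) */
static u64 example_bar_bus_addr(struct pci_dev *pdev, int bar, u64 off)
{
	return pci_bus_address(pdev, bar) + off;
}
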
static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
@ -207,7 +222,6 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
/* Data structure initialization */
chip->dev = dev;
chip->id = pdev->devfn;
chip->mf = vsec_data.mf;
chip->nr_irqs = nr_irqs;
@ -226,21 +240,21 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
struct dw_edma_block *ll_block = &vsec_data.ll_wr[i];
struct dw_edma_block *dt_block = &vsec_data.dt_wr[i];
ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar];
if (!ll_region->vaddr)
ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar];
if (!ll_region->vaddr.io)
return -ENOMEM;
ll_region->vaddr += ll_block->off;
ll_region->paddr = pdev->resource[ll_block->bar].start;
ll_region->vaddr.io += ll_block->off;
ll_region->paddr = pci_bus_address(pdev, ll_block->bar);
ll_region->paddr += ll_block->off;
ll_region->sz = ll_block->sz;
dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar];
if (!dt_region->vaddr)
dt_region->vaddr.io = pcim_iomap_table(pdev)[dt_block->bar];
if (!dt_region->vaddr.io)
return -ENOMEM;
dt_region->vaddr += dt_block->off;
dt_region->paddr = pdev->resource[dt_block->bar].start;
dt_region->vaddr.io += dt_block->off;
dt_region->paddr = pci_bus_address(pdev, dt_block->bar);
dt_region->paddr += dt_block->off;
dt_region->sz = dt_block->sz;
}
@ -251,21 +265,21 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
struct dw_edma_block *ll_block = &vsec_data.ll_rd[i];
struct dw_edma_block *dt_block = &vsec_data.dt_rd[i];
ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar];
if (!ll_region->vaddr)
ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar];
if (!ll_region->vaddr.io)
return -ENOMEM;
ll_region->vaddr += ll_block->off;
ll_region->paddr = pdev->resource[ll_block->bar].start;
ll_region->vaddr.io += ll_block->off;
ll_region->paddr = pci_bus_address(pdev, ll_block->bar);
ll_region->paddr += ll_block->off;
ll_region->sz = ll_block->sz;
dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar];
if (!dt_region->vaddr)
dt_region->vaddr.io = pcim_iomap_table(pdev)[dt_block->bar];
if (!dt_region->vaddr.io)
return -ENOMEM;
dt_region->vaddr += dt_block->off;
dt_region->paddr = pdev->resource[dt_block->bar].start;
dt_region->vaddr.io += dt_block->off;
dt_region->paddr = pci_bus_address(pdev, dt_block->bar);
dt_region->paddr += dt_block->off;
dt_region->sz = dt_block->sz;
}
@ -289,24 +303,24 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.ll_wr[i].bar,
vsec_data.ll_wr[i].off, chip->ll_region_wr[i].sz,
chip->ll_region_wr[i].vaddr, &chip->ll_region_wr[i].paddr);
chip->ll_region_wr[i].vaddr.io, &chip->ll_region_wr[i].paddr);
pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.dt_wr[i].bar,
vsec_data.dt_wr[i].off, chip->dt_region_wr[i].sz,
chip->dt_region_wr[i].vaddr, &chip->dt_region_wr[i].paddr);
chip->dt_region_wr[i].vaddr.io, &chip->dt_region_wr[i].paddr);
}
for (i = 0; i < chip->ll_rd_cnt; i++) {
pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.ll_rd[i].bar,
vsec_data.ll_rd[i].off, chip->ll_region_rd[i].sz,
chip->ll_region_rd[i].vaddr, &chip->ll_region_rd[i].paddr);
chip->ll_region_rd[i].vaddr.io, &chip->ll_region_rd[i].paddr);
pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.dt_rd[i].bar,
vsec_data.dt_rd[i].off, chip->dt_region_rd[i].sz,
chip->dt_region_rd[i].vaddr, &chip->dt_region_rd[i].paddr);
chip->dt_region_rd[i].vaddr.io, &chip->dt_region_rd[i].paddr);
}
pci_dbg(pdev, "Nr. IRQs:\t%u\n", chip->nr_irqs);


@ -8,6 +8,8 @@
#include <linux/bitfield.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "dw-edma-v0-regs.h"
@ -53,8 +55,6 @@ static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
SET_32(dw, rd_##name, value); \
} while (0)
#ifdef CONFIG_64BIT
#define SET_64(dw, name, value) \
writeq(value, &(__dw_regs(dw)->name))
@ -80,8 +80,6 @@ static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
SET_64(dw, rd_##name, value); \
} while (0)
#endif /* CONFIG_64BIT */
#define SET_COMPAT(dw, name, value) \
writel(value, &(__dw_regs(dw)->type.unroll.name))
@ -161,11 +159,6 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
#define GET_CH_32(dw, dir, ch, name) \
readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
#define SET_LL_32(ll, value) \
writel(value, ll)
#ifdef CONFIG_64BIT
static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
u64 value, void __iomem *addr)
{
@ -192,7 +185,7 @@ static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
const void __iomem *addr)
{
u32 value;
u64 value;
if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
@ -222,11 +215,6 @@ static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
#define GET_CH_64(dw, dir, ch, name) \
readq_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
#define SET_LL_64(ll, value) \
writeq(value, ll)
#endif /* CONFIG_64BIT */
/* eDMA management callbacks */
void dw_edma_v0_core_off(struct dw_edma *dw)
{
@ -298,17 +286,53 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
GET_RW_32(dw, dir, int_status));
}
static void dw_edma_v0_write_ll_data(struct dw_edma_chunk *chunk, int i,
u32 control, u32 size, u64 sar, u64 dar)
{
ptrdiff_t ofs = i * sizeof(struct dw_edma_v0_lli);
if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
struct dw_edma_v0_lli *lli = chunk->ll_region.vaddr.mem + ofs;
lli->control = control;
lli->transfer_size = size;
lli->sar.reg = sar;
lli->dar.reg = dar;
} else {
struct dw_edma_v0_lli __iomem *lli = chunk->ll_region.vaddr.io + ofs;
writel(control, &lli->control);
writel(size, &lli->transfer_size);
writeq(sar, &lli->sar.reg);
writeq(dar, &lli->dar.reg);
}
}
static void dw_edma_v0_write_ll_link(struct dw_edma_chunk *chunk,
int i, u32 control, u64 pointer)
{
ptrdiff_t ofs = i * sizeof(struct dw_edma_v0_lli);
if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
struct dw_edma_v0_llp *llp = chunk->ll_region.vaddr.mem + ofs;
llp->control = control;
llp->llp.reg = pointer;
} else {
struct dw_edma_v0_llp __iomem *llp = chunk->ll_region.vaddr.io + ofs;
writel(control, &llp->control);
writeq(pointer, &llp->llp.reg);
}
}
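
A gloss on the two branches above (assuming the region's vaddr field
became a union in this series):

/*
 * DW_EDMA_CHIP_LOCAL: the eDMA and its linked list live on this side
 * of the link, so vaddr.mem is an ordinary pointer and plain stores
 * suffice.  Otherwise the list sits in remote PCI memory, so vaddr.io
 * is an __iomem pointer and must be written with writel()/writeq().
 */
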
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *child;
struct dw_edma_chan *chan = chunk->chan;
struct dw_edma_v0_lli __iomem *lli;
struct dw_edma_v0_llp __iomem *llp;
u32 control = 0, i = 0;
int j;
lli = chunk->ll_region.vaddr;
if (chunk->cb)
control = DW_EDMA_V0_CB;
@ -320,41 +344,16 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
control |= DW_EDMA_V0_RIE;
}
/* Channel control */
SET_LL_32(&lli[i].control, control);
/* Transfer size */
SET_LL_32(&lli[i].transfer_size, child->sz);
/* SAR */
#ifdef CONFIG_64BIT
SET_LL_64(&lli[i].sar.reg, child->sar);
#else /* CONFIG_64BIT */
SET_LL_32(&lli[i].sar.lsb, lower_32_bits(child->sar));
SET_LL_32(&lli[i].sar.msb, upper_32_bits(child->sar));
#endif /* CONFIG_64BIT */
/* DAR */
#ifdef CONFIG_64BIT
SET_LL_64(&lli[i].dar.reg, child->dar);
#else /* CONFIG_64BIT */
SET_LL_32(&lli[i].dar.lsb, lower_32_bits(child->dar));
SET_LL_32(&lli[i].dar.msb, upper_32_bits(child->dar));
#endif /* CONFIG_64BIT */
i++;
dw_edma_v0_write_ll_data(chunk, i++, control, child->sz,
child->sar, child->dar);
}
llp = (void __iomem *)&lli[i];
control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
if (!chunk->cb)
control |= DW_EDMA_V0_CB;
/* Channel control */
SET_LL_32(&llp->control, control);
/* Linked list */
#ifdef CONFIG_64BIT
SET_LL_64(&llp->llp.reg, chunk->ll_region.paddr);
#else /* CONFIG_64BIT */
SET_LL_32(&llp->llp.lsb, lower_32_bits(chunk->ll_region.paddr));
SET_LL_32(&llp->llp.msb, upper_32_bits(chunk->ll_region.paddr));
#endif /* CONFIG_64BIT */
dw_edma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
}
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
@ -504,8 +503,3 @@ void dw_edma_v0_core_debugfs_on(struct dw_edma *dw)
{
dw_edma_v0_debugfs_on(dw);
}
void dw_edma_v0_core_debugfs_off(struct dw_edma *dw)
{
dw_edma_v0_debugfs_off(dw);
}


@ -23,6 +23,5 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first);
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan);
/* eDMA debug fs callbacks */
void dw_edma_v0_core_debugfs_on(struct dw_edma *dw);
void dw_edma_v0_core_debugfs_off(struct dw_edma *dw);
#endif /* _DW_EDMA_V0_CORE_H */


@ -13,76 +13,79 @@
#include "dw-edma-v0-regs.h"
#include "dw-edma-core.h"
#define REGS_ADDR(name) \
((void __force *)&regs->name)
#define REGISTER(name) \
{ #name, REGS_ADDR(name) }
#define REGS_ADDR(dw, name) \
({ \
struct dw_edma_v0_regs __iomem *__regs = (dw)->chip->reg_base; \
\
(void __iomem *)&__regs->name; \
})
#define WR_REGISTER(name) \
{ #name, REGS_ADDR(wr_##name) }
#define RD_REGISTER(name) \
{ #name, REGS_ADDR(rd_##name) }
#define REGS_CH_ADDR(dw, name, _dir, _ch) \
({ \
struct dw_edma_v0_ch_regs __iomem *__ch_regs; \
\
if ((dw)->chip->mf == EDMA_MF_EDMA_LEGACY) \
__ch_regs = REGS_ADDR(dw, type.legacy.ch); \
else if (_dir == EDMA_DIR_READ) \
__ch_regs = REGS_ADDR(dw, type.unroll.ch[_ch].rd); \
else \
__ch_regs = REGS_ADDR(dw, type.unroll.ch[_ch].wr); \
\
(void __iomem *)&__ch_regs->name; \
})
#define WR_REGISTER_LEGACY(name) \
{ #name, REGS_ADDR(type.legacy.wr_##name) }
#define REGISTER(dw, name) \
{ dw, #name, REGS_ADDR(dw, name) }
#define CTX_REGISTER(dw, name, dir, ch) \
{ dw, #name, REGS_CH_ADDR(dw, name, dir, ch), dir, ch }
#define WR_REGISTER(dw, name) \
{ dw, #name, REGS_ADDR(dw, wr_##name) }
#define RD_REGISTER(dw, name) \
{ dw, #name, REGS_ADDR(dw, rd_##name) }
#define WR_REGISTER_LEGACY(dw, name) \
{ dw, #name, REGS_ADDR(dw, type.legacy.wr_##name) }
#define RD_REGISTER_LEGACY(name) \
{ #name, REGS_ADDR(type.legacy.rd_##name) }
{ dw, #name, REGS_ADDR(dw, type.legacy.rd_##name) }
#define WR_REGISTER_UNROLL(name) \
{ #name, REGS_ADDR(type.unroll.wr_##name) }
#define RD_REGISTER_UNROLL(name) \
{ #name, REGS_ADDR(type.unroll.rd_##name) }
#define WR_REGISTER_UNROLL(dw, name) \
{ dw, #name, REGS_ADDR(dw, type.unroll.wr_##name) }
#define RD_REGISTER_UNROLL(dw, name) \
{ dw, #name, REGS_ADDR(dw, type.unroll.rd_##name) }
#define WRITE_STR "write"
#define READ_STR "read"
#define CHANNEL_STR "channel"
#define REGISTERS_STR "registers"
static struct dw_edma *dw;
static struct dw_edma_v0_regs __iomem *regs;
static struct {
void __iomem *start;
void __iomem *end;
} lim[2][EDMA_V0_MAX_NR_CH];
struct debugfs_entries {
struct dw_edma_debugfs_entry {
struct dw_edma *dw;
const char *name;
dma_addr_t *reg;
void __iomem *reg;
enum dw_edma_dir dir;
u16 ch;
};
static int dw_edma_debugfs_u32_get(void *data, u64 *val)
{
void __iomem *reg = (void __force __iomem *)data;
struct dw_edma_debugfs_entry *entry = data;
struct dw_edma *dw = entry->dw;
void __iomem *reg = entry->reg;
if (dw->chip->mf == EDMA_MF_EDMA_LEGACY &&
reg >= (void __iomem *)&regs->type.legacy.ch) {
void __iomem *ptr = &regs->type.legacy.ch;
u32 viewport_sel = 0;
reg >= REGS_ADDR(dw, type.legacy.ch)) {
unsigned long flags;
u16 ch;
u32 viewport_sel;
for (ch = 0; ch < dw->wr_ch_cnt; ch++)
if (lim[0][ch].start >= reg && reg < lim[0][ch].end) {
ptr += (reg - lim[0][ch].start);
goto legacy_sel_wr;
}
for (ch = 0; ch < dw->rd_ch_cnt; ch++)
if (lim[1][ch].start >= reg && reg < lim[1][ch].end) {
ptr += (reg - lim[1][ch].start);
goto legacy_sel_rd;
}
return 0;
legacy_sel_rd:
viewport_sel = BIT(31);
legacy_sel_wr:
viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
viewport_sel = entry->dir == EDMA_DIR_READ ? BIT(31) : 0;
viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, entry->ch);
raw_spin_lock_irqsave(&dw->lock, flags);
writel(viewport_sel, &regs->type.legacy.viewport_sel);
*val = readl(ptr);
writel(viewport_sel, REGS_ADDR(dw, type.legacy.viewport_sel));
*val = readl(reg);
raw_spin_unlock_irqrestore(&dw->lock, flags);
} else {
@ -93,222 +96,197 @@ static int dw_edma_debugfs_u32_get(void *data, u64 *val)
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_edma_debugfs_u32_get, NULL, "0x%08llx\n");
static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[],
int nr_entries, struct dentry *dir)
static void dw_edma_debugfs_create_x32(struct dw_edma *dw,
const struct dw_edma_debugfs_entry ini[],
int nr_entries, struct dentry *dent)
{
struct dw_edma_debugfs_entry *entries;
int i;
entries = devm_kcalloc(dw->chip->dev, nr_entries, sizeof(*entries),
GFP_KERNEL);
if (!entries)
return;
for (i = 0; i < nr_entries; i++) {
if (!debugfs_create_file_unsafe(entries[i].name, 0444, dir,
entries[i].reg, &fops_x32))
break;
entries[i] = ini[i];
debugfs_create_file_unsafe(entries[i].name, 0444, dent,
&entries[i], &fops_x32);
}
}
static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs,
struct dentry *dir)
static void dw_edma_debugfs_regs_ch(struct dw_edma *dw, enum dw_edma_dir dir,
u16 ch, struct dentry *dent)
{
int nr_entries;
const struct debugfs_entries debugfs_regs[] = {
REGISTER(ch_control1),
REGISTER(ch_control2),
REGISTER(transfer_size),
REGISTER(sar.lsb),
REGISTER(sar.msb),
REGISTER(dar.lsb),
REGISTER(dar.msb),
REGISTER(llp.lsb),
REGISTER(llp.msb),
struct dw_edma_debugfs_entry debugfs_regs[] = {
CTX_REGISTER(dw, ch_control1, dir, ch),
CTX_REGISTER(dw, ch_control2, dir, ch),
CTX_REGISTER(dw, transfer_size, dir, ch),
CTX_REGISTER(dw, sar.lsb, dir, ch),
CTX_REGISTER(dw, sar.msb, dir, ch),
CTX_REGISTER(dw, dar.lsb, dir, ch),
CTX_REGISTER(dw, dar.msb, dir, ch),
CTX_REGISTER(dw, llp.lsb, dir, ch),
CTX_REGISTER(dw, llp.msb, dir, ch),
};
int nr_entries;
nr_entries = ARRAY_SIZE(debugfs_regs);
dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, dir);
dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, dent);
}
static void dw_edma_debugfs_regs_wr(struct dentry *dir)
static noinline_for_stack void
dw_edma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent)
{
const struct debugfs_entries debugfs_regs[] = {
const struct dw_edma_debugfs_entry debugfs_regs[] = {
/* eDMA global registers */
WR_REGISTER(engine_en),
WR_REGISTER(doorbell),
WR_REGISTER(ch_arb_weight.lsb),
WR_REGISTER(ch_arb_weight.msb),
WR_REGISTER(dw, engine_en),
WR_REGISTER(dw, doorbell),
WR_REGISTER(dw, ch_arb_weight.lsb),
WR_REGISTER(dw, ch_arb_weight.msb),
/* eDMA interrupts registers */
WR_REGISTER(int_status),
WR_REGISTER(int_mask),
WR_REGISTER(int_clear),
WR_REGISTER(err_status),
WR_REGISTER(done_imwr.lsb),
WR_REGISTER(done_imwr.msb),
WR_REGISTER(abort_imwr.lsb),
WR_REGISTER(abort_imwr.msb),
WR_REGISTER(ch01_imwr_data),
WR_REGISTER(ch23_imwr_data),
WR_REGISTER(ch45_imwr_data),
WR_REGISTER(ch67_imwr_data),
WR_REGISTER(linked_list_err_en),
WR_REGISTER(dw, int_status),
WR_REGISTER(dw, int_mask),
WR_REGISTER(dw, int_clear),
WR_REGISTER(dw, err_status),
WR_REGISTER(dw, done_imwr.lsb),
WR_REGISTER(dw, done_imwr.msb),
WR_REGISTER(dw, abort_imwr.lsb),
WR_REGISTER(dw, abort_imwr.msb),
WR_REGISTER(dw, ch01_imwr_data),
WR_REGISTER(dw, ch23_imwr_data),
WR_REGISTER(dw, ch45_imwr_data),
WR_REGISTER(dw, ch67_imwr_data),
WR_REGISTER(dw, linked_list_err_en),
};
const struct debugfs_entries debugfs_unroll_regs[] = {
const struct dw_edma_debugfs_entry debugfs_unroll_regs[] = {
/* eDMA channel context grouping */
WR_REGISTER_UNROLL(engine_chgroup),
WR_REGISTER_UNROLL(engine_hshake_cnt.lsb),
WR_REGISTER_UNROLL(engine_hshake_cnt.msb),
WR_REGISTER_UNROLL(ch0_pwr_en),
WR_REGISTER_UNROLL(ch1_pwr_en),
WR_REGISTER_UNROLL(ch2_pwr_en),
WR_REGISTER_UNROLL(ch3_pwr_en),
WR_REGISTER_UNROLL(ch4_pwr_en),
WR_REGISTER_UNROLL(ch5_pwr_en),
WR_REGISTER_UNROLL(ch6_pwr_en),
WR_REGISTER_UNROLL(ch7_pwr_en),
WR_REGISTER_UNROLL(dw, engine_chgroup),
WR_REGISTER_UNROLL(dw, engine_hshake_cnt.lsb),
WR_REGISTER_UNROLL(dw, engine_hshake_cnt.msb),
WR_REGISTER_UNROLL(dw, ch0_pwr_en),
WR_REGISTER_UNROLL(dw, ch1_pwr_en),
WR_REGISTER_UNROLL(dw, ch2_pwr_en),
WR_REGISTER_UNROLL(dw, ch3_pwr_en),
WR_REGISTER_UNROLL(dw, ch4_pwr_en),
WR_REGISTER_UNROLL(dw, ch5_pwr_en),
WR_REGISTER_UNROLL(dw, ch6_pwr_en),
WR_REGISTER_UNROLL(dw, ch7_pwr_en),
};
struct dentry *regs_dir, *ch_dir;
struct dentry *regs_dent, *ch_dent;
int nr_entries, i;
char name[16];
regs_dir = debugfs_create_dir(WRITE_STR, dir);
if (!regs_dir)
return;
regs_dent = debugfs_create_dir(WRITE_STR, dent);
nr_entries = ARRAY_SIZE(debugfs_regs);
dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent);
if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
regs_dir);
dw_edma_debugfs_create_x32(dw, debugfs_unroll_regs, nr_entries,
regs_dent);
}
for (i = 0; i < dw->wr_ch_cnt; i++) {
snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);
ch_dir = debugfs_create_dir(name, regs_dir);
if (!ch_dir)
return;
ch_dent = debugfs_create_dir(name, regs_dent);
dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].wr, ch_dir);
lim[0][i].start = &regs->type.unroll.ch[i].wr;
lim[0][i].end = &regs->type.unroll.ch[i].padding_1[0];
dw_edma_debugfs_regs_ch(dw, EDMA_DIR_WRITE, i, ch_dent);
}
}
static void dw_edma_debugfs_regs_rd(struct dentry *dir)
static noinline_for_stack void dw_edma_debugfs_regs_rd(struct dw_edma *dw,
struct dentry *dent)
{
const struct debugfs_entries debugfs_regs[] = {
const struct dw_edma_debugfs_entry debugfs_regs[] = {
/* eDMA global registers */
RD_REGISTER(engine_en),
RD_REGISTER(doorbell),
RD_REGISTER(ch_arb_weight.lsb),
RD_REGISTER(ch_arb_weight.msb),
RD_REGISTER(dw, engine_en),
RD_REGISTER(dw, doorbell),
RD_REGISTER(dw, ch_arb_weight.lsb),
RD_REGISTER(dw, ch_arb_weight.msb),
/* eDMA interrupts registers */
RD_REGISTER(int_status),
RD_REGISTER(int_mask),
RD_REGISTER(int_clear),
RD_REGISTER(err_status.lsb),
RD_REGISTER(err_status.msb),
RD_REGISTER(linked_list_err_en),
RD_REGISTER(done_imwr.lsb),
RD_REGISTER(done_imwr.msb),
RD_REGISTER(abort_imwr.lsb),
RD_REGISTER(abort_imwr.msb),
RD_REGISTER(ch01_imwr_data),
RD_REGISTER(ch23_imwr_data),
RD_REGISTER(ch45_imwr_data),
RD_REGISTER(ch67_imwr_data),
RD_REGISTER(dw, int_status),
RD_REGISTER(dw, int_mask),
RD_REGISTER(dw, int_clear),
RD_REGISTER(dw, err_status.lsb),
RD_REGISTER(dw, err_status.msb),
RD_REGISTER(dw, linked_list_err_en),
RD_REGISTER(dw, done_imwr.lsb),
RD_REGISTER(dw, done_imwr.msb),
RD_REGISTER(dw, abort_imwr.lsb),
RD_REGISTER(dw, abort_imwr.msb),
RD_REGISTER(dw, ch01_imwr_data),
RD_REGISTER(dw, ch23_imwr_data),
RD_REGISTER(dw, ch45_imwr_data),
RD_REGISTER(dw, ch67_imwr_data),
};
const struct debugfs_entries debugfs_unroll_regs[] = {
const struct dw_edma_debugfs_entry debugfs_unroll_regs[] = {
/* eDMA channel context grouping */
RD_REGISTER_UNROLL(engine_chgroup),
RD_REGISTER_UNROLL(engine_hshake_cnt.lsb),
RD_REGISTER_UNROLL(engine_hshake_cnt.msb),
RD_REGISTER_UNROLL(ch0_pwr_en),
RD_REGISTER_UNROLL(ch1_pwr_en),
RD_REGISTER_UNROLL(ch2_pwr_en),
RD_REGISTER_UNROLL(ch3_pwr_en),
RD_REGISTER_UNROLL(ch4_pwr_en),
RD_REGISTER_UNROLL(ch5_pwr_en),
RD_REGISTER_UNROLL(ch6_pwr_en),
RD_REGISTER_UNROLL(ch7_pwr_en),
RD_REGISTER_UNROLL(dw, engine_chgroup),
RD_REGISTER_UNROLL(dw, engine_hshake_cnt.lsb),
RD_REGISTER_UNROLL(dw, engine_hshake_cnt.msb),
RD_REGISTER_UNROLL(dw, ch0_pwr_en),
RD_REGISTER_UNROLL(dw, ch1_pwr_en),
RD_REGISTER_UNROLL(dw, ch2_pwr_en),
RD_REGISTER_UNROLL(dw, ch3_pwr_en),
RD_REGISTER_UNROLL(dw, ch4_pwr_en),
RD_REGISTER_UNROLL(dw, ch5_pwr_en),
RD_REGISTER_UNROLL(dw, ch6_pwr_en),
RD_REGISTER_UNROLL(dw, ch7_pwr_en),
};
struct dentry *regs_dir, *ch_dir;
struct dentry *regs_dent, *ch_dent;
int nr_entries, i;
char name[16];
regs_dir = debugfs_create_dir(READ_STR, dir);
if (!regs_dir)
return;
regs_dent = debugfs_create_dir(READ_STR, dent);
nr_entries = ARRAY_SIZE(debugfs_regs);
dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent);
if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
regs_dir);
dw_edma_debugfs_create_x32(dw, debugfs_unroll_regs, nr_entries,
regs_dent);
}
for (i = 0; i < dw->rd_ch_cnt; i++) {
snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);
ch_dir = debugfs_create_dir(name, regs_dir);
if (!ch_dir)
return;
ch_dent = debugfs_create_dir(name, regs_dent);
dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].rd, ch_dir);
lim[1][i].start = &regs->type.unroll.ch[i].rd;
lim[1][i].end = &regs->type.unroll.ch[i].padding_2[0];
dw_edma_debugfs_regs_ch(dw, EDMA_DIR_READ, i, ch_dent);
}
}
static void dw_edma_debugfs_regs(void)
static void dw_edma_debugfs_regs(struct dw_edma *dw)
{
const struct debugfs_entries debugfs_regs[] = {
REGISTER(ctrl_data_arb_prior),
REGISTER(ctrl),
const struct dw_edma_debugfs_entry debugfs_regs[] = {
REGISTER(dw, ctrl_data_arb_prior),
REGISTER(dw, ctrl),
};
struct dentry *regs_dir;
struct dentry *regs_dent;
int nr_entries;
regs_dir = debugfs_create_dir(REGISTERS_STR, dw->debugfs);
if (!regs_dir)
return;
regs_dent = debugfs_create_dir(REGISTERS_STR, dw->dma.dbg_dev_root);
nr_entries = ARRAY_SIZE(debugfs_regs);
dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent);
dw_edma_debugfs_regs_wr(regs_dir);
dw_edma_debugfs_regs_rd(regs_dir);
dw_edma_debugfs_regs_wr(dw, regs_dent);
dw_edma_debugfs_regs_rd(dw, regs_dent);
}
void dw_edma_v0_debugfs_on(struct dw_edma *_dw)
void dw_edma_v0_debugfs_on(struct dw_edma *dw)
{
dw = _dw;
if (!dw)
if (!debugfs_initialized())
return;
regs = dw->chip->reg_base;
if (!regs)
return;
debugfs_create_u32("mf", 0444, dw->dma.dbg_dev_root, &dw->chip->mf);
debugfs_create_u16("wr_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->wr_ch_cnt);
debugfs_create_u16("rd_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->rd_ch_cnt);
dw->debugfs = debugfs_create_dir(dw->name, NULL);
if (!dw->debugfs)
return;
debugfs_create_u32("mf", 0444, dw->debugfs, &dw->chip->mf);
debugfs_create_u16("wr_ch_cnt", 0444, dw->debugfs, &dw->wr_ch_cnt);
debugfs_create_u16("rd_ch_cnt", 0444, dw->debugfs, &dw->rd_ch_cnt);
dw_edma_debugfs_regs();
}
void dw_edma_v0_debugfs_off(struct dw_edma *_dw)
{
dw = _dw;
if (!dw)
return;
debugfs_remove_recursive(dw->debugfs);
dw->debugfs = NULL;
dw_edma_debugfs_regs(dw);
}
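
The debugfs rework above replaces the file-scope dw/regs statics, which
a second eDMA instance would have clobbered, with a context object per
debugfs entry. A sketch of the resulting registration pattern (names
are illustrative):

#include <linux/debugfs.h>

struct dw_edma;

struct edma_dbg_entry {
	struct dw_edma *dw;	/* owning instance, no longer a global */
	const char *name;
	void __iomem *reg;	/* register this file reads */
};

static void edma_dbg_add(struct dentry *dent, struct edma_dbg_entry *entry,
			 const struct file_operations *fops)
{
	/* the entry itself becomes the file's private data */
	debugfs_create_file_unsafe(entry->name, 0444, dent, entry, fops);
}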


@ -13,15 +13,10 @@
#ifdef CONFIG_DEBUG_FS
void dw_edma_v0_debugfs_on(struct dw_edma *dw);
void dw_edma_v0_debugfs_off(struct dw_edma *dw);
#else
static inline void dw_edma_v0_debugfs_on(struct dw_edma *dw)
{
}
static inline void dw_edma_v0_debugfs_off(struct dw_edma *dw)
{
}
#endif /* CONFIG_DEBUG_FS */
#endif /* _DW_EDMA_V0_DEBUG_FS_H */


@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/**
/*
* Host side test driver to test endpoint functionality
*
* Copyright (C) 2017 Texas Instruments
@ -72,6 +72,7 @@
#define PCI_DEVICE_ID_TI_J7200 0xb00f
#define PCI_DEVICE_ID_TI_AM64 0xb010
#define PCI_DEVICE_ID_LS1088A 0x80c0
#define PCI_DEVICE_ID_IMX8 0x0808
#define is_am654_pci_dev(pdev) \
((pdev)->device == PCI_DEVICE_ID_TI_AM654)
@ -980,6 +981,7 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
.driver_data = (kernel_ulong_t)&default_data,
},
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
.driver_data = (kernel_ulong_t)&default_data,
},


@ -9,6 +9,7 @@ config PCI_MVEBU
depends on MVEBU_MBUS
depends on ARM
depends on OF
depends on BROKEN
select PCI_BRIDGE_EMUL
help
Add support for Marvell EBU PCIe controller. This PCIe controller
@ -285,7 +286,7 @@ config PCIE_BRCMSTB
config PCI_HYPERV_INTERFACE
tristate "Hyper-V PCI Interface"
depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI && PCI_MSI
depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI
help
The Hyper-V PCI Interface is a helper driver that allows other drivers to
have a common interface with the Hyper-V PCI frontend driver.


@ -92,10 +92,31 @@ config PCI_EXYNOS
functions to implement the driver.
config PCI_IMX6
bool "Freescale i.MX6/7/8 PCIe controller"
bool
config PCI_IMX6_HOST
bool "Freescale i.MX6/7/8 PCIe controller host mode"
depends on ARCH_MXC || COMPILE_TEST
depends on PCI_MSI
select PCIE_DW_HOST
select PCI_IMX6
help
Enables support for the PCIe controller in the i.MX SoCs to
work in Root Complex mode. The PCI controller on i.MX is based
on DesignWare hardware and therefore the driver re-uses the
DesignWare core functions to implement the driver.
config PCI_IMX6_EP
bool "Freescale i.MX6/7/8 PCIe controller endpoint mode"
depends on ARCH_MXC || COMPILE_TEST
depends on PCI_ENDPOINT
select PCIE_DW_EP
select PCI_IMX6
help
Enables support for the PCIe controller in the i.MX SoCs to
work in endpoint mode. The PCI controller on i.MX is based
on DesignWare hardware and therefore the driver re-uses the
DesignWare core functions to implement the driver.
config PCIE_SPEAR13XX
bool "STMicroelectronics SPEAr PCIe controller"


@ -840,7 +840,7 @@ static int dra7xx_pcie_probe(struct platform_device *pdev)
}
dra7xx->mode = mode;
ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
ret = devm_request_threaded_irq(dev, irq, NULL, dra7xx_pcie_irq_handler,
IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
if (ret) {
dev_err(dev, "failed to request irq\n");
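
Passing a NULL hard handler to devm_request_threaded_irq(), as above,
makes the core run the handler in a kernel thread where sleeping is
allowed, which is what lets the link-up processing move out of hard-IRQ
context. The generic pattern, in sketch form (a shared threaded IRQ
with no hard handler ordinarily also wants IRQF_ONESHOT; names are
illustrative):

#include <linux/interrupt.h>

static irqreturn_t my_thread_fn(int irq, void *data)
{
	/* process context: taking mutexes or sleeping is fine here */
	return IRQ_HANDLED;
}

/* in probe(): no hard handler, so the core acks the line and wakes the
 * thread; IRQF_ONESHOT keeps the line masked until the thread returns */
ret = devm_request_threaded_irq(dev, irq, NULL, my_thread_fn,
				IRQF_ONESHOT | IRQF_SHARED,
				"my-pcie", priv);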


@ -52,6 +52,9 @@ enum imx6_pcie_variants {
IMX8MQ,
IMX8MM,
IMX8MP,
IMX8MQ_EP,
IMX8MM_EP,
IMX8MP_EP,
};
#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
@ -60,6 +63,7 @@ enum imx6_pcie_variants {
struct imx6_pcie_drvdata {
enum imx6_pcie_variants variant;
enum dw_pcie_device_mode mode;
u32 flags;
int dbi_length;
const char *gpr;
@ -152,24 +156,39 @@ struct imx6_pcie {
static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
{
WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
imx6_pcie->drvdata->variant != IMX8MQ_EP &&
imx6_pcie->drvdata->variant != IMX8MM &&
imx6_pcie->drvdata->variant != IMX8MP);
imx6_pcie->drvdata->variant != IMX8MM_EP &&
imx6_pcie->drvdata->variant != IMX8MP &&
imx6_pcie->drvdata->variant != IMX8MP_EP);
return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}
static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
{
unsigned int mask, val;
unsigned int mask, val, mode;
if (imx6_pcie->drvdata->variant == IMX8MQ &&
imx6_pcie->controller_id == 1) {
mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
PCI_EXP_TYPE_ROOT_PORT);
} else {
if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE)
mode = PCI_EXP_TYPE_ENDPOINT;
else
mode = PCI_EXP_TYPE_ROOT_PORT;
switch (imx6_pcie->drvdata->variant) {
case IMX8MQ:
case IMX8MQ_EP:
if (imx6_pcie->controller_id == 1) {
mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
mode);
} else {
mask = IMX6Q_GPR12_DEVICE_TYPE;
val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode);
}
break;
default:
mask = IMX6Q_GPR12_DEVICE_TYPE;
val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
PCI_EXP_TYPE_ROOT_PORT);
val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode);
break;
}
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
@ -304,13 +323,16 @@ static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
switch (imx6_pcie->drvdata->variant) {
case IMX8MM:
case IMX8MM_EP:
case IMX8MP:
case IMX8MP_EP:
/*
* The PHY initialization had been done in the PHY
* driver, break here directly.
*/
break;
case IMX8MQ:
case IMX8MQ_EP:
/*
* TODO: Currently this code assumes external
* oscillator is being used
@ -561,8 +583,11 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
case IMX7D:
break;
case IMX8MM:
case IMX8MM_EP:
case IMX8MQ:
case IMX8MQ_EP:
case IMX8MP:
case IMX8MP_EP:
ret = clk_prepare_enable(imx6_pcie->pcie_aux);
if (ret) {
dev_err(dev, "unable to enable pcie_aux clock\n");
@ -606,8 +631,11 @@ static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
break;
case IMX8MM:
case IMX8MM_EP:
case IMX8MQ:
case IMX8MQ_EP:
case IMX8MP:
case IMX8MP_EP:
clk_disable_unprepare(imx6_pcie->pcie_aux);
break;
default:
@ -672,10 +700,13 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
switch (imx6_pcie->drvdata->variant) {
case IMX7D:
case IMX8MQ:
case IMX8MQ_EP:
reset_control_assert(imx6_pcie->pciephy_reset);
fallthrough;
case IMX8MM:
case IMX8MM_EP:
case IMX8MP:
case IMX8MP_EP:
reset_control_assert(imx6_pcie->apps_reset);
break;
case IMX6SX:
@ -713,6 +744,7 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
switch (imx6_pcie->drvdata->variant) {
case IMX8MQ:
case IMX8MQ_EP:
reset_control_deassert(imx6_pcie->pciephy_reset);
break;
case IMX7D:
@ -751,7 +783,9 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
break;
case IMX6Q: /* Nothing to do */
case IMX8MM:
case IMX8MM_EP:
case IMX8MP:
case IMX8MP_EP:
break;
}
@ -800,8 +834,11 @@ static void imx6_pcie_ltssm_enable(struct device *dev)
break;
case IMX7D:
case IMX8MQ:
case IMX8MQ_EP:
case IMX8MM:
case IMX8MM_EP:
case IMX8MP:
case IMX8MP_EP:
reset_control_deassert(imx6_pcie->apps_reset);
break;
}
@ -820,8 +857,11 @@ static void imx6_pcie_ltssm_disable(struct device *dev)
break;
case IMX7D:
case IMX8MQ:
case IMX8MQ_EP:
case IMX8MM:
case IMX8MM_EP:
case IMX8MP:
case IMX8MP_EP:
reset_control_assert(imx6_pcie->apps_reset);
break;
}
@ -1003,8 +1043,104 @@ static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
static const struct dw_pcie_ops dw_pcie_ops = {
.start_link = imx6_pcie_start_link,
.stop_link = imx6_pcie_stop_link,
};
static void imx6_pcie_ep_init(struct dw_pcie_ep *ep)
{
enum pci_barno bar;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
for (bar = BAR_0; bar <= BAR_5; bar++)
dw_pcie_ep_reset_bar(pci, bar);
}
static int imx6_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
enum pci_epc_irq_type type,
u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
case PCI_EPC_IRQ_LEGACY:
return dw_pcie_ep_raise_legacy_irq(ep, func_no);
case PCI_EPC_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
case PCI_EPC_IRQ_MSIX:
return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
default:
dev_err(pci->dev, "UNKNOWN IRQ type\n");
return -EINVAL;
}
return 0;
}
static const struct pci_epc_features imx8m_pcie_epc_features = {
.linkup_notifier = false,
.msi_capable = true,
.msix_capable = false,
.reserved_bar = 1 << BAR_1 | 1 << BAR_3,
.align = SZ_64K,
};
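
The reserved_bar mask above deserves a gloss (the rationale is an
assumption, not stated in this pull):

/* Function drivers may not claim BAR_1 or BAR_3, plausibly because
 * BAR_0 and BAR_2 are 64-bit BARs whose upper halves consume the
 * following BAR slots; .align matches the 64K iATU page size that
 * imx6_add_pcie_ep() sets below. */
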
static const struct pci_epc_features*
imx6_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
return &imx8m_pcie_epc_features;
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
.ep_init = imx6_pcie_ep_init,
.raise_irq = imx6_pcie_ep_raise_irq,
.get_features = imx6_pcie_ep_get_features,
};
static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
struct platform_device *pdev)
{
int ret;
unsigned int pcie_dbi2_offset;
struct dw_pcie_ep *ep;
struct resource *res;
struct dw_pcie *pci = imx6_pcie->pci;
struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = pci->dev;
imx6_pcie_host_init(pp);
ep = &pci->ep;
ep->ops = &pcie_ep_ops;
switch (imx6_pcie->drvdata->variant) {
case IMX8MQ_EP:
case IMX8MM_EP:
case IMX8MP_EP:
pcie_dbi2_offset = SZ_1M;
break;
default:
pcie_dbi2_offset = SZ_4K;
break;
}
pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
if (!res)
return -EINVAL;
ep->phys_base = res->start;
ep->addr_size = resource_size(res);
ep->page_size = SZ_64K;
ret = dw_pcie_ep_init(ep);
if (ret) {
dev_err(dev, "failed to initialize endpoint\n");
return ret;
}
/* Start LTSSM. */
imx6_pcie_ltssm_enable(dev);
return 0;
}
static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
{
struct device *dev = imx6_pcie->pci->dev;
@ -1166,6 +1302,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
"pcie_inbound_axi clock missing or invalid\n");
break;
case IMX8MQ:
case IMX8MQ_EP:
imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
if (IS_ERR(imx6_pcie->pcie_aux))
return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
@ -1190,7 +1327,9 @@ static int imx6_pcie_probe(struct platform_device *pdev)
}
break;
case IMX8MM:
case IMX8MM_EP:
case IMX8MP:
case IMX8MP_EP:
imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
if (IS_ERR(imx6_pcie->pcie_aux))
return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
@ -1279,15 +1418,22 @@ static int imx6_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
ret = dw_pcie_host_init(&pci->pp);
if (ret < 0)
return ret;
if (pci_msi_enabled()) {
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
val |= PCI_MSI_FLAGS_ENABLE;
dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
}
if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
ret = imx6_add_pcie_ep(imx6_pcie, pdev);
if (ret < 0)
return ret;
} else {
ret = dw_pcie_host_init(&pci->pp);
if (ret < 0)
return ret;
if (pci_msi_enabled()) {
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
val |= PCI_MSI_FLAGS_ENABLE;
dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
}
}
return 0;
@ -1343,6 +1489,21 @@ static const struct imx6_pcie_drvdata drvdata[] = {
.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx8mp-iomuxc-gpr",
},
[IMX8MQ_EP] = {
.variant = IMX8MQ_EP,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mq-iomuxc-gpr",
},
[IMX8MM_EP] = {
.variant = IMX8MM_EP,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mm-iomuxc-gpr",
},
[IMX8MP_EP] = {
.variant = IMX8MP_EP,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mp-iomuxc-gpr",
},
};
static const struct of_device_id imx6_pcie_of_match[] = {
@ -1353,6 +1514,9 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
{ .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
{ .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
{ .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
{ .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
{},
};


@ -583,6 +583,10 @@ static int bt1_pcie_add_port(struct bt1_pcie *btpci)
struct device *dev = &btpci->pdev->dev;
int ret;
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret)
return ret;
btpci->dw.version = DW_PCIE_VER_460A;
btpci->dw.dev = dev;
btpci->dw.ops = &bt1_pcie_ops;


@ -612,8 +612,11 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct pci_epc *epc = ep->epc;
dw_pcie_edma_remove(pci);
pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
epc->mem->window.page_size);
@ -785,6 +788,10 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
goto err_exit_epc_mem;
}
ret = dw_pcie_edma_detect(pci);
if (ret)
goto err_free_epc_mem;
if (ep->ops->get_features) {
epc_features = ep->ops->get_features(ep);
if (epc_features->core_init_notifier)
@ -793,10 +800,13 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ret = dw_pcie_ep_init_complete(ep);
if (ret)
goto err_free_epc_mem;
goto err_remove_edma;
return 0;
err_remove_edma:
dw_pcie_edma_remove(pci);
err_free_epc_mem:
pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
epc->mem->window.page_size);


@ -366,7 +366,17 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
dw_chained_msi_isr, pp);
}
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
/*
* Even though the iMSI-RX module supports 64-bit addresses, some
* peripheral PCIe devices may lack 64-bit message support. In
* order not to miss MSI TLPs from those devices, the MSI target
* address has to be within the lowest 4GB.
*
* Note: until a better alternative is found, the reservation is
* done by allocating from the artificially limited DMA-coherent
* memory.
*/
ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
if (ret)
dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
@ -467,14 +477,18 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
dw_pcie_iatu_detect(pci);
ret = dw_pcie_setup_rc(pp);
ret = dw_pcie_edma_detect(pci);
if (ret)
goto err_free_msi;
ret = dw_pcie_setup_rc(pp);
if (ret)
goto err_remove_edma;
if (!dw_pcie_link_up(pci)) {
ret = dw_pcie_start_link(pci);
if (ret)
goto err_free_msi;
goto err_remove_edma;
}
/* Ignore errors, the link may come up later */
@ -491,6 +505,9 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
err_stop_link:
dw_pcie_stop_link(pci);
err_remove_edma:
dw_pcie_edma_remove(pci);
err_free_msi:
if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
@ -512,6 +529,8 @@ void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
dw_pcie_stop_link(pci);
dw_pcie_edma_remove(pci);
if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);


@ -12,6 +12,7 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma/edma.h>
#include <linux/gpio/consumer.h>
#include <linux/ioport.h>
#include <linux/of.h>
@ -142,6 +143,18 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
if (!pci->atu_size)
pci->atu_size = SZ_4K;
/* eDMA region can be mapped to a custom base address */
if (!pci->edma.reg_base) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
if (res) {
pci->edma.reg_base = devm_ioremap_resource(pci->dev, res);
if (IS_ERR(pci->edma.reg_base))
return PTR_ERR(pci->edma.reg_base);
} else if (pci->atu_size >= 2 * DEFAULT_DBI_DMA_OFFSET) {
pci->edma.reg_base = pci->atu_base + DEFAULT_DBI_DMA_OFFSET;
}
}
/* LLDD is supposed to manually switch the clocks and resets state */
if (dw_pcie_cap_is(pci, REQ_RES)) {
ret = dw_pcie_get_clocks(pci);
@ -782,6 +795,188 @@ void dw_pcie_iatu_detect(struct dw_pcie *pci)
pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G);
}
static u32 dw_pcie_readl_dma(struct dw_pcie *pci, u32 reg)
{
u32 val = 0;
int ret;
if (pci->ops && pci->ops->read_dbi)
return pci->ops->read_dbi(pci, pci->edma.reg_base, reg, 4);
ret = dw_pcie_read(pci->edma.reg_base + reg, 4, &val);
if (ret)
dev_err(pci->dev, "Read DMA address failed\n");
return val;
}
static int dw_pcie_edma_irq_vector(struct device *dev, unsigned int nr)
{
struct platform_device *pdev = to_platform_device(dev);
char name[6];
int ret;
if (nr >= EDMA_MAX_WR_CH + EDMA_MAX_RD_CH)
return -EINVAL;
ret = platform_get_irq_byname_optional(pdev, "dma");
if (ret > 0)
return ret;
snprintf(name, sizeof(name), "dma%u", nr);
return platform_get_irq_byname_optional(pdev, name);
}
static struct dw_edma_core_ops dw_pcie_edma_ops = {
.irq_vector = dw_pcie_edma_irq_vector,
};
static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
{
u32 val;
/*
* Indirect eDMA CSR access was completely removed in v5.40a, so no
* space is reserved for the eDMA channels viewport anymore and the
* former DMA CTRL register is no longer guaranteed to read back as
* all ones (FFs).
*/
if (dw_pcie_ver_is_ge(pci, 540A))
val = 0xFFFFFFFF;
else
val = dw_pcie_readl_dbi(pci, PCIE_DMA_VIEWPORT_BASE + PCIE_DMA_CTRL);
if (val == 0xFFFFFFFF && pci->edma.reg_base) {
pci->edma.mf = EDMA_MF_EDMA_UNROLL;
val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL);
} else if (val != 0xFFFFFFFF) {
pci->edma.mf = EDMA_MF_EDMA_LEGACY;
pci->edma.reg_base = pci->dbi_base + PCIE_DMA_VIEWPORT_BASE;
} else {
return -ENODEV;
}
pci->edma.dev = pci->dev;
if (!pci->edma.ops)
pci->edma.ops = &dw_pcie_edma_ops;
pci->edma.flags |= DW_EDMA_CHIP_LOCAL;
pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val);
pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val);
/* Sanity-check the channel counts in case the mapping was incorrect */
if (!pci->edma.ll_wr_cnt || pci->edma.ll_wr_cnt > EDMA_MAX_WR_CH ||
!pci->edma.ll_rd_cnt || pci->edma.ll_rd_cnt > EDMA_MAX_RD_CH)
return -EINVAL;
return 0;
}
static int dw_pcie_edma_irq_verify(struct dw_pcie *pci)
{
struct platform_device *pdev = to_platform_device(pci->dev);
u16 ch_cnt = pci->edma.ll_wr_cnt + pci->edma.ll_rd_cnt;
char name[6];
int ret;
if (pci->edma.nr_irqs == 1)
return 0;
else if (pci->edma.nr_irqs > 1)
return pci->edma.nr_irqs != ch_cnt ? -EINVAL : 0;
ret = platform_get_irq_byname_optional(pdev, "dma");
if (ret > 0) {
pci->edma.nr_irqs = 1;
return 0;
}
for (; pci->edma.nr_irqs < ch_cnt; pci->edma.nr_irqs++) {
snprintf(name, sizeof(name), "dma%d", pci->edma.nr_irqs);
ret = platform_get_irq_byname_optional(pdev, name);
if (ret <= 0)
return -EINVAL;
}
return 0;
}
static int dw_pcie_edma_ll_alloc(struct dw_pcie *pci)
{
struct dw_edma_region *ll;
dma_addr_t paddr;
int i;
for (i = 0; i < pci->edma.ll_wr_cnt; i++) {
ll = &pci->edma.ll_region_wr[i];
ll->sz = DMA_LLP_MEM_SIZE;
ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz,
&paddr, GFP_KERNEL);
if (!ll->vaddr.mem)
return -ENOMEM;
ll->paddr = paddr;
}
for (i = 0; i < pci->edma.ll_rd_cnt; i++) {
ll = &pci->edma.ll_region_rd[i];
ll->sz = DMA_LLP_MEM_SIZE;
ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz,
&paddr, GFP_KERNEL);
if (!ll->vaddr.mem)
return -ENOMEM;
ll->paddr = paddr;
}
return 0;
}
int dw_pcie_edma_detect(struct dw_pcie *pci)
{
int ret;
/* Don't fail if no eDMA was found (for backward compatibility) */
ret = dw_pcie_edma_find_chip(pci);
if (ret)
return 0;
/* Don't fail on IRQ verification (for backward compatibility) */
ret = dw_pcie_edma_irq_verify(pci);
if (ret) {
dev_err(pci->dev, "Invalid eDMA IRQs found\n");
return 0;
}
ret = dw_pcie_edma_ll_alloc(pci);
if (ret) {
dev_err(pci->dev, "Couldn't allocate LLP memory\n");
return ret;
}
/* Don't fail if the DW eDMA driver can't find the device */
ret = dw_edma_probe(&pci->edma);
if (ret && ret != -ENODEV) {
dev_err(pci->dev, "Couldn't register eDMA device\n");
return ret;
}
dev_info(pci->dev, "eDMA: unroll %s, %hu wr, %hu rd\n",
pci->edma.mf == EDMA_MF_EDMA_UNROLL ? "T" : "F",
pci->edma.ll_wr_cnt, pci->edma.ll_rd_cnt);
return 0;
}
void dw_pcie_edma_remove(struct dw_pcie *pci)
{
dw_edma_remove(&pci->edma);
}
void dw_pcie_setup(struct dw_pcie *pci)
{
u32 val;


@ -15,6 +15,7 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dma/edma.h>
#include <linux/gpio/consumer.h>
#include <linux/irq.h>
#include <linux/msi.h>
@ -31,6 +32,7 @@
#define DW_PCIE_VER_480A 0x3438302a
#define DW_PCIE_VER_490A 0x3439302a
#define DW_PCIE_VER_520A 0x3532302a
#define DW_PCIE_VER_540A 0x3534302a
#define __dw_pcie_ver_cmp(_pci, _ver, _op) \
((_pci)->version _op DW_PCIE_VER_ ## _ver)
@ -167,6 +169,18 @@
#define PCIE_MSIX_DOORBELL 0x948
#define PCIE_MSIX_DOORBELL_PF_SHIFT 24
/*
* eDMA CSRs. DW PCIe IP-core v4.70a and older had the eDMA registers
* accessible via the Port Logic register space. The unrolled mapping was
* introduced afterwards so the eDMA and iATU could be accessed via a
* dedicated register space.
*/
#define PCIE_DMA_VIEWPORT_BASE 0x970
#define PCIE_DMA_UNROLL_BASE 0x80000
#define PCIE_DMA_CTRL 0x008
#define PCIE_DMA_NUM_WR_CHAN GENMASK(3, 0)
#define PCIE_DMA_NUM_RD_CHAN GENMASK(19, 16)
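As a side note on these masks: the write and read channel counts occupy bits 3:0 and 19:16 of the DMA CTRL register, which dw_pcie_edma_find_chip() below extracts with FIELD_GET(). A minimal standalone sketch of that decode, using a hypothetical register value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ctrl = 0x00040008;           /* hypothetical PCIE_DMA_CTRL readout */
	unsigned int wr = ctrl & 0xf;         /* PCIE_DMA_NUM_WR_CHAN, bits 3:0 */
	unsigned int rd = (ctrl >> 16) & 0xf; /* PCIE_DMA_NUM_RD_CHAN, bits 19:16 */

	printf("%u write / %u read channels\n", wr, rd); /* 8 write / 4 read */
	return 0;
}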
#define PCIE_PL_CHK_REG_CONTROL_STATUS 0xB20
#define PCIE_PL_CHK_REG_CHK_REG_START BIT(0)
#define PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS BIT(1)
@ -215,6 +229,7 @@
* this offset, if atu_base not set.
*/
#define DEFAULT_DBI_ATU_OFFSET (0x3 << 20)
#define DEFAULT_DBI_DMA_OFFSET PCIE_DMA_UNROLL_BASE
#define MAX_MSI_IRQS 256
#define MAX_MSI_IRQS_PER_CTRL 32
@ -226,6 +241,9 @@
#define MAX_IATU_IN 256
#define MAX_IATU_OUT 256
/* Default eDMA LLP memory size */
#define DMA_LLP_MEM_SIZE PAGE_SIZE
struct dw_pcie;
struct dw_pcie_rp;
struct dw_pcie_ep;
@ -369,6 +387,7 @@ struct dw_pcie {
int num_lanes;
int link_gen;
u8 n_fts[2];
struct dw_edma_chip edma;
struct clk_bulk_data app_clks[DW_PCIE_NUM_APP_CLKS];
struct clk_bulk_data core_clks[DW_PCIE_NUM_CORE_CLKS];
struct reset_control_bulk_data app_rsts[DW_PCIE_NUM_APP_RSTS];
@ -408,6 +427,8 @@ int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
void dw_pcie_setup(struct dw_pcie *pci);
void dw_pcie_iatu_detect(struct dw_pcie *pci);
int dw_pcie_edma_detect(struct dw_pcie *pci);
void dw_pcie_edma_remove(struct dw_pcie *pci);
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{


@ -450,4 +450,3 @@ static struct platform_driver histb_pcie_platform_driver = {
module_platform_driver(histb_pcie_platform_driver);
MODULE_DESCRIPTION("HiSilicon STB PCIe host controller driver");
MODULE_LICENSE("GPL v2");


@ -1534,8 +1534,19 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
return ret;
}
static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct qcom_pcie *pcie = to_qcom_pcie(pci);
qcom_ep_reset_assert(pcie);
phy_power_off(pcie->phy);
pcie->cfg->ops->deinit(pcie);
}
static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
.host_init = qcom_pcie_host_init,
.host_deinit = qcom_pcie_host_deinit,
};
/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
@ -1817,6 +1828,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
{ .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 },
@ -1826,6 +1838,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
{ }


@ -286,6 +286,7 @@ struct tegra_pcie_dw {
struct gpio_desc *pex_refclk_sel_gpiod;
unsigned int pex_rst_irq;
int ep_state;
long link_status;
};
static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
@ -449,9 +450,13 @@ static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
struct tegra_pcie_dw *pcie = arg;
struct dw_pcie_ep *ep = &pcie->pci.ep;
struct dw_pcie *pci = &pcie->pci;
u32 val, speed;
if (test_and_clear_bit(0, &pcie->link_status))
dw_pcie_ep_linkup(ep);
speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
PCI_EXP_LNKSTA_CLS;
clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
@ -498,7 +503,6 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
struct tegra_pcie_dw *pcie = arg;
struct dw_pcie_ep *ep = &pcie->pci.ep;
int spurious = 1;
u32 status_l0, status_l1, link_status;
@ -514,7 +518,8 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
link_status = appl_readl(pcie, APPL_LINK_STATUS);
if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
dev_dbg(pcie->dev, "Link is up with Host\n");
dw_pcie_ep_linkup(ep);
set_bit(0, &pcie->link_status);
return IRQ_WAKE_THREAD;
}
}


@ -56,6 +56,5 @@ static struct platform_driver mobiveil_pcie_driver = {
builtin_platform_driver(mobiveil_pcie_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");


@ -15,9 +15,14 @@
#include "../pci.h"
/* Device IDs */
#define DEV_PCIE_PORT_0 0x7a09
#define DEV_PCIE_PORT_1 0x7a19
#define DEV_PCIE_PORT_2 0x7a29
#define DEV_LS2K_PCIE_PORT0 0x1a05
#define DEV_LS7A_PCIE_PORT0 0x7a09
#define DEV_LS7A_PCIE_PORT1 0x7a19
#define DEV_LS7A_PCIE_PORT2 0x7a29
#define DEV_LS7A_PCIE_PORT3 0x7a39
#define DEV_LS7A_PCIE_PORT4 0x7a49
#define DEV_LS7A_PCIE_PORT5 0x7a59
#define DEV_LS7A_PCIE_PORT6 0x7a69
#define DEV_LS2K_APB 0x7a02
#define DEV_LS7A_GMAC 0x7a03
@ -53,11 +58,11 @@ static void bridge_class_quirk(struct pci_dev *dev)
dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DEV_PCIE_PORT_0, bridge_class_quirk);
DEV_LS7A_PCIE_PORT0, bridge_class_quirk);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DEV_PCIE_PORT_1, bridge_class_quirk);
DEV_LS7A_PCIE_PORT1, bridge_class_quirk);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DEV_PCIE_PORT_2, bridge_class_quirk);
DEV_LS7A_PCIE_PORT2, bridge_class_quirk);
static void system_bus_quirk(struct pci_dev *pdev)
{
@ -75,37 +80,33 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_LPC, system_bus_quirk);
static void loongson_mrrs_quirk(struct pci_dev *dev)
static void loongson_mrrs_quirk(struct pci_dev *pdev)
{
struct pci_bus *bus = dev->bus;
struct pci_dev *bridge;
static const struct pci_device_id bridge_devids[] = {
{ PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_0) },
{ PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_1) },
{ PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_2) },
{ 0, },
};
/*
* Some Loongson PCIe ports have a hardware limit on the maximum read
* request size they can handle. Force that limit on any devices
* attached under these ports by preventing later MRRS increases.
*/
struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
/* look for the matching bridge */
while (!pci_is_root_bus(bus)) {
bridge = bus->self;
bus = bus->parent;
/*
* Some Loongson PCIe ports have a h/w limitation of
* 256 bytes maximum read request size. They can't handle
* anything larger than this. So force this limit on
* any devices attached under these ports.
*/
if (pci_match_id(bridge_devids, bridge)) {
if (pcie_get_readrq(dev) > 256) {
pci_info(dev, "limiting MRRS to 256\n");
pcie_set_readrq(dev, 256);
}
break;
}
}
bridge->no_inc_mrrs = 1;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_mrrs_quirk);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DEV_LS2K_PCIE_PORT0, loongson_mrrs_quirk);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_PCIE_PORT0, loongson_mrrs_quirk);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_PCIE_PORT1, loongson_mrrs_quirk);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_PCIE_PORT2, loongson_mrrs_quirk);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_PCIE_PORT3, loongson_mrrs_quirk);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_PCIE_PORT4, loongson_mrrs_quirk);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_PCIE_PORT5, loongson_mrrs_quirk);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_PCIE_PORT6, loongson_mrrs_quirk);
static void loongson_pci_pin_quirk(struct pci_dev *pdev)
{


@ -2814,4 +2814,3 @@ static struct platform_driver tegra_pcie_driver = {
.remove = tegra_pcie_remove,
};
module_platform_driver(tegra_pcie_driver);
MODULE_LICENSE("GPL");


@ -169,4 +169,3 @@ static struct platform_driver versatile_pci_driver = {
module_platform_driver(versatile_pci_driver);
MODULE_DESCRIPTION("Versatile PCI driver");
MODULE_LICENSE("GPL v2");


@ -324,4 +324,3 @@ static struct platform_driver hisi_pcie_error_handler_driver = {
module_platform_driver(hisi_pcie_error_handler_driver);
MODULE_DESCRIPTION("HiSilicon HIP PCIe controller error handling driver");
MODULE_LICENSE("GPL v2");


@ -1135,6 +1135,5 @@ static struct platform_driver mc_pcie_driver = {
};
builtin_platform_driver(mc_pcie_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microchip PCIe host controller driver");
MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");


@ -60,6 +60,7 @@
#define PCIE_PORT_LINKUP BIT(0)
#define PCIE_PORT_CNT 3
#define INIT_PORTS_DELAY_MS 100
#define PERST_DELAY_MS 100
/**
@ -369,6 +370,7 @@ static int mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
}
}
msleep(INIT_PORTS_DELAY_MS);
mt7621_pcie_reset_ep_deassert(pcie);
tmp = NULL;


@ -66,8 +66,23 @@ enum vmd_features {
* interrupt handling.
*/
VMD_FEAT_CAN_BYPASS_MSI_REMAP = (1 << 4),
/*
* Enable ASPM on the PCIe root ports and set the default LTR of the
* storage devices on platforms where these values are not configured by
* BIOS. This is needed for laptops, which require these settings for
* proper power management of the SoC.
*/
VMD_FEAT_BIOS_PM_QUIRK = (1 << 5),
};
#define VMD_BIOS_PM_QUIRK_LTR 0x1003 /* 3145728 ns */
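To double-check the "3145728 ns" annotation: the LTR register encodes a 10-bit latency value in bits 9:0 and a 3-bit scale in bits 12:10, each scale step multiplying the value by 32. A small sketch (assuming only the standard LTR encoding) decoding 0x1003:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t ltr = 0x1003;              /* VMD_BIOS_PM_QUIRK_LTR */
	uint32_t value = ltr & 0x3ff;       /* PCI_LTR_VALUE_MASK -> 3 */
	uint32_t scale = (ltr >> 10) & 0x7; /* PCI_LTR_SCALE_MASK -> 4 */

	/* latency = value * 32^scale ns, i.e. value << (5 * scale) */
	printf("%u ns\n", value << (5 * scale)); /* 3 << 20 = 3145728 */
	return 0;
}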
#define VMD_FEATS_CLIENT (VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP | \
VMD_FEAT_HAS_BUS_RESTRICTIONS | \
VMD_FEAT_OFFSET_FIRST_VECTOR | \
VMD_FEAT_BIOS_PM_QUIRK)
static DEFINE_IDA(vmd_instance_ida);
/*
@ -709,6 +724,46 @@ static void vmd_copy_host_bridge_flags(struct pci_host_bridge *root_bridge,
vmd_bridge->native_dpc = root_bridge->native_dpc;
}
/*
* Enable ASPM and LTR settings on devices that aren't configured by BIOS.
*/
static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
{
unsigned long features = *(unsigned long *)userdata;
u16 ltr = VMD_BIOS_PM_QUIRK_LTR;
u32 ltr_reg;
int pos;
if (!(features & VMD_FEAT_BIOS_PM_QUIRK))
return 0;
pci_enable_link_state(pdev, PCIE_LINK_STATE_ALL);
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR);
if (!pos)
return 0;
/*
* Skip if the max snoop LTR is non-zero, indicating BIOS has set it
* so the LTR quirk is not needed.
*/
pci_read_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, &ltr_reg);
if (!!(ltr_reg & (PCI_LTR_VALUE_MASK | PCI_LTR_SCALE_MASK)))
return 0;
/*
* Set the default values to the maximum required by the platform to
* allow the deepest power management savings. Write as a DWORD where
* the lower word is the max snoop latency and the upper word is the
* max non-snoop latency.
*/
ltr_reg = (ltr << 16) | ltr;
pci_write_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, ltr_reg);
pci_info(pdev, "VMD: Default LTR value set by driver\n");
return 0;
}
static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
{
struct pci_sysdata *sd = &vmd->sysdata;
@ -881,6 +936,8 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
pci_assign_unassigned_bus_resources(vmd->bus);
pci_walk_bus(vmd->bus, vmd_pm_enable_quirk, &features);
/*
* VMD root buses are virtual and don't return true on pci_is_pcie()
* and will fail pcie_bus_configure_settings() early. It can instead be
@ -1017,36 +1074,24 @@ static int vmd_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);
static const struct pci_device_id vmd_ids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
VMD_FEAT_HAS_BUS_RESTRICTIONS |
VMD_FEAT_CAN_BYPASS_MSI_REMAP,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x467f),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
VMD_FEAT_HAS_BUS_RESTRICTIONS |
VMD_FEAT_OFFSET_FIRST_VECTOR,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c3d),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
VMD_FEAT_HAS_BUS_RESTRICTIONS |
VMD_FEAT_OFFSET_FIRST_VECTOR,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa77f),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
VMD_FEAT_HAS_BUS_RESTRICTIONS |
VMD_FEAT_OFFSET_FIRST_VECTOR,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7d0b),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
VMD_FEAT_HAS_BUS_RESTRICTIONS |
VMD_FEAT_OFFSET_FIRST_VECTOR,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xad0b),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
VMD_FEAT_HAS_BUS_RESTRICTIONS |
VMD_FEAT_OFFSET_FIRST_VECTOR,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
VMD_FEAT_HAS_BUS_RESTRICTIONS |
VMD_FEAT_OFFSET_FIRST_VECTOR,},
{PCI_VDEVICE(INTEL, 0x467f),
.driver_data = VMD_FEATS_CLIENT,},
{PCI_VDEVICE(INTEL, 0x4c3d),
.driver_data = VMD_FEATS_CLIENT,},
{PCI_VDEVICE(INTEL, 0xa77f),
.driver_data = VMD_FEATS_CLIENT,},
{PCI_VDEVICE(INTEL, 0x7d0b),
.driver_data = VMD_FEATS_CLIENT,},
{PCI_VDEVICE(INTEL, 0xad0b),
.driver_data = VMD_FEATS_CLIENT,},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
.driver_data = VMD_FEATS_CLIENT,},
{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);


@ -826,33 +826,21 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
return 0;
}
static int pci_epf_test_notifier(struct notifier_block *nb, unsigned long val,
void *data)
static int pci_epf_test_link_up(struct pci_epf *epf)
{
struct pci_epf *epf = container_of(nb, struct pci_epf, nb);
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
int ret;
switch (val) {
case CORE_INIT:
ret = pci_epf_test_core_init(epf);
if (ret)
return NOTIFY_BAD;
break;
case LINK_UP:
queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
msecs_to_jiffies(1));
break;
default:
dev_err(&epf->dev, "Invalid EPF test notifier event\n");
return NOTIFY_BAD;
}
return NOTIFY_OK;
queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
msecs_to_jiffies(1));
return 0;
}
static const struct pci_epc_event_ops pci_epf_test_event_ops = {
.core_init = pci_epf_test_core_init,
.link_up = pci_epf_test_link_up,
};
static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
@ -979,12 +967,8 @@ static int pci_epf_test_bind(struct pci_epf *epf)
if (ret)
epf_test->dma_supported = false;
if (linkup_notifier || core_init_notifier) {
epf->nb.notifier_call = pci_epf_test_notifier;
pci_epc_register_notifier(epc, &epf->nb);
} else {
queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
}
if (!linkup_notifier && !core_init_notifier)
queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
return 0;
}
@ -1010,6 +994,8 @@ static int pci_epf_test_probe(struct pci_epf *epf)
INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
epf->event_ops = &pci_epf_test_event_ops;
epf_set_drvdata(epf, epf_test);
return 0;
}


@ -652,6 +652,7 @@ static int epf_ntb_mw_bar_init(struct epf_ntb *ntb)
/**
* epf_ntb_mw_bar_clear() - Clear Memory window BARs
* @ntb: NTB device that facilitates communication between HOST and VHOST
* @num_mws: the number of Memory window BARs to be cleared
*/
static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws)
{


@ -728,4 +728,3 @@ module_exit(pci_ep_cfs_exit);
MODULE_DESCRIPTION("PCI EP CONFIGFS");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");


@ -613,7 +613,7 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
if (type == SECONDARY_INTERFACE && epf->sec_epc)
return -EBUSY;
mutex_lock(&epc->lock);
mutex_lock(&epc->list_lock);
func_no = find_first_zero_bit(&epc->function_num_map,
BITS_PER_LONG);
if (func_no >= BITS_PER_LONG) {
@ -640,7 +640,7 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
list_add_tail(list, &epc->pci_epf);
ret:
mutex_unlock(&epc->lock);
mutex_unlock(&epc->list_lock);
return ret;
}
@ -672,11 +672,11 @@ void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
list = &epf->sec_epc_list;
}
mutex_lock(&epc->lock);
mutex_lock(&epc->list_lock);
clear_bit(func_no, &epc->function_num_map);
list_del(list);
epf->epc = NULL;
mutex_unlock(&epc->lock);
mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
@ -690,10 +690,19 @@ EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
*/
void pci_epc_linkup(struct pci_epc *epc)
{
struct pci_epf *epf;
if (!epc || IS_ERR(epc))
return;
atomic_notifier_call_chain(&epc->notifier, LINK_UP, NULL);
mutex_lock(&epc->list_lock);
list_for_each_entry(epf, &epc->pci_epf, list) {
mutex_lock(&epf->lock);
if (epf->event_ops && epf->event_ops->link_up)
epf->event_ops->link_up(epf);
mutex_unlock(&epf->lock);
}
mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkup);
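From a function driver's point of view, the conversion above means supplying a struct pci_epc_event_ops instead of a notifier block, mirroring what pci-epf-test does earlier in this series. A minimal sketch with a hypothetical driver "foo":

/* Hypothetical EPF driver wired up for the callback-based events;
 * pci_epc_init_notify() and pci_epc_linkup() invoke these under
 * epf->lock. */
static int foo_core_init(struct pci_epf *epf)
{
	/* program BARs, interrupts, etc. */
	return 0;
}

static int foo_link_up(struct pci_epf *epf)
{
	/* start whatever work needs an established link */
	return 0;
}

static const struct pci_epc_event_ops foo_event_ops = {
	.core_init = foo_core_init,
	.link_up = foo_link_up,
};

static int foo_probe(struct pci_epf *epf)
{
	epf->event_ops = &foo_event_ops;
	return 0;
}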
@ -707,10 +716,19 @@ EXPORT_SYMBOL_GPL(pci_epc_linkup);
*/
void pci_epc_init_notify(struct pci_epc *epc)
{
struct pci_epf *epf;
if (!epc || IS_ERR(epc))
return;
atomic_notifier_call_chain(&epc->notifier, CORE_INIT, NULL);
mutex_lock(&epc->list_lock);
list_for_each_entry(epf, &epc->pci_epf, list) {
mutex_lock(&epf->lock);
if (epf->event_ops && epf->event_ops->core_init)
epf->event_ops->core_init(epf);
mutex_unlock(&epf->lock);
}
mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_init_notify);
@ -777,8 +795,8 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
}
mutex_init(&epc->lock);
mutex_init(&epc->list_lock);
INIT_LIST_HEAD(&epc->pci_epf);
ATOMIC_INIT_NOTIFIER_HEAD(&epc->notifier);
device_initialize(&epc->dev);
epc->dev.class = pci_epc_class;
@ -861,4 +879,3 @@ module_exit(pci_epc_exit);
MODULE_DESCRIPTION("PCI EPC Library");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");


@ -260,4 +260,3 @@ EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr);
MODULE_DESCRIPTION("PCI EPC Address Space Management");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");


@ -568,4 +568,3 @@ module_exit(pci_epf_exit);
MODULE_DESCRIPTION("PCI EPF Library");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");


@ -45,7 +45,6 @@ static struct acpiphp_attention_info *attention_info;
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(disable, "disable acpiphp driver");
module_param_named(disable, acpiphp_disabled, bool, 0444);


@ -1088,6 +1088,8 @@ static void quirk_cmd_compl(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x010e,
PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0110,
PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,


@ -32,7 +32,6 @@ int shpchp_poll_time;
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(shpchp_debug, bool, 0644);
module_param(shpchp_poll_mode, bool, 0644);


@ -14,7 +14,7 @@
#include <linux/delay.h>
#include "pci.h"
#define VIRTFN_ID_LEN 16
#define VIRTFN_ID_LEN 17 /* "virtfn%u\0" for 2^32 - 1 */
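The new length is easy to verify: "virtfn" is 6 characters, a u32 can take up to 10 decimal digits, and one byte is needed for the terminating NUL. A standalone sketch of the arithmetic:

#include <stdio.h>

int main(void)
{
	char buf[17]; /* VIRTFN_ID_LEN */

	/* snprintf() returns the length needed, excluding the NUL */
	int n = snprintf(buf, sizeof(buf), "virtfn%u", 4294967295u);

	printf("\"%s\": %d chars + NUL = %d bytes\n", buf, n, n + 1); /* 16 + 1 = 17 */
	return 0;
}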
int pci_iov_virtfn_bus(struct pci_dev *dev, int vf_id)
{


@ -194,11 +194,13 @@ static const struct attribute_group p2pmem_group = {
static void p2pdma_page_free(struct page *page)
{
struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page->pgmap);
/* safe to dereference while a reference is held to the percpu ref */
struct pci_p2pdma *p2pdma =
rcu_dereference_protected(pgmap->provider->p2pdma, 1);
struct percpu_ref *ref;
gen_pool_free_owner(pgmap->provider->p2pdma->pool,
(uintptr_t)page_to_virt(page), PAGE_SIZE,
(void **)&ref);
gen_pool_free_owner(p2pdma->pool, (uintptr_t)page_to_virt(page),
PAGE_SIZE, (void **)&ref);
percpu_ref_put(ref);
}


@ -976,24 +976,41 @@ bool acpi_pci_power_manageable(struct pci_dev *dev)
bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
struct pci_dev *rpdev;
struct acpi_device *adev;
acpi_status status;
unsigned long long state;
struct acpi_device *adev, *rpadev;
const union acpi_object *obj;
if (acpi_pci_disabled || !dev->is_hotplug_bridge)
return false;
/* Assume D3 support if the bridge is power-manageable by ACPI. */
if (acpi_pci_power_manageable(dev))
return true;
adev = ACPI_COMPANION(&dev->dev);
if (adev) {
/*
* If the bridge has _S0W, whether or not it can go into D3
* depends on what is returned by that object. In particular,
* if the power state returned by _S0W is D2 or shallower,
* entering D3 should not be allowed.
*/
if (acpi_dev_power_state_for_wake(adev) <= ACPI_STATE_D2)
return false;
/*
* Otherwise, assume that the bridge can enter D3 so long as it
* is power-manageable via ACPI.
*/
if (acpi_device_power_manageable(adev))
return true;
}
rpdev = pcie_find_root_port(dev);
if (!rpdev)
return false;
adev = ACPI_COMPANION(&rpdev->dev);
if (!adev)
if (rpdev == dev)
rpadev = adev;
else
rpadev = ACPI_COMPANION(&rpdev->dev);
if (!rpadev)
return false;
/*
@ -1001,15 +1018,15 @@ bool acpi_pci_bridge_d3(struct pci_dev *dev)
* doesn't supply a wakeup GPE via _PRW, it cannot signal hotplug
* events from low-power states including D3hot and D3cold.
*/
if (!adev->wakeup.flags.valid)
if (!rpadev->wakeup.flags.valid)
return false;
/*
* If the Root Port cannot wake itself from D3hot or D3cold, we
* can't use D3.
* In the bridge-below-a-Root-Port case, evaluate _S0W for the Root Port
* to verify whether or not it can signal wakeup from D3.
*/
status = acpi_evaluate_integer(adev->handle, "_S0W", NULL, &state);
if (ACPI_SUCCESS(status) && state < ACPI_STATE_D3_HOT)
if (rpadev != adev &&
acpi_dev_power_state_for_wake(rpadev) <= ACPI_STATE_D2)
return false;
/*
@ -1018,7 +1035,7 @@ bool acpi_pci_bridge_d3(struct pci_dev *dev)
* bridges *below* that Root Port can also signal hotplug events
* while in D3.
*/
if (!acpi_dev_get_property(adev, "HotPlugSupportInD3",
if (!acpi_dev_get_property(rpadev, "HotPlugSupportInD3",
ACPI_TYPE_INTEGER, &obj) &&
obj->integer.value == 1)
return true;


@ -572,7 +572,7 @@ static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
static void pci_pm_bridge_power_up_actions(struct pci_dev *pci_dev)
{
pci_bridge_wait_for_secondary_bus(pci_dev);
pci_bridge_wait_for_secondary_bus(pci_dev, "resume", PCI_RESET_WAIT);
/*
* When powering on a bridge from D3cold, the whole hierarchy may be
* powered on into D0uninitialized state, resume them to give them a


@ -167,9 +167,6 @@ static int __init pcie_port_pm_setup(char *str)
}
__setup("pcie_port_pm=", pcie_port_pm_setup);
/* Time to wait after a reset for device to become responsive */
#define PCIE_RESET_READY_POLL_MS 60000
/**
* pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
* @bus: pointer to PCI bus structure to search
@ -1174,7 +1171,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
return -ENOTTY;
}
if (delay > 1000)
if (delay > PCI_RESET_WAIT)
pci_info(dev, "not ready %dms after %s; waiting\n",
delay - 1, reset_type);
@ -1183,7 +1180,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
pci_read_config_dword(dev, PCI_COMMAND, &id);
}
if (delay > 1000)
if (delay > PCI_RESET_WAIT)
pci_info(dev, "ready %dms after %s\n", delay - 1,
reset_type);
@ -4941,24 +4938,31 @@ static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
/**
* pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
* @dev: PCI bridge
* @reset_type: reset type in human-readable form
* @timeout: maximum time to wait for devices on secondary bus (milliseconds)
*
* Handle necessary delays before access to the devices on the secondary
* side of the bridge are permitted after D3cold to D0 transition.
* side of the bridge are permitted after D3cold to D0 transition
* or Conventional Reset.
*
* For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
* conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
* 4.3.2.
*
* Return 0 on success or -ENOTTY if the first device on the secondary bus
* failed to become accessible.
*/
void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type,
int timeout)
{
struct pci_dev *child;
int delay;
if (pci_dev_is_disconnected(dev))
return;
return 0;
if (!pci_is_bridge(dev) || !dev->bridge_d3)
return;
if (!pci_is_bridge(dev))
return 0;
down_read(&pci_bus_sem);
@ -4970,14 +4974,14 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
*/
if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
up_read(&pci_bus_sem);
return;
return 0;
}
/* Take d3cold_delay requirements into account */
delay = pci_bus_max_d3cold_delay(dev->subordinate);
if (!delay) {
up_read(&pci_bus_sem);
return;
return 0;
}
child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
@ -4986,14 +4990,12 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
/*
* Conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
* accessing the device after reset (that is 1000 ms + 100 ms). In
* practice this should not be needed because we don't do power
* management for them (see pci_bridge_d3_possible()).
* accessing the device after reset (that is 1000 ms + 100 ms).
*/
if (!pci_is_pcie(dev)) {
pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
msleep(1000 + delay);
return;
return 0;
}
/*
@ -5010,11 +5012,11 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
* configuration requests if we only wait for 100 ms (see
* https://bugzilla.kernel.org/show_bug.cgi?id=203885).
*
* Therefore we wait for 100 ms and check for the device presence.
* If it is still not present give it an additional 100 ms.
* Therefore we wait for 100 ms and check for the device presence
* until the timeout expires.
*/
if (!pcie_downstream_port(dev))
return;
return 0;
if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
@ -5025,14 +5027,11 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
if (!pcie_wait_for_link_delay(dev, true, delay)) {
/* Did not train, no need to wait any further */
pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
return;
return -ENOTTY;
}
}
if (!pci_device_is_present(child)) {
pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
msleep(delay);
}
return pci_dev_wait(child, reset_type, timeout - delay);
}
void pci_reset_secondary_bus(struct pci_dev *dev)
@ -5051,15 +5050,6 @@ void pci_reset_secondary_bus(struct pci_dev *dev)
ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
/*
* Trhfa for conventional PCI is 2^25 clock cycles.
* Assuming a minimum 33MHz clock this results in a 1s
* delay before we can consider subordinate devices to
* be re-initialized. PCIe has some ways to shorten this,
* but we don't make use of them yet.
*/
ssleep(1);
}
void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
@ -5078,7 +5068,8 @@ int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
{
pcibios_reset_secondary_bus(dev);
return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
return pci_bridge_wait_for_secondary_bus(dev, "bus reset",
PCIE_RESET_READY_POLL_MS);
}
EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
@ -6026,6 +6017,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
{
u16 v;
int ret;
struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
return -EINVAL;
@ -6044,6 +6036,15 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
v = (ffs(rq) - 8) << 12;
if (bridge->no_inc_mrrs) {
int max_mrrs = pcie_get_readrq(dev);
if (rq > max_mrrs) {
pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n", rq, max_mrrs);
return -EINVAL;
}
}
ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_READRQ, v);
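For reference, (ffs(rq) - 8) << 12 places log2(rq) - 7 into the Max_Read_Request_Size field at DEVCTL bits 14:12; a quick sketch of the full mapping:

#include <stdio.h>
#include <strings.h> /* ffs() */

int main(void)
{
	/* valid MRRS values are the powers of two from 128 to 4096 bytes */
	for (int rq = 128; rq <= 4096; rq <<= 1) {
		unsigned int v = (ffs(rq) - 8) << 12;
		printf("rq=%4d -> DEVCTL[14:12]=%u (v=0x%04x)\n", rq, v >> 12, v);
	}
	return 0;
}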


@ -64,6 +64,19 @@ struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
#define PCI_PM_D3HOT_WAIT 10 /* msec */
#define PCI_PM_D3COLD_WAIT 100 /* msec */
/*
* Following exit from Conventional Reset, devices must be ready within 1 sec
* (PCIe r6.0 sec 6.6.1). A D3cold to D0 transition implies a Conventional
* Reset (PCIe r6.0 sec 5.8).
*/
#define PCI_RESET_WAIT 1000 /* msec */
/*
* Devices may extend the 1 sec period through Request Retry Status completions
* (PCIe r6.0 sec 2.3.1). The spec does not provide an upper limit, but 60 sec
* ought to be enough for any device to become responsive.
*/
#define PCIE_RESET_READY_POLL_MS 60000 /* msec */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
void pci_refresh_power_state(struct pci_dev *dev);
int pci_power_up(struct pci_dev *dev);
@ -86,8 +99,9 @@ void pci_msi_init(struct pci_dev *dev);
void pci_msix_init(struct pci_dev *dev);
bool pci_bridge_d3_possible(struct pci_dev *dev);
void pci_bridge_d3_update(struct pci_dev *dev);
void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev);
void pci_bridge_reconfigure_ltr(struct pci_dev *dev);
int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type,
int timeout);
static inline void pci_wakeup_event(struct pci_dev *dev)
{
@ -310,53 +324,36 @@ struct pci_sriov {
* @dev: PCI device to set new error_state
* @new: the state we want dev to be in
*
* Must be called with device_lock held.
* If the device is experiencing perm_failure, it has to remain in that state.
* Any other transition is allowed.
*
* Returns true if state has been changed to the requested state.
*/
static inline bool pci_dev_set_io_state(struct pci_dev *dev,
pci_channel_state_t new)
{
bool changed = false;
pci_channel_state_t old;
device_lock_assert(&dev->dev);
switch (new) {
case pci_channel_io_perm_failure:
switch (dev->error_state) {
case pci_channel_io_frozen:
case pci_channel_io_normal:
case pci_channel_io_perm_failure:
changed = true;
break;
}
break;
xchg(&dev->error_state, pci_channel_io_perm_failure);
return true;
case pci_channel_io_frozen:
switch (dev->error_state) {
case pci_channel_io_frozen:
case pci_channel_io_normal:
changed = true;
break;
}
break;
old = cmpxchg(&dev->error_state, pci_channel_io_normal,
pci_channel_io_frozen);
return old != pci_channel_io_perm_failure;
case pci_channel_io_normal:
switch (dev->error_state) {
case pci_channel_io_frozen:
case pci_channel_io_normal:
changed = true;
break;
}
break;
old = cmpxchg(&dev->error_state, pci_channel_io_frozen,
pci_channel_io_normal);
return old != pci_channel_io_perm_failure;
default:
return false;
}
if (changed)
dev->error_state = new;
return changed;
}
static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
{
device_lock(&dev->dev);
pci_dev_set_io_state(dev, pci_channel_io_perm_failure);
device_unlock(&dev->dev);
return 0;
}
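The reason the device_lock() can be dropped is visible in the transitions above: perm_failure is stored unconditionally with xchg(), while the other two transitions use cmpxchg() expecting a non-failure old value, so they can never overwrite a disconnect. A userspace C11 analogue (hypothetical names, not kernel code) of the sticky-failure property:

#include <stdatomic.h>
#include <stdio.h>

enum { IO_NORMAL, IO_FROZEN, IO_PERM_FAILURE };
static _Atomic int error_state = IO_NORMAL;

static int set_frozen(void)
{
	int expected = IO_NORMAL;

	/* succeeds only from IO_NORMAL; on failure 'expected' is
	 * updated to the state actually observed */
	atomic_compare_exchange_strong(&error_state, &expected, IO_FROZEN);
	return expected != IO_PERM_FAILURE;
}

int main(void)
{
	/* a disconnect wins over any concurrent transition... */
	atomic_store(&error_state, IO_PERM_FAILURE);

	/* ...and a later freeze attempt reports failure */
	printf("set_frozen() after disconnect: %d\n", set_frozen()); /* 0 */
	return 0;
}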


@ -184,6 +184,9 @@ static int disable_ecrc_checking(struct pci_dev *dev)
*/
void pcie_set_ecrc_checking(struct pci_dev *dev)
{
if (!pcie_aer_is_native(dev))
return;
switch (ecrc_policy) {
case ECRC_POLICY_DEFAULT:
return;
@ -1224,42 +1227,6 @@ static irqreturn_t aer_irq(int irq, void *context)
return IRQ_WAKE_THREAD;
}
static int set_device_error_reporting(struct pci_dev *dev, void *data)
{
bool enable = *((bool *)data);
int type = pci_pcie_type(dev);
if ((type == PCI_EXP_TYPE_ROOT_PORT) ||
(type == PCI_EXP_TYPE_RC_EC) ||
(type == PCI_EXP_TYPE_UPSTREAM) ||
(type == PCI_EXP_TYPE_DOWNSTREAM)) {
if (enable)
pci_enable_pcie_error_reporting(dev);
else
pci_disable_pcie_error_reporting(dev);
}
return 0;
}
/**
* set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports.
* @dev: pointer to root port's pci_dev data structure
* @enable: true = enable error reporting, false = disable error reporting.
*/
static void set_downstream_devices_error_reporting(struct pci_dev *dev,
bool enable)
{
set_device_error_reporting(dev, &enable);
if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_EC)
pcie_walk_rcec(dev, set_device_error_reporting, &enable);
else if (dev->subordinate)
pci_walk_bus(dev->subordinate, set_device_error_reporting,
&enable);
}
/**
* aer_enable_rootport - enable Root Port's interrupts when receiving messages
* @rpc: pointer to a Root Port data structure
@ -1289,12 +1256,6 @@ static void aer_enable_rootport(struct aer_rpc *rpc)
pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, &reg32);
pci_write_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, reg32);
/*
* Enable error reporting for the root port device and downstream port
* devices.
*/
set_downstream_devices_error_reporting(pdev, true);
/* Enable Root Port's interrupt in response to error messages */
pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
@ -1313,12 +1274,6 @@ static void aer_disable_rootport(struct aer_rpc *rpc)
int aer = pdev->aer_cap;
u32 reg32;
/*
* Disable error reporting for the root port device and downstream port
* devices.
*/
set_downstream_devices_error_reporting(pdev, false);
/* Disable Root's interrupt in response to error messages */
pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;


@ -1138,6 +1138,60 @@ int pci_disable_link_state(struct pci_dev *pdev, int state)
}
EXPORT_SYMBOL(pci_disable_link_state);
/**
* pci_enable_link_state - Clear and set the default device link state so that
* the link may be allowed to enter the specified states. Note that if the
* BIOS didn't grant ASPM control to the OS, this does nothing because we can't
* touch the LNKCTL register. Also note that this does not enable states
* disabled by pci_disable_link_state(). Return 0 or a negative errno.
*
* @pdev: PCI device
* @state: Mask of ASPM link states to enable
*/
int pci_enable_link_state(struct pci_dev *pdev, int state)
{
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
if (!link)
return -EINVAL;
/*
* A driver requested that ASPM be enabled on this device, but
* if we don't have permission to manage ASPM (e.g., on ACPI
* systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
* the _OSC method), we can't honor that request.
*/
if (aspm_disabled) {
pci_warn(pdev, "can't override BIOS ASPM; OS doesn't have ASPM control\n");
return -EPERM;
}
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
link->aspm_default = 0;
if (state & PCIE_LINK_STATE_L0S)
link->aspm_default |= ASPM_STATE_L0S;
if (state & PCIE_LINK_STATE_L1)
/* L1 PM substates require L1 */
link->aspm_default |= ASPM_STATE_L1 | ASPM_STATE_L1SS;
if (state & PCIE_LINK_STATE_L1_1)
link->aspm_default |= ASPM_STATE_L1_1;
if (state & PCIE_LINK_STATE_L1_2)
link->aspm_default |= ASPM_STATE_L1_2;
if (state & PCIE_LINK_STATE_L1_1_PCIPM)
link->aspm_default |= ASPM_STATE_L1_1_PCIPM;
if (state & PCIE_LINK_STATE_L1_2_PCIPM)
link->aspm_default |= ASPM_STATE_L1_2_PCIPM;
pcie_config_aspm_link(link, policy_to_aspm_state(link));
link->clkpm_default = (state & PCIE_LINK_STATE_CLKPM) ? 1 : 0;
pcie_set_clkpm(link, policy_to_clkpm_state(link));
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
return 0;
}
EXPORT_SYMBOL(pci_enable_link_state);
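A minimal usage sketch for the new interface, modeled on the VMD quirk earlier in this series (hypothetical caller; the return value should be treated as advisory since the BIOS may retain ASPM control):

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	/* ask for all ASPM link states this device supports */
	ret = pci_enable_link_state(pdev, PCIE_LINK_STATE_ALL);
	if (ret)
		pci_info(pdev, "ASPM left as configured by BIOS: %d\n", ret);

	return 0;
}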
static int pcie_aspm_set_policy(const char *val,
const struct kernel_param *kp)
{


@ -170,8 +170,8 @@ pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
PCI_EXP_DPC_STATUS_TRIGGER);
if (!pcie_wait_for_link(pdev, true)) {
pci_info(pdev, "Data Link Layer Link Active not set in 1000 msec\n");
if (pci_bridge_wait_for_secondary_bus(pdev, "DPC",
PCIE_RESET_READY_POLL_MS)) {
clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
ret = PCI_ERS_RESULT_DISCONNECT;
} else {


@ -501,7 +501,6 @@ static void pcie_port_device_remove(struct pci_dev *dev)
{
device_for_each_child(&dev->dev, NULL, remove_iter);
pci_free_irq_vectors(dev);
pci_disable_device(dev);
}
/**
@ -727,6 +726,19 @@ static void pcie_portdrv_remove(struct pci_dev *dev)
}
pcie_port_device_remove(dev);
pci_disable_device(dev);
}
static void pcie_portdrv_shutdown(struct pci_dev *dev)
{
if (pci_bridge_d3_possible(dev)) {
pm_runtime_forbid(&dev->dev);
pm_runtime_get_noresume(&dev->dev);
pm_runtime_dont_use_autosuspend(&dev->dev);
}
pcie_port_device_remove(dev);
}
static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev,
@ -777,7 +789,7 @@ static struct pci_driver pcie_portdriver = {
.probe = pcie_portdrv_probe,
.remove = pcie_portdrv_remove,
.shutdown = pcie_portdrv_remove,
.shutdown = pcie_portdrv_shutdown,
.err_handler = &pcie_portdrv_err_handler,


@ -996,7 +996,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
resource_list_for_each_entry_safe(window, n, &resources) {
offset = window->offset;
res = window->res;
if (!res->end)
if (!res->flags && !res->start && !res->end)
continue;
list_move_tail(&window->node, &bridge->windows);
@ -1841,6 +1841,8 @@ int pci_setup_device(struct pci_dev *dev)
pci_set_of_node(dev);
pci_set_acpi_fwnode(dev);
if (dev->dev.fwnode && !fwnode_device_is_available(dev->dev.fwnode))
return -ENODEV;
pci_dev_assign_slot(dev);


@ -4835,6 +4835,26 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
/*
* Wangxun 10G/1G NICs have no ACS capability, and on multi-function
* devices, peer-to-peer transactions are not used between the functions.
* So add an ACS quirk for the devices below to isolate the functions:
* SFxxx 1G NICs (em)
* RP1000/RP2000 10G NICs (sp)
*/
static int pci_quirk_wangxun_nic_acs(struct pci_dev *dev, u16 acs_flags)
{
switch (dev->device) {
case 0x0100 ... 0x010F:
case 0x1001:
case 0x2001:
return pci_acs_ctrl_enabled(acs_flags,
PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
return false;
}
static const struct pci_dev_acs_enabled {
u16 vendor;
u16 device;
@ -4980,6 +5000,8 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_NXP, 0x8d9b, pci_quirk_nxp_rp_acs },
/* Zhaoxin Root/Downstream Ports */
{ PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
/* Wangxun NICs */
{ PCI_VENDOR_ID_WANGXUN, PCI_ANY_ID, pci_quirk_wangxun_nic_acs },
{ 0 }
};
@ -5340,6 +5362,7 @@ static void quirk_no_flr(struct pci_dev *dev)
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x7901, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);


@ -1765,12 +1765,70 @@ static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res,
add_size = size - new_size;
pci_dbg(bridge, "bridge window %pR shrunken by %pa\n", res,
&add_size);
} else {
return;
}
res->end = res->start + new_size - 1;
remove_from_list(add_list, res);
/* If the resource is part of the add_list, remove it now */
if (add_list)
remove_from_list(add_list, res);
}
static void remove_dev_resource(struct resource *avail, struct pci_dev *dev,
struct resource *res)
{
resource_size_t size, align, tmp;
size = resource_size(res);
if (!size)
return;
align = pci_resource_alignment(dev, res);
align = align ? ALIGN(avail->start, align) - avail->start : 0;
tmp = align + size;
avail->start = min(avail->start + tmp, avail->end + 1);
}
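A worked example (made-up numbers) of the arithmetic above: the padding needed to align the BAR is consumed together with the BAR size from the front of the available window, clamped at the window end:

#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t start = 0x1200, end = 0x3fff; /* available window */
	uint64_t size = 0x800, align = 0x1000; /* device BAR */

	uint64_t pad = ALIGN_UP(start, align) - start; /* 0xe00 */
	uint64_t used = pad + size;                    /* 0x1600 */

	/* min(start + used, end + 1), as in remove_dev_resource() */
	start = (start + used < end + 1) ? start + used : end + 1;
	printf("new start: %#llx\n", (unsigned long long)start); /* 0x2800 */
	return 0;
}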
static void remove_dev_resources(struct pci_dev *dev, struct resource *io,
struct resource *mmio,
struct resource *mmio_pref)
{
int i;
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
struct resource *res = &dev->resource[i];
if (resource_type(res) == IORESOURCE_IO) {
remove_dev_resource(io, dev, res);
} else if (resource_type(res) == IORESOURCE_MEM) {
/*
* Make sure prefetchable memory is reduced from
* the correct resource. Specifically, we put 32-bit
* prefetchable memory in the non-prefetchable window
* if there is a 64-bit prefetchable window.
*
* See comments in __pci_bus_size_bridges() for
* more information.
*/
if ((res->flags & IORESOURCE_PREFETCH) &&
((res->flags & IORESOURCE_MEM_64) ==
(mmio_pref->flags & IORESOURCE_MEM_64)))
remove_dev_resource(mmio_pref, dev, res);
else
remove_dev_resource(mmio, dev, res);
}
}
}
/*
* io, mmio and mmio_pref contain the total amount of bridge window space
* available. This includes the minimal space needed to cover all the
* existing devices on the bus and the possible extra space that can be
* shared with the bridges.
*/
static void pci_bus_distribute_available_resources(struct pci_bus *bus,
struct list_head *add_list,
struct resource io,
@ -1780,7 +1838,7 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
unsigned int normal_bridges = 0, hotplug_bridges = 0;
struct resource *io_res, *mmio_res, *mmio_pref_res;
struct pci_dev *dev, *bridge = bus->self;
resource_size_t io_per_hp, mmio_per_hp, mmio_pref_per_hp, align;
resource_size_t io_per_b, mmio_per_b, mmio_pref_per_b, align;
io_res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
mmio_res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
@ -1824,94 +1882,88 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
normal_bridges++;
}
/*
* There is only one bridge on the bus so it gets all available
* resources which it can then distribute to the possible hotplug
* bridges below.
*/
if (hotplug_bridges + normal_bridges == 1) {
dev = list_first_entry(&bus->devices, struct pci_dev, bus_list);
if (dev->subordinate)
pci_bus_distribute_available_resources(dev->subordinate,
add_list, io, mmio, mmio_pref);
if (!(hotplug_bridges + normal_bridges))
return;
/*
* Calculate the amount of space we can forward from "bus" to any
* downstream buses, i.e., the space left over after assigning the
* BARs and windows on "bus".
*/
list_for_each_entry(dev, &bus->devices, bus_list) {
if (!dev->is_virtfn)
remove_dev_resources(dev, &io, &mmio, &mmio_pref);
}
if (hotplug_bridges == 0)
return;
/*
* Calculate the total amount of extra resource space we can
* pass to bridges below this one. This is basically the
* extra space reduced by the minimal required space for the
* non-hotplug bridges.
* If there is at least one hotplug bridge on this bus it gets all
* the extra resource space that was left after the reductions
* above.
*
* If there are no hotplug bridges the extra resource space is
* split between non-hotplug bridges. This is to allow possible
* hotplug bridges below them to get the extra space as well.
*/
if (hotplug_bridges) {
io_per_b = div64_ul(resource_size(&io), hotplug_bridges);
mmio_per_b = div64_ul(resource_size(&mmio), hotplug_bridges);
mmio_pref_per_b = div64_ul(resource_size(&mmio_pref),
hotplug_bridges);
} else {
io_per_b = div64_ul(resource_size(&io), normal_bridges);
mmio_per_b = div64_ul(resource_size(&mmio), normal_bridges);
mmio_pref_per_b = div64_ul(resource_size(&mmio_pref),
normal_bridges);
}
for_each_pci_bridge(dev, bus) {
resource_size_t used_size;
struct resource *res;
if (dev->is_hotplug_bridge)
continue;
/*
* Reduce the available resource space by what the
* bridge and devices below it occupy.
*/
res = &dev->resource[PCI_BRIDGE_IO_WINDOW];
align = pci_resource_alignment(dev, res);
align = align ? ALIGN(io.start, align) - io.start : 0;
used_size = align + resource_size(res);
if (!res->parent)
io.start = min(io.start + used_size, io.end + 1);
res = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
align = pci_resource_alignment(dev, res);
align = align ? ALIGN(mmio.start, align) - mmio.start : 0;
used_size = align + resource_size(res);
if (!res->parent)
mmio.start = min(mmio.start + used_size, mmio.end + 1);
res = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
align = pci_resource_alignment(dev, res);
align = align ? ALIGN(mmio_pref.start, align) -
mmio_pref.start : 0;
used_size = align + resource_size(res);
if (!res->parent)
mmio_pref.start = min(mmio_pref.start + used_size,
mmio_pref.end + 1);
}
io_per_hp = div64_ul(resource_size(&io), hotplug_bridges);
mmio_per_hp = div64_ul(resource_size(&mmio), hotplug_bridges);
mmio_pref_per_hp = div64_ul(resource_size(&mmio_pref),
hotplug_bridges);
/*
* Go over devices on this bus and distribute the remaining
* resource space between hotplug bridges.
*/
for_each_pci_bridge(dev, bus) {
struct pci_bus *b;
b = dev->subordinate;
if (!b || !dev->is_hotplug_bridge)
if (!b)
continue;
if (hotplug_bridges && !dev->is_hotplug_bridge)
continue;
res = &dev->resource[PCI_BRIDGE_IO_WINDOW];
/*
* Distribute available extra resources equally between
* hotplug-capable downstream ports taking alignment into
* account.
*/
io.end = io.start + io_per_hp - 1;
mmio.end = mmio.start + mmio_per_hp - 1;
mmio_pref.end = mmio_pref.start + mmio_pref_per_hp - 1;
/*
* Make sure the split resource space is properly aligned
* for bridge windows (align it down to avoid going above
* what is available).
*/
align = pci_resource_alignment(dev, res);
io.end = align ? io.start + ALIGN_DOWN(io_per_b, align) - 1
: io.start + io_per_b - 1;
/*
* x_per_b holds the extra resource space that can be added for
* each bridge, but the minimal space is already reserved as well,
* so adjust x.start down accordingly to cover the whole space.
*/
io.start -= resource_size(res);
res = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
align = pci_resource_alignment(dev, res);
mmio.end = align ? mmio.start + ALIGN_DOWN(mmio_per_b, align) - 1
: mmio.start + mmio_per_b - 1;
mmio.start -= resource_size(res);
res = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
align = pci_resource_alignment(dev, res);
mmio_pref.end = align ? mmio_pref.start +
ALIGN_DOWN(mmio_pref_per_b, align) - 1
: mmio_pref.start + mmio_pref_per_b - 1;
mmio_pref.start -= resource_size(res);
pci_bus_distribute_available_resources(b, add_list, io, mmio,
mmio_pref);
io.start += io_per_hp;
mmio.start += mmio_per_hp;
mmio_pref.start += mmio_pref_per_hp;
io.start = io.end + 1;
mmio.start = mmio.end + 1;
mmio_pref.start = mmio_pref.end + 1;
}
}
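
The loop above hands each bridge an equal share of the leftover window and
aligns the share down so the pieces never overrun the parent resource. A
standalone sketch of that arithmetic (plain userspace C with illustrative
values; not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Power-of-two align-down, like the kernel's ALIGN_DOWN(). */
#define ALIGN_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t start = 0x80000000ULL;		/* leftover MMIO window */
	uint64_t size = 0x2100000ULL;
	uint64_t align = 0x100000ULL;		/* 1 MB bridge window alignment */
	unsigned int bridges = 4;
	uint64_t per_b = size / bridges;	/* raw share: 0x840000 */

	for (unsigned int i = 0; i < bridges; i++) {
		uint64_t end = start + ALIGN_DOWN(per_b, align) - 1;

		printf("bridge %u: %#llx-%#llx\n", i,
		       (unsigned long long)start, (unsigned long long)end);
		start = end + 1;	/* next share starts right after */
	}
	return 0;
}

Each raw share of 0x840000 is rounded down to 0x800000, so the four shares
together (0x2000000) still fit inside the 0x2100000 window; the remainder is
left unused rather than risking an overrun.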
@@ -1923,6 +1975,8 @@ static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
if (!bridge->is_hotplug_bridge)
return;
pci_dbg(bridge, "distributing available resources\n");
/* Take the initial extra resources from the hotplug port */
available_io = bridge->resource[PCI_BRIDGE_IO_WINDOW];
available_mmio = bridge->resource[PCI_BRIDGE_MEM_WINDOW];
@@ -1934,6 +1988,54 @@
available_mmio_pref);
}
static bool pci_bridge_resources_not_assigned(struct pci_dev *dev)
{
const struct resource *r;
/*
* If the child device's resources are not yet assigned it means we
* are configuring them (not the boot firmware), so we should be
* able to extend the upstream bridge resources in the same way we
* do with the normal hotplug case.
*/
r = &dev->resource[PCI_BRIDGE_IO_WINDOW];
if (r->flags && !(r->flags & IORESOURCE_STARTALIGN))
return false;
r = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
if (r->flags && !(r->flags & IORESOURCE_STARTALIGN))
return false;
r = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
if (r->flags && !(r->flags & IORESOURCE_STARTALIGN))
return false;
return true;
}
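
The checks above rely on how the sizing pass encodes a not-yet-assigned
window: res->start then holds the required alignment rather than a bus
address, and IORESOURCE_STARTALIGN ("start field is alignment") is set. A
kernel-style fragment illustrating the distinction (a sketch for reading,
not a buildable unit; the log text is illustrative):

	const struct resource *r = &dev->resource[PCI_BRIDGE_MEM_WINDOW];

	if (r->flags & IORESOURCE_STARTALIGN)
		/* Sized by the kernel but not assigned: start = alignment. */
		pci_dbg(dev, "window pending, alignment %#llx\n",
			(unsigned long long)r->start);
	else if (r->flags)
		/* Programmed by the boot firmware: start is a real address. */
		pci_dbg(dev, "window assigned at %#llx\n",
			(unsigned long long)r->start);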
static void
pci_root_bus_distribute_available_resources(struct pci_bus *bus,
struct list_head *add_list)
{
struct pci_dev *dev, *bridge = bus->self;
for_each_pci_bridge(dev, bus) {
struct pci_bus *b;
b = dev->subordinate;
if (!b)
continue;
/*
* Need to check "bridge" here too because it is NULL
* in case of root bus.
*/
if (bridge && pci_bridge_resources_not_assigned(dev))
pci_bridge_distribute_available_resources(bridge,
add_list);
else
pci_root_bus_distribute_available_resources(b, add_list);
}
}
/*
* First try will not touch PCI bridge res.
* Second and later try will clear small leaf bridge res.
@@ -1973,6 +2075,8 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
*/
__pci_bus_size_bridges(bus, add_list);
pci_root_bus_distribute_available_resources(bus, add_list);
/* Depth last, allocate resources and update the hardware. */
__pci_bus_assign_resources(bus, add_list, &fail_head);
if (add_list)


@@ -98,7 +98,7 @@ static struct attribute *pci_slot_default_attrs[] = {
};
ATTRIBUTE_GROUPS(pci_slot_default);
static struct kobj_type pci_slot_ktype = {
static const struct kobj_type pci_slot_ktype = {
.sysfs_ops = &pci_slot_sysfs_ops,
.release = &pci_slot_release,
.default_groups = pci_slot_default_groups,


@@ -606,21 +606,20 @@ static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
rc = copy_to_user(data, &stuser->return_code,
sizeof(stuser->return_code));
if (rc) {
rc = -EFAULT;
goto out;
mutex_unlock(&stdev->mrpc_mutex);
return -EFAULT;
}
data += sizeof(stuser->return_code);
rc = copy_to_user(data, &stuser->data,
size - sizeof(stuser->return_code));
if (rc) {
rc = -EFAULT;
goto out;
mutex_unlock(&stdev->mrpc_mutex);
return -EFAULT;
}
stuser_set_state(stuser, MRPC_IDLE);
out:
mutex_unlock(&stdev->mrpc_mutex);
if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE ||
@@ -1480,15 +1479,13 @@ static irqreturn_t switchtec_event_isr(int irq, void *dev)
static irqreturn_t switchtec_dma_mrpc_isr(int irq, void *dev)
{
struct switchtec_dev *stdev = dev;
irqreturn_t ret = IRQ_NONE;
iowrite32(SWITCHTEC_EVENT_CLEAR |
SWITCHTEC_EVENT_EN_IRQ,
&stdev->mmio_part_cfg->mrpc_comp_hdr);
schedule_work(&stdev->mrpc_work);
ret = IRQ_HANDLED;
return ret;
return IRQ_HANDLED;
}
static int switchtec_init_isr(struct switchtec_dev *stdev)


@@ -534,6 +534,7 @@ int acpi_bus_update_power(acpi_handle handle, int *state_p);
int acpi_device_update_power(struct acpi_device *device, int *state_p);
bool acpi_bus_power_manageable(acpi_handle handle);
void acpi_dev_power_up_children_with_adr(struct acpi_device *adev);
u8 acpi_dev_power_state_for_wake(struct acpi_device *adev);
int acpi_device_power_add_dependent(struct acpi_device *adev,
struct device *dev);
void acpi_device_power_remove_dependent(struct acpi_device *adev,


@@ -18,13 +18,31 @@
struct dw_edma;
struct dw_edma_region {
phys_addr_t paddr;
void __iomem *vaddr;
u64 paddr;
union {
void *mem;
void __iomem *io;
} vaddr;
size_t sz;
};
/**
* struct dw_edma_core_ops - platform-specific eDMA methods
* @irq_vector: Get the IRQ number of the given eDMA channel. Note the
* method takes the channel id in end-to-end numbering,
* with the eDMA write channels placed first in the row.
* @pci_address: Get the PCIe bus address corresponding to the given CPU
* address. Note this callback can be omitted if the
* address translation is performed by the DW PCIe RP/EP
* controller hosting the DW eDMA device, and DMA_BYPASS
* isn't set for all the outbound iATU windows; the
* controller then does the translation automatically.
*/
struct dw_edma_core_ops {
int (*irq_vector)(struct device *dev, unsigned int nr);
u64 (*pci_address)(struct device *dev, phys_addr_t cpu_addr);
};
enum dw_edma_map_format {
@@ -61,7 +79,6 @@ enum dw_edma_chip_flags {
*/
struct dw_edma_chip {
struct device *dev;
int id;
int nr_irqs;
const struct dw_edma_core_ops *ops;
u32 flags;
@@ -84,7 +101,7 @@
};
/* Export to the platform drivers */
#if IS_ENABLED(CONFIG_DW_EDMA)
#if IS_REACHABLE(CONFIG_DW_EDMA)
int dw_edma_probe(struct dw_edma_chip *chip);
int dw_edma_remove(struct dw_edma_chip *chip);
#else
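
Two notes on this header: the IS_ENABLED() -> IS_REACHABLE() switch makes
the empty stubs kick in when dw-edma is built as a module but the caller is
built in, avoiding unresolved symbols, and the new dw_edma_core_ops lets
glue drivers supply a CPU-to-PCI address translation. A hedged sketch of a
platform glue driver wiring up the ops (the my_* names and the fixed offset
are hypothetical):

#include <linux/dma/edma.h>
#include <linux/platform_device.h>

static u64 my_edma_pci_address(struct device *dev, phys_addr_t cpu_addr)
{
	/* Fixed CPU->PCI offset of this controller's outbound window;
	 * a real driver would derive it from its DT "ranges". */
	return (u64)cpu_addr - 0x40000000ULL;
}

static int my_edma_irq_vector(struct device *dev, unsigned int nr)
{
	/* "nr" is in end-to-end numbering, write channels first. */
	return platform_get_irq(to_platform_device(dev), nr);
}

static const struct dw_edma_core_ops my_edma_ops = {
	.irq_vector = my_edma_irq_vector,
	.pci_address = my_edma_pci_address,
};

The glue driver would then set chip->ops = &my_edma_ops before calling
dw_edma_probe(chip); per the kernel-doc above, .pci_address can be left
unset when the DW PCIe controller already translates outbound addresses in
its iATU.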


@@ -394,7 +394,7 @@ enum dma_slave_buswidth {
* should be read (RX), if the source is memory this argument is
* ignored.
* @dst_addr: this is the physical address where DMA slave data
* should be written (TX), if the source is memory this argument
* should be written (TX), if the destination is memory this argument
* is ignored.
* @src_addr_width: this is the width in bytes of the source (RX)
* register where DMA data shall be read. If the source


@@ -122,6 +122,7 @@ struct pci_epc_mem {
* struct pci_epc - represents the PCI EPC device
* @dev: PCI EPC device
* @pci_epf: list of endpoint functions present in this EPC device
* @list_lock: Mutex for protecting pci_epf list
* @ops: function pointers for performing endpoint operations
* @windows: array of address space of the endpoint controller
* @mem: first window of the endpoint controller, which corresponds to
@@ -134,11 +135,11 @@ struct pci_epc_mem {
* @group: configfs group representing the PCI EPC device
* @lock: mutex to protect pci_epc ops
* @function_num_map: bitmap to manage physical function number
* @notifier: used to notify EPF of any EPC events (like linkup)
*/
struct pci_epc {
struct device dev;
struct list_head pci_epf;
struct mutex list_lock;
const struct pci_epc_ops *ops;
struct pci_epc_mem **windows;
struct pci_epc_mem *mem;
@@ -149,7 +150,6 @@
/* mutex to protect against concurrent access of EP controller */
struct mutex lock;
unsigned long function_num_map;
struct atomic_notifier_head notifier;
};
/**
@@ -192,12 +192,6 @@ static inline void *epc_get_drvdata(struct pci_epc *epc)
return dev_get_drvdata(&epc->dev);
}
static inline int
pci_epc_register_notifier(struct pci_epc *epc, struct notifier_block *nb)
{
return atomic_notifier_chain_register(&epc->notifier, nb);
}
struct pci_epc *
__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
struct module *owner);
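
The new @list_lock exists so the core can walk an EPC's bound functions
without taking the heavier ops mutex. A simplified sketch of the intended
pattern, modeled on how link-up events are dispatched to the event_ops
callbacks introduced in pci-epf.h below (error handling omitted):

	struct pci_epf *epf;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_up)
			epf->event_ops->link_up(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);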


@@ -17,11 +17,6 @@
struct pci_epf;
enum pci_epc_interface_type;
enum pci_notify_event {
CORE_INIT,
LINK_UP,
};
enum pci_barno {
NO_BAR = -1,
BAR_0,
@@ -72,6 +67,16 @@ struct pci_epf_ops {
struct config_group *group);
};
/**
* struct pci_epc_event_ops - Callbacks for capturing the EPC events
* @core_init: Callback for the EPC initialization complete event
* @link_up: Callback for the EPC link up event
*/
struct pci_epc_event_ops {
int (*core_init)(struct pci_epf *epf);
int (*link_up)(struct pci_epf *epf);
};
/**
* struct pci_epf_driver - represents the PCI EPF driver
* @probe: ops to perform when a new EPF device has been bound to the EPF driver
@@ -127,7 +132,6 @@ struct pci_epf_bar {
* @epf_pf: the physical EPF device to which this virtual EPF device is bound
* @driver: the EPF driver to which this EPF device is bound
* @list: to add pci_epf as a list of PCI endpoint functions to pci_epc
* @nb: notifier block to notify EPF of any EPC events (like linkup)
* @lock: mutex to protect pci_epf_ops
* @sec_epc: the secondary EPC device to which this EPF device is bound
* @sec_epc_list: to add pci_epf as list of PCI endpoint functions to secondary
@@ -139,6 +143,7 @@ struct pci_epf_bar {
* @is_vf: true - virtual function, false - physical function
* @vfunction_num_map: bitmap to manage virtual function number
* @pci_vepf: list of virtual endpoint functions associated with this function
* @event_ops: Callbacks for capturing the EPC events
*/
struct pci_epf {
struct device dev;
@@ -154,7 +159,6 @@ struct pci_epf {
struct pci_epf *epf_pf;
struct pci_epf_driver *driver;
struct list_head list;
struct notifier_block nb;
/* mutex to protect against concurrent access of pci_epf_ops */
struct mutex lock;
@@ -168,6 +172,7 @@ struct pci_epf {
unsigned int is_vf;
unsigned long vfunction_num_map;
struct list_head pci_vepf;
const struct pci_epc_event_ops *event_ops;
};
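
With the notifier block gone from struct pci_epf, a function driver now
publishes its callbacks through @event_ops, typically at probe time. A
sketch following the shape of the in-tree conversions (the my_epf_* names
are illustrative):

static int my_epf_core_init(struct pci_epf *epf)
{
	/* Reprogram BARs, MSI/MSI-X, etc. after controller (re)init. */
	return 0;
}

static int my_epf_link_up(struct pci_epf *epf)
{
	/* Start whatever work needs an established link. */
	return 0;
}

static const struct pci_epc_event_ops my_epf_event_ops = {
	.core_init = my_epf_core_init,
	.link_up = my_epf_link_up,
};

static int my_epf_probe(struct pci_epf *epf)
{
	epf->event_ops = &my_epf_event_ops;
	return 0;
}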
/**


@@ -572,6 +572,7 @@ struct pci_host_bridge {
void *release_data;
unsigned int ignore_reset_delay:1; /* For entire hierarchy */
unsigned int no_ext_tags:1; /* No Extended Tags */
unsigned int no_inc_mrrs:1; /* No Increase MRRS */
unsigned int native_aer:1; /* OS may use PCIe AER */
unsigned int native_pcie_hotplug:1; /* OS may use PCIe hotplug */
unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */
@@ -1697,10 +1698,15 @@ extern bool pcie_ports_native;
#define PCIE_LINK_STATE_L1_2 BIT(4)
#define PCIE_LINK_STATE_L1_1_PCIPM BIT(5)
#define PCIE_LINK_STATE_L1_2_PCIPM BIT(6)
#define PCIE_LINK_STATE_ALL (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |\
PCIE_LINK_STATE_CLKPM | PCIE_LINK_STATE_L1_1 |\
PCIE_LINK_STATE_L1_2 | PCIE_LINK_STATE_L1_1_PCIPM |\
PCIE_LINK_STATE_L1_2_PCIPM)
#ifdef CONFIG_PCIEASPM
int pci_disable_link_state(struct pci_dev *pdev, int state);
int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
int pci_enable_link_state(struct pci_dev *pdev, int state);
void pcie_no_aspm(void);
bool pcie_aspm_support_enabled(void);
bool pcie_aspm_enabled(struct pci_dev *pdev);
@@ -1709,6 +1715,8 @@ static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
{ return 0; }
static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{ return 0; }
static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
{ return 0; }
static inline void pcie_no_aspm(void) { }
static inline bool pcie_aspm_support_enabled(void) { return false; }
static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
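
pci_enable_link_state() is the opt-in counterpart to
pci_disable_link_state(): a driver may request specific ASPM states, and
only what the link actually supports takes effect, with the stubs above
making it a no-op when CONFIG_PCIEASPM is off. A hedged usage sketch (the
quirk function below is illustrative, not from the tree):

static void my_quirk_enable_aspm(struct pci_dev *pdev)
{
	/* Ask for every state; the core clamps to link capabilities. */
	int ret = pci_enable_link_state(pdev, PCIE_LINK_STATE_ALL);

	if (ret)
		pci_info(pdev, "failed to enable ASPM states: %d\n", ret);
}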


@@ -3012,6 +3012,8 @@
#define PCI_DEVICE_ID_INTEL_VMD_9A0B 0x9a0b
#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152
#define PCI_VENDOR_ID_WANGXUN 0x8088
#define PCI_VENDOR_ID_SCALEMP 0x8686
#define PCI_DEVICE_ID_SCALEMP_VSMP_CTL 0x1010