Merge branches 'pci/host-armada', 'pci/host-designware', 'pci/host-hv', 'pci/host-imx6', 'pci/host-keystone', 'pci/host-mvebu', 'pci/host-rcar', 'pci/host-thunder' and 'pci/host-vmd' into next

* pci/host-armada:
  PCI: armada: Add driver for Marvell Armada 7K/8K PCIe controller
  dt-bindings: pci: add DT binding for Marvell Armada 7K/8K PCIe controller

* pci/host-designware:
  PCI: designware: Remove incorrect RC memory base/limit configuration
  PCI: designware: Move Root Complex setup code to dw_pcie_setup_rc()

* pci/host-hv:
  PCI: hv: Report resources release after stopping the bus

* pci/host-imx6:
  ARM: dts: imx6qp: Specify imx6qp version of PCIe core
  PCI: imx6: Implement reset sequence for i.MX6+
  PCI: imx6: Use enum instead of bool for variant indicator
  PCI: imx6: Add DT property for link gen, default to Gen1
  PCI: imx6: Add reset-gpio-active-high boolean property to DT
  ARM: dts: imx6: Fix PCIe reset GPIO polarity on Toradex Apalis Ixora
  PCI: imx6: Add initial imx6sx support
  PCI: imx6: Factor out ref clock enable
  Revert "PCI: imx6: Add support for active-low reset GPIO"

* pci/host-keystone:
  PCI: keystone: Remove unnecessary goto statement
  PCI: keystone: Add error IRQ handler

* pci/host-mvebu:
  PCI: mvebu: Use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS for mvebu_pcie_pm_ops
  PCI: mvebu: Constify mvebu_pcie_pm_ops structure

* pci/host-rcar:
  PCI: rcar: Select PCI_MSI_IRQ_DOMAIN

* pci/host-thunder:
  PCI: thunder: Don't clobber read-only bits in bridge config registers

* pci/host-vmd:
  PCI: Remove return values from pcie_port_platform_notify() and relatives
  PCI/ACPI: Allow all PCIe services on non-ACPI host bridges
Bjorn Helgaas 2016-05-03 11:42:30 -05:00
21 changed files with 674 additions and 132 deletions


@@ -4,8 +4,8 @@ This PCIe host controller is based on the Synopsis Designware PCIe IP
and thus inherits all the common properties defined in designware-pcie.txt.
Required properties:
- compatible: "fsl,imx6q-pcie"
- reg: base addresse and length of the pcie controller
- compatible: "fsl,imx6q-pcie", "fsl,imx6sx-pcie", "fsl,imx6qp-pcie"
- reg: base address and length of the PCIe controller
- interrupts: A list of interrupt outputs of the controller. Must contain an
entry for each entry in the interrupt-names property.
- interrupt-names: Must include the following entries:
@@ -19,6 +19,20 @@ Optional properties:
- fsl,tx-deemph-gen2-6db: Gen2 (6db) De-emphasis value. Default: 20
- fsl,tx-swing-full: Gen2 TX SWING FULL value. Default: 127
- fsl,tx-swing-low: TX launch amplitude swing_low value. Default: 127
- fsl,max-link-speed: Specify PCI gen for link capability. Must be '2' for
gen2, otherwise will default to gen1. Note that the IMX6 LVDS clock outputs
do not meet gen2 jitter requirements and thus for gen2 capability a gen2
compliant clock generator should be used and configured.
- reset-gpio: Should specify the GPIO for controlling the PCI bus device reset
signal. It's not polarity aware and defaults to active-low reset sequence
(L=reset state, H=operation state).
- reset-gpio-active-high: If present then the reset sequence using the GPIO
specified in the "reset-gpio" property is reversed (H=reset state,
L=operation state).
Additional required properties for imx6sx-pcie:
- clock names: Must include the following additional entries:
- "pcie_inbound_axi"
Example:

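For reference, these new properties are consumed by the pci-imx6.c changes further down in this diff. The fragment below is only an illustrative sketch of that parsing; "struct imx6_dt_cfg" is a hypothetical stand-in for the fields the real driver keeps in struct imx6_pcie.

#include <linux/of.h>
#include <linux/of_gpio.h>

/* Hypothetical container; the real driver stores these in struct imx6_pcie. */
struct imx6_dt_cfg {
	int reset_gpio;
	bool gpio_active_high;
	u32 link_gen;
};

static void imx6_pcie_parse_dt_sketch(struct device_node *np,
				      struct imx6_dt_cfg *cfg)
{
	/* Optional PERST# GPIO; the reset sequence is active-low unless
	 * "reset-gpio-active-high" is present. */
	cfg->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
	cfg->gpio_active_high = of_property_read_bool(np,
						      "reset-gpio-active-high");

	/* Link capability defaults to Gen1 when "fsl,max-link-speed" is absent. */
	if (of_property_read_u32(np, "fsl,max-link-speed", &cfg->link_gen))
		cfg->link_gen = 1;
}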

@@ -0,0 +1,38 @@
* Marvell Armada 7K/8K PCIe interface
This PCIe host controller is based on the Synopsis Designware PCIe IP
and thus inherits all the common properties defined in designware-pcie.txt.
Required properties:
- compatible: "marvell,armada8k-pcie"
- reg: must contain two register regions
- the control register region
- the config space region
- reg-names:
- "ctrl" for the control register region
- "config" for the config space region
- interrupts: Interrupt specifier for the PCIe controler
- clocks: reference to the PCIe controller clock
Example:
pcie@f2600000 {
compatible = "marvell,armada8k-pcie", "snps,dw-pcie";
reg = <0 0xf2600000 0 0x10000>, <0 0xf6f00000 0 0x80000>;
reg-names = "ctrl", "config";
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
device_type = "pci";
dma-coherent;
bus-range = <0 0xff>;
ranges = <0x81000000 0 0xf9000000 0 0xf9000000 0 0x10000 /* downstream I/O */
0x82000000 0 0xf6000000 0 0xf6000000 0 0xf00000>; /* non-prefetchable memory */
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
num-lanes = <1>;
clocks = <&cpm_syscon0 1 13>;
status = "disabled";
};

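For reference, the glue driver added later in this diff treats the "ctrl" region as the DesignWare dbi space and derives its vendor-specific registers from a fixed 0x8000 offset inside it. A trimmed sketch of that lookup, with hypothetical names and reduced error handling:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

#define PCIE_VENDOR_REGS_OFFSET		0x8000	/* inside the "ctrl" region */

static void __iomem *armada8k_map_ctrl_sketch(struct platform_device *pdev,
					      void __iomem **vendor_regs)
{
	struct resource *res;
	void __iomem *dbi;

	/* "ctrl" is the first reg region named in the binding above. */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
	dbi = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dbi))
		return dbi;

	*vendor_regs = dbi + PCIE_VENDOR_REGS_OFFSET;
	return dbi;
}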

@@ -56,6 +56,7 @@ Optional properties:-
phy-names: name of the Generic Keystine SerDes phy for PCI
- If boot loader already does PCI link establishment, then phys and
phy-names shouldn't be present.
interrupts: platform interrupt for error interrupts.
Designware DT Properties not applicable for Keystone PCI

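The optional error interrupt is wired up by the pci-keystone.c changes later in this diff; the sketch below condenses that wiring, with the handler body reduced to a comment and otherwise hypothetical names.

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>

static irqreturn_t pcie_err_irq_sketch(int irq, void *priv)
{
	/* The real handler reads ERR_IRQ_STATUS_RAW, reports fatal errors
	 * and writes the RW1C status bits back to ack them. */
	return IRQ_HANDLED;
}

static void ks_request_error_irq_sketch(struct device *dev,
					struct device_node *np, void *priv)
{
	/* Index 0 is the platform error interrupt from the RC; optional. */
	int error_irq = irq_of_parse_and_map(np, 0);

	if (error_irq <= 0) {
		dev_info(dev, "no error IRQ defined\n");
		return;
	}

	if (request_irq(error_irq, pcie_err_irq_sketch, IRQF_SHARED,
			"pcie-error-irq", priv) < 0)
		dev_err(dev, "failed to request error IRQ %d\n", error_irq);
}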

@@ -174,8 +174,9 @@ &ldb {
};
&pcie {
/* active-low meaning opposite of regular PERST# active-low polarity */
reset-gpio = <&gpio1 28 GPIO_ACTIVE_LOW>;
/* active-high meaning opposite of regular PERST# active-low polarity */
reset-gpio = <&gpio1 28 GPIO_ACTIVE_HIGH>;
reset-gpio-active-high;
status = "okay";
};


@@ -82,5 +82,8 @@ ipu2: ipu@02800000 {
"ldb_di0", "ldb_di1", "prg";
};
pcie: pcie@0x01000000 {
compatible = "fsl,imx6qp-pcie", "snps,dw-pcie";
};
};
};


@@ -72,6 +72,8 @@ config PCI_RCAR_GEN2
config PCI_RCAR_GEN2_PCIE
bool "Renesas R-Car PCIe controller"
depends on ARCH_RENESAS || (ARM && COMPILE_TEST)
select PCI_MSI
select PCI_MSI_IRQ_DOMAIN
help
Say Y here if you want PCIe controller support on R-Car Gen2 SoCs.
@@ -231,4 +233,15 @@ config PCI_HOST_THUNDER_ECAM
help
Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
config PCIE_ARMADA_8K
bool "Marvell Armada-8K PCIe controller"
depends on ARCH_MVEBU
select PCIE_DW
select PCIEPORTBUS
help
Say Y here if you want to enable PCIe controller support on
Armada-8K SoCs. The PCIe controller on Armada-8K is based on
Designware hardware and therefore the driver re-uses the
Designware core functions to implement the driver.
endmenu


@@ -28,3 +28,4 @@ obj-$(CONFIG_PCI_HISI) += pcie-hisi.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o
obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o


@@ -142,13 +142,13 @@ static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
static void dra7xx_pcie_host_init(struct pcie_port *pp)
{
dw_pcie_setup_rc(pp);
pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR;
pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR;
pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR;
pp->cfg1_base &= DRA7XX_CPU_TO_BUS_ADDR;
dw_pcie_setup_rc(pp);
dra7xx_pcie_establish_link(pp);
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);


@@ -2268,11 +2268,6 @@ static int hv_pci_remove(struct hv_device *hdev)
hbus = hv_get_drvdata(hdev);
ret = hv_send_resources_released(hdev);
if (ret)
dev_err(&hdev->device,
"Couldn't send resources released packet(s)\n");
memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
init_completion(&comp_pkt.host_event);
pkt.teardown_packet.completion_func = hv_pci_generic_compl;
@@ -2295,6 +2290,11 @@ static int hv_pci_remove(struct hv_device *hdev)
pci_unlock_rescan_remove();
}
ret = hv_send_resources_released(hdev);
if (ret)
dev_err(&hdev->device,
"Couldn't send resources released packet(s)\n");
vmbus_close(hdev->channel);
/* Delete any children which might still exist. */


@@ -19,6 +19,7 @@
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -31,19 +32,29 @@
#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)
enum imx6_pcie_variants {
IMX6Q,
IMX6SX,
IMX6QP,
};
struct imx6_pcie {
struct gpio_desc *reset_gpio;
int reset_gpio;
bool gpio_active_high;
struct clk *pcie_bus;
struct clk *pcie_phy;
struct clk *pcie_inbound_axi;
struct clk *pcie;
struct pcie_port pp;
struct regmap *iomuxc_gpr;
enum imx6_pcie_variants variant;
void __iomem *mem_base;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
u32 tx_deemph_gen2_6db;
u32 tx_swing_full;
u32 tx_swing_low;
int link_gen;
};
/* PCIe Root Complex registers (memory-mapped) */
@@ -236,39 +247,95 @@ static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
u32 val, gpr1, gpr12;
/*
* If the bootloader already enabled the link we need some special
* handling to get the core back into a state where it is safe to
* touch it for configuration. As there is no dedicated reset signal
* wired up for MX6QDL, we need to manually force LTSSM into "detect"
* state before completely disabling LTSSM, which is a prerequisite
* for core configuration.
*
* If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong
* indication that the bootloader activated the link.
*/
regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);
if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
(gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
val = readl(pp->dbi_base + PCIE_PL_PFLR);
val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
val |= PCIE_PL_PFLR_FORCE_LINK;
writel(val, pp->dbi_base + PCIE_PL_PFLR);
switch (imx6_pcie->variant) {
case IMX6SX:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
/* Force PCIe PHY reset */
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
IMX6SX_GPR5_PCIE_BTNRST_RESET,
IMX6SX_GPR5_PCIE_BTNRST_RESET);
break;
case IMX6QP:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_SW_RST,
IMX6Q_GPR1_PCIE_SW_RST);
break;
case IMX6Q:
/*
* If the bootloader already enabled the link we need some
* special handling to get the core back into a state where
* it is safe to touch it for configuration. As there is
* no dedicated reset signal wired up for MX6QDL, we need
* to manually force LTSSM into "detect" state before
* completely disabling LTSSM, which is a prerequisite for
* core configuration.
*
* If both LTSSM_ENABLE and REF_SSP_ENABLE are active we
* have a strong indication that the bootloader activated
* the link.
*/
regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);
if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
(gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
val = readl(pp->dbi_base + PCIE_PL_PFLR);
val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
val |= PCIE_PL_PFLR_FORCE_LINK;
writel(val, pp->dbi_base + PCIE_PL_PFLR);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
}
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
break;
}
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
return 0;
}
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
struct pcie_port *pp = &imx6_pcie->pp;
int ret = 0;
switch (imx6_pcie->variant) {
case IMX6SX:
ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
if (ret) {
dev_err(pp->dev, "unable to enable pcie_axi clock\n");
break;
}
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
break;
case IMX6QP: /* FALLTHROUGH */
case IMX6Q:
/* power up core phy and enable ref clock */
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
/*
* the async reset input need ref clock to sync internally,
* when the ref clock comes after reset, internal synced
* reset time is too short, cannot meet the requirement.
* add one ~10us delay here.
*/
udelay(10);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
break;
}
return ret;
}
static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
{
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
@@ -292,43 +359,60 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
goto err_pcie;
}
/* power up core phy and enable ref clock */
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
/*
* the async reset input need ref clock to sync internally,
* when the ref clock comes after reset, internal synced
* reset time is too short, cannot meet the requirement.
* add one ~10us delay here.
*/
udelay(10);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
ret = imx6_pcie_enable_ref_clk(imx6_pcie);
if (ret) {
dev_err(pp->dev, "unable to enable pcie ref clock\n");
goto err_ref_clk;
}
/* allow the clocks to stabilize */
usleep_range(200, 500);
/* Some boards don't have PCIe reset GPIO. */
if (imx6_pcie->reset_gpio) {
gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0);
if (gpio_is_valid(imx6_pcie->reset_gpio)) {
gpio_set_value_cansleep(imx6_pcie->reset_gpio,
imx6_pcie->gpio_active_high);
msleep(100);
gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1);
gpio_set_value_cansleep(imx6_pcie->reset_gpio,
!imx6_pcie->gpio_active_high);
}
switch (imx6_pcie->variant) {
case IMX6SX:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
break;
case IMX6QP:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_SW_RST, 0);
usleep_range(200, 500);
break;
case IMX6Q: /* Nothing to do */
break;
}
return 0;
err_ref_clk:
clk_disable_unprepare(imx6_pcie->pcie);
err_pcie:
clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
return ret;
}
static void imx6_pcie_init_phy(struct pcie_port *pp)
{
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
if (imx6_pcie->variant == IMX6SX)
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_RX_EQ_MASK,
IMX6SX_GPR12_PCIE_RX_EQ_2);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
@@ -417,11 +501,15 @@ static int imx6_pcie_establish_link(struct pcie_port *pp)
goto err_reset_phy;
}
/* Allow Gen2 mode after the link is up. */
tmp = readl(pp->dbi_base + PCIE_RC_LCR);
tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
writel(tmp, pp->dbi_base + PCIE_RC_LCR);
if (imx6_pcie->link_gen == 2) {
/* Allow Gen2 mode after the link is up. */
tmp = readl(pp->dbi_base + PCIE_RC_LCR);
tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
writel(tmp, pp->dbi_base + PCIE_RC_LCR);
} else {
dev_info(pp->dev, "Link: Gen2 disabled\n");
}
/*
* Start Directed Speed Change so the best possible speed both link
@@ -445,8 +533,7 @@ static int imx6_pcie_establish_link(struct pcie_port *pp)
}
tmp = readl(pp->dbi_base + PCIE_RC_LCSR);
dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
dev_info(pp->dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
return 0;
err_reset_phy:
@@ -523,6 +610,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
{
struct imx6_pcie *imx6_pcie;
struct pcie_port *pp;
struct device_node *np = pdev->dev.of_node;
struct resource *dbi_base;
struct device_node *node = pdev->dev.of_node;
int ret;
@@ -534,6 +622,9 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
pp = &imx6_pcie->pp;
pp->dev = &pdev->dev;
imx6_pcie->variant =
(enum imx6_pcie_variants)of_device_get_match_data(&pdev->dev);
/* Added for PCI abort handling */
hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
"imprecise external abort");
@@ -544,8 +635,20 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pp->dbi_base);
/* Fetch GPIOs */
imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
GPIOD_OUT_LOW);
imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
imx6_pcie->gpio_active_high = of_property_read_bool(np,
"reset-gpio-active-high");
if (gpio_is_valid(imx6_pcie->reset_gpio)) {
ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
imx6_pcie->gpio_active_high ?
GPIOF_OUT_INIT_HIGH :
GPIOF_OUT_INIT_LOW,
"PCIe reset");
if (ret) {
dev_err(&pdev->dev, "unable to get reset gpio\n");
return ret;
}
}
/* Fetch clocks */
imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
@@ -569,6 +672,16 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(imx6_pcie->pcie);
}
if (imx6_pcie->variant == IMX6SX) {
imx6_pcie->pcie_inbound_axi = devm_clk_get(&pdev->dev,
"pcie_inbound_axi");
if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
dev_err(&pdev->dev,
"pcie_incbound_axi clock missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie_inbound_axi);
}
}
/* Grab GPR config register range */
imx6_pcie->iomuxc_gpr =
syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
@@ -598,6 +711,12 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
&imx6_pcie->tx_swing_low))
imx6_pcie->tx_swing_low = 127;
/* Limit link speed */
ret = of_property_read_u32(pp->dev->of_node, "fsl,max-link-speed",
&imx6_pcie->link_gen);
if (ret)
imx6_pcie->link_gen = 1;
ret = imx6_add_pcie_port(pp, pdev);
if (ret < 0)
return ret;
@@ -615,7 +734,9 @@ static void imx6_pcie_shutdown(struct platform_device *pdev)
}
static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx6q-pcie", },
{ .compatible = "fsl,imx6q-pcie", .data = (void *)IMX6Q, },
{ .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
{ .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
{},
};
MODULE_DEVICE_TABLE(of, imx6_pcie_of_match);


@@ -14,6 +14,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_pci.h>
@@ -53,6 +54,21 @@
#define IRQ_STATUS 0x184
#define MSI_IRQ_OFFSET 4
/* Error IRQ bits */
#define ERR_AER BIT(5) /* ECRC error */
#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */
#define ERR_CORR BIT(3) /* Correctable error */
#define ERR_NONFATAL BIT(2) /* Non-fatal error */
#define ERR_FATAL BIT(1) /* Fatal error */
#define ERR_SYS BIT(0) /* System (fatal, non-fatal, or correctable) */
#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \
ERR_NONFATAL | ERR_FATAL | ERR_SYS)
#define ERR_FATAL_IRQ (ERR_FATAL | ERR_AXI)
#define ERR_IRQ_STATUS_RAW 0x1c0
#define ERR_IRQ_STATUS 0x1c4
#define ERR_IRQ_ENABLE_SET 0x1c8
#define ERR_IRQ_ENABLE_CLR 0x1cc
/* Config space registers */
#define DEBUG0 0x728
@@ -243,6 +259,28 @@ void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
writel(offset, ks_pcie->va_app_base + IRQ_EOI);
}
void ks_dw_pcie_enable_error_irq(void __iomem *reg_base)
{
writel(ERR_IRQ_ALL, reg_base + ERR_IRQ_ENABLE_SET);
}
irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev,
void __iomem *reg_base)
{
u32 status;
status = readl(reg_base + ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
if (!status)
return IRQ_NONE;
if (status & ERR_FATAL_IRQ)
dev_err(dev, "fatal error (status %#010x)\n", status);
/* Ack the IRQ; status bits are RW1C */
writel(status, reg_base + ERR_IRQ_STATUS);
return IRQ_HANDLED;
}
static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
{
}


@@ -15,6 +15,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/msi.h>
@@ -159,7 +160,7 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
char *controller, int *num_irqs)
{
int temp, max_host_irqs, legacy = 1, *host_irqs, ret = -EINVAL;
int temp, max_host_irqs, legacy = 1, *host_irqs;
struct device *dev = ks_pcie->pp.dev;
struct device_node *np_pcie = dev->of_node, **np_temp;
@@ -180,11 +181,15 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
*np_temp = of_find_node_by_name(np_pcie, controller);
if (!(*np_temp)) {
dev_err(dev, "Node for %s is absent\n", controller);
goto out;
return -EINVAL;
}
temp = of_irq_count(*np_temp);
if (!temp)
goto out;
if (!temp) {
dev_err(dev, "No IRQ entries in %s\n", controller);
return -EINVAL;
}
if (temp > max_host_irqs)
dev_warn(dev, "Too many %s interrupts defined %u\n",
(legacy ? "legacy" : "MSI"), temp);
@@ -198,12 +203,13 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
if (!host_irqs[temp])
break;
}
if (temp) {
*num_irqs = temp;
ret = 0;
return 0;
}
out:
return ret;
return -EINVAL;
}
static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
@@ -226,6 +232,9 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
ks_pcie);
}
}
if (ks_pcie->error_irq > 0)
ks_dw_pcie_enable_error_irq(ks_pcie->va_app_base);
}
/*
@@ -289,6 +298,14 @@ static struct pcie_host_ops keystone_pcie_host_ops = {
.scan_bus = ks_dw_pcie_v3_65_scan_bus,
};
static irqreturn_t pcie_err_irq_handler(int irq, void *priv)
{
struct keystone_pcie *ks_pcie = priv;
return ks_dw_pcie_handle_error_irq(ks_pcie->pp.dev,
ks_pcie->va_app_base);
}
static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
struct platform_device *pdev)
{
@@ -309,6 +326,22 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
return ret;
}
/*
* Index 0 is the platform interrupt for error interrupt
* from RC. This is optional.
*/
ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0);
if (ks_pcie->error_irq <= 0)
dev_info(&pdev->dev, "no error IRQ defined\n");
else {
if (request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
IRQF_SHARED, "pcie-error-irq", ks_pcie) < 0) {
dev_err(&pdev->dev, "failed to request error IRQ %d\n",
ks_pcie->error_irq);
return ret;
}
}
pp->root_bus_nr = -1;
pp->ops = &keystone_pcie_host_ops;
ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
@@ -317,7 +350,7 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
return ret;
}
return ret;
return 0;
}
static const struct of_device_id ks_pcie_of_match[] = {
@@ -346,7 +379,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *reg_p;
struct phy *phy;
int ret = 0;
int ret;
ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie),
GFP_KERNEL);
@@ -376,6 +409,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
devm_release_mem_region(dev, res->start, resource_size(res));
pp->dev = dev;
ks_pcie->np = dev->of_node;
platform_set_drvdata(pdev, ks_pcie);
ks_pcie->clk = devm_clk_get(dev, "pcie");
if (IS_ERR(ks_pcie->clk)) {


@@ -29,6 +29,9 @@ struct keystone_pcie {
int msi_host_irqs[MAX_MSI_HOST_IRQS];
struct device_node *msi_intc_np;
struct irq_domain *legacy_irq_domain;
struct device_node *np;
int error_irq;
/* Application register space */
void __iomem *va_app_base;
@@ -42,6 +45,9 @@ phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp);
/* Keystone specific PCI controller APIs */
void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie);
void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset);
void ks_dw_pcie_enable_error_irq(void __iomem *reg_base);
irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev,
void __iomem *reg_base);
int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
struct device_node *msi_intc_np);
int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,


@@ -1003,6 +1003,7 @@ static void mvebu_pcie_msi_enable(struct mvebu_pcie *pcie)
pcie->msi->dev = &pcie->pdev->dev;
}
#ifdef CONFIG_PM_SLEEP
static int mvebu_pcie_suspend(struct device *dev)
{
struct mvebu_pcie *pcie;
@@ -1031,6 +1032,7 @@ static int mvebu_pcie_resume(struct device *dev)
return 0;
}
#endif
static void mvebu_pcie_port_clk_put(void *data)
{
@@ -1298,9 +1300,8 @@ static const struct of_device_id mvebu_pcie_of_match_table[] = {
};
MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table);
static struct dev_pm_ops mvebu_pcie_pm_ops = {
.suspend_noirq = mvebu_pcie_suspend,
.resume_noirq = mvebu_pcie_resume,
static const struct dev_pm_ops mvebu_pcie_pm_ops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
};
static struct platform_driver mvebu_pcie_driver = {


@@ -153,11 +153,11 @@ static int thunder_pem_config_read(struct pci_bus *bus, unsigned int devfn,
* reserved bits, this makes the code simpler and is OK as the bits
* are not affected by writing zeros to them.
*/
static u32 thunder_pem_bridge_w1c_bits(int where)
static u32 thunder_pem_bridge_w1c_bits(u64 where_aligned)
{
u32 w1c_bits = 0;
switch (where & ~3) {
switch (where_aligned) {
case 0x04: /* Command/Status */
case 0x1c: /* Base and I/O Limit/Secondary Status */
w1c_bits = 0xff000000;
@@ -184,12 +184,34 @@ static u32 thunder_pem_bridge_w1c_bits(int where)
return w1c_bits;
}
/* Some bits must be written to one so they appear to be read-only. */
static u32 thunder_pem_bridge_w1_bits(u64 where_aligned)
{
u32 w1_bits;
switch (where_aligned) {
case 0x1c: /* I/O Base / I/O Limit, Secondary Status */
/* Force 32-bit I/O addressing. */
w1_bits = 0x0101;
break;
case 0x24: /* Prefetchable Memory Base / Prefetchable Memory Limit */
/* Force 64-bit addressing */
w1_bits = 0x00010001;
break;
default:
w1_bits = 0;
break;
}
return w1_bits;
}
static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
struct gen_pci *pci = bus->sysdata;
struct thunder_pem_pci *pem_pci;
u64 write_val, read_val;
u64 where_aligned = where & ~3ull;
u32 mask = 0;
pem_pci = container_of(pci, struct thunder_pem_pci, gen_pci);
@@ -205,8 +227,7 @@ static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn,
*/
switch (size) {
case 1:
read_val = where & ~3ull;
writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD);
writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD);
read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
read_val >>= 32;
mask = ~(0xff << (8 * (where & 3)));
@@ -215,8 +236,7 @@ static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn,
val |= (u32)read_val;
break;
case 2:
read_val = where & ~3ull;
writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD);
writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD);
read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
read_val >>= 32;
mask = ~(0xffff << (8 * (where & 3)));
@@ -243,12 +263,18 @@ static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn,
}
}
/*
* Some bits must be read-only with value of one. Since the
* access method allows these to be cleared if a zero is
* written, force them to one before writing.
*/
val |= thunder_pem_bridge_w1_bits(where_aligned);
/*
* Low order bits are the config address, the high order 32
* bits are the data to be written.
*/
write_val = where & ~3ull;
write_val |= (((u64)val) << 32);
write_val = (((u64)val) << 32) | where_aligned;
writeq(write_val, pem_pci->pem_reg_base + PEM_CFG_WR);
return PCIBIOS_SUCCESSFUL;
}

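To make the write path above easier to follow, here is a condensed, stand-alone model of how a one-byte config write is folded into the 64-bit value written to PEM_CFG_WR; it mirrors the driver's merge logic but omits the W1C masking and the MMIO accesses, and is not part of the patch.

#include <linux/types.h>

static u64 pem_cfg_wr_value_sketch(u64 where, u32 cur, u8 val, u32 w1_bits)
{
	u64 where_aligned = where & ~3ull;
	unsigned int shift = 8 * (where & 3);
	u32 merged = (cur & ~(0xffu << shift)) | ((u32)val << shift);

	/* Keep the forced-to-one (read-only) bits set before writing back. */
	merged |= w1_bits;

	/* High 32 bits carry the data, low bits the aligned config address. */
	return ((u64)merged << 32) | where_aligned;
}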

@@ -0,0 +1,262 @@
/*
* PCIe host controller driver for Marvell Armada-8K SoCs
*
* Armada-8K PCIe Glue Layer Source Code
*
* Copyright (C) 2016 Marvell Technology Group Ltd.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/of_pci.h>
#include <linux/of_irq.h>
#include "pcie-designware.h"
struct armada8k_pcie {
void __iomem *base;
struct clk *clk;
struct pcie_port pp;
};
#define PCIE_VENDOR_REGS_OFFSET 0x8000
#define PCIE_GLOBAL_CONTROL_REG 0x0
#define PCIE_APP_LTSSM_EN BIT(2)
#define PCIE_DEVICE_TYPE_SHIFT 4
#define PCIE_DEVICE_TYPE_MASK 0xF
#define PCIE_DEVICE_TYPE_RC 0x4 /* Root complex */
#define PCIE_GLOBAL_STATUS_REG 0x8
#define PCIE_GLB_STS_RDLH_LINK_UP BIT(1)
#define PCIE_GLB_STS_PHY_LINK_UP BIT(9)
#define PCIE_GLOBAL_INT_CAUSE1_REG 0x1C
#define PCIE_GLOBAL_INT_MASK1_REG 0x20
#define PCIE_INT_A_ASSERT_MASK BIT(9)
#define PCIE_INT_B_ASSERT_MASK BIT(10)
#define PCIE_INT_C_ASSERT_MASK BIT(11)
#define PCIE_INT_D_ASSERT_MASK BIT(12)
#define PCIE_ARCACHE_TRC_REG 0x50
#define PCIE_AWCACHE_TRC_REG 0x54
#define PCIE_ARUSER_REG 0x5C
#define PCIE_AWUSER_REG 0x60
/*
* AR/AW Cache defauls: Normal memory, Write-Back, Read / Write
* allocate
*/
#define ARCACHE_DEFAULT_VALUE 0x3511
#define AWCACHE_DEFAULT_VALUE 0x5311
#define DOMAIN_OUTER_SHAREABLE 0x2
#define AX_USER_DOMAIN_MASK 0x3
#define AX_USER_DOMAIN_SHIFT 4
#define to_armada8k_pcie(x) container_of(x, struct armada8k_pcie, pp)
static int armada8k_pcie_link_up(struct pcie_port *pp)
{
struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
u32 reg;
u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP;
reg = readl(pcie->base + PCIE_GLOBAL_STATUS_REG);
if ((reg & mask) == mask)
return 1;
dev_dbg(pp->dev, "No link detected (Global-Status: 0x%08x).\n", reg);
return 0;
}
static void armada8k_pcie_establish_link(struct pcie_port *pp)
{
struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
void __iomem *base = pcie->base;
u32 reg;
if (!dw_pcie_link_up(pp)) {
/* Disable LTSSM state machine to enable configuration */
reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
reg &= ~(PCIE_APP_LTSSM_EN);
writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
}
/* Set the device to root complex mode */
reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT);
reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT;
writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
/* Set the PCIe master AxCache attributes */
writel(ARCACHE_DEFAULT_VALUE, base + PCIE_ARCACHE_TRC_REG);
writel(AWCACHE_DEFAULT_VALUE, base + PCIE_AWCACHE_TRC_REG);
/* Set the PCIe master AxDomain attributes */
reg = readl(base + PCIE_ARUSER_REG);
reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
writel(reg, base + PCIE_ARUSER_REG);
reg = readl(base + PCIE_AWUSER_REG);
reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
writel(reg, base + PCIE_AWUSER_REG);
/* Enable INT A-D interrupts */
reg = readl(base + PCIE_GLOBAL_INT_MASK1_REG);
reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK |
PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK;
writel(reg, base + PCIE_GLOBAL_INT_MASK1_REG);
if (!dw_pcie_link_up(pp)) {
/* Configuration done. Start LTSSM */
reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
reg |= PCIE_APP_LTSSM_EN;
writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
}
/* Wait until the link becomes active again */
if (dw_pcie_wait_for_link(pp))
dev_err(pp->dev, "Link not up after reconfiguration\n");
}
static void armada8k_pcie_host_init(struct pcie_port *pp)
{
dw_pcie_setup_rc(pp);
armada8k_pcie_establish_link(pp);
}
static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
{
struct pcie_port *pp = arg;
struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
void __iomem *base = pcie->base;
u32 val;
/*
* Interrupts are directly handled by the device driver of the
* PCI device. However, they are also latched into the PCIe
* controller, so we simply discard them.
*/
val = readl(base + PCIE_GLOBAL_INT_CAUSE1_REG);
writel(val, base + PCIE_GLOBAL_INT_CAUSE1_REG);
return IRQ_HANDLED;
}
static struct pcie_host_ops armada8k_pcie_host_ops = {
.link_up = armada8k_pcie_link_up,
.host_init = armada8k_pcie_host_init,
};
static int armada8k_add_pcie_port(struct pcie_port *pp,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int ret;
pp->root_bus_nr = -1;
pp->ops = &armada8k_pcie_host_ops;
pp->irq = platform_get_irq(pdev, 0);
if (!pp->irq) {
dev_err(dev, "failed to get irq for port\n");
return -ENODEV;
}
ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler,
IRQF_SHARED, "armada8k-pcie", pp);
if (ret) {
dev_err(dev, "failed to request irq %d\n", pp->irq);
return ret;
}
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "failed to initialize host: %d\n", ret);
return ret;
}
return 0;
}
static int armada8k_pcie_probe(struct platform_device *pdev)
{
struct armada8k_pcie *pcie;
struct pcie_port *pp;
struct device *dev = &pdev->dev;
struct resource *base;
int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pcie->clk = devm_clk_get(dev, NULL);
if (IS_ERR(pcie->clk))
return PTR_ERR(pcie->clk);
clk_prepare_enable(pcie->clk);
pp = &pcie->pp;
pp->dev = dev;
platform_set_drvdata(pdev, pcie);
/* Get the dw-pcie unit configuration/control registers base. */
base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
pp->dbi_base = devm_ioremap_resource(dev, base);
if (IS_ERR(pp->dbi_base)) {
dev_err(dev, "couldn't remap regs base %p\n", base);
ret = PTR_ERR(pp->dbi_base);
goto fail;
}
pcie->base = pp->dbi_base + PCIE_VENDOR_REGS_OFFSET;
ret = armada8k_add_pcie_port(pp, pdev);
if (ret)
goto fail;
return 0;
fail:
if (!IS_ERR(pcie->clk))
clk_disable_unprepare(pcie->clk);
return ret;
}
static const struct of_device_id armada8k_pcie_of_match[] = {
{ .compatible = "marvell,armada8k-pcie", },
{},
};
MODULE_DEVICE_TABLE(of, armada8k_pcie_of_match);
static struct platform_driver armada8k_pcie_driver = {
.probe = armada8k_pcie_probe,
.driver = {
.name = "armada8k-pcie",
.of_match_table = of_match_ptr(armada8k_pcie_of_match),
},
};
module_platform_driver(armada8k_pcie_driver);
MODULE_DESCRIPTION("Armada 8k PCIe host controller driver");
MODULE_AUTHOR("Yehuda Yitshak <yehuday@marvell.com>");
MODULE_AUTHOR("Shadi Ammouri <shadi@marvell.com>");
MODULE_LICENSE("GPL v2");


@@ -434,7 +434,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
struct platform_device *pdev = to_platform_device(pp->dev);
struct pci_bus *bus, *child;
struct resource *cfg_res;
u32 val;
int i, ret;
LIST_HEAD(res);
struct resource_entry *win;
@@ -544,25 +543,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (pp->ops->host_init)
pp->ops->host_init(pp);
/*
* If the platform provides ->rd_other_conf, it means the platform
* uses its own address translation component rather than ATU, so
* we should not program the ATU here.
*/
if (!pp->ops->rd_other_conf)
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
PCIE_ATU_TYPE_MEM, pp->mem_base,
pp->mem_bus_addr, pp->mem_size);
dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
/* program correct class for RC */
dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
val |= PORT_LOGIC_SPEED_CHANGE;
dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
pp->root_bus_nr = pp->busn->start;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
bus = pci_scan_root_bus_msi(pp->dev, pp->root_bus_nr,
@@ -728,8 +708,6 @@ static struct pci_ops dw_pcie_ops = {
void dw_pcie_setup_rc(struct pcie_port *pp)
{
u32 val;
u32 membase;
u32 memlimit;
/* set the number of lanes */
dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val);
@@ -788,18 +766,31 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
val |= 0x00010100;
dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);
/* setup memory base, memory limit */
membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
memlimit = (pp->mem_size + (u32)pp->mem_base) & 0xfff00000;
val = memlimit | membase;
dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE);
/* setup command register */
dw_pcie_readl_rc(pp, PCI_COMMAND, &val);
val &= 0xffff0000;
val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
dw_pcie_writel_rc(pp, val, PCI_COMMAND);
/*
* If the platform provides ->rd_other_conf, it means the platform
* uses its own address translation component rather than ATU, so
* we should not program the ATU here.
*/
if (!pp->ops->rd_other_conf)
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
PCIE_ATU_TYPE_MEM, pp->mem_base,
pp->mem_bus_addr, pp->mem_size);
dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
/* program correct class for RC */
dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
val |= PORT_LOGIC_SPEED_CHANGE;
dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}
MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");

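With the class-code fixup and the default outbound ATU window now programmed inside dw_pcie_setup_rc(), a DesignWare-based host driver's ->host_init() hook reduces to the pattern used by the dra7xx and armada8k glue in this series. A minimal sketch with hypothetical my_pcie_* names:

#include <linux/kernel.h>

#include "pcie-designware.h"

static void my_pcie_establish_link(struct pcie_port *pp)
{
	/* SoC-specific link training would go here (e.g. an LTSSM enable). */
}

static void my_pcie_host_init(struct pcie_port *pp)
{
	/* Lanes, bus numbers, command register, RC class code and the
	 * default outbound ATU window are all set up here now. */
	dw_pcie_setup_rc(pp);

	my_pcie_establish_link(pp);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);
}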

@@ -67,17 +67,14 @@ static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {}
#endif /* !CONFIG_PCIE_PME */
#ifdef CONFIG_ACPI
int pcie_port_acpi_setup(struct pci_dev *port, int *mask);
void pcie_port_acpi_setup(struct pci_dev *port, int *mask);
static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
static inline void pcie_port_platform_notify(struct pci_dev *port, int *mask)
{
return pcie_port_acpi_setup(port, mask);
pcie_port_acpi_setup(port, mask);
}
#else /* !CONFIG_ACPI */
static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
{
return 0;
}
static inline void pcie_port_platform_notify(struct pci_dev *port, int *mask){}
#endif /* !CONFIG_ACPI */
#endif /* _PORTDRV_H_ */


@@ -32,22 +32,22 @@
* NOTE: It turns out that we cannot do that for individual port services
* separately, because that would make some systems work incorrectly.
*/
int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
void pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
{
struct acpi_pci_root *root;
acpi_handle handle;
u32 flags;
if (acpi_pci_disabled)
return 0;
return;
handle = acpi_find_root_bridge_handle(port);
if (!handle)
return -EINVAL;
return;
root = acpi_pci_find_root(handle);
if (!root)
return -ENODEV;
return;
flags = root->osc_control_set;
@@ -58,6 +58,4 @@ int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
*srv_mask |= PCIE_PORT_SERVICE_PME;
if (flags & OSC_PCI_EXPRESS_AER_CONTROL)
*srv_mask |= PCIE_PORT_SERVICE_AER;
return 0;
}


@@ -256,7 +256,6 @@ static int get_port_device_capability(struct pci_dev *dev)
int services = 0;
u32 reg32;
int cap_mask = 0;
int err;
if (pcie_ports_disabled)
return 0;
@@ -266,11 +265,8 @@ static int get_port_device_capability(struct pci_dev *dev)
if (pci_aer_available())
cap_mask |= PCIE_PORT_SERVICE_AER;
if (pcie_ports_auto) {
err = pcie_port_platform_notify(dev, &cap_mask);
if (err)
return 0;
}
if (pcie_ports_auto)
pcie_port_platform_notify(dev, &cap_mask);
/* Hot-Plug Capable */
if ((cap_mask & PCIE_PORT_SERVICE_HP) &&


@@ -95,6 +95,7 @@
#define IMX6Q_GPR0_DMAREQ_MUX_SEL0_IOMUX BIT(0)
#define IMX6Q_GPR1_PCIE_REQ_MASK (0x3 << 30)
#define IMX6Q_GPR1_PCIE_SW_RST BIT(29)
#define IMX6Q_GPR1_PCIE_EXIT_L1 BIT(28)
#define IMX6Q_GPR1_PCIE_RDY_L23 BIT(27)
#define IMX6Q_GPR1_PCIE_ENTER_L1 BIT(26)