Diffstat (limited to 'drivers/pci/controller/dwc')
20 files changed, 2255 insertions, 436 deletions
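The pci-dra7xx.c and pci-layerscape.c hunks below replace the two-step syscon lookup (syscon_regmap_lookup_by_phandle() followed by of_parse_phandle_with_fixed_args() or of_property_read_u32_array()) with the combined syscon_regmap_lookup_by_phandle_args() helper, which resolves the phandle and returns its argument cells in one call. A minimal consumer sketch of that pattern follows; the "vendor,example-syscon" property and its two-cell layout (register offset, bit mask) are illustrative assumptions, not taken from the patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static int example_syscon_update(struct device *dev)
{
	/* Assumed cell layout: args[0] = register offset, args[1] = bit mask */
	unsigned int args[2];
	struct regmap *regmap;

	/* Resolve the phandle and read back its two argument cells in one call. */
	regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node,
						      "vendor,example-syscon",
						      2, args);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* No of_node_put() is needed here; the helper drops its node reference. */
	return regmap_update_bits(regmap, args[0], args[1], args[1]);
}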
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index b6d6778b0698..d9f0386396ed 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -6,6 +6,16 @@ menu "DesignWare-based PCIe controllers" config PCIE_DW bool +config PCIE_DW_DEBUGFS + bool "DesignWare PCIe debugfs entries" + depends on DEBUG_FS + depends on PCIE_DW_HOST || PCIE_DW_EP + help + Say Y here to enable debugfs entries for the PCIe controller. These + entries provide various debug features related to the controller and + expose the RAS DES capabilities such as Silicon Debug, Error Injection + and Statistical Counters. + config PCIE_DW_HOST bool select PCIE_DW @@ -27,6 +37,17 @@ config PCIE_AL required only for DT-based platforms. ACPI platforms with the Annapurna Labs PCIe controller don't need to enable this. +config PCIE_AMD_MDB + bool "AMD MDB Versal2 PCIe controller" + depends on OF && (ARM64 || COMPILE_TEST) + depends on PCI_MSI + select PCIE_DW_HOST + help + Say Y here if you want to enable PCIe controller support on AMD + Versal2 SoCs. The AMD MDB Versal2 PCIe controller is based on + DesignWare IP and therefore the driver re-uses the DesignWare + core functions to implement the driver. + config PCI_MESON tristate "Amlogic Meson PCIe controller" default m if ARCH_MESON diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile index a8308d9ea986..908cb7f345db 100644 --- a/drivers/pci/controller/dwc/Makefile +++ b/drivers/pci/controller/dwc/Makefile @@ -1,8 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_PCIE_DW) += pcie-designware.o +obj-$(CONFIG_PCIE_DW_DEBUGFS) += pcie-designware-debugfs.o obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o +obj-$(CONFIG_PCIE_AMD_MDB) += pcie-amd-mdb.o obj-$(CONFIG_PCIE_BT1) += pcie-bt1.o obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index 5c62e1a3ba52..33d6bf460ffe 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -635,30 +635,20 @@ static int dra7xx_pcie_unaligned_memaccess(struct device *dev) { int ret; struct device_node *np = dev->of_node; - struct of_phandle_args args; + unsigned int args[2]; struct regmap *regmap; - regmap = syscon_regmap_lookup_by_phandle(np, - "ti,syscon-unaligned-access"); + regmap = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-unaligned-access", + 2, args); if (IS_ERR(regmap)) { dev_dbg(dev, "can't get ti,syscon-unaligned-access\n"); return -EINVAL; } - ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access", - 2, 0, &args); - if (ret) { - dev_err(dev, "failed to parse ti,syscon-unaligned-access\n"); - return ret; - } - - ret = regmap_update_bits(regmap, args.args[0], args.args[1], - args.args[1]); + ret = regmap_update_bits(regmap, args[0], args[1], args[1]); if (ret) dev_err(dev, "failed to enable unaligned access\n"); - of_node_put(args.np); - return ret; } @@ -671,18 +661,13 @@ static int dra7xx_pcie_configure_two_lane(struct device *dev, u32 mask; u32 val; - pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel"); + pcie_syscon = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-lane-sel", + 1, &pcie_reg); if (IS_ERR(pcie_syscon)) { dev_err(dev, "unable to get ti,syscon-lane-sel\n"); return -EINVAL; } - if 
(of_property_read_u32_index(np, "ti,syscon-lane-sel", 1, - &pcie_reg)) { - dev_err(dev, "couldn't get lane selection reg offset\n"); - return -EINVAL; - } - mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN; val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN; regmap_update_bits(pcie_syscon, pcie_reg, mask, val); diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c index 6a830166d37f..ace736b025b1 100644 --- a/drivers/pci/controller/dwc/pci-exynos.c +++ b/drivers/pci/controller/dwc/pci-exynos.c @@ -300,7 +300,7 @@ static int exynos_pcie_probe(struct platform_device *pdev) if (IS_ERR(ep->elbi_base)) return PTR_ERR(ep->elbi_base); - ret = devm_clk_bulk_get_all_enable(dev, &ep->clks); + ret = devm_clk_bulk_get_all_enabled(dev, &ep->clks); if (ret < 0) return ret; diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index c8d5c90aa4d4..5f267dd261b5 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -33,6 +33,7 @@ #include <linux/pm_domain.h> #include <linux/pm_runtime.h> +#include "../../pci.h" #include "pcie-designware.h" #define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9) @@ -40,7 +41,6 @@ #define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11) #define IMX8MQ_GPR_PCIE_VREG_BYPASS BIT(12) #define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8) -#define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000 #define IMX95_PCIE_PHY_GEN_CTRL 0x0 #define IMX95_PCIE_REF_USE_PAD BIT(17) @@ -55,6 +55,22 @@ #define IMX95_PE0_GEN_CTRL_3 0x1058 #define IMX95_PCIE_LTSSM_EN BIT(0) +#define IMX95_PE0_LUT_ACSCTRL 0x1008 +#define IMX95_PEO_LUT_RWA BIT(16) +#define IMX95_PE0_LUT_ENLOC GENMASK(4, 0) + +#define IMX95_PE0_LUT_DATA1 0x100c +#define IMX95_PE0_LUT_VLD BIT(31) +#define IMX95_PE0_LUT_DAC_ID GENMASK(10, 8) +#define IMX95_PE0_LUT_STREAM_ID GENMASK(5, 0) + +#define IMX95_PE0_LUT_DATA2 0x1010 +#define IMX95_PE0_LUT_REQID GENMASK(31, 16) +#define IMX95_PE0_LUT_MASK GENMASK(15, 0) + +#define IMX95_SID_MASK GENMASK(5, 0) +#define IMX95_MAX_LUT 32 + #define to_imx_pcie(x) dev_get_drvdata((x)->dev) enum imx_pcie_variants { @@ -70,6 +86,7 @@ enum imx_pcie_variants { IMX8MQ_EP, IMX8MM_EP, IMX8MP_EP, + IMX8Q_EP, IMX95_EP, }; @@ -87,10 +104,10 @@ enum imx_pcie_variants { * workaround suspend resume on some devices which are affected by this errata. 
*/ #define IMX_PCIE_FLAG_BROKEN_SUSPEND BIT(9) +#define IMX_PCIE_FLAG_HAS_LUT BIT(10) #define imx_check_flag(pci, val) (pci->drvdata->flags & val) -#define IMX_PCIE_MAX_CLKS 6 #define IMX_PCIE_MAX_INSTANCES 2 struct imx_pcie; @@ -101,8 +118,6 @@ struct imx_pcie_drvdata { u32 flags; int dbi_length; const char *gpr; - const char * const *clk_names; - const u32 clks_cnt; const u32 ltssm_off; const u32 ltssm_mask; const u32 mode_off[IMX_PCIE_MAX_INSTANCES]; @@ -111,19 +126,19 @@ struct imx_pcie_drvdata { int (*init_phy)(struct imx_pcie *pcie); int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable); int (*core_reset)(struct imx_pcie *pcie, bool assert); + const struct dw_pcie_host_ops *ops; }; struct imx_pcie { struct dw_pcie *pci; struct gpio_desc *reset_gpiod; - bool link_is_up; - struct clk_bulk_data clks[IMX_PCIE_MAX_CLKS]; + struct clk_bulk_data *clks; + int num_clks; struct regmap *iomuxc_gpr; u16 msi_ctrl; u32 controller_id; struct reset_control *pciephy_reset; struct reset_control *apps_reset; - struct reset_control *turnoff_reset; u32 tx_deemph_gen1; u32 tx_deemph_gen2_3p5db; u32 tx_deemph_gen2_6db; @@ -139,6 +154,9 @@ struct imx_pcie { struct device *pd_pcie_phy; struct phy *phy; const struct imx_pcie_drvdata *drvdata; + + /* Ensure that only one device's LUT is configured at any given time */ + struct mutex lock; }; /* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ @@ -234,11 +252,11 @@ static void imx_pcie_configure_type(struct imx_pcie *imx_pcie) id = imx_pcie->controller_id; - /* If mode_mask is 0, then generic PHY driver is used to set the mode */ + /* If mode_mask is 0, generic PHY driver is used to set the mode */ if (!drvdata->mode_mask[0]) return; - /* If mode_mask[id] is zero, means each controller have its individual gpr */ + /* If mode_mask[id] is 0, each controller has its individual GPR */ if (!drvdata->mode_mask[id]) id = 0; @@ -375,14 +393,15 @@ static int pcie_phy_write(struct imx_pcie *imx_pcie, int addr, u16 data) static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie) { - /* TODO: Currently this code assumes external oscillator is being used */ + /* TODO: This code assumes external oscillator is being used */ regmap_update_bits(imx_pcie->iomuxc_gpr, imx_pcie_grp_offset(imx_pcie), IMX8MQ_GPR_PCIE_REF_USE_PAD, IMX8MQ_GPR_PCIE_REF_USE_PAD); /* - * Regarding the datasheet, the PCIE_VPH is suggested to be 1.8V. If the PCIE_VPH is - * supplied by 3.3V, the VREG_BYPASS should be cleared to zero. + * Per the datasheet, the PCIE_VPH is suggested to be 1.8V. If the + * PCIE_VPH is supplied by 3.3V, the VREG_BYPASS should be cleared + * to zero. 
*/ if (imx_pcie->vph && regulator_get_voltage(imx_pcie->vph) > 3000000) regmap_update_bits(imx_pcie->iomuxc_gpr, @@ -393,13 +412,6 @@ static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie) return 0; } -static int imx7d_pcie_init_phy(struct imx_pcie *imx_pcie) -{ - regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0); - - return 0; -} - static int imx_pcie_init_phy(struct imx_pcie *imx_pcie) { regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, @@ -454,13 +466,14 @@ static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie) int mult, div; u16 val; int i; + struct clk_bulk_data *clks = imx_pcie->clks; if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY)) return 0; - for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++) - if (strncmp(imx_pcie->clks[i].id, "pcie_phy", 8) == 0) - phy_rate = clk_get_rate(imx_pcie->clks[i].clk); + for (i = 0; i < imx_pcie->num_clks; i++) + if (strncmp(clks[i].id, "pcie_phy", 8) == 0) + phy_rate = clk_get_rate(clks[i].clk); switch (phy_rate) { case 125000000: @@ -576,7 +589,7 @@ static int imx_pcie_attach_pd(struct device *dev) DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); if (!link) { - dev_err(dev, "Failed to add device_link to pcie pd.\n"); + dev_err(dev, "Failed to add device_link to pcie pd\n"); return -EINVAL; } @@ -589,7 +602,7 @@ static int imx_pcie_attach_pd(struct device *dev) DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); if (!link) { - dev_err(dev, "Failed to add device_link to pcie_phy pd.\n"); + dev_err(dev, "Failed to add device_link to pcie_phy pd\n"); return -EINVAL; } @@ -598,10 +611,9 @@ static int imx_pcie_attach_pd(struct device *dev) static int imx6sx_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) { - if (enable) - regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX6SX_GPR12_PCIE_TEST_POWERDOWN); - + regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6SX_GPR12_PCIE_TEST_POWERDOWN, + enable ? 0 : IMX6SX_GPR12_PCIE_TEST_POWERDOWN); return 0; } @@ -611,10 +623,10 @@ static int imx6q_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) /* power up core phy and enable ref clock */ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD); /* - * the async reset input need ref clock to sync internally, + * The async reset input need ref clock to sync internally, * when the ref clock comes after reset, internal synced * reset time is too short, cannot meet the requirement. - * add one ~10us delay here. + * Add a ~10us delay here. */ usleep_range(10, 100); regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN); @@ -630,19 +642,20 @@ static int imx8mm_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) { int offset = imx_pcie_grp_offset(imx_pcie); - if (enable) { - regmap_clear_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE); - regmap_set_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN); - } - + regmap_update_bits(imx_pcie->iomuxc_gpr, offset, + IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE, + enable ? 0 : IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE); + regmap_update_bits(imx_pcie->iomuxc_gpr, offset, + IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN, + enable ? IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN : 0); return 0; } static int imx7d_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) { - if (!enable) - regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX7D_GPR12_PCIE_PHY_REFCLK_SEL); + regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, + enable ? 
0 : IMX7D_GPR12_PCIE_PHY_REFCLK_SEL); return 0; } @@ -652,7 +665,7 @@ static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie) struct device *dev = pci->dev; int ret; - ret = clk_bulk_prepare_enable(imx_pcie->drvdata->clks_cnt, imx_pcie->clks); + ret = clk_bulk_prepare_enable(imx_pcie->num_clks, imx_pcie->clks); if (ret) return ret; @@ -669,7 +682,7 @@ static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie) return 0; err_ref_clk: - clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks); + clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks); return ret; } @@ -678,7 +691,7 @@ static void imx_pcie_clk_disable(struct imx_pcie *imx_pcie) { if (imx_pcie->drvdata->enable_ref_clk) imx_pcie->drvdata->enable_ref_clk(imx_pcie, false); - clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks); + clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks); } static int imx6sx_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert) @@ -775,6 +788,7 @@ static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie) static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie) { reset_control_deassert(imx_pcie->pciephy_reset); + reset_control_deassert(imx_pcie->apps_reset); if (imx_pcie->drvdata->core_reset) imx_pcie->drvdata->core_reset(imx_pcie, false); @@ -884,6 +898,7 @@ static int imx_pcie_start_link(struct dw_pcie *pci) if (imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_SPEED_CHANGE) { + /* * On i.MX7, DIRECT_SPEED_CHANGE behaves differently * from i.MX6 family when no link speed transition @@ -892,7 +907,6 @@ static int imx_pcie_start_link(struct dw_pcie *pci) * which will cause the following code to report false * failure. */ - ret = imx_pcie_wait_for_speed_change(imx_pcie); if (ret) { dev_err(dev, "Failed to bring link up!\n"); @@ -908,13 +922,11 @@ static int imx_pcie_start_link(struct dw_pcie *pci) dev_info(dev, "Link: Only Gen1 is enabled\n"); } - imx_pcie->link_is_up = true; tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA); dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS); return 0; err_reset_phy: - imx_pcie->link_is_up = false; dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n", dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0), dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1)); @@ -930,6 +942,184 @@ static void imx_pcie_stop_link(struct dw_pcie *pci) imx_pcie_ltssm_disable(dev); } +static int imx_pcie_add_lut(struct imx_pcie *imx_pcie, u16 rid, u8 sid) +{ + struct dw_pcie *pci = imx_pcie->pci; + struct device *dev = pci->dev; + u32 data1, data2; + int free = -1; + int i; + + if (sid >= 64) { + dev_err(dev, "Invalid SID for index %d\n", sid); + return -EINVAL; + } + + guard(mutex)(&imx_pcie->lock); + + /* + * Iterate through all LUT entries to check for duplicate RID and + * identify the first available entry. Configure this available entry + * immediately after verification to avoid rescanning it. 
+ */ + for (i = 0; i < IMX95_MAX_LUT; i++) { + regmap_write(imx_pcie->iomuxc_gpr, + IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i); + regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1); + + if (!(data1 & IMX95_PE0_LUT_VLD)) { + if (free < 0) + free = i; + continue; + } + + regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2); + + /* Do not add duplicate RID */ + if (rid == FIELD_GET(IMX95_PE0_LUT_REQID, data2)) { + dev_warn(dev, "Existing LUT entry available for RID (%d)", rid); + return 0; + } + } + + if (free < 0) { + dev_err(dev, "LUT entry is not available\n"); + return -ENOSPC; + } + + data1 = FIELD_PREP(IMX95_PE0_LUT_DAC_ID, 0); + data1 |= FIELD_PREP(IMX95_PE0_LUT_STREAM_ID, sid); + data1 |= IMX95_PE0_LUT_VLD; + regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, data1); + + data2 = IMX95_PE0_LUT_MASK; /* Match all bits of RID */ + data2 |= FIELD_PREP(IMX95_PE0_LUT_REQID, rid); + regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, data2); + + regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, free); + + return 0; +} + +static void imx_pcie_remove_lut(struct imx_pcie *imx_pcie, u16 rid) +{ + u32 data2; + int i; + + guard(mutex)(&imx_pcie->lock); + + for (i = 0; i < IMX95_MAX_LUT; i++) { + regmap_write(imx_pcie->iomuxc_gpr, + IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i); + regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2); + if (FIELD_GET(IMX95_PE0_LUT_REQID, data2) == rid) { + regmap_write(imx_pcie->iomuxc_gpr, + IMX95_PE0_LUT_DATA1, 0); + regmap_write(imx_pcie->iomuxc_gpr, + IMX95_PE0_LUT_DATA2, 0); + regmap_write(imx_pcie->iomuxc_gpr, + IMX95_PE0_LUT_ACSCTRL, i); + + break; + } + } +} + +static int imx_pcie_enable_device(struct pci_host_bridge *bridge, + struct pci_dev *pdev) +{ + struct imx_pcie *imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata)); + u32 sid_i, sid_m, rid = pci_dev_id(pdev); + struct device_node *target; + struct device *dev; + int err_i, err_m; + u32 sid = 0; + + dev = imx_pcie->pci->dev; + + target = NULL; + err_i = of_map_id(dev->of_node, rid, "iommu-map", "iommu-map-mask", + &target, &sid_i); + if (target) { + of_node_put(target); + } else { + /* + * "target == NULL && err_i == 0" means RID out of map range. + * Use 1:1 map RID to streamID. Hardware can't support this + * because the streamID is only 6 bits + */ + err_i = -EINVAL; + } + + target = NULL; + err_m = of_map_id(dev->of_node, rid, "msi-map", "msi-map-mask", + &target, &sid_m); + + /* + * err_m target + * 0 NULL RID out of range. Use 1:1 map RID to + * streamID, Current hardware can't + * support it, so return -EINVAL. + * != 0 NULL msi-map does not exist, use built-in MSI + * 0 != NULL Get correct streamID from RID + * != 0 != NULL Invalid combination + */ + if (!err_m && !target) + return -EINVAL; + else if (target) + of_node_put(target); /* Find streamID map entry for RID in msi-map */ + + /* + * msi-map iommu-map + * N N DWC MSI Ctrl + * Y Y ITS + SMMU, require the same SID + * Y N ITS + * N Y DWC MSI Ctrl + SMMU + */ + if (err_i && err_m) + return 0; + + if (!err_i && !err_m) { + /* + * Glue Layer + * <==========> + * ┌─────┐ ┌──────────┐ + * │ LUT │ 6-bit streamID │ │ + * │ │─────────────────►│ MSI │ + * └─────┘ 2-bit ctrl ID │ │ + * ┌───────────►│ │ + * (i.MX95) │ │ │ + * 00 PCIe0 │ │ │ + * 01 ENETC │ │ │ + * 10 PCIe1 │ │ │ + * │ └──────────┘ + * The MSI glue layer auto adds 2 bits controller ID ahead of + * streamID, so mask these 2 bits to get streamID. The + * IOMMU glue layer doesn't do that. 
+ */ + if (sid_i != (sid_m & IMX95_SID_MASK)) { + dev_err(dev, "iommu-map and msi-map entries mismatch!\n"); + return -EINVAL; + } + } + + if (!err_i) + sid = sid_i; + else if (!err_m) + sid = sid_m & IMX95_SID_MASK; + + return imx_pcie_add_lut(imx_pcie, rid, sid); +} + +static void imx_pcie_disable_device(struct pci_host_bridge *bridge, + struct pci_dev *pdev) +{ + struct imx_pcie *imx_pcie; + + imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata)); + imx_pcie_remove_lut(imx_pcie, pci_dev_id(pdev)); +} + static int imx_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); @@ -946,6 +1136,11 @@ static int imx_pcie_host_init(struct dw_pcie_rp *pp) } } + if (pp->bridge && imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT)) { + pp->bridge->enable_device = imx_pcie_enable_device; + pp->bridge->disable_device = imx_pcie_disable_device; + } + imx_pcie_assert_core_reset(imx_pcie); if (imx_pcie->drvdata->init_phy) @@ -966,7 +1161,9 @@ static int imx_pcie_host_init(struct dw_pcie_rp *pp) goto err_clk_disable; } - ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC); + ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE, + imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE ? + PHY_MODE_PCIE_EP : PHY_MODE_PCIE_RC); if (ret) { dev_err(dev, "unable to set PCIe PHY mode\n"); goto err_phy_exit; @@ -1017,31 +1214,36 @@ static void imx_pcie_host_exit(struct dw_pcie_rp *pp) regulator_disable(imx_pcie->vpcie); } -static u64 imx_pcie_cpu_addr_fixup(struct dw_pcie *pcie, u64 cpu_addr) +/* + * In old DWC implementations, PCIE_ATU_INHIBIT_PAYLOAD in iATU Ctrl2 + * register is reserved, so the generic DWC implementation of sending the + * PME_Turn_Off message using a dummy MMIO write cannot be used. + */ +static void imx_pcie_pme_turn_off(struct dw_pcie_rp *pp) { - struct imx_pcie *imx_pcie = to_imx_pcie(pcie); - struct dw_pcie_rp *pp = &pcie->pp; - struct resource_entry *entry; - - if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_CPU_ADDR_FIXUP)) - return cpu_addr; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct imx_pcie *imx_pcie = to_imx_pcie(pci); - entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM); - if (!entry) - return cpu_addr; + regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF); + regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF); - return cpu_addr - entry->offset; + usleep_range(PCIE_PME_TO_L2_TIMEOUT_US/10, PCIE_PME_TO_L2_TIMEOUT_US); } static const struct dw_pcie_host_ops imx_pcie_host_ops = { .init = imx_pcie_host_init, .deinit = imx_pcie_host_exit, + .pme_turn_off = imx_pcie_pme_turn_off, +}; + +static const struct dw_pcie_host_ops imx_pcie_host_dw_pme_ops = { + .init = imx_pcie_host_init, + .deinit = imx_pcie_host_exit, }; static const struct dw_pcie_ops dw_pcie_ops = { .start_link = imx_pcie_start_link, .stop_link = imx_pcie_stop_link, - .cpu_addr_fixup = imx_pcie_cpu_addr_fixup, }; static void imx_pcie_ep_init(struct dw_pcie_ep *ep) @@ -1082,16 +1284,27 @@ static const struct pci_epc_features imx8m_pcie_epc_features = { .align = SZ_64K, }; +static const struct pci_epc_features imx8q_pcie_epc_features = { + .linkup_notifier = false, + .msi_capable = true, + .msix_capable = false, + .bar[BAR_1] = { .type = BAR_RESERVED, }, + .bar[BAR_3] = { .type = BAR_RESERVED, }, + .bar[BAR_5] = { .type = BAR_RESERVED, }, + .align = SZ_64K, +}; + /* - * BAR# | Default BAR enable | Default BAR Type | Default BAR Size | BAR Sizing Scheme - * 
================================================================================================ - * BAR0 | Enable | 64-bit | 1 MB | Programmable Size - * BAR1 | Disable | 32-bit | 64 KB | Fixed Size - * BAR1 should be disabled if BAR0 is 64bit. - * BAR2 | Enable | 32-bit | 1 MB | Programmable Size - * BAR3 | Enable | 32-bit | 64 KB | Programmable Size - * BAR4 | Enable | 32-bit | 1M | Programmable Size - * BAR5 | Enable | 32-bit | 64 KB | Programmable Size + * | Default | Default | Default | BAR Sizing + * BAR# | Enable? | Type | Size | Scheme + * ======================================================= + * BAR0 | Enable | 64-bit | 1 MB | Programmable Size + * BAR1 | Disable | 32-bit | 64 KB | Fixed Size + * (BAR1 should be disabled if BAR0 is 64-bit) + * BAR2 | Enable | 32-bit | 1 MB | Programmable Size + * BAR3 | Enable | 32-bit | 64 KB | Programmable Size + * BAR4 | Enable | 32-bit | 1 MB | Programmable Size + * BAR5 | Enable | 32-bit | 64 KB | Programmable Size */ static const struct pci_epc_features imx95_pcie_epc_features = { .msi_capable = true, @@ -1118,7 +1331,6 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie, struct platform_device *pdev) { int ret; - unsigned int pcie_dbi2_offset; struct dw_pcie_ep *ep; struct dw_pcie *pci = imx_pcie->pci; struct dw_pcie_rp *pp = &pci->pp; @@ -1128,28 +1340,6 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie, ep = &pci->ep; ep->ops = &pcie_ep_ops; - switch (imx_pcie->drvdata->variant) { - case IMX8MQ_EP: - case IMX8MM_EP: - case IMX8MP_EP: - pcie_dbi2_offset = SZ_1M; - break; - default: - pcie_dbi2_offset = SZ_4K; - break; - } - - pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset; - - /* - * FIXME: Ideally, dbi2 base address should come from DT. But since only IMX95 is defining - * "dbi2" in DT, "dbi_base2" is set to NULL here for that platform alone so that the DWC - * core code can fetch that from DT. But once all platform DTs were fixed, this and the - * above "dbi_base2" setting should be removed. - */ - if (device_property_match_string(dev, "reg-names", "dbi2") >= 0) - pci->dbi_base2 = NULL; - if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_SUPPORT_64BIT)) dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); @@ -1176,43 +1366,6 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie, return 0; } -static void imx_pcie_pm_turnoff(struct imx_pcie *imx_pcie) -{ - struct device *dev = imx_pcie->pci->dev; - - /* Some variants have a turnoff reset in DT */ - if (imx_pcie->turnoff_reset) { - reset_control_assert(imx_pcie->turnoff_reset); - reset_control_deassert(imx_pcie->turnoff_reset); - goto pm_turnoff_sleep; - } - - /* Others poke directly at IOMUXC registers */ - switch (imx_pcie->drvdata->variant) { - case IMX6SX: - case IMX6QP: - regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX6SX_GPR12_PCIE_PM_TURN_OFF, - IMX6SX_GPR12_PCIE_PM_TURN_OFF); - regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0); - break; - default: - dev_err(dev, "PME_Turn_Off not implemented\n"); - return; - } - - /* - * Components with an upstream port must respond to - * PME_Turn_Off with PME_TO_Ack but we can't check. - * - * The standard recommends a 1-10ms timeout after which to - * proceed anyway as if acks were received. 
- */ -pm_turnoff_sleep: - usleep_range(1000, 10000); -} - static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save) { u8 offset; @@ -1236,7 +1389,6 @@ static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save) static int imx_pcie_suspend_noirq(struct device *dev) { struct imx_pcie *imx_pcie = dev_get_drvdata(dev); - struct dw_pcie_rp *pp = &imx_pcie->pci->pp; if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND)) return 0; @@ -1251,9 +1403,7 @@ static int imx_pcie_suspend_noirq(struct device *dev) imx_pcie_assert_core_reset(imx_pcie); imx_pcie->drvdata->enable_ref_clk(imx_pcie, false); } else { - imx_pcie_pm_turnoff(imx_pcie); - imx_pcie_stop_link(imx_pcie->pci); - imx_pcie_host_exit(pp); + return dw_pcie_suspend_noirq(imx_pcie->pci); } return 0; @@ -1263,7 +1413,6 @@ static int imx_pcie_resume_noirq(struct device *dev) { int ret; struct imx_pcie *imx_pcie = dev_get_drvdata(dev); - struct dw_pcie_rp *pp = &imx_pcie->pci->pp; if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND)) return 0; @@ -1275,6 +1424,7 @@ static int imx_pcie_resume_noirq(struct device *dev) ret = imx_pcie_deassert_core_reset(imx_pcie); if (ret) return ret; + /* * Using PCIE_TEST_PD seems to disable MSI and powers down the * root complex. This is why we have to setup the rc again and @@ -1283,17 +1433,12 @@ static int imx_pcie_resume_noirq(struct device *dev) ret = dw_pcie_setup_rc(&imx_pcie->pci->pp); if (ret) return ret; - imx_pcie_msi_save_restore(imx_pcie, false); } else { - ret = imx_pcie_host_init(pp); + ret = dw_pcie_resume_noirq(imx_pcie->pci); if (ret) return ret; - imx_pcie_msi_save_restore(imx_pcie, false); - dw_pcie_setup_rc(pp); - - if (imx_pcie->link_is_up) - imx_pcie_start_link(imx_pcie->pci); } + imx_pcie_msi_save_restore(imx_pcie, false); return 0; } @@ -1309,11 +1454,9 @@ static int imx_pcie_probe(struct platform_device *pdev) struct dw_pcie *pci; struct imx_pcie *imx_pcie; struct device_node *np; - struct resource *dbi_base; struct device_node *node = dev->of_node; - int ret; + int ret, domain; u16 val; - int i; imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL); if (!imx_pcie) @@ -1325,11 +1468,17 @@ static int imx_pcie_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = &dw_pcie_ops; - pci->pp.ops = &imx_pcie_host_ops; imx_pcie->pci = pci; imx_pcie->drvdata = of_device_get_match_data(dev); + mutex_init(&imx_pcie->lock); + + if (imx_pcie->drvdata->ops) + pci->pp.ops = imx_pcie->drvdata->ops; + else + pci->pp.ops = &imx_pcie_host_dw_pme_ops; + /* Find the PHY if one is defined, only imx7d uses it */ np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0); if (np) { @@ -1345,10 +1494,6 @@ static int imx_pcie_probe(struct platform_device *pdev) return PTR_ERR(imx_pcie->phy_base); } - pci->dbi_base = devm_platform_get_and_ioremap_resource(pdev, 0, &dbi_base); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); - /* Fetch GPIOs */ imx_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(imx_pcie->reset_gpiod)) @@ -1356,16 +1501,11 @@ static int imx_pcie_probe(struct platform_device *pdev) "unable to get reset gpio\n"); gpiod_set_consumer_name(imx_pcie->reset_gpiod, "PCIe reset"); - if (imx_pcie->drvdata->clks_cnt >= IMX_PCIE_MAX_CLKS) - return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n"); - - for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++) - imx_pcie->clks[i].id = imx_pcie->drvdata->clk_names[i]; - /* Fetch clocks */ - ret = devm_clk_bulk_get(dev, imx_pcie->drvdata->clks_cnt, 
imx_pcie->clks); - if (ret) - return ret; + imx_pcie->num_clks = devm_clk_bulk_get_all(dev, &imx_pcie->clks); + if (imx_pcie->num_clks < 0) + return dev_err_probe(dev, imx_pcie->num_clks, + "failed to get clocks\n"); if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) { imx_pcie->phy = devm_phy_get(dev, "pcie-phy"); @@ -1391,21 +1531,16 @@ static int imx_pcie_probe(struct platform_device *pdev) switch (imx_pcie->drvdata->variant) { case IMX8MQ: case IMX8MQ_EP: - case IMX7D: - if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR) - imx_pcie->controller_id = 1; + domain = of_get_pci_domain_nr(node); + if (domain < 0 || domain > 1) + return dev_err_probe(dev, -ENODEV, "no \"linux,pci-domain\" property in devicetree\n"); + + imx_pcie->controller_id = domain; break; default: break; } - /* Grab turnoff reset */ - imx_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff"); - if (IS_ERR(imx_pcie->turnoff_reset)) { - dev_err(dev, "Failed to get TURNOFF reset control\n"); - return PTR_ERR(imx_pcie->turnoff_reset); - } - if (imx_pcie->drvdata->gpr) { /* Grab GPR config register range */ imx_pcie->iomuxc_gpr = @@ -1479,11 +1614,13 @@ static int imx_pcie_probe(struct platform_device *pdev) if (ret) return ret; + pci->use_parent_dt_ranges = true; if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) { ret = imx_add_pcie_ep(imx_pcie, pdev); if (ret < 0) return ret; } else { + pci->pp.use_atu_msg = true; ret = dw_pcie_host_init(&pci->pp); if (ret < 0) return ret; @@ -1508,12 +1645,6 @@ static void imx_pcie_shutdown(struct platform_device *pdev) imx_pcie_assert_core_reset(imx_pcie); } -static const char * const imx6q_clks[] = {"pcie_bus", "pcie", "pcie_phy"}; -static const char * const imx8mm_clks[] = {"pcie_bus", "pcie", "pcie_aux"}; -static const char * const imx8mq_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux"}; -static const char * const imx6sx_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_inbound_axi"}; -static const char * const imx8q_clks[] = {"mstr", "slv", "dbi"}; - static const struct imx_pcie_drvdata drvdata[] = { [IMX6Q] = { .variant = IMX6Q, @@ -1523,8 +1654,6 @@ static const struct imx_pcie_drvdata drvdata[] = { IMX_PCIE_FLAG_SUPPORTS_SUSPEND, .dbi_length = 0x200, .gpr = "fsl,imx6q-iomuxc-gpr", - .clk_names = imx6q_clks, - .clks_cnt = ARRAY_SIZE(imx6q_clks), .ltssm_off = IOMUXC_GPR12, .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2, .mode_off[0] = IOMUXC_GPR12, @@ -1539,8 +1668,6 @@ static const struct imx_pcie_drvdata drvdata[] = { IMX_PCIE_FLAG_IMX_SPEED_CHANGE | IMX_PCIE_FLAG_SUPPORTS_SUSPEND, .gpr = "fsl,imx6q-iomuxc-gpr", - .clk_names = imx6sx_clks, - .clks_cnt = ARRAY_SIZE(imx6sx_clks), .ltssm_off = IOMUXC_GPR12, .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2, .mode_off[0] = IOMUXC_GPR12, @@ -1548,6 +1675,7 @@ static const struct imx_pcie_drvdata drvdata[] = { .init_phy = imx6sx_pcie_init_phy, .enable_ref_clk = imx6sx_pcie_enable_ref_clk, .core_reset = imx6sx_pcie_core_reset, + .ops = &imx_pcie_host_ops, }, [IMX6QP] = { .variant = IMX6QP, @@ -1556,8 +1684,6 @@ static const struct imx_pcie_drvdata drvdata[] = { IMX_PCIE_FLAG_SUPPORTS_SUSPEND, .dbi_length = 0x200, .gpr = "fsl,imx6q-iomuxc-gpr", - .clk_names = imx6q_clks, - .clks_cnt = ARRAY_SIZE(imx6q_clks), .ltssm_off = IOMUXC_GPR12, .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2, .mode_off[0] = IOMUXC_GPR12, @@ -1565,6 +1691,7 @@ static const struct imx_pcie_drvdata drvdata[] = { .init_phy = imx_pcie_init_phy, .enable_ref_clk = imx6q_pcie_enable_ref_clk, .core_reset = imx6qp_pcie_core_reset, + .ops = &imx_pcie_host_ops, }, [IMX7D] = { 
.variant = IMX7D, @@ -1572,21 +1699,17 @@ static const struct imx_pcie_drvdata drvdata[] = { IMX_PCIE_FLAG_HAS_APP_RESET | IMX_PCIE_FLAG_HAS_PHY_RESET, .gpr = "fsl,imx7d-iomuxc-gpr", - .clk_names = imx6q_clks, - .clks_cnt = ARRAY_SIZE(imx6q_clks), .mode_off[0] = IOMUXC_GPR12, .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, - .init_phy = imx7d_pcie_init_phy, .enable_ref_clk = imx7d_pcie_enable_ref_clk, .core_reset = imx7d_pcie_core_reset, }, [IMX8MQ] = { .variant = IMX8MQ, .flags = IMX_PCIE_FLAG_HAS_APP_RESET | - IMX_PCIE_FLAG_HAS_PHY_RESET, + IMX_PCIE_FLAG_HAS_PHY_RESET | + IMX_PCIE_FLAG_SUPPORTS_SUSPEND, .gpr = "fsl,imx8mq-iomuxc-gpr", - .clk_names = imx8mq_clks, - .clks_cnt = ARRAY_SIZE(imx8mq_clks), .mode_off[0] = IOMUXC_GPR12, .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, .mode_off[1] = IOMUXC_GPR12, @@ -1600,8 +1723,6 @@ static const struct imx_pcie_drvdata drvdata[] = { IMX_PCIE_FLAG_HAS_PHYDRV | IMX_PCIE_FLAG_HAS_APP_RESET, .gpr = "fsl,imx8mm-iomuxc-gpr", - .clk_names = imx8mm_clks, - .clks_cnt = ARRAY_SIZE(imx8mm_clks), .mode_off[0] = IOMUXC_GPR12, .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, .enable_ref_clk = imx8mm_pcie_enable_ref_clk, @@ -1612,8 +1733,6 @@ static const struct imx_pcie_drvdata drvdata[] = { IMX_PCIE_FLAG_HAS_PHYDRV | IMX_PCIE_FLAG_HAS_APP_RESET, .gpr = "fsl,imx8mp-iomuxc-gpr", - .clk_names = imx8mm_clks, - .clks_cnt = ARRAY_SIZE(imx8mm_clks), .mode_off[0] = IOMUXC_GPR12, .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, .enable_ref_clk = imx8mm_pcie_enable_ref_clk, @@ -1621,15 +1740,14 @@ static const struct imx_pcie_drvdata drvdata[] = { [IMX8Q] = { .variant = IMX8Q, .flags = IMX_PCIE_FLAG_HAS_PHYDRV | - IMX_PCIE_FLAG_CPU_ADDR_FIXUP, - .clk_names = imx8q_clks, - .clks_cnt = ARRAY_SIZE(imx8q_clks), + IMX_PCIE_FLAG_CPU_ADDR_FIXUP | + IMX_PCIE_FLAG_SUPPORTS_SUSPEND, }, [IMX95] = { .variant = IMX95, - .flags = IMX_PCIE_FLAG_HAS_SERDES, - .clk_names = imx8mq_clks, - .clks_cnt = ARRAY_SIZE(imx8mq_clks), + .flags = IMX_PCIE_FLAG_HAS_SERDES | + IMX_PCIE_FLAG_HAS_LUT | + IMX_PCIE_FLAG_SUPPORTS_SUSPEND, .ltssm_off = IMX95_PE0_GEN_CTRL_3, .ltssm_mask = IMX95_PCIE_LTSSM_EN, .mode_off[0] = IMX95_PE0_GEN_CTRL_1, @@ -1642,8 +1760,6 @@ static const struct imx_pcie_drvdata drvdata[] = { IMX_PCIE_FLAG_HAS_PHY_RESET, .mode = DW_PCIE_EP_TYPE, .gpr = "fsl,imx8mq-iomuxc-gpr", - .clk_names = imx8mq_clks, - .clks_cnt = ARRAY_SIZE(imx8mq_clks), .mode_off[0] = IOMUXC_GPR12, .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, .mode_off[1] = IOMUXC_GPR12, @@ -1658,8 +1774,6 @@ static const struct imx_pcie_drvdata drvdata[] = { IMX_PCIE_FLAG_HAS_PHYDRV, .mode = DW_PCIE_EP_TYPE, .gpr = "fsl,imx8mm-iomuxc-gpr", - .clk_names = imx8mm_clks, - .clks_cnt = ARRAY_SIZE(imx8mm_clks), .mode_off[0] = IOMUXC_GPR12, .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, .epc_features = &imx8m_pcie_epc_features, @@ -1671,19 +1785,21 @@ static const struct imx_pcie_drvdata drvdata[] = { IMX_PCIE_FLAG_HAS_PHYDRV, .mode = DW_PCIE_EP_TYPE, .gpr = "fsl,imx8mp-iomuxc-gpr", - .clk_names = imx8mm_clks, - .clks_cnt = ARRAY_SIZE(imx8mm_clks), .mode_off[0] = IOMUXC_GPR12, .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, .epc_features = &imx8m_pcie_epc_features, .enable_ref_clk = imx8mm_pcie_enable_ref_clk, }, + [IMX8Q_EP] = { + .variant = IMX8Q_EP, + .flags = IMX_PCIE_FLAG_HAS_PHYDRV, + .mode = DW_PCIE_EP_TYPE, + .epc_features = &imx8q_pcie_epc_features, + }, [IMX95_EP] = { .variant = IMX95_EP, .flags = IMX_PCIE_FLAG_HAS_SERDES | IMX_PCIE_FLAG_SUPPORT_64BIT, - .clk_names = imx8mq_clks, - .clks_cnt = ARRAY_SIZE(imx8mq_clks), .ltssm_off = IMX95_PE0_GEN_CTRL_3, .ltssm_mask = 
IMX95_PCIE_LTSSM_EN, .mode_off[0] = IMX95_PE0_GEN_CTRL_1, @@ -1707,6 +1823,7 @@ static const struct of_device_id imx_pcie_of_match[] = { { .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], }, { .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], }, { .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], }, + { .compatible = "fsl,imx8q-pcie-ep", .data = &drvdata[IMX8Q_EP], }, { .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], }, {}, }; diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 63bd5003da45..76a37368ae4f 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -966,11 +966,11 @@ static const struct pci_epc_features ks_pcie_am654_epc_features = { .msix_capable = true, .bar[BAR_0] = { .type = BAR_RESERVED, }, .bar[BAR_1] = { .type = BAR_RESERVED, }, - .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, + .bar[BAR_2] = { .type = BAR_RESIZABLE, }, .bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_64K, }, .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256, }, - .bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, - .align = SZ_1M, + .bar[BAR_5] = { .type = BAR_RESIZABLE, }, + .align = SZ_64K, }; static const struct pci_epc_features* diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c index ee6f52568133..a44b5c256d6e 100644 --- a/drivers/pci/controller/dwc/pci-layerscape.c +++ b/drivers/pci/controller/dwc/pci-layerscape.c @@ -329,7 +329,6 @@ static int ls_pcie_probe(struct platform_device *pdev) struct ls_pcie *pcie; struct resource *dbi_base; u32 index[2]; - int ret; pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) @@ -355,16 +354,15 @@ static int ls_pcie_probe(struct platform_device *pdev) pcie->pf_lut_base = pci->dbi_base + pcie->drvdata->pf_lut_off; if (pcie->drvdata->scfg_support) { - pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,pcie-scfg"); + pcie->scfg = + syscon_regmap_lookup_by_phandle_args(dev->of_node, + "fsl,pcie-scfg", 1, + index); if (IS_ERR(pcie->scfg)) { dev_err(dev, "No syscfg phandle specified\n"); return PTR_ERR(pcie->scfg); } - ret = of_property_read_u32_array(dev->of_node, "fsl,pcie-scfg", index, 2); - if (ret) - return ret; - pcie->index = index[1]; } diff --git a/drivers/pci/controller/dwc/pcie-amd-mdb.c b/drivers/pci/controller/dwc/pcie-amd-mdb.c new file mode 100644 index 000000000000..4eb2a4e8189d --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-amd-mdb.c @@ -0,0 +1,476 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for AMD MDB PCIe Bridge + * + * Copyright (C) 2024-2025, Advanced Micro Devices, Inc. + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/of_device.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/resource.h> +#include <linux/types.h> + +#include "pcie-designware.h" + +#define AMD_MDB_TLP_IR_STATUS_MISC 0x4C0 +#define AMD_MDB_TLP_IR_MASK_MISC 0x4C4 +#define AMD_MDB_TLP_IR_ENABLE_MISC 0x4C8 +#define AMD_MDB_TLP_IR_DISABLE_MISC 0x4CC + +#define AMD_MDB_TLP_PCIE_INTX_MASK GENMASK(23, 16) + +#define AMD_MDB_PCIE_INTR_INTX_ASSERT(x) BIT((x) * 2) + +/* Interrupt registers definitions. 
*/ +#define AMD_MDB_PCIE_INTR_CMPL_TIMEOUT 15 +#define AMD_MDB_PCIE_INTR_INTX 16 +#define AMD_MDB_PCIE_INTR_PM_PME_RCVD 24 +#define AMD_MDB_PCIE_INTR_PME_TO_ACK_RCVD 25 +#define AMD_MDB_PCIE_INTR_MISC_CORRECTABLE 26 +#define AMD_MDB_PCIE_INTR_NONFATAL 27 +#define AMD_MDB_PCIE_INTR_FATAL 28 + +#define IMR(x) BIT(AMD_MDB_PCIE_INTR_ ##x) +#define AMD_MDB_PCIE_IMR_ALL_MASK \ + ( \ + IMR(CMPL_TIMEOUT) | \ + IMR(PM_PME_RCVD) | \ + IMR(PME_TO_ACK_RCVD) | \ + IMR(MISC_CORRECTABLE) | \ + IMR(NONFATAL) | \ + IMR(FATAL) | \ + AMD_MDB_TLP_PCIE_INTX_MASK \ + ) + +/** + * struct amd_mdb_pcie - PCIe port information + * @pci: DesignWare PCIe controller structure + * @slcr: MDB System Level Control and Status Register (SLCR) base + * @intx_domain: INTx IRQ domain pointer + * @mdb_domain: MDB IRQ domain pointer + * @intx_irq: INTx IRQ interrupt number + */ +struct amd_mdb_pcie { + struct dw_pcie pci; + void __iomem *slcr; + struct irq_domain *intx_domain; + struct irq_domain *mdb_domain; + int intx_irq; +}; + +static const struct dw_pcie_host_ops amd_mdb_pcie_host_ops = { +}; + +static void amd_mdb_intx_irq_mask(struct irq_data *data) +{ + struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data); + struct dw_pcie *pci = &pcie->pci; + struct dw_pcie_rp *port = &pci->pp; + unsigned long flags; + u32 val; + + raw_spin_lock_irqsave(&port->lock, flags); + val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK, + AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq)); + + /* + * Writing '1' to a bit in AMD_MDB_TLP_IR_DISABLE_MISC disables that + * interrupt, writing '0' has no effect. + */ + writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC); + raw_spin_unlock_irqrestore(&port->lock, flags); +} + +static void amd_mdb_intx_irq_unmask(struct irq_data *data) +{ + struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data); + struct dw_pcie *pci = &pcie->pci; + struct dw_pcie_rp *port = &pci->pp; + unsigned long flags; + u32 val; + + raw_spin_lock_irqsave(&port->lock, flags); + val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK, + AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq)); + + /* + * Writing '1' to a bit in AMD_MDB_TLP_IR_ENABLE_MISC enables that + * interrupt, writing '0' has no effect. + */ + writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC); + raw_spin_unlock_irqrestore(&port->lock, flags); +} + +static struct irq_chip amd_mdb_intx_irq_chip = { + .name = "AMD MDB INTx", + .irq_mask = amd_mdb_intx_irq_mask, + .irq_unmask = amd_mdb_intx_irq_unmask, +}; + +/** + * amd_mdb_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid + * @domain: IRQ domain + * @irq: Virtual IRQ number + * @hwirq: Hardware interrupt number + * + * Return: Always returns '0'. + */ +static int amd_mdb_pcie_intx_map(struct irq_domain *domain, + unsigned int irq, irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &amd_mdb_intx_irq_chip, + handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + irq_set_status_flags(irq, IRQ_LEVEL); + + return 0; +} + +/* INTx IRQ domain operations. 
*/ +static const struct irq_domain_ops amd_intx_domain_ops = { + .map = amd_mdb_pcie_intx_map, +}; + +static irqreturn_t dw_pcie_rp_intx(int irq, void *args) +{ + struct amd_mdb_pcie *pcie = args; + unsigned long val; + int i, int_status; + + val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC); + int_status = FIELD_GET(AMD_MDB_TLP_PCIE_INTX_MASK, val); + + for (i = 0; i < PCI_NUM_INTX; i++) { + if (int_status & AMD_MDB_PCIE_INTR_INTX_ASSERT(i)) + generic_handle_domain_irq(pcie->intx_domain, i); + } + + return IRQ_HANDLED; +} + +#define _IC(x, s)[AMD_MDB_PCIE_INTR_ ## x] = { __stringify(x), s } + +static const struct { + const char *sym; + const char *str; +} intr_cause[32] = { + _IC(CMPL_TIMEOUT, "Completion timeout"), + _IC(PM_PME_RCVD, "PM_PME message received"), + _IC(PME_TO_ACK_RCVD, "PME_TO_ACK message received"), + _IC(MISC_CORRECTABLE, "Correctable error message"), + _IC(NONFATAL, "Non fatal error message"), + _IC(FATAL, "Fatal error message"), +}; + +static void amd_mdb_event_irq_mask(struct irq_data *d) +{ + struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d); + struct dw_pcie *pci = &pcie->pci; + struct dw_pcie_rp *port = &pci->pp; + unsigned long flags; + u32 val; + + raw_spin_lock_irqsave(&port->lock, flags); + val = BIT(d->hwirq); + writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC); + raw_spin_unlock_irqrestore(&port->lock, flags); +} + +static void amd_mdb_event_irq_unmask(struct irq_data *d) +{ + struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d); + struct dw_pcie *pci = &pcie->pci; + struct dw_pcie_rp *port = &pci->pp; + unsigned long flags; + u32 val; + + raw_spin_lock_irqsave(&port->lock, flags); + val = BIT(d->hwirq); + writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC); + raw_spin_unlock_irqrestore(&port->lock, flags); +} + +static struct irq_chip amd_mdb_event_irq_chip = { + .name = "AMD MDB RC-Event", + .irq_mask = amd_mdb_event_irq_mask, + .irq_unmask = amd_mdb_event_irq_unmask, +}; + +static int amd_mdb_pcie_event_map(struct irq_domain *domain, + unsigned int irq, irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &amd_mdb_event_irq_chip, + handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + irq_set_status_flags(irq, IRQ_LEVEL); + + return 0; +} + +static const struct irq_domain_ops event_domain_ops = { + .map = amd_mdb_pcie_event_map, +}; + +static irqreturn_t amd_mdb_pcie_event(int irq, void *args) +{ + struct amd_mdb_pcie *pcie = args; + unsigned long val; + int i; + + val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC); + val &= ~readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_MASK_MISC); + for_each_set_bit(i, &val, 32) + generic_handle_domain_irq(pcie->mdb_domain, i); + writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC); + + return IRQ_HANDLED; +} + +static void amd_mdb_pcie_free_irq_domains(struct amd_mdb_pcie *pcie) +{ + if (pcie->intx_domain) { + irq_domain_remove(pcie->intx_domain); + pcie->intx_domain = NULL; + } + + if (pcie->mdb_domain) { + irq_domain_remove(pcie->mdb_domain); + pcie->mdb_domain = NULL; + } +} + +static int amd_mdb_pcie_init_port(struct amd_mdb_pcie *pcie) +{ + unsigned long val; + + /* Disable all TLP interrupts. */ + writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK, + pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC); + + /* Clear pending TLP interrupts. */ + val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC); + val &= AMD_MDB_PCIE_IMR_ALL_MASK; + writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC); + + /* Enable all TLP interrupts. 
*/ + writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK, + pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC); + + return 0; +} + +/** + * amd_mdb_pcie_init_irq_domains - Initialize IRQ domain + * @pcie: PCIe port information + * @pdev: Platform device + * + * Return: Returns '0' on success and error value on failure. + */ +static int amd_mdb_pcie_init_irq_domains(struct amd_mdb_pcie *pcie, + struct platform_device *pdev) +{ + struct dw_pcie *pci = &pcie->pci; + struct dw_pcie_rp *pp = &pci->pp; + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct device_node *pcie_intc_node; + int err; + + pcie_intc_node = of_get_next_child(node, NULL); + if (!pcie_intc_node) { + dev_err(dev, "No PCIe Intc node found\n"); + return -ENODEV; + } + + pcie->mdb_domain = irq_domain_add_linear(pcie_intc_node, 32, + &event_domain_ops, pcie); + if (!pcie->mdb_domain) { + err = -ENOMEM; + dev_err(dev, "Failed to add MDB domain\n"); + goto out; + } + + irq_domain_update_bus_token(pcie->mdb_domain, DOMAIN_BUS_NEXUS); + + pcie->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, + &amd_intx_domain_ops, pcie); + if (!pcie->intx_domain) { + err = -ENOMEM; + dev_err(dev, "Failed to add INTx domain\n"); + goto mdb_out; + } + + of_node_put(pcie_intc_node); + irq_domain_update_bus_token(pcie->intx_domain, DOMAIN_BUS_WIRED); + + raw_spin_lock_init(&pp->lock); + + return 0; +mdb_out: + amd_mdb_pcie_free_irq_domains(pcie); +out: + of_node_put(pcie_intc_node); + return err; +} + +static irqreturn_t amd_mdb_pcie_intr_handler(int irq, void *args) +{ + struct amd_mdb_pcie *pcie = args; + struct device *dev; + struct irq_data *d; + + dev = pcie->pci.dev; + + /* + * In the future, error reporting will be hooked to the AER subsystem. + * Currently, the driver prints a warning message to the user. + */ + d = irq_domain_get_irq_data(pcie->mdb_domain, irq); + if (intr_cause[d->hwirq].str) + dev_warn(dev, "%s\n", intr_cause[d->hwirq].str); + else + dev_warn_once(dev, "Unknown IRQ %ld\n", d->hwirq); + + return IRQ_HANDLED; +} + +static int amd_mdb_setup_irq(struct amd_mdb_pcie *pcie, + struct platform_device *pdev) +{ + struct dw_pcie *pci = &pcie->pci; + struct dw_pcie_rp *pp = &pci->pp; + struct device *dev = &pdev->dev; + int i, irq, err; + + amd_mdb_pcie_init_port(pcie); + + pp->irq = platform_get_irq(pdev, 0); + if (pp->irq < 0) + return pp->irq; + + for (i = 0; i < ARRAY_SIZE(intr_cause); i++) { + if (!intr_cause[i].str) + continue; + + irq = irq_create_mapping(pcie->mdb_domain, i); + if (!irq) { + dev_err(dev, "Failed to map MDB domain interrupt\n"); + return -ENOMEM; + } + + err = devm_request_irq(dev, irq, amd_mdb_pcie_intr_handler, + IRQF_NO_THREAD, intr_cause[i].sym, pcie); + if (err) { + dev_err(dev, "Failed to request IRQ %d, err=%d\n", + irq, err); + return err; + } + } + + pcie->intx_irq = irq_create_mapping(pcie->mdb_domain, + AMD_MDB_PCIE_INTR_INTX); + if (!pcie->intx_irq) { + dev_err(dev, "Failed to map INTx interrupt\n"); + return -ENXIO; + } + + err = devm_request_irq(dev, pcie->intx_irq, dw_pcie_rp_intx, + IRQF_NO_THREAD, NULL, pcie); + if (err) { + dev_err(dev, "Failed to request INTx IRQ %d, err=%d\n", + irq, err); + return err; + } + + /* Plug the main event handler. 
*/ + err = devm_request_irq(dev, pp->irq, amd_mdb_pcie_event, IRQF_NO_THREAD, + "amd_mdb pcie_irq", pcie); + if (err) { + dev_err(dev, "Failed to request event IRQ %d, err=%d\n", + pp->irq, err); + return err; + } + + return 0; +} + +static int amd_mdb_add_pcie_port(struct amd_mdb_pcie *pcie, + struct platform_device *pdev) +{ + struct dw_pcie *pci = &pcie->pci; + struct dw_pcie_rp *pp = &pci->pp; + struct device *dev = &pdev->dev; + int err; + + pcie->slcr = devm_platform_ioremap_resource_byname(pdev, "slcr"); + if (IS_ERR(pcie->slcr)) + return PTR_ERR(pcie->slcr); + + err = amd_mdb_pcie_init_irq_domains(pcie, pdev); + if (err) + return err; + + err = amd_mdb_setup_irq(pcie, pdev); + if (err) { + dev_err(dev, "Failed to set up interrupts, err=%d\n", err); + goto out; + } + + pp->ops = &amd_mdb_pcie_host_ops; + + err = dw_pcie_host_init(pp); + if (err) { + dev_err(dev, "Failed to initialize host, err=%d\n", err); + goto out; + } + + return 0; + +out: + amd_mdb_pcie_free_irq_domains(pcie); + return err; +} + +static int amd_mdb_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct amd_mdb_pcie *pcie; + struct dw_pcie *pci; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pci = &pcie->pci; + pci->dev = dev; + + platform_set_drvdata(pdev, pcie); + + return amd_mdb_add_pcie_port(pcie, pdev); +} + +static const struct of_device_id amd_mdb_pcie_of_match[] = { + { + .compatible = "amd,versal2-mdb-host", + }, + {}, +}; + +static struct platform_driver amd_mdb_pcie_driver = { + .driver = { + .name = "amd-mdb-pcie", + .of_match_table = amd_mdb_pcie_of_match, + .suppress_bind_attrs = true, + }, + .probe = amd_mdb_pcie_probe, +}; + +builtin_platform_driver(amd_mdb_pcie_driver); diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c index f8e7283dacd4..234c8cbcae3a 100644 --- a/drivers/pci/controller/dwc/pcie-artpec6.c +++ b/drivers/pci/controller/dwc/pcie-artpec6.c @@ -369,9 +369,22 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, return 0; } +static const struct pci_epc_features artpec6_pcie_epc_features = { + .linkup_notifier = false, + .msi_capable = true, + .msix_capable = false, +}; + +static const struct pci_epc_features * +artpec6_pcie_get_features(struct dw_pcie_ep *ep) +{ + return &artpec6_pcie_epc_features; +} + static const struct dw_pcie_ep_ops pcie_ep_ops = { .init = artpec6_pcie_ep_init, .raise_irq = artpec6_pcie_raise_irq, + .get_features = artpec6_pcie_get_features, }; static int artpec6_pcie_probe(struct platform_device *pdev) diff --git a/drivers/pci/controller/dwc/pcie-designware-debugfs.c b/drivers/pci/controller/dwc/pcie-designware-debugfs.c new file mode 100644 index 000000000000..9e6f4d00f262 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-designware-debugfs.c @@ -0,0 +1,677 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Synopsys DesignWare PCIe controller debugfs driver + * + * Copyright (C) 2025 Samsung Electronics Co., Ltd. 
+ * http://www.samsung.com + * + * Author: Shradha Todi <shradha.t@samsung.com> + */ + +#include <linux/debugfs.h> + +#include "pcie-designware.h" + +#define SD_STATUS_L1LANE_REG 0xb0 +#define PIPE_RXVALID BIT(18) +#define PIPE_DETECT_LANE BIT(17) +#define LANE_SELECT GENMASK(3, 0) + +#define ERR_INJ0_OFF 0x34 +#define EINJ_VAL_DIFF GENMASK(28, 16) +#define EINJ_VC_NUM GENMASK(14, 12) +#define EINJ_TYPE_SHIFT 8 +#define EINJ0_TYPE GENMASK(11, 8) +#define EINJ1_TYPE BIT(8) +#define EINJ2_TYPE GENMASK(9, 8) +#define EINJ3_TYPE GENMASK(10, 8) +#define EINJ4_TYPE GENMASK(10, 8) +#define EINJ5_TYPE BIT(8) +#define EINJ_COUNT GENMASK(7, 0) + +#define ERR_INJ_ENABLE_REG 0x30 + +#define RAS_DES_EVENT_COUNTER_DATA_REG 0xc + +#define RAS_DES_EVENT_COUNTER_CTRL_REG 0x8 +#define EVENT_COUNTER_GROUP_SELECT GENMASK(27, 24) +#define EVENT_COUNTER_EVENT_SELECT GENMASK(23, 16) +#define EVENT_COUNTER_LANE_SELECT GENMASK(11, 8) +#define EVENT_COUNTER_STATUS BIT(7) +#define EVENT_COUNTER_ENABLE GENMASK(4, 2) +#define PER_EVENT_ON 0x3 +#define PER_EVENT_OFF 0x1 + +#define DWC_DEBUGFS_BUF_MAX 128 + +/** + * struct dwc_pcie_rasdes_info - Stores controller common information + * @ras_cap_offset: RAS DES vendor specific extended capability offset + * @reg_event_lock: Mutex used for RAS DES shadow event registers + * + * Any parameter constant to all files of the debugfs hierarchy for a single + * controller will be stored in this struct. It is allocated and assigned to + * controller specific struct dw_pcie during initialization. + */ +struct dwc_pcie_rasdes_info { + u32 ras_cap_offset; + struct mutex reg_event_lock; +}; + +/** + * struct dwc_pcie_rasdes_priv - Stores file specific private data information + * @pci: Reference to the dw_pcie structure + * @idx: Index of specific file related information in array of structs + * + * All debugfs files will have this struct as its private data. + */ +struct dwc_pcie_rasdes_priv { + struct dw_pcie *pci; + int idx; +}; + +/** + * struct dwc_pcie_err_inj - Store details about each error injection + * supported by DWC RAS DES + * @name: Name of the error that can be injected + * @err_inj_group: Group number to which the error belongs. 
The value + * can range from 0 to 5 + * @err_inj_type: Each group can have multiple types of error + */ +struct dwc_pcie_err_inj { + const char *name; + u32 err_inj_group; + u32 err_inj_type; +}; + +static const struct dwc_pcie_err_inj err_inj_list[] = { + {"tx_lcrc", 0x0, 0x0}, + {"b16_crc_dllp", 0x0, 0x1}, + {"b16_crc_upd_fc", 0x0, 0x2}, + {"tx_ecrc", 0x0, 0x3}, + {"fcrc_tlp", 0x0, 0x4}, + {"parity_tsos", 0x0, 0x5}, + {"parity_skpos", 0x0, 0x6}, + {"rx_lcrc", 0x0, 0x8}, + {"rx_ecrc", 0x0, 0xb}, + {"tlp_err_seq", 0x1, 0x0}, + {"ack_nak_dllp_seq", 0x1, 0x1}, + {"ack_nak_dllp", 0x2, 0x0}, + {"upd_fc_dllp", 0x2, 0x1}, + {"nak_dllp", 0x2, 0x2}, + {"inv_sync_hdr_sym", 0x3, 0x0}, + {"com_pad_ts1", 0x3, 0x1}, + {"com_pad_ts2", 0x3, 0x2}, + {"com_fts", 0x3, 0x3}, + {"com_idl", 0x3, 0x4}, + {"end_edb", 0x3, 0x5}, + {"stp_sdp", 0x3, 0x6}, + {"com_skp", 0x3, 0x7}, + {"posted_tlp_hdr", 0x4, 0x0}, + {"non_post_tlp_hdr", 0x4, 0x1}, + {"cmpl_tlp_hdr", 0x4, 0x2}, + {"posted_tlp_data", 0x4, 0x4}, + {"non_post_tlp_data", 0x4, 0x5}, + {"cmpl_tlp_data", 0x4, 0x6}, + {"duplicate_tlp", 0x5, 0x0}, + {"nullified_tlp", 0x5, 0x1}, +}; + +static const u32 err_inj_type_mask[] = { + EINJ0_TYPE, + EINJ1_TYPE, + EINJ2_TYPE, + EINJ3_TYPE, + EINJ4_TYPE, + EINJ5_TYPE, +}; + +/** + * struct dwc_pcie_event_counter - Store details about each event counter + * supported in DWC RAS DES + * @name: Name of the error counter + * @group_no: Group number that the event belongs to. The value can range + * from 0 to 4 + * @event_no: Event number of the particular event. The value ranges are: + * Group 0: 0 - 10 + * Group 1: 5 - 13 + * Group 2: 0 - 7 + * Group 3: 0 - 5 + * Group 4: 0 - 1 + */ +struct dwc_pcie_event_counter { + const char *name; + u32 group_no; + u32 event_no; +}; + +static const struct dwc_pcie_event_counter event_list[] = { + {"ebuf_overflow", 0x0, 0x0}, + {"ebuf_underrun", 0x0, 0x1}, + {"decode_err", 0x0, 0x2}, + {"running_disparity_err", 0x0, 0x3}, + {"skp_os_parity_err", 0x0, 0x4}, + {"sync_header_err", 0x0, 0x5}, + {"rx_valid_deassertion", 0x0, 0x6}, + {"ctl_skp_os_parity_err", 0x0, 0x7}, + {"retimer_parity_err_1st", 0x0, 0x8}, + {"retimer_parity_err_2nd", 0x0, 0x9}, + {"margin_crc_parity_err", 0x0, 0xA}, + {"detect_ei_infer", 0x1, 0x5}, + {"receiver_err", 0x1, 0x6}, + {"rx_recovery_req", 0x1, 0x7}, + {"n_fts_timeout", 0x1, 0x8}, + {"framing_err", 0x1, 0x9}, + {"deskew_err", 0x1, 0xa}, + {"framing_err_in_l0", 0x1, 0xc}, + {"deskew_uncompleted_err", 0x1, 0xd}, + {"bad_tlp", 0x2, 0x0}, + {"lcrc_err", 0x2, 0x1}, + {"bad_dllp", 0x2, 0x2}, + {"replay_num_rollover", 0x2, 0x3}, + {"replay_timeout", 0x2, 0x4}, + {"rx_nak_dllp", 0x2, 0x5}, + {"tx_nak_dllp", 0x2, 0x6}, + {"retry_tlp", 0x2, 0x7}, + {"fc_timeout", 0x3, 0x0}, + {"poisoned_tlp", 0x3, 0x1}, + {"ecrc_error", 0x3, 0x2}, + {"unsupported_request", 0x3, 0x3}, + {"completer_abort", 0x3, 0x4}, + {"completion_timeout", 0x3, 0x5}, + {"ebuf_skp_add", 0x4, 0x0}, + {"ebuf_skp_del", 0x4, 0x1}, +}; + +static ssize_t lane_detect_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct dw_pcie *pci = file->private_data; + struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; + char debugfs_buf[DWC_DEBUGFS_BUF_MAX]; + ssize_t pos; + u32 val; + + val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG); + val = FIELD_GET(PIPE_DETECT_LANE, val); + if (val) + pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Detected\n"); + else + pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Undetected\n"); + + return 
simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos); +} + +static ssize_t lane_detect_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct dw_pcie *pci = file->private_data; + struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; + u32 lane, val; + + val = kstrtou32_from_user(buf, count, 0, &lane); + if (val) + return val; + + val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG); + val &= ~(LANE_SELECT); + val |= FIELD_PREP(LANE_SELECT, lane); + dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG, val); + + return count; +} + +static ssize_t rx_valid_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct dw_pcie *pci = file->private_data; + struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; + char debugfs_buf[DWC_DEBUGFS_BUF_MAX]; + ssize_t pos; + u32 val; + + val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG); + val = FIELD_GET(PIPE_RXVALID, val); + if (val) + pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Valid\n"); + else + pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Invalid\n"); + + return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos); +} + +static ssize_t rx_valid_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + return lane_detect_write(file, buf, count, ppos); +} + +static ssize_t err_inj_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct dwc_pcie_rasdes_priv *pdata = file->private_data; + struct dw_pcie *pci = pdata->pci; + struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; + u32 val, counter, vc_num, err_group, type_mask; + int val_diff = 0; + char *kern_buf; + + err_group = err_inj_list[pdata->idx].err_inj_group; + type_mask = err_inj_type_mask[err_group]; + + kern_buf = memdup_user_nul(buf, count); + if (IS_ERR(kern_buf)) + return PTR_ERR(kern_buf); + + if (err_group == 4) { + val = sscanf(kern_buf, "%u %d %u", &counter, &val_diff, &vc_num); + if ((val != 3) || (val_diff < -4095 || val_diff > 4095)) { + kfree(kern_buf); + return -EINVAL; + } + } else if (err_group == 1) { + val = sscanf(kern_buf, "%u %d", &counter, &val_diff); + if ((val != 2) || (val_diff < -4095 || val_diff > 4095)) { + kfree(kern_buf); + return -EINVAL; + } + } else { + val = kstrtou32(kern_buf, 0, &counter); + if (val) { + kfree(kern_buf); + return val; + } + } + + val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group)); + val &= ~(type_mask | EINJ_COUNT); + val |= ((err_inj_list[pdata->idx].err_inj_type << EINJ_TYPE_SHIFT) & type_mask); + val |= FIELD_PREP(EINJ_COUNT, counter); + + if (err_group == 1 || err_group == 4) { + val &= ~(EINJ_VAL_DIFF); + val |= FIELD_PREP(EINJ_VAL_DIFF, val_diff); + } + if (err_group == 4) { + val &= ~(EINJ_VC_NUM); + val |= FIELD_PREP(EINJ_VC_NUM, vc_num); + } + + dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group), val); + dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ_ENABLE_REG, (0x1 << err_group)); + + kfree(kern_buf); + return count; +} + +static void set_event_number(struct dwc_pcie_rasdes_priv *pdata, + struct dw_pcie *pci, struct dwc_pcie_rasdes_info *rinfo) +{ + u32 val; + + val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG); + val &= ~EVENT_COUNTER_ENABLE; + val &= ~(EVENT_COUNTER_GROUP_SELECT | EVENT_COUNTER_EVENT_SELECT); + val |= FIELD_PREP(EVENT_COUNTER_GROUP_SELECT, 
event_list[pdata->idx].group_no); + val |= FIELD_PREP(EVENT_COUNTER_EVENT_SELECT, event_list[pdata->idx].event_no); + dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val); +} + +static ssize_t counter_enable_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct dwc_pcie_rasdes_priv *pdata = file->private_data; + struct dw_pcie *pci = pdata->pci; + struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; + char debugfs_buf[DWC_DEBUGFS_BUF_MAX]; + ssize_t pos; + u32 val; + + mutex_lock(&rinfo->reg_event_lock); + set_event_number(pdata, pci, rinfo); + val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG); + mutex_unlock(&rinfo->reg_event_lock); + val = FIELD_GET(EVENT_COUNTER_STATUS, val); + if (val) + pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Enabled\n"); + else + pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Disabled\n"); + + return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos); +} + +static ssize_t counter_enable_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct dwc_pcie_rasdes_priv *pdata = file->private_data; + struct dw_pcie *pci = pdata->pci; + struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; + u32 val, enable; + + val = kstrtou32_from_user(buf, count, 0, &enable); + if (val) + return val; + + mutex_lock(&rinfo->reg_event_lock); + set_event_number(pdata, pci, rinfo); + val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG); + if (enable) + val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_ON); + else + val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_OFF); + + dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val); + + /* + * While enabling the counter, always read the status back to check if + * it is enabled or not. Return error if it is not enabled to let the + * users know that the counter is not supported on the platform. 
+ */ + if (enable) { + val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + + RAS_DES_EVENT_COUNTER_CTRL_REG); + if (!FIELD_GET(EVENT_COUNTER_STATUS, val)) { + mutex_unlock(&rinfo->reg_event_lock); + return -EOPNOTSUPP; + } + } + + mutex_unlock(&rinfo->reg_event_lock); + + return count; +} + +static ssize_t counter_lane_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct dwc_pcie_rasdes_priv *pdata = file->private_data; + struct dw_pcie *pci = pdata->pci; + struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; + char debugfs_buf[DWC_DEBUGFS_BUF_MAX]; + ssize_t pos; + u32 val; + + mutex_lock(&rinfo->reg_event_lock); + set_event_number(pdata, pci, rinfo); + val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG); + mutex_unlock(&rinfo->reg_event_lock); + val = FIELD_GET(EVENT_COUNTER_LANE_SELECT, val); + pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane: %d\n", val); + + return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos); +} + +static ssize_t counter_lane_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct dwc_pcie_rasdes_priv *pdata = file->private_data; + struct dw_pcie *pci = pdata->pci; + struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; + u32 val, lane; + + val = kstrtou32_from_user(buf, count, 0, &lane); + if (val) + return val; + + mutex_lock(&rinfo->reg_event_lock); + set_event_number(pdata, pci, rinfo); + val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG); + val &= ~(EVENT_COUNTER_LANE_SELECT); + val |= FIELD_PREP(EVENT_COUNTER_LANE_SELECT, lane); + dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val); + mutex_unlock(&rinfo->reg_event_lock); + + return count; +} + +static ssize_t counter_value_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct dwc_pcie_rasdes_priv *pdata = file->private_data; + struct dw_pcie *pci = pdata->pci; + struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; + char debugfs_buf[DWC_DEBUGFS_BUF_MAX]; + ssize_t pos; + u32 val; + + mutex_lock(&rinfo->reg_event_lock); + set_event_number(pdata, pci, rinfo); + val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_DATA_REG); + mutex_unlock(&rinfo->reg_event_lock); + pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter value: %d\n", val); + + return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos); +} + +static const char *ltssm_status_string(enum dw_pcie_ltssm ltssm) +{ + const char *str; + + switch (ltssm) { +#define DW_PCIE_LTSSM_NAME(n) case n: str = #n; break + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_QUIET); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_ACT); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_ACTIVE); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_COMPLIANCE); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_CONFIG); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_PRE_DETECT_QUIET); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_WAIT); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_START); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_ACEPT); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_WAI); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_ACEPT); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_COMPLETE); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_IDLE); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_LOCK); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_SPEED); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_RCVRCFG); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_IDLE); + 
DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0S); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L123_SEND_EIDLE); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L1_IDLE); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_IDLE); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_WAKE); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_ENTRY); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_IDLE); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ENTRY); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ACTIVE); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET_ENTRY); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ0); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ1); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ2); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ3); + default: + str = "DW_PCIE_LTSSM_UNKNOWN"; + break; + } + + return str + strlen("DW_PCIE_LTSSM_"); +} + +static int ltssm_status_show(struct seq_file *s, void *v) +{ + struct dw_pcie *pci = s->private; + enum dw_pcie_ltssm val; + + val = dw_pcie_get_ltssm(pci); + seq_printf(s, "%s (0x%02x)\n", ltssm_status_string(val), val); + + return 0; +} + +static int ltssm_status_open(struct inode *inode, struct file *file) +{ + return single_open(file, ltssm_status_show, inode->i_private); +} + +#define dwc_debugfs_create(name) \ +debugfs_create_file(#name, 0644, rasdes_debug, pci, \ + &dbg_ ## name ## _fops) + +#define DWC_DEBUGFS_FOPS(name) \ +static const struct file_operations dbg_ ## name ## _fops = { \ + .open = simple_open, \ + .read = name ## _read, \ + .write = name ## _write \ +} + +DWC_DEBUGFS_FOPS(lane_detect); +DWC_DEBUGFS_FOPS(rx_valid); + +static const struct file_operations dwc_pcie_err_inj_ops = { + .open = simple_open, + .write = err_inj_write, +}; + +static const struct file_operations dwc_pcie_counter_enable_ops = { + .open = simple_open, + .read = counter_enable_read, + .write = counter_enable_write, +}; + +static const struct file_operations dwc_pcie_counter_lane_ops = { + .open = simple_open, + .read = counter_lane_read, + .write = counter_lane_write, +}; + +static const struct file_operations dwc_pcie_counter_value_ops = { + .open = simple_open, + .read = counter_value_read, +}; + +static const struct file_operations dwc_pcie_ltssm_status_ops = { + .open = ltssm_status_open, + .read = seq_read, +}; + +static void dwc_pcie_rasdes_debugfs_deinit(struct dw_pcie *pci) +{ + struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; + + mutex_destroy(&rinfo->reg_event_lock); +} + +static int dwc_pcie_rasdes_debugfs_init(struct dw_pcie *pci, struct dentry *dir) +{ + struct dentry *rasdes_debug, *rasdes_err_inj; + struct dentry *rasdes_event_counter, *rasdes_events; + struct dwc_pcie_rasdes_info *rasdes_info; + struct dwc_pcie_rasdes_priv *priv_tmp; + struct device *dev = pci->dev; + int ras_cap, i, ret; + + /* + * If a given SoC has no RAS DES capability, the following call is + * bound to return an error, breaking some existing platforms. So, + * return 0 here, as this is not necessarily an error. + */ + ras_cap = dw_pcie_find_rasdes_capability(pci); + if (!ras_cap) { + dev_dbg(dev, "no RAS DES capability available\n"); + return 0; + } + + rasdes_info = devm_kzalloc(dev, sizeof(*rasdes_info), GFP_KERNEL); + if (!rasdes_info) + return -ENOMEM; + + /* Create subdirectories for Debug, Error Injection, Statistics. 
*/ + rasdes_debug = debugfs_create_dir("rasdes_debug", dir); + rasdes_err_inj = debugfs_create_dir("rasdes_err_inj", dir); + rasdes_event_counter = debugfs_create_dir("rasdes_event_counter", dir); + + mutex_init(&rasdes_info->reg_event_lock); + rasdes_info->ras_cap_offset = ras_cap; + pci->debugfs->rasdes_info = rasdes_info; + + /* Create debugfs files for Debug subdirectory. */ + dwc_debugfs_create(lane_detect); + dwc_debugfs_create(rx_valid); + + /* Create debugfs files for Error Injection subdirectory. */ + for (i = 0; i < ARRAY_SIZE(err_inj_list); i++) { + priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL); + if (!priv_tmp) { + ret = -ENOMEM; + goto err_deinit; + } + + priv_tmp->idx = i; + priv_tmp->pci = pci; + debugfs_create_file(err_inj_list[i].name, 0200, rasdes_err_inj, priv_tmp, + &dwc_pcie_err_inj_ops); + } + + /* Create debugfs files for Statistical Counter subdirectory. */ + for (i = 0; i < ARRAY_SIZE(event_list); i++) { + priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL); + if (!priv_tmp) { + ret = -ENOMEM; + goto err_deinit; + } + + priv_tmp->idx = i; + priv_tmp->pci = pci; + rasdes_events = debugfs_create_dir(event_list[i].name, rasdes_event_counter); + if (event_list[i].group_no == 0 || event_list[i].group_no == 4) { + debugfs_create_file("lane_select", 0644, rasdes_events, + priv_tmp, &dwc_pcie_counter_lane_ops); + } + debugfs_create_file("counter_value", 0444, rasdes_events, priv_tmp, + &dwc_pcie_counter_value_ops); + debugfs_create_file("counter_enable", 0644, rasdes_events, priv_tmp, + &dwc_pcie_counter_enable_ops); + } + + return 0; + +err_deinit: + dwc_pcie_rasdes_debugfs_deinit(pci); + return ret; +} + +static void dwc_pcie_ltssm_debugfs_init(struct dw_pcie *pci, struct dentry *dir) +{ + debugfs_create_file("ltssm_status", 0444, dir, pci, + &dwc_pcie_ltssm_status_ops); +} + +void dwc_pcie_debugfs_deinit(struct dw_pcie *pci) +{ + if (!pci->debugfs) + return; + + dwc_pcie_rasdes_debugfs_deinit(pci); + debugfs_remove_recursive(pci->debugfs->debug_dir); +} + +void dwc_pcie_debugfs_init(struct dw_pcie *pci) +{ + char dirname[DWC_DEBUGFS_BUF_MAX]; + struct device *dev = pci->dev; + struct debugfs_info *debugfs; + struct dentry *dir; + int err; + + /* Create main directory for each platform driver. */ + snprintf(dirname, DWC_DEBUGFS_BUF_MAX, "dwc_pcie_%s", dev_name(dev)); + dir = debugfs_create_dir(dirname, NULL); + debugfs = devm_kzalloc(dev, sizeof(*debugfs), GFP_KERNEL); + if (!debugfs) + return; + + debugfs->debug_dir = dir; + pci->debugfs = debugfs; + err = dwc_pcie_rasdes_debugfs_init(pci, dir); + if (err) + dev_err(dev, "failed to initialize RAS DES debugfs, err=%d\n", + err); + + dwc_pcie_ltssm_debugfs_init(pci, dir); +} diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index f3ac7d46a855..1a0bf9341542 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -102,6 +102,45 @@ static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap) return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap); } +/** + * dw_pcie_ep_hide_ext_capability - Hide a capability from the linked list + * @pci: DWC PCI device + * @prev_cap: Capability preceding the capability that should be hidden + * @cap: Capability that should be hidden + * + * Return: 0 if success, errno otherwise. 
+ */ +int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap) +{ + u16 prev_cap_offset, cap_offset; + u32 prev_cap_header, cap_header; + + prev_cap_offset = dw_pcie_find_ext_capability(pci, prev_cap); + if (!prev_cap_offset) + return -EINVAL; + + prev_cap_header = dw_pcie_readl_dbi(pci, prev_cap_offset); + cap_offset = PCI_EXT_CAP_NEXT(prev_cap_header); + cap_header = dw_pcie_readl_dbi(pci, cap_offset); + + /* cap must immediately follow prev_cap. */ + if (PCI_EXT_CAP_ID(cap_header) != cap) + return -EINVAL; + + /* Clear next ptr. */ + prev_cap_header &= ~GENMASK(31, 20); + + /* Set next ptr to next ptr of cap. */ + prev_cap_header |= cap_header & GENMASK(31, 20); + + dw_pcie_dbi_ro_wr_en(pci); + dw_pcie_writel_dbi(pci, prev_cap_offset, prev_cap_header); + dw_pcie_dbi_ro_wr_dis(pci); + + return 0; +} +EXPORT_SYMBOL_GPL(dw_pcie_ep_hide_ext_capability); + static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_header *hdr) { @@ -128,7 +167,8 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no, } static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type, - dma_addr_t cpu_addr, enum pci_barno bar) + dma_addr_t parent_bus_addr, enum pci_barno bar, + size_t size) { int ret; u32 free_win; @@ -145,7 +185,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type, } ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type, - cpu_addr, bar); + parent_bus_addr, bar, size); if (ret < 0) { dev_err(pci->dev, "Failed to program IB window\n"); return ret; @@ -180,7 +220,7 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, return ret; set_bit(free_win, ep->ob_window_map); - ep->outbound_addr[free_win] = atu->cpu_addr; + ep->outbound_addr[free_win] = atu->parent_bus_addr; return 0; } @@ -204,6 +244,125 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, ep->bar_to_atu[bar] = 0; } +static unsigned int dw_pcie_ep_get_rebar_offset(struct dw_pcie *pci, + enum pci_barno bar) +{ + u32 reg, bar_index; + unsigned int offset, nbars; + int i; + + offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR); + if (!offset) + return offset; + + reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); + nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT; + + for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) { + reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); + bar_index = reg & PCI_REBAR_CTRL_BAR_IDX; + if (bar_index == bar) + return offset; + } + + return 0; +} + +static int dw_pcie_ep_set_bar_resizable(struct dw_pcie_ep *ep, u8 func_no, + struct pci_epf_bar *epf_bar) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + enum pci_barno bar = epf_bar->barno; + size_t size = epf_bar->size; + int flags = epf_bar->flags; + u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar); + unsigned int rebar_offset; + u32 rebar_cap, rebar_ctrl; + int ret; + + rebar_offset = dw_pcie_ep_get_rebar_offset(pci, bar); + if (!rebar_offset) + return -EINVAL; + + ret = pci_epc_bar_size_to_rebar_cap(size, &rebar_cap); + if (ret) + return ret; + + dw_pcie_dbi_ro_wr_en(pci); + + /* + * A BAR mask should not be written for a resizable BAR. The BAR mask + * is automatically derived by the controller every time the "selected + * size" bits are updated, see "Figure 3-26 Resizable BAR Example for + * 32-bit Memory BAR0" in DWC EP databook 5.96a. We simply need to write + * BIT(0) to set the BAR enable bit. 
+ */ + dw_pcie_ep_writel_dbi2(ep, func_no, reg, BIT(0)); + dw_pcie_ep_writel_dbi(ep, func_no, reg, flags); + + if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { + dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0); + dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0); + } + + /* + * Bits 31:0 in PCI_REBAR_CAP define "supported sizes" bits for sizes + * 1 MB to 128 TB. Bits 31:16 in PCI_REBAR_CTRL define "supported sizes" + * bits for sizes 256 TB to 8 EB. Disallow sizes 256 TB to 8 EB. + */ + rebar_ctrl = dw_pcie_readl_dbi(pci, rebar_offset + PCI_REBAR_CTRL); + rebar_ctrl &= ~GENMASK(31, 16); + dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CTRL, rebar_ctrl); + + /* + * The "selected size" (bits 13:8) in PCI_REBAR_CTRL are automatically + * updated when writing PCI_REBAR_CAP, see "Figure 3-26 Resizable BAR + * Example for 32-bit Memory BAR0" in DWC EP databook 5.96a. + */ + dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CAP, rebar_cap); + + dw_pcie_dbi_ro_wr_dis(pci); + + return 0; +} + +static int dw_pcie_ep_set_bar_programmable(struct dw_pcie_ep *ep, u8 func_no, + struct pci_epf_bar *epf_bar) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + enum pci_barno bar = epf_bar->barno; + size_t size = epf_bar->size; + int flags = epf_bar->flags; + u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar); + + dw_pcie_dbi_ro_wr_en(pci); + + dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1)); + dw_pcie_ep_writel_dbi(ep, func_no, reg, flags); + + if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { + dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1)); + dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0); + } + + dw_pcie_dbi_ro_wr_dis(pci); + + return 0; +} + +static enum pci_epc_bar_type dw_pcie_ep_get_bar_type(struct dw_pcie_ep *ep, + enum pci_barno bar) +{ + const struct pci_epc_features *epc_features; + + if (!ep->ops->get_features) + return BAR_PROGRAMMABLE; + + epc_features = ep->ops->get_features(ep); + + return epc_features->bar[bar].type; +} + static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_bar *epf_bar) { @@ -211,9 +370,9 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct dw_pcie *pci = to_dw_pcie_from_ep(ep); enum pci_barno bar = epf_bar->barno; size_t size = epf_bar->size; + enum pci_epc_bar_type bar_type; int flags = epf_bar->flags; int ret, type; - u32 reg; /* * DWC does not allow BAR pairs to overlap, e.g. you cannot combine BARs @@ -222,32 +381,66 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1)) return -EINVAL; - reg = PCI_BASE_ADDRESS_0 + (4 * bar); + /* + * Certain EPF drivers dynamically change the physical address of a BAR + * (i.e. they call set_bar() twice, without ever calling clear_bar(), as + * calling clear_bar() would clear the BAR's PCI address assigned by the + * host). + */ + if (ep->epf_bar[bar]) { + /* + * We can only dynamically change a BAR if the new BAR size and + * BAR flags do not differ from the existing configuration. + */ + if (ep->epf_bar[bar]->barno != bar || + ep->epf_bar[bar]->size != size || + ep->epf_bar[bar]->flags != flags) + return -EINVAL; + + /* + * When dynamically changing a BAR, skip writing the BAR reg, as + * that would clear the BAR's PCI address assigned by the host. 
+ */ + goto config_atu; + } + + bar_type = dw_pcie_ep_get_bar_type(ep, bar); + switch (bar_type) { + case BAR_FIXED: + /* + * There is no need to write a BAR mask for a fixed BAR (except + * to write 1 to the LSB of the BAR mask register, to enable the + * BAR). Write the BAR mask regardless. (The fixed bits in the + * BAR mask register will be read-only anyway.) + */ + fallthrough; + case BAR_PROGRAMMABLE: + ret = dw_pcie_ep_set_bar_programmable(ep, func_no, epf_bar); + break; + case BAR_RESIZABLE: + ret = dw_pcie_ep_set_bar_resizable(ep, func_no, epf_bar); + break; + default: + ret = -EINVAL; + dev_err(pci->dev, "Invalid BAR type\n"); + break; + } + if (ret) + return ret; + +config_atu: if (!(flags & PCI_BASE_ADDRESS_SPACE)) type = PCIE_ATU_TYPE_MEM; else type = PCIE_ATU_TYPE_IO; - ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar); + ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar, + size); if (ret) return ret; - if (ep->epf_bar[bar]) - return 0; - - dw_pcie_dbi_ro_wr_en(pci); - - dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1)); - dw_pcie_ep_writel_dbi(ep, func_no, reg, flags); - - if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { - dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1)); - dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0); - } - ep->epf_bar[bar] = epf_bar; - dw_pcie_dbi_ro_wr_dis(pci); return 0; } @@ -258,7 +451,7 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr, u32 index; struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - for (index = 0; index < pci->num_ob_windows; index++) { + for_each_set_bit(index, ep->ob_window_map, pci->num_ob_windows) { if (ep->outbound_addr[index] != addr) continue; *atu_index = index; @@ -290,7 +483,8 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - ret = dw_pcie_find_index(ep, addr, &atu_index); + ret = dw_pcie_find_index(ep, addr - pci->parent_bus_offset, + &atu_index); if (ret < 0) return; @@ -309,7 +503,7 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, atu.func_no = func_no; atu.type = PCIE_ATU_TYPE_MEM; - atu.cpu_addr = addr; + atu.parent_bus_addr = addr - pci->parent_bus_offset; atu.pci_addr = pci_addr; atu.size = size; ret = dw_pcie_ep_outbound_atu(ep, &atu); @@ -642,6 +836,7 @@ void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + dwc_pcie_debugfs_deinit(pci); dw_pcie_edma_remove(pci); } EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup); @@ -666,31 +861,15 @@ void dw_pcie_ep_deinit(struct dw_pcie_ep *ep) } EXPORT_SYMBOL_GPL(dw_pcie_ep_deinit); -static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap) -{ - u32 header; - int pos = PCI_CFG_SPACE_SIZE; - - while (pos) { - header = dw_pcie_readl_dbi(pci, pos); - if (PCI_EXT_CAP_ID(header) == cap) - return pos; - - pos = PCI_EXT_CAP_NEXT(header); - if (!pos) - break; - } - - return 0; -} - static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci) { + struct dw_pcie_ep *ep = &pci->ep; unsigned int offset; unsigned int nbars; - u32 reg, i; + enum pci_barno bar; + u32 reg, i, val; - offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR); + offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR); dw_pcie_dbi_ro_wr_en(pci); @@ -703,9 +882,29 @@ static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci) * PCIe r6.0, sec 7.8.6.2 require us to support at 
least one * size in the range from 1 MB to 512 GB. Advertise support * for 1 MB BAR size only. + * + * For a BAR that has been configured via dw_pcie_ep_set_bar(), + * advertise support for only that size instead. */ - for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) - dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4)); + for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) { + /* + * While the RESBAR_CAP_REG_* fields are sticky, the + * RESBAR_CTRL_REG_BAR_SIZE field is non-sticky (it is + * sticky in certain versions of DWC PCIe, but not all). + * + * RESBAR_CTRL_REG_BAR_SIZE is updated automatically by + * the controller when RESBAR_CAP_REG is written, which + * is why RESBAR_CAP_REG is written here. + */ + val = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); + bar = val & PCI_REBAR_CTRL_BAR_IDX; + if (ep->epf_bar[bar]) + pci_epc_bar_size_to_rebar_cap(ep->epf_bar[bar]->size, &val); + else + val = BIT(4); + + dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, val); + } } dw_pcie_setup(pci); @@ -749,6 +948,7 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep) if (ret) return ret; + ret = -ENOMEM; if (!ep->ib_window_map) { ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows, GFP_KERNEL); @@ -793,7 +993,7 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep) if (ep->ops->init) ep->ops->init(ep); - ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM); + ptm_cap_base = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM); /* * PTM responder capability can be disabled only after disabling @@ -813,6 +1013,8 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep) dw_pcie_ep_init_non_sticky_registers(pci); + dwc_pcie_debugfs_init(pci); + return 0; err_remove_edma: @@ -859,26 +1061,15 @@ void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep) } EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown); -/** - * dw_pcie_ep_init - Initialize the endpoint device - * @ep: DWC EP device - * - * Initialize the endpoint device. Allocate resources and create the EPC - * device with the endpoint framework. - * - * Return: 0 if success, errno otherwise. - */ -int dw_pcie_ep_init(struct dw_pcie_ep *ep) +static int dw_pcie_ep_get_resources(struct dw_pcie_ep *ep) { - int ret; - struct resource *res; - struct pci_epc *epc; struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct device *dev = pci->dev; struct platform_device *pdev = to_platform_device(dev); struct device_node *np = dev->of_node; - - INIT_LIST_HEAD(&ep->func_list); + struct pci_epc *epc = ep->epc; + struct resource *res; + int ret; ret = dw_pcie_get_resources(pci); if (ret) @@ -891,8 +1082,37 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) ep->phys_base = res->start; ep->addr_size = resource_size(res); - if (ep->ops->pre_init) - ep->ops->pre_init(ep); + /* + * artpec6_pcie_cpu_addr_fixup() uses ep->phys_base, so call + * dw_pcie_parent_bus_offset() after setting ep->phys_base. + */ + pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "addr_space", + ep->phys_base); + + ret = of_property_read_u8(np, "max-functions", &epc->max_functions); + if (ret < 0) + epc->max_functions = 1; + + return 0; +} + +/** + * dw_pcie_ep_init - Initialize the endpoint device + * @ep: DWC EP device + * + * Initialize the endpoint device. Allocate resources and create the EPC + * device with the endpoint framework. + * + * Return: 0 if success, errno otherwise. 
+ */ +int dw_pcie_ep_init(struct dw_pcie_ep *ep) +{ + int ret; + struct pci_epc *epc; + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct device *dev = pci->dev; + + INIT_LIST_HEAD(&ep->func_list); epc = devm_pci_epc_create(dev, &epc_ops); if (IS_ERR(epc)) { @@ -903,9 +1123,12 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) ep->epc = epc; epc_set_drvdata(epc, ep); - ret = of_property_read_u8(np, "max-functions", &epc->max_functions); - if (ret < 0) - epc->max_functions = 1; + ret = dw_pcie_ep_get_resources(ep); + if (ret) + return ret; + + if (ep->ops->pre_init) + ep->ops->pre_init(ep); ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size, ep->page_size); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index d2291c3ceb8b..ecc33f6789e3 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -418,50 +418,69 @@ static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp) } } -int dw_pcie_host_init(struct dw_pcie_rp *pp) +static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct device *dev = pci->dev; - struct device_node *np = dev->of_node; struct platform_device *pdev = to_platform_device(dev); struct resource_entry *win; - struct pci_host_bridge *bridge; struct resource *res; int ret; - raw_spin_lock_init(&pp->lock); - ret = dw_pcie_get_resources(pci); if (ret) return ret; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); - if (res) { - pp->cfg0_size = resource_size(res); - pp->cfg0_base = res->start; - - pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(pp->va_cfg0_base)) - return PTR_ERR(pp->va_cfg0_base); - } else { - dev_err(dev, "Missing *config* reg space\n"); + if (!res) { + dev_err(dev, "Missing \"config\" reg space\n"); return -ENODEV; } - bridge = devm_pci_alloc_host_bridge(dev, 0); - if (!bridge) - return -ENOMEM; + pp->cfg0_size = resource_size(res); + pp->cfg0_base = res->start; - pp->bridge = bridge; + pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(pp->va_cfg0_base)) + return PTR_ERR(pp->va_cfg0_base); /* Get the I/O range from DT */ - win = resource_list_first_type(&bridge->windows, IORESOURCE_IO); + win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_IO); if (win) { pp->io_size = resource_size(win->res); pp->io_bus_addr = win->res->start - win->offset; pp->io_base = pci_pio_to_address(win->res->start); } + /* + * visconti_pcie_cpu_addr_fixup() uses pp->io_base, so we have to + * call dw_pcie_parent_bus_offset() after setting pp->io_base. + */ + pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "config", + pp->cfg0_base); + return 0; +} + +int dw_pcie_host_init(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct device *dev = pci->dev; + struct device_node *np = dev->of_node; + struct pci_host_bridge *bridge; + int ret; + + raw_spin_lock_init(&pp->lock); + + bridge = devm_pci_alloc_host_bridge(dev, 0); + if (!bridge) + return -ENOMEM; + + pp->bridge = bridge; + + ret = dw_pcie_host_get_resources(pp); + if (ret) + return ret; + /* Set default bus ops */ bridge->ops = &dw_pcie_ops; bridge->child_ops = &dw_child_pcie_ops; @@ -530,8 +549,14 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) goto err_remove_edma; } - /* Ignore errors, the link may come up later */ - dw_pcie_wait_for_link(pci); + /* + * Note: Skip the link up delay only when a Link Up IRQ is present. 
+ * If there is no Link Up IRQ, we should not bypass the delay + * because that would require users to manually rescan for devices. + */ + if (!pp->use_linkup_irq) + /* Ignore errors, the link may come up later */ + dw_pcie_wait_for_link(pci); bridge->sysdata = pp; @@ -542,6 +567,8 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) if (pp->ops->post_init) pp->ops->post_init(pp); + dwc_pcie_debugfs_init(pci); + return 0; err_stop_link: @@ -566,6 +593,8 @@ void dw_pcie_host_deinit(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + dwc_pcie_debugfs_deinit(pci); + pci_stop_root_bus(pp->bridge->bus); pci_remove_root_bus(pp->bridge->bus); @@ -610,7 +639,7 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus, type = PCIE_ATU_TYPE_CFG1; atu.type = type; - atu.cpu_addr = pp->cfg0_base; + atu.parent_bus_addr = pp->cfg0_base - pci->parent_bus_offset; atu.pci_addr = busdev; atu.size = pp->cfg0_size; @@ -635,7 +664,7 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn, if (pp->cfg0_io_shared) { atu.type = PCIE_ATU_TYPE_IO; - atu.cpu_addr = pp->io_base; + atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset; atu.pci_addr = pp->io_bus_addr; atu.size = pp->io_size; @@ -661,7 +690,7 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn, if (pp->cfg0_io_shared) { atu.type = PCIE_ATU_TYPE_IO; - atu.cpu_addr = pp->io_base; + atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset; atu.pci_addr = pp->io_bus_addr; atu.size = pp->io_size; @@ -730,7 +759,7 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp) atu.index = i; atu.type = PCIE_ATU_TYPE_MEM; - atu.cpu_addr = entry->res->start; + atu.parent_bus_addr = entry->res->start - pci->parent_bus_offset; atu.pci_addr = entry->res->start - entry->offset; /* Adjust iATU size if MSG TLP region was allocated before */ @@ -752,7 +781,7 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp) if (pci->num_ob_windows > ++i) { atu.index = i; atu.type = PCIE_ATU_TYPE_IO; - atu.cpu_addr = pp->io_base; + atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset; atu.pci_addr = pp->io_bus_addr; atu.size = pp->io_size; @@ -896,13 +925,13 @@ static int dw_pcie_pme_turn_off(struct dw_pcie *pci) atu.size = resource_size(pci->pp.msg_res); atu.index = pci->pp.msg_atu_index; - atu.cpu_addr = pci->pp.msg_res->start; + atu.parent_bus_addr = pci->pp.msg_res->start - pci->parent_bus_offset; ret = dw_pcie_prog_outbound_atu(pci, &atu); if (ret) return ret; - mem = ioremap(atu.cpu_addr, pci->region_align); + mem = ioremap(pci->pp.msg_res->start, pci->region_align); if (!mem) return -ENOMEM; @@ -918,7 +947,7 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci) { u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); u32 val; - int ret = 0; + int ret; /* * If L1SS is supported, then do not put the link into L2 as some @@ -927,25 +956,33 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci) if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1) return 0; - if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT) - return 0; - - if (pci->pp.ops->pme_turn_off) + if (pci->pp.ops->pme_turn_off) { pci->pp.ops->pme_turn_off(&pci->pp); - else + } else { ret = dw_pcie_pme_turn_off(pci); + if (ret) + return ret; + } - if (ret) - return ret; - - ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE, + ret = read_poll_timeout(dw_pcie_get_ltssm, val, + val == DW_PCIE_LTSSM_L2_IDLE || + val <= DW_PCIE_LTSSM_DETECT_WAIT, PCIE_PME_TO_L2_TIMEOUT_US/10, 
PCIE_PME_TO_L2_TIMEOUT_US, false, pci); if (ret) { + /* Only log message when LTSSM isn't in DETECT or POLL */ dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val); return ret; } + /* + * Per PCIe r6.0, sec 5.3.3.2.1, software should wait at least + * 100ns after L2/L3 Ready before turning off refclock and + * main power. This is harmless when no endpoint is connected. + */ + udelay(1); + + dw_pcie_stop_link(pci); if (pci->pp.ops->deinit) pci->pp.ops->deinit(&pci->pp); diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 6d6cbc8b5b2c..97d76d3dc066 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -16,6 +16,8 @@ #include <linux/gpio/consumer.h> #include <linux/ioport.h> #include <linux/of.h> +#include <linux/of_address.h> +#include <linux/pcie-dwc.h> #include <linux/platform_device.h> #include <linux/sizes.h> #include <linux/types.h> @@ -283,6 +285,51 @@ u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap) } EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability); +static u16 __dw_pcie_find_vsec_capability(struct dw_pcie *pci, u16 vendor_id, + u16 vsec_id) +{ + u16 vsec = 0; + u32 header; + + if (vendor_id != dw_pcie_readw_dbi(pci, PCI_VENDOR_ID)) + return 0; + + while ((vsec = dw_pcie_find_next_ext_capability(pci, vsec, + PCI_EXT_CAP_ID_VNDR))) { + header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER); + if (PCI_VNDR_HEADER_ID(header) == vsec_id) + return vsec; + } + + return 0; +} + +static u16 dw_pcie_find_vsec_capability(struct dw_pcie *pci, + const struct dwc_pcie_vsec_id *vsec_ids) +{ + const struct dwc_pcie_vsec_id *vid; + u16 vsec; + u32 header; + + for (vid = vsec_ids; vid->vendor_id; vid++) { + vsec = __dw_pcie_find_vsec_capability(pci, vid->vendor_id, + vid->vsec_id); + if (vsec) { + header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER); + if (PCI_VNDR_HEADER_REV(header) == vid->vsec_rev) + return vsec; + } + } + + return 0; +} + +u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci) +{ + return dw_pcie_find_vsec_capability(pci, dwc_pcie_rasdes_vsec_ids); +} +EXPORT_SYMBOL_GPL(dw_pcie_find_rasdes_capability); + int dw_pcie_read(void __iomem *addr, int size, u32 *val) { if (!IS_ALIGNED((uintptr_t)addr, size)) { @@ -470,25 +517,22 @@ static inline u32 dw_pcie_enable_ecrc(u32 val) int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, const struct dw_pcie_ob_atu_cfg *atu) { - u64 cpu_addr = atu->cpu_addr; + u64 parent_bus_addr = atu->parent_bus_addr; u32 retries, val; u64 limit_addr; - if (pci->ops && pci->ops->cpu_addr_fixup) - cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr); + limit_addr = parent_bus_addr + atu->size - 1; - limit_addr = cpu_addr + atu->size - 1; - - if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) || - !IS_ALIGNED(cpu_addr, pci->region_align) || + if ((limit_addr & ~pci->region_limit) != (parent_bus_addr & ~pci->region_limit) || + !IS_ALIGNED(parent_bus_addr, pci->region_align) || !IS_ALIGNED(atu->pci_addr, pci->region_align) || !atu->size) { return -EINVAL; } dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_BASE, - lower_32_bits(cpu_addr)); + lower_32_bits(parent_bus_addr)); dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_BASE, - upper_32_bits(cpu_addr)); + upper_32_bits(parent_bus_addr)); dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LIMIT, lower_32_bits(limit_addr)); @@ -502,7 +546,7 @@ int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, upper_32_bits(atu->pci_addr)); val = atu->type | 
atu->routing | PCIE_ATU_FUNC_NUM(atu->func_no); - if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) && + if (upper_32_bits(limit_addr) > upper_32_bits(parent_bus_addr) && dw_pcie_ver_is_ge(pci, 460A)) val |= PCIE_ATU_INCREASE_REGION_SIZE; if (dw_pcie_ver_is(pci, 490A)) @@ -545,13 +589,13 @@ static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg } int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type, - u64 cpu_addr, u64 pci_addr, u64 size) + u64 parent_bus_addr, u64 pci_addr, u64 size) { u64 limit_addr = pci_addr + size - 1; u32 retries, val; if ((limit_addr & ~pci->region_limit) != (pci_addr & ~pci->region_limit) || - !IS_ALIGNED(cpu_addr, pci->region_align) || + !IS_ALIGNED(parent_bus_addr, pci->region_align) || !IS_ALIGNED(pci_addr, pci->region_align) || !size) { return -EINVAL; } @@ -568,9 +612,9 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type, upper_32_bits(limit_addr)); dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET, - lower_32_bits(cpu_addr)); + lower_32_bits(parent_bus_addr)); dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET, - upper_32_bits(cpu_addr)); + upper_32_bits(parent_bus_addr)); val = type; if (upper_32_bits(limit_addr) > upper_32_bits(pci_addr) && @@ -597,17 +641,18 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type, } int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, - int type, u64 cpu_addr, u8 bar) + int type, u64 parent_bus_addr, u8 bar, size_t size) { u32 retries, val; - if (!IS_ALIGNED(cpu_addr, pci->region_align)) + if (!IS_ALIGNED(parent_bus_addr, pci->region_align) || + !IS_ALIGNED(parent_bus_addr, size)) return -EINVAL; dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET, - lower_32_bits(cpu_addr)); + lower_32_bits(parent_bus_addr)); dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET, - upper_32_bits(cpu_addr)); + upper_32_bits(parent_bus_addr)); dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type | PCIE_ATU_FUNC_NUM(func_no)); @@ -970,7 +1015,7 @@ static int dw_pcie_edma_irq_verify(struct dw_pcie *pci) { struct platform_device *pdev = to_platform_device(pci->dev); u16 ch_cnt = pci->edma.ll_wr_cnt + pci->edma.ll_rd_cnt; - char name[6]; + char name[15]; int ret; if (pci->edma.nr_irqs == 1) @@ -1104,3 +1149,63 @@ void dw_pcie_setup(struct dw_pcie *pci) dw_pcie_link_set_max_link_width(pci, pci->num_lanes); } + +resource_size_t dw_pcie_parent_bus_offset(struct dw_pcie *pci, + const char *reg_name, + resource_size_t cpu_phys_addr) +{ + struct device *dev = pci->dev; + struct device_node *np = dev->of_node; + int index; + u64 reg_addr, fixup_addr; + u64 (*fixup)(struct dw_pcie *pcie, u64 cpu_addr); + + /* Look up reg_name address on parent bus */ + index = of_property_match_string(np, "reg-names", reg_name); + + if (index < 0) { + dev_err(dev, "No %s in devicetree \"reg\" property\n", reg_name); + return 0; + } + + of_property_read_reg(np, index, ®_addr, NULL); + + fixup = pci->ops ? 
pci->ops->cpu_addr_fixup : NULL; + if (fixup) { + fixup_addr = fixup(pci, cpu_phys_addr); + if (reg_addr == fixup_addr) { + dev_info(dev, "%s reg[%d] %#010llx == %#010llx == fixup(cpu %#010llx); %ps is redundant with this devicetree\n", + reg_name, index, reg_addr, fixup_addr, + (unsigned long long) cpu_phys_addr, fixup); + } else { + dev_warn(dev, "%s reg[%d] %#010llx != %#010llx == fixup(cpu %#010llx); devicetree is broken\n", + reg_name, index, reg_addr, fixup_addr, + (unsigned long long) cpu_phys_addr); + reg_addr = fixup_addr; + } + + return cpu_phys_addr - reg_addr; + } + + if (pci->use_parent_dt_ranges) { + + /* + * This platform once had a fixup, presumably because it + * translates between CPU and PCI controller addresses. + * Log a note if devicetree didn't describe a translation. + */ + if (reg_addr == cpu_phys_addr) + dev_info(dev, "%s reg[%d] %#010llx == cpu %#010llx; no fixup was ever needed for this devicetree\n", + reg_name, index, reg_addr, + (unsigned long long) cpu_phys_addr); + } else { + if (reg_addr != cpu_phys_addr) { + dev_warn(dev, "%s reg[%d] %#010llx != cpu %#010llx; no fixup and devicetree \"ranges\" is broken, assuming no translation\n", + reg_name, index, reg_addr, + (unsigned long long) cpu_phys_addr); + return 0; + } + } + + return cpu_phys_addr - reg_addr; +} diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 347ab74ac35a..56aafdbcdaca 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -330,8 +330,40 @@ enum dw_pcie_ltssm { /* Need to align with PCIE_PORT_DEBUG0 bits 0:5 */ DW_PCIE_LTSSM_DETECT_QUIET = 0x0, DW_PCIE_LTSSM_DETECT_ACT = 0x1, + DW_PCIE_LTSSM_POLL_ACTIVE = 0x2, + DW_PCIE_LTSSM_POLL_COMPLIANCE = 0x3, + DW_PCIE_LTSSM_POLL_CONFIG = 0x4, + DW_PCIE_LTSSM_PRE_DETECT_QUIET = 0x5, + DW_PCIE_LTSSM_DETECT_WAIT = 0x6, + DW_PCIE_LTSSM_CFG_LINKWD_START = 0x7, + DW_PCIE_LTSSM_CFG_LINKWD_ACEPT = 0x8, + DW_PCIE_LTSSM_CFG_LANENUM_WAI = 0x9, + DW_PCIE_LTSSM_CFG_LANENUM_ACEPT = 0xa, + DW_PCIE_LTSSM_CFG_COMPLETE = 0xb, + DW_PCIE_LTSSM_CFG_IDLE = 0xc, + DW_PCIE_LTSSM_RCVRY_LOCK = 0xd, + DW_PCIE_LTSSM_RCVRY_SPEED = 0xe, + DW_PCIE_LTSSM_RCVRY_RCVRCFG = 0xf, + DW_PCIE_LTSSM_RCVRY_IDLE = 0x10, DW_PCIE_LTSSM_L0 = 0x11, + DW_PCIE_LTSSM_L0S = 0x12, + DW_PCIE_LTSSM_L123_SEND_EIDLE = 0x13, + DW_PCIE_LTSSM_L1_IDLE = 0x14, DW_PCIE_LTSSM_L2_IDLE = 0x15, + DW_PCIE_LTSSM_L2_WAKE = 0x16, + DW_PCIE_LTSSM_DISABLED_ENTRY = 0x17, + DW_PCIE_LTSSM_DISABLED_IDLE = 0x18, + DW_PCIE_LTSSM_DISABLED = 0x19, + DW_PCIE_LTSSM_LPBK_ENTRY = 0x1a, + DW_PCIE_LTSSM_LPBK_ACTIVE = 0x1b, + DW_PCIE_LTSSM_LPBK_EXIT = 0x1c, + DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT = 0x1d, + DW_PCIE_LTSSM_HOT_RESET_ENTRY = 0x1e, + DW_PCIE_LTSSM_HOT_RESET = 0x1f, + DW_PCIE_LTSSM_RCVRY_EQ0 = 0x20, + DW_PCIE_LTSSM_RCVRY_EQ1 = 0x21, + DW_PCIE_LTSSM_RCVRY_EQ2 = 0x22, + DW_PCIE_LTSSM_RCVRY_EQ3 = 0x23, DW_PCIE_LTSSM_UNKNOWN = 0xFFFFFFFF, }; @@ -342,7 +374,7 @@ struct dw_pcie_ob_atu_cfg { u8 func_no; u8 code; u8 routing; - u64 cpu_addr; + u64 parent_bus_addr; u64 pci_addr; u64 size; }; @@ -379,6 +411,7 @@ struct dw_pcie_rp { bool use_atu_msg; int msg_atu_index; struct resource *msg_res; + bool use_linkup_irq; }; struct dw_pcie_ep_ops { @@ -435,6 +468,11 @@ struct dw_pcie_ops { void (*stop_link)(struct dw_pcie *pcie); }; +struct debugfs_info { + struct dentry *debug_dir; + void *rasdes_info; +}; + struct dw_pcie { struct device *dev; void __iomem *dbi_base; @@ -443,6 +481,7 @@ struct dw_pcie { void __iomem *atu_base; 
resource_size_t atu_phys_addr; size_t atu_size; + resource_size_t parent_bus_offset; u32 num_ib_windows; u32 num_ob_windows; u32 region_align; @@ -463,6 +502,20 @@ struct dw_pcie { struct reset_control_bulk_data core_rsts[DW_PCIE_NUM_CORE_RSTS]; struct gpio_desc *pe_rst; bool suspended; + struct debugfs_info *debugfs; + + /* + * If iATU input addresses are offset from CPU physical addresses, + * we previously required .cpu_addr_fixup() to convert them. We + * now rely on the devicetree instead. If .cpu_addr_fixup() + * exists, we compare its results with devicetree. + * + * If .cpu_addr_fixup() does not exist, we assume the offset is + * zero and warn if devicetree claims otherwise. If we know all + * devicetrees correctly describe the offset, set + * use_parent_dt_ranges to true to avoid this warning. + */ + bool use_parent_dt_ranges; }; #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) @@ -476,6 +529,7 @@ void dw_pcie_version_detect(struct dw_pcie *pci); u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap); u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap); +u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci); int dw_pcie_read(void __iomem *addr, int size, u32 *val); int dw_pcie_write(void __iomem *addr, int size, u32 val); @@ -489,17 +543,18 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci); int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, const struct dw_pcie_ob_atu_cfg *atu); int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type, - u64 cpu_addr, u64 pci_addr, u64 size); + u64 parent_bus_addr, u64 pci_addr, u64 size); int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, - int type, u64 cpu_addr, u8 bar); + int type, u64 parent_bus_addr, + u8 bar, size_t size); void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index); void dw_pcie_setup(struct dw_pcie *pci); void dw_pcie_iatu_detect(struct dw_pcie *pci); int dw_pcie_edma_detect(struct dw_pcie *pci); void dw_pcie_edma_remove(struct dw_pcie *pci); - -int dw_pcie_suspend_noirq(struct dw_pcie *pci); -int dw_pcie_resume_noirq(struct dw_pcie *pci); +resource_size_t dw_pcie_parent_bus_offset(struct dw_pcie *pci, + const char *reg_name, + resource_size_t cpu_phy_addr); static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val) { @@ -678,6 +733,8 @@ static inline enum dw_pcie_ltssm dw_pcie_get_ltssm(struct dw_pcie *pci) } #ifdef CONFIG_PCIE_DW_HOST +int dw_pcie_suspend_noirq(struct dw_pcie *pci); +int dw_pcie_resume_noirq(struct dw_pcie *pci); irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp); int dw_pcie_setup_rc(struct dw_pcie_rp *pp); int dw_pcie_host_init(struct dw_pcie_rp *pp); @@ -686,6 +743,16 @@ int dw_pcie_allocate_domains(struct dw_pcie_rp *pp); void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where); #else +static inline int dw_pcie_suspend_noirq(struct dw_pcie *pci) +{ + return 0; +} + +static inline int dw_pcie_resume_noirq(struct dw_pcie *pci) +{ + return 0; +} + static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp) { return IRQ_NONE; @@ -732,6 +799,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no, u16 interrupt_num); void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar); +int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap); struct dw_pcie_ep_func * dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no); #else @@ -789,10 +857,29 @@ 
static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar) { } +static inline int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, + u8 prev_cap, u8 cap) +{ + return 0; +} + static inline struct dw_pcie_ep_func * dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no) { return NULL; } #endif + +#ifdef CONFIG_PCIE_DW_DEBUGFS +void dwc_pcie_debugfs_init(struct dw_pcie *pci); +void dwc_pcie_debugfs_deinit(struct dw_pcie *pci); +#else +static inline void dwc_pcie_debugfs_init(struct dw_pcie *pci) +{ +} +static inline void dwc_pcie_debugfs_deinit(struct dw_pcie *pci) +{ +} +#endif + #endif /* _PCIE_DESIGNWARE_H */ diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c index 1170e1107508..c624b7ebd118 100644 --- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c +++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c @@ -240,6 +240,34 @@ static const struct dw_pcie_host_ops rockchip_pcie_host_ops = { .init = rockchip_pcie_host_init, }; +/* + * ATS does not work on RK3588 when running in EP mode. + * + * After the host has enabled ATS on the EP side, it will send an IOTLB + * invalidation request to the EP side. However, the RK3588 will never send + * a completion back and eventually the host will print an IOTLB_INV_TIMEOUT + * error, and the EP will not be operational. If we hide the ATS capability, + * things work as expected. + */ +static void rockchip_pcie_ep_hide_broken_ats_cap_rk3588(struct dw_pcie_ep *ep) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct device *dev = pci->dev; + + /* Only hide the ATS capability for RK3588 running in EP mode. */ + if (!of_device_is_compatible(dev->of_node, "rockchip,rk3588-pcie-ep")) + return; + + if (dw_pcie_ep_hide_ext_capability(pci, PCI_EXT_CAP_ID_SECPCI, + PCI_EXT_CAP_ID_ATS)) + dev_err(dev, "failed to hide ATS capability\n"); +} + +static void rockchip_pcie_ep_pre_init(struct dw_pcie_ep *ep) +{ + rockchip_pcie_ep_hide_broken_ats_cap_rk3588(ep); +} + static void rockchip_pcie_ep_init(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); @@ -272,13 +300,14 @@ static const struct pci_epc_features rockchip_pcie_epc_features_rk3568 = { .linkup_notifier = true, .msi_capable = true, .msix_capable = true, + .intx_capable = false, .align = SZ_64K, - .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, - .bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, - .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, - .bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, - .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, - .bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, + .bar[BAR_0] = { .type = BAR_RESIZABLE, }, + .bar[BAR_1] = { .type = BAR_RESIZABLE, }, + .bar[BAR_2] = { .type = BAR_RESIZABLE, }, + .bar[BAR_3] = { .type = BAR_RESIZABLE, }, + .bar[BAR_4] = { .type = BAR_RESIZABLE, }, + .bar[BAR_5] = { .type = BAR_RESIZABLE, }, }; /* @@ -292,13 +321,14 @@ static const struct pci_epc_features rockchip_pcie_epc_features_rk3588 = { .linkup_notifier = true, .msi_capable = true, .msix_capable = true, + .intx_capable = false, .align = SZ_64K, - .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, - .bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, - .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, - .bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, + .bar[BAR_0] = { .type = BAR_RESIZABLE, }, + .bar[BAR_1] = { .type = BAR_RESIZABLE, }, + .bar[BAR_2] = { .type = BAR_RESIZABLE, }, + .bar[BAR_3] 
= { .type = BAR_RESIZABLE, }, .bar[BAR_4] = { .type = BAR_RESERVED, }, - .bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, + .bar[BAR_5] = { .type = BAR_RESIZABLE, }, }; static const struct pci_epc_features * @@ -312,6 +342,7 @@ rockchip_pcie_get_features(struct dw_pcie_ep *ep) static const struct dw_pcie_ep_ops rockchip_pcie_ep_ops = { .init = rockchip_pcie_ep_init, + .pre_init = rockchip_pcie_ep_pre_init, .raise_irq = rockchip_pcie_raise_irq, .get_features = rockchip_pcie_get_features, }; @@ -389,6 +420,34 @@ static const struct dw_pcie_ops dw_pcie_ops = { .stop_link = rockchip_pcie_stop_link, }; +static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg) +{ + struct rockchip_pcie *rockchip = arg; + struct dw_pcie *pci = &rockchip->pci; + struct dw_pcie_rp *pp = &pci->pp; + struct device *dev = pci->dev; + u32 reg, val; + + reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC); + rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC); + + dev_dbg(dev, "PCIE_CLIENT_INTR_STATUS_MISC: %#x\n", reg); + dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip)); + + if (reg & PCIE_RDLH_LINK_UP_CHGED) { + val = rockchip_pcie_get_ltssm(rockchip); + if ((val & PCIE_LINKUP) == PCIE_LINKUP) { + dev_dbg(dev, "Received Link up event. Starting enumeration!\n"); + /* Rescan the bus to enumerate endpoint devices */ + pci_lock_rescan_remove(); + pci_rescan_bus(pp->bridge->bus); + pci_unlock_rescan_remove(); + } + } + + return IRQ_HANDLED; +} + static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg) { struct rockchip_pcie *rockchip = arg; @@ -418,14 +477,29 @@ static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg) return IRQ_HANDLED; } -static int rockchip_pcie_configure_rc(struct rockchip_pcie *rockchip) +static int rockchip_pcie_configure_rc(struct platform_device *pdev, + struct rockchip_pcie *rockchip) { + struct device *dev = &pdev->dev; struct dw_pcie_rp *pp; + int irq, ret; u32 val; if (!IS_ENABLED(CONFIG_PCIE_ROCKCHIP_DW_HOST)) return -ENODEV; + irq = platform_get_irq_byname(pdev, "sys"); + if (irq < 0) + return irq; + + ret = devm_request_threaded_irq(dev, irq, NULL, + rockchip_pcie_rc_sys_irq_thread, + IRQF_ONESHOT, "pcie-sys-rc", rockchip); + if (ret) { + dev_err(dev, "failed to request PCIe sys IRQ\n"); + return ret; + } + /* LTSSM enable control mode */ val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE); rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL); @@ -435,8 +509,19 @@ static int rockchip_pcie_configure_rc(struct rockchip_pcie *rockchip) pp = &rockchip->pci.pp; pp->ops = &rockchip_pcie_host_ops; + pp->use_linkup_irq = true; - return dw_pcie_host_init(pp); + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + return ret; + } + + /* unmask DLL up/down indicator */ + val = HIWORD_UPDATE(PCIE_RDLH_LINK_UP_CHGED, 0); + rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC); + + return ret; } static int rockchip_pcie_configure_ep(struct platform_device *pdev, @@ -450,14 +535,12 @@ static int rockchip_pcie_configure_ep(struct platform_device *pdev, return -ENODEV; irq = platform_get_irq_byname(pdev, "sys"); - if (irq < 0) { - dev_err(dev, "missing sys IRQ resource\n"); + if (irq < 0) return irq; - } ret = devm_request_threaded_irq(dev, irq, NULL, rockchip_pcie_ep_sys_irq_thread, - IRQF_ONESHOT, "pcie-sys", rockchip); + IRQF_ONESHOT, "pcie-sys-ep", rockchip); if (ret) { dev_err(dev, "failed to request PCIe sys IRQ\n"); return ret; 
@@ -491,7 +574,8 @@ static int rockchip_pcie_configure_ep(struct platform_device *pdev,
 	pci_epc_init_notify(rockchip->pci.ep.epc);
 
 	/* unmask DLL up/down indicator and hot reset/link-down reset */
-	rockchip_pcie_writel_apb(rockchip, 0x60000, PCIE_CLIENT_INTR_MASK_MISC);
+	val = HIWORD_UPDATE(PCIE_RDLH_LINK_UP_CHGED | PCIE_LINK_REQ_RST_NOT_INT, 0);
+	rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC);
 
 	return ret;
 }
@@ -553,7 +637,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
 
 	switch (data->mode) {
 	case DW_PCIE_RC_TYPE:
-		ret = rockchip_pcie_configure_rc(rockchip);
+		ret = rockchip_pcie_configure_rc(pdev, rockchip);
 		if (ret)
 			goto deinit_clk;
 		break;
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 615a0e3e6d7e..1f2f4c28a949 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -409,16 +409,21 @@ static int histb_pcie_probe(struct platform_device *pdev)
 	ret = histb_pcie_host_enable(pp);
 	if (ret) {
 		dev_err(dev, "failed to enable host\n");
-		return ret;
+		goto err_exit_phy;
 	}
 
 	ret = dw_pcie_host_init(pp);
 	if (ret) {
 		dev_err(dev, "failed to initialize host\n");
-		return ret;
+		goto err_exit_phy;
 	}
 
 	return 0;
+
+err_exit_phy:
+	phy_exit(hipcie->phy);
+
+	return ret;
 }
 
 static void histb_pcie_remove(struct platform_device *pdev)
@@ -427,8 +432,7 @@ static void histb_pcie_remove(struct platform_device *pdev)
 
 	histb_pcie_host_disable(hipcie);
 
-	if (hipcie->phy)
-		phy_exit(hipcie->phy);
+	phy_exit(hipcie->phy);
 }
 
 static const struct of_device_id histb_pcie_of_match[] = {
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index 9b53b8f6f268..c21906eced61 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -57,7 +57,6 @@
 	PCIE_APP_IRN_INTA | PCIE_APP_IRN_INTB | \
 	PCIE_APP_IRN_INTC | PCIE_APP_IRN_INTD)
 
-#define BUS_IATU_OFFSET		SZ_256M
 #define RESET_INTERVAL_MS	100
 
 struct intel_pcie {
@@ -381,13 +380,7 @@ static int intel_pcie_rc_init(struct dw_pcie_rp *pp)
 	return intel_pcie_host_setup(pcie);
 }
 
-static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
-{
-	return cpu_addr + BUS_IATU_OFFSET;
-}
-
 static const struct dw_pcie_ops intel_pcie_ops = {
-	.cpu_addr_fixup = intel_pcie_cpu_addr,
 };
 
 static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
@@ -409,6 +402,7 @@ static int intel_pcie_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, pcie);
 	pci = &pcie->pci;
 	pci->dev = dev;
+	pci->use_parent_dt_ranges = true;
 	pp = &pci->pp;
 
 	ret = intel_pcie_get_resources(pdev);
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index 1b2088acb538..d0e6a3811b00 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -216,10 +216,9 @@ static int hi3660_pcie_phy_start(struct hi3660_pcie_phy *phy)
 	usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);
 	reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_STATUS0);
-	if (reg_val & PIPE_CLK_STABLE) {
-		dev_err(dev, "PIPE clk is not stable\n");
-		return -EINVAL;
-	}
+	if (reg_val & PIPE_CLK_STABLE)
+		return dev_err_probe(dev, -ETIMEDOUT,
+				     "PIPE clk is not stable\n");
 
 	return 0;
 }
@@ -371,10 +370,9 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
 	if (ret < 0)
 		return 0;
 
-	if (ret > MAX_PCI_SLOTS) {
-		dev_err(dev, "Too many GPIO clock requests!\n");
-		return -EINVAL;
-	}
+	if (ret > MAX_PCI_SLOTS)
+		return dev_err_probe(dev, -EINVAL,
+				     "Too many GPIO clock requests!\n");
 
 	pcie->n_gpio_clkreq = ret;
 
@@ -420,17 +418,16 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
 				 "unable to get a valid reset gpio\n");
 	}
 
-	if (pcie->num_slots + 1 >= MAX_PCI_SLOTS) {
-		dev_err(dev, "Too many PCI slots!\n");
-		return -EINVAL;
-	}
+	if (pcie->num_slots + 1 >= MAX_PCI_SLOTS)
+		return dev_err_probe(dev, -EINVAL,
+				     "Too many PCI slots!\n");
+
 	pcie->num_slots++;
 
 	ret = of_pci_get_devfn(child);
-	if (ret < 0) {
-		dev_err(dev, "failed to parse devfn: %d\n", ret);
-		return ret;
-	}
+	if (ret < 0)
+		return dev_err_probe(dev, ret,
+				     "failed to parse devfn\n");
 
 	slot = PCI_SLOT(ret);
 
@@ -452,7 +449,7 @@ static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
 				     struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct device_node *child, *node = dev->of_node;
+	struct device_node *node = dev->of_node;
 	void __iomem *apb_base;
 	int ret;
 
@@ -477,17 +474,13 @@ static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
 		return ret;
 
 	/* Parse OF children */
-	for_each_available_child_of_node(node, child) {
+	for_each_available_child_of_node_scoped(node, child) {
 		ret = kirin_pcie_parse_port(kirin_pcie, pdev, child);
 		if (ret)
-			goto put_node;
+			return ret;
 	}
 
 	return 0;
-
-put_node:
-	of_node_put(child);
-	return ret;
 }
 
 static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie,
@@ -729,16 +722,9 @@ static int kirin_pcie_probe(struct platform_device *pdev)
 	struct dw_pcie *pci;
 	int ret;
 
-	if (!dev->of_node) {
-		dev_err(dev, "NULL node\n");
-		return -EINVAL;
-	}
-
 	data = of_device_get_match_data(dev);
-	if (!data) {
-		dev_err(dev, "OF data missing\n");
-		return -EINVAL;
-	}
+	if (!data)
+		return dev_err_probe(dev, -EINVAL, "OF data missing\n");
 
 	kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
 	if (!kirin_pcie)
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index c08f64d7a825..46b1c6d19974 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -48,7 +48,7 @@
 #define PARF_DBI_BASE_ADDR_HI			0x354
 #define PARF_SLV_ADDR_SPACE_SIZE		0x358
 #define PARF_SLV_ADDR_SPACE_SIZE_HI		0x35c
-#define PARF_NO_SNOOP_OVERIDE			0x3d4
+#define PARF_NO_SNOOP_OVERRIDE			0x3d4
 #define PARF_ATU_BASE_ADDR			0x634
 #define PARF_ATU_BASE_ADDR_HI			0x638
 #define PARF_SRIS_MODE				0x644
@@ -89,9 +89,9 @@
 #define PARF_DEBUG_INT_CFG_BUS_MASTER_EN	BIT(2)
 #define PARF_DEBUG_INT_RADM_PM_TURNOFF		BIT(3)
 
-/* PARF_NO_SNOOP_OVERIDE register fields */
-#define WR_NO_SNOOP_OVERIDE_EN			BIT(1)
-#define RD_NO_SNOOP_OVERIDE_EN			BIT(3)
+/* PARF_NO_SNOOP_OVERRIDE register fields */
+#define WR_NO_SNOOP_OVERRIDE_EN			BIT(1)
+#define RD_NO_SNOOP_OVERRIDE_EN			BIT(3)
 
 /* PARF_DEVICE_TYPE register fields */
 #define PARF_DEVICE_TYPE_EP			0x0
@@ -529,8 +529,8 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
 	writel_relaxed(val, pcie_ep->parf + PARF_LTSSM);
 
 	if (pcie_ep->cfg && pcie_ep->cfg->override_no_snoop)
-		writel_relaxed(WR_NO_SNOOP_OVERIDE_EN | RD_NO_SNOOP_OVERIDE_EN,
-			       pcie_ep->parf + PARF_NO_SNOOP_OVERIDE);
+		writel_relaxed(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
+			       pcie_ep->parf + PARF_NO_SNOOP_OVERRIDE);
 
 	return 0;
 
@@ -825,6 +825,10 @@ static const struct pci_epc_features qcom_pcie_epc_features = {
 	.msi_capable = true,
 	.msix_capable = false,
 	.align = SZ_4K,
+	.bar[BAR_0] = { .only_64bit = true, },
+	.bar[BAR_1] = { .type = BAR_RESERVED, },
+	.bar[BAR_2] = { .only_64bit = true, },
+	.bar[BAR_3] = { .type = BAR_RESERVED, },
 };
 
 static const struct pci_epc_features *
@@ -933,6 +937,7 @@ static const struct of_device_id qcom_pcie_ep_match[] = {
 	{ .compatible = "qcom,sa8775p-pcie-ep", .data = &cfg_1_34_0},
 	{ .compatible = "qcom,sdx55-pcie-ep", },
 	{ .compatible = "qcom,sm8450-pcie-ep", },
+	{ .compatible = "qcom,sar2130p-pcie-ep", },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, qcom_pcie_ep_match);
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index dc102d8bd58c..dc98ae63362d 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -61,7 +61,7 @@
 #define PARF_DBI_BASE_ADDR_V2_HI		0x354
 #define PARF_SLV_ADDR_SPACE_SIZE_V2		0x358
 #define PARF_SLV_ADDR_SPACE_SIZE_V2_HI		0x35c
-#define PARF_NO_SNOOP_OVERIDE			0x3d4
+#define PARF_NO_SNOOP_OVERRIDE			0x3d4
 #define PARF_ATU_BASE_ADDR			0x634
 #define PARF_ATU_BASE_ADDR_HI			0x638
 #define PARF_DEVICE_TYPE			0x1000
@@ -135,9 +135,9 @@
 #define PARF_INT_ALL_LINK_UP			BIT(13)
 #define PARF_INT_MSI_DEV_0_7			GENMASK(30, 23)
 
-/* PARF_NO_SNOOP_OVERIDE register fields */
-#define WR_NO_SNOOP_OVERIDE_EN			BIT(1)
-#define RD_NO_SNOOP_OVERIDE_EN			BIT(3)
+/* PARF_NO_SNOOP_OVERRIDE register fields */
+#define WR_NO_SNOOP_OVERRIDE_EN			BIT(1)
+#define RD_NO_SNOOP_OVERRIDE_EN			BIT(3)
 
 /* PARF_DEVICE_TYPE register fields */
 #define DEVICE_TYPE_RC				0x4
@@ -1007,8 +1007,8 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
 	const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg;
 
 	if (pcie_cfg->override_no_snoop)
-		writel(WR_NO_SNOOP_OVERIDE_EN | RD_NO_SNOOP_OVERIDE_EN,
-		       pcie->parf + PARF_NO_SNOOP_OVERIDE);
+		writel(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
+		       pcie->parf + PARF_NO_SNOOP_OVERRIDE);
 
 	qcom_pcie_clear_aspm_l0s(pcie->pci);
 	qcom_pcie_clear_hpc(pcie->pci);
@@ -1569,6 +1569,8 @@ static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
 		pci_lock_rescan_remove();
 		pci_rescan_bus(pp->bridge->bus);
 		pci_unlock_rescan_remove();
+
+		qcom_pcie_icc_opp_update(pcie);
 	} else {
 		dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
 			      status);
@@ -1703,6 +1705,10 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, pcie);
 
+	irq = platform_get_irq_byname_optional(pdev, "global");
+	if (irq > 0)
+		pp->use_linkup_irq = true;
+
 	ret = dw_pcie_host_init(pp);
 	if (ret) {
 		dev_err(dev, "cannot initialize host\n");
@@ -1716,7 +1722,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 		goto err_host_deinit;
 	}
 
-	irq = platform_get_irq_byname_optional(pdev, "global");
 	if (irq > 0) {
 		ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
 						qcom_pcie_global_irq_thread,
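
The Rockchip and Qualcomm host-mode hunks above follow the same pattern: the probe path sets pp->use_linkup_irq before calling dw_pcie_host_init(), so the DWC core does not wait for the link during host init, and a threaded controller IRQ handler later rescans the root bus once a link-up event is reported. A minimal sketch of that pattern is shown below; the my_pcie_* names, the "sys" IRQ name, and the my_pcie_link_is_up() check are illustrative placeholders standing in for driver-specific details, not code taken from either driver.

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"

/* Driver-specific LTSSM/link-status check; hypothetical helper. */
static bool my_pcie_link_is_up(struct dw_pcie_rp *pp);

static irqreturn_t my_pcie_linkup_irq_thread(int irq, void *arg)
{
	struct dw_pcie_rp *pp = arg;

	if (my_pcie_link_is_up(pp)) {
		/* Hold the rescan lock while enumerating downstream devices. */
		pci_lock_rescan_remove();
		pci_rescan_bus(pp->bridge->bus);
		pci_unlock_rescan_remove();
	}

	return IRQ_HANDLED;
}

static int my_pcie_setup_linkup_irq(struct platform_device *pdev,
				    struct dw_pcie_rp *pp)
{
	int irq = platform_get_irq_byname(pdev, "sys");

	if (irq < 0)
		return irq;

	/* Tell the DWC core not to wait for the link during host init. */
	pp->use_linkup_irq = true;

	return devm_request_threaded_irq(&pdev->dev, irq, NULL,
					 my_pcie_linkup_irq_thread,
					 IRQF_ONESHOT, "pcie-sys", pp);
}

With this arrangement, dw_pcie_host_init() can succeed even if no device is present at probe time; enumeration happens later from the threaded handler, which is also why the Qualcomm hunk moves the "global" IRQ lookup ahead of dw_pcie_host_init() so use_linkup_irq can be set only when that interrupt actually exists.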