Diffstat (limited to 'drivers/pci/controller')
46 files changed, 3121 insertions, 958 deletions
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c index 0341d51d6aed..ef1cfdae33bb 100644 --- a/drivers/pci/controller/cadence/pci-j721e.c +++ b/drivers/pci/controller/cadence/pci-j721e.c @@ -355,6 +355,7 @@ static const struct j721e_pcie_data j7200_pcie_rc_data = { static const struct j721e_pcie_data j7200_pcie_ep_data = { .mode = PCI_MODE_EP, .quirk_detect_quiet_flag = true, + .linkdown_irq_regfield = J7200_LINK_DOWN, .quirk_disable_flr = true, .max_lanes = 2, }; @@ -376,13 +377,13 @@ static const struct j721e_pcie_data j784s4_pcie_rc_data = { .mode = PCI_MODE_RC, .quirk_retrain_flag = true, .byte_access_allowed = false, - .linkdown_irq_regfield = LINK_DOWN, + .linkdown_irq_regfield = J7200_LINK_DOWN, .max_lanes = 4, }; static const struct j721e_pcie_data j784s4_pcie_ep_data = { .mode = PCI_MODE_EP, - .linkdown_irq_regfield = LINK_DOWN, + .linkdown_irq_regfield = J7200_LINK_DOWN, .max_lanes = 4, }; diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c index e0cc4560dfde..599ec4b1223e 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-ep.c +++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c @@ -301,12 +301,12 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn, val |= interrupts; cdns_pcie_ep_fn_writew(pcie, fn, reg, val); - /* Set MSIX BAR and offset */ + /* Set MSI-X BAR and offset */ reg = cap + PCI_MSIX_TABLE; val = offset | bir; cdns_pcie_ep_fn_writel(pcie, fn, reg, val); - /* Set PBA BAR and offset. BAR must match MSIX BAR */ + /* Set PBA BAR and offset. BAR must match MSI-X BAR */ reg = cap + PCI_MSIX_PBA; val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir; cdns_pcie_ep_fn_writel(pcie, fn, reg, val); @@ -352,8 +352,7 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx, spin_unlock_irqrestore(&ep->lock, flags); offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) | - CDNS_PCIE_NORMAL_MSG_CODE(msg_code) | - CDNS_PCIE_MSG_NO_DATA; + CDNS_PCIE_NORMAL_MSG_CODE(msg_code); writel(0, ep->irq_cpu_addr + offset); } @@ -573,8 +572,8 @@ static int cdns_pcie_ep_start(struct pci_epc *epc) /* * Next function field in ARI_CAP_AND_CTR register for last function - * should be 0. - * Clearing Next Function Number field for the last function used. + * should be 0. Clear Next Function Number field for the last + * function used. 
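+ * (find_last_bit() below locates the highest-numbered enabled function
+ * in epc->function_num_map.)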
*/ last_fn = find_last_bit(&epc->function_num_map, BITS_PER_LONG); reg = CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(last_fn); diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h index f5eeff834ec1..39ee9945c903 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.h +++ b/drivers/pci/controller/cadence/pcie-cadence.h @@ -246,7 +246,7 @@ struct cdns_pcie_rp_ib_bar { #define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8) #define CDNS_PCIE_NORMAL_MSG_CODE(code) \ (((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK) -#define CDNS_PCIE_MSG_NO_DATA BIT(16) +#define CDNS_PCIE_MSG_DATA BIT(16) struct cdns_pcie; diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index b6d6778b0698..d9f0386396ed 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -6,6 +6,16 @@ menu "DesignWare-based PCIe controllers" config PCIE_DW bool +config PCIE_DW_DEBUGFS + bool "DesignWare PCIe debugfs entries" + depends on DEBUG_FS + depends on PCIE_DW_HOST || PCIE_DW_EP + help + Say Y here to enable debugfs entries for the PCIe controller. These + entries provide various debug features related to the controller and + expose the RAS DES capabilities such as Silicon Debug, Error Injection + and Statistical Counters. + config PCIE_DW_HOST bool select PCIE_DW @@ -27,6 +37,17 @@ config PCIE_AL required only for DT-based platforms. ACPI platforms with the Annapurna Labs PCIe controller don't need to enable this. +config PCIE_AMD_MDB + bool "AMD MDB Versal2 PCIe controller" + depends on OF && (ARM64 || COMPILE_TEST) + depends on PCI_MSI + select PCIE_DW_HOST + help + Say Y here if you want to enable PCIe controller support on AMD + Versal2 SoCs. The AMD MDB Versal2 PCIe controller is based on + DesignWare IP and therefore the driver re-uses the DesignWare + core functions to implement the driver. 
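As the help text notes, DWC-based glue drivers delegate most of the work to the DesignWare core. A minimal sketch of that pattern (names such as example_pcie are illustrative only; the complete Versal2 driver is the new pcie-amd-mdb.c further down in this diff):

#include <linux/platform_device.h>

#include "pcie-designware.h"

struct example_pcie {
	struct dw_pcie pci;		/* embedded DWC core state */
	void __iomem *wrapper_regs;	/* SoC-specific glue registers */
};

static const struct dw_pcie_host_ops example_pcie_host_ops = {
	/* optional .init/.deinit hooks for SoC specifics */
};

static int example_pcie_probe(struct platform_device *pdev)
{
	struct example_pcie *pcie;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pcie->pci.dev = &pdev->dev;
	pcie->pci.pp.ops = &example_pcie_host_ops;

	/*
	 * The DWC core fetches the "dbi" and "config" resources from the
	 * device tree and performs the generic host bring-up; the glue
	 * driver only adds SoC specifics (IRQs, PHY, resets).
	 */
	return dw_pcie_host_init(&pcie->pci.pp);
}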
+ config PCI_MESON tristate "Amlogic Meson PCIe controller" default m if ARCH_MESON diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile index a8308d9ea986..908cb7f345db 100644 --- a/drivers/pci/controller/dwc/Makefile +++ b/drivers/pci/controller/dwc/Makefile @@ -1,8 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_PCIE_DW) += pcie-designware.o +obj-$(CONFIG_PCIE_DW_DEBUGFS) += pcie-designware-debugfs.o obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o +obj-$(CONFIG_PCIE_AMD_MDB) += pcie-amd-mdb.o obj-$(CONFIG_PCIE_BT1) += pcie-bt1.o obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index 5c62e1a3ba52..33d6bf460ffe 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -635,30 +635,20 @@ static int dra7xx_pcie_unaligned_memaccess(struct device *dev) { int ret; struct device_node *np = dev->of_node; - struct of_phandle_args args; + unsigned int args[2]; struct regmap *regmap; - regmap = syscon_regmap_lookup_by_phandle(np, - "ti,syscon-unaligned-access"); + regmap = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-unaligned-access", + 2, args); if (IS_ERR(regmap)) { dev_dbg(dev, "can't get ti,syscon-unaligned-access\n"); return -EINVAL; } - ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access", - 2, 0, &args); - if (ret) { - dev_err(dev, "failed to parse ti,syscon-unaligned-access\n"); - return ret; - } - - ret = regmap_update_bits(regmap, args.args[0], args.args[1], - args.args[1]); + ret = regmap_update_bits(regmap, args[0], args[1], args[1]); if (ret) dev_err(dev, "failed to enable unaligned access\n"); - of_node_put(args.np); - return ret; } @@ -671,18 +661,13 @@ static int dra7xx_pcie_configure_two_lane(struct device *dev, u32 mask; u32 val; - pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel"); + pcie_syscon = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-lane-sel", + 1, &pcie_reg); if (IS_ERR(pcie_syscon)) { dev_err(dev, "unable to get ti,syscon-lane-sel\n"); return -EINVAL; } - if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1, - &pcie_reg)) { - dev_err(dev, "couldn't get lane selection reg offset\n"); - return -EINVAL; - } - mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN; val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN; regmap_update_bits(pcie_syscon, pcie_reg, mask, val); diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c index 6a830166d37f..ace736b025b1 100644 --- a/drivers/pci/controller/dwc/pci-exynos.c +++ b/drivers/pci/controller/dwc/pci-exynos.c @@ -300,7 +300,7 @@ static int exynos_pcie_probe(struct platform_device *pdev) if (IS_ERR(ep->elbi_base)) return PTR_ERR(ep->elbi_base); - ret = devm_clk_bulk_get_all_enable(dev, &ep->clks); + ret = devm_clk_bulk_get_all_enabled(dev, &ep->clks); if (ret < 0) return ret; diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index c8d5c90aa4d4..5f267dd261b5 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -33,6 +33,7 @@ #include <linux/pm_domain.h> #include <linux/pm_runtime.h> +#include "../../pci.h" #include "pcie-designware.h" #define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9) @@ -40,7 +41,6 @@ #define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11) 
#define IMX8MQ_GPR_PCIE_VREG_BYPASS BIT(12) #define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8) -#define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000 #define IMX95_PCIE_PHY_GEN_CTRL 0x0 #define IMX95_PCIE_REF_USE_PAD BIT(17) @@ -55,6 +55,22 @@ #define IMX95_PE0_GEN_CTRL_3 0x1058 #define IMX95_PCIE_LTSSM_EN BIT(0) +#define IMX95_PE0_LUT_ACSCTRL 0x1008 +#define IMX95_PEO_LUT_RWA BIT(16) +#define IMX95_PE0_LUT_ENLOC GENMASK(4, 0) + +#define IMX95_PE0_LUT_DATA1 0x100c +#define IMX95_PE0_LUT_VLD BIT(31) +#define IMX95_PE0_LUT_DAC_ID GENMASK(10, 8) +#define IMX95_PE0_LUT_STREAM_ID GENMASK(5, 0) + +#define IMX95_PE0_LUT_DATA2 0x1010 +#define IMX95_PE0_LUT_REQID GENMASK(31, 16) +#define IMX95_PE0_LUT_MASK GENMASK(15, 0) + +#define IMX95_SID_MASK GENMASK(5, 0) +#define IMX95_MAX_LUT 32 + #define to_imx_pcie(x) dev_get_drvdata((x)->dev) enum imx_pcie_variants { @@ -70,6 +86,7 @@ enum imx_pcie_variants { IMX8MQ_EP, IMX8MM_EP, IMX8MP_EP, + IMX8Q_EP, IMX95_EP, }; @@ -87,10 +104,10 @@ enum imx_pcie_variants { * workaround suspend resume on some devices which are affected by this errata. */ #define IMX_PCIE_FLAG_BROKEN_SUSPEND BIT(9) +#define IMX_PCIE_FLAG_HAS_LUT BIT(10) #define imx_check_flag(pci, val) (pci->drvdata->flags & val) -#define IMX_PCIE_MAX_CLKS 6 #define IMX_PCIE_MAX_INSTANCES 2 struct imx_pcie; @@ -101,8 +118,6 @@ struct imx_pcie_drvdata { u32 flags; int dbi_length; const char *gpr; - const char * const *clk_names; - const u32 clks_cnt; const u32 ltssm_off; const u32 ltssm_mask; const u32 mode_off[IMX_PCIE_MAX_INSTANCES]; @@ -111,19 +126,19 @@ struct imx_pcie_drvdata { int (*init_phy)(struct imx_pcie *pcie); int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable); int (*core_reset)(struct imx_pcie *pcie, bool assert); + const struct dw_pcie_host_ops *ops; }; struct imx_pcie { struct dw_pcie *pci; struct gpio_desc *reset_gpiod; - bool link_is_up; - struct clk_bulk_data clks[IMX_PCIE_MAX_CLKS]; + struct clk_bulk_data *clks; + int num_clks; struct regmap *iomuxc_gpr; u16 msi_ctrl; u32 controller_id; struct reset_control *pciephy_reset; struct reset_control *apps_reset; - struct reset_control *turnoff_reset; u32 tx_deemph_gen1; u32 tx_deemph_gen2_3p5db; u32 tx_deemph_gen2_6db; @@ -139,6 +154,9 @@ struct imx_pcie { struct device *pd_pcie_phy; struct phy *phy; const struct imx_pcie_drvdata *drvdata; + + /* Ensure that only one device's LUT is configured at any given time */ + struct mutex lock; }; /* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ @@ -234,11 +252,11 @@ static void imx_pcie_configure_type(struct imx_pcie *imx_pcie) id = imx_pcie->controller_id; - /* If mode_mask is 0, then generic PHY driver is used to set the mode */ + /* If mode_mask is 0, generic PHY driver is used to set the mode */ if (!drvdata->mode_mask[0]) return; - /* If mode_mask[id] is zero, means each controller have its individual gpr */ + /* If mode_mask[id] is 0, each controller has its individual GPR */ if (!drvdata->mode_mask[id]) id = 0; @@ -375,14 +393,15 @@ static int pcie_phy_write(struct imx_pcie *imx_pcie, int addr, u16 data) static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie) { - /* TODO: Currently this code assumes external oscillator is being used */ + /* TODO: This code assumes external oscillator is being used */ regmap_update_bits(imx_pcie->iomuxc_gpr, imx_pcie_grp_offset(imx_pcie), IMX8MQ_GPR_PCIE_REF_USE_PAD, IMX8MQ_GPR_PCIE_REF_USE_PAD); /* - * Regarding the datasheet, the PCIE_VPH is suggested to be 1.8V. 
If the PCIE_VPH is - * supplied by 3.3V, the VREG_BYPASS should be cleared to zero. + * Per the datasheet, the PCIE_VPH is suggested to be 1.8V. If the + * PCIE_VPH is supplied by 3.3V, the VREG_BYPASS should be cleared + * to zero. */ if (imx_pcie->vph && regulator_get_voltage(imx_pcie->vph) > 3000000) regmap_update_bits(imx_pcie->iomuxc_gpr, @@ -393,13 +412,6 @@ static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie) return 0; } -static int imx7d_pcie_init_phy(struct imx_pcie *imx_pcie) -{ - regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0); - - return 0; -} - static int imx_pcie_init_phy(struct imx_pcie *imx_pcie) { regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, @@ -454,13 +466,14 @@ static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie) int mult, div; u16 val; int i; + struct clk_bulk_data *clks = imx_pcie->clks; if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY)) return 0; - for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++) - if (strncmp(imx_pcie->clks[i].id, "pcie_phy", 8) == 0) - phy_rate = clk_get_rate(imx_pcie->clks[i].clk); + for (i = 0; i < imx_pcie->num_clks; i++) + if (strncmp(clks[i].id, "pcie_phy", 8) == 0) + phy_rate = clk_get_rate(clks[i].clk); switch (phy_rate) { case 125000000: @@ -576,7 +589,7 @@ static int imx_pcie_attach_pd(struct device *dev) DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); if (!link) { - dev_err(dev, "Failed to add device_link to pcie pd.\n"); + dev_err(dev, "Failed to add device_link to pcie pd\n"); return -EINVAL; } @@ -589,7 +602,7 @@ static int imx_pcie_attach_pd(struct device *dev) DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); if (!link) { - dev_err(dev, "Failed to add device_link to pcie_phy pd.\n"); + dev_err(dev, "Failed to add device_link to pcie_phy pd\n"); return -EINVAL; } @@ -598,10 +611,9 @@ static int imx_pcie_attach_pd(struct device *dev) static int imx6sx_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) { - if (enable) - regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX6SX_GPR12_PCIE_TEST_POWERDOWN); - + regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6SX_GPR12_PCIE_TEST_POWERDOWN, + enable ? 0 : IMX6SX_GPR12_PCIE_TEST_POWERDOWN); return 0; } @@ -611,10 +623,10 @@ static int imx6q_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) { /* power up core phy and enable ref clock */ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD); /* - * the async reset input need ref clock to sync internally, + * The async reset input needs the ref clock to sync internally, * when the ref clock comes after reset, internal synced * reset time is too short, cannot meet the requirement. - * add one ~10us delay here. + * Add a ~10us delay here. */ usleep_range(10, 100); regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN); @@ -630,19 +642,20 @@ static int imx8mm_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) { int offset = imx_pcie_grp_offset(imx_pcie); - if (enable) { - regmap_clear_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE); - regmap_set_bits(imx_pcie->iomuxc_gpr, offset, IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN); - } - + regmap_update_bits(imx_pcie->iomuxc_gpr, offset, + IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE, + enable ? 0 : IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE); + regmap_update_bits(imx_pcie->iomuxc_gpr, offset, + IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN, + enable ?
IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN : 0); return 0; } static int imx7d_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable) { - if (!enable) - regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX7D_GPR12_PCIE_PHY_REFCLK_SEL); + regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, + enable ? 0 : IMX7D_GPR12_PCIE_PHY_REFCLK_SEL); return 0; } @@ -652,7 +665,7 @@ static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie) struct device *dev = pci->dev; int ret; - ret = clk_bulk_prepare_enable(imx_pcie->drvdata->clks_cnt, imx_pcie->clks); + ret = clk_bulk_prepare_enable(imx_pcie->num_clks, imx_pcie->clks); if (ret) return ret; @@ -669,7 +682,7 @@ static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie) return 0; err_ref_clk: - clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks); + clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks); return ret; } @@ -678,7 +691,7 @@ static void imx_pcie_clk_disable(struct imx_pcie *imx_pcie) { if (imx_pcie->drvdata->enable_ref_clk) imx_pcie->drvdata->enable_ref_clk(imx_pcie, false); - clk_bulk_disable_unprepare(imx_pcie->drvdata->clks_cnt, imx_pcie->clks); + clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks); } static int imx6sx_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert) @@ -775,6 +788,7 @@ static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie) static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie) { reset_control_deassert(imx_pcie->pciephy_reset); + reset_control_deassert(imx_pcie->apps_reset); if (imx_pcie->drvdata->core_reset) imx_pcie->drvdata->core_reset(imx_pcie, false); @@ -884,6 +898,7 @@ static int imx_pcie_start_link(struct dw_pcie *pci) if (imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_SPEED_CHANGE) { + /* * On i.MX7, DIRECT_SPEED_CHANGE behaves differently * from i.MX6 family when no link speed transition @@ -892,7 +907,6 @@ static int imx_pcie_start_link(struct dw_pcie *pci) * which will cause the following code to report false * failure. */ - ret = imx_pcie_wait_for_speed_change(imx_pcie); if (ret) { dev_err(dev, "Failed to bring link up!\n"); @@ -908,13 +922,11 @@ static int imx_pcie_start_link(struct dw_pcie *pci) dev_info(dev, "Link: Only Gen1 is enabled\n"); } - imx_pcie->link_is_up = true; tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA); dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS); return 0; err_reset_phy: - imx_pcie->link_is_up = false; dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n", dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0), dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1)); @@ -930,6 +942,184 @@ static void imx_pcie_stop_link(struct dw_pcie *pci) imx_pcie_ltssm_disable(dev); } +static int imx_pcie_add_lut(struct imx_pcie *imx_pcie, u16 rid, u8 sid) +{ + struct dw_pcie *pci = imx_pcie->pci; + struct device *dev = pci->dev; + u32 data1, data2; + int free = -1; + int i; + + if (sid >= 64) { + dev_err(dev, "Invalid SID for index %d\n", sid); + return -EINVAL; + } + + guard(mutex)(&imx_pcie->lock); + + /* + * Iterate through all LUT entries to check for duplicate RID and + * identify the first available entry. Configure this available entry + * immediately after verification to avoid rescanning it. 
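+ * (Writing IMX95_PEO_LUT_RWA together with an index to the ACSCTRL
+ * register loads that LUT entry into the DATA1/DATA2 registers;
+ * writing the bare index back commits new DATA1/DATA2 contents.)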
+ */ + for (i = 0; i < IMX95_MAX_LUT; i++) { + regmap_write(imx_pcie->iomuxc_gpr, + IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i); + regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1); + + if (!(data1 & IMX95_PE0_LUT_VLD)) { + if (free < 0) + free = i; + continue; + } + + regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2); + + /* Do not add duplicate RID */ + if (rid == FIELD_GET(IMX95_PE0_LUT_REQID, data2)) { + dev_warn(dev, "Existing LUT entry available for RID (%d)", rid); + return 0; + } + } + + if (free < 0) { + dev_err(dev, "LUT entry is not available\n"); + return -ENOSPC; + } + + data1 = FIELD_PREP(IMX95_PE0_LUT_DAC_ID, 0); + data1 |= FIELD_PREP(IMX95_PE0_LUT_STREAM_ID, sid); + data1 |= IMX95_PE0_LUT_VLD; + regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, data1); + + data2 = IMX95_PE0_LUT_MASK; /* Match all bits of RID */ + data2 |= FIELD_PREP(IMX95_PE0_LUT_REQID, rid); + regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, data2); + + regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, free); + + return 0; +} + +static void imx_pcie_remove_lut(struct imx_pcie *imx_pcie, u16 rid) +{ + u32 data2; + int i; + + guard(mutex)(&imx_pcie->lock); + + for (i = 0; i < IMX95_MAX_LUT; i++) { + regmap_write(imx_pcie->iomuxc_gpr, + IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i); + regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2); + if (FIELD_GET(IMX95_PE0_LUT_REQID, data2) == rid) { + regmap_write(imx_pcie->iomuxc_gpr, + IMX95_PE0_LUT_DATA1, 0); + regmap_write(imx_pcie->iomuxc_gpr, + IMX95_PE0_LUT_DATA2, 0); + regmap_write(imx_pcie->iomuxc_gpr, + IMX95_PE0_LUT_ACSCTRL, i); + + break; + } + } +} + +static int imx_pcie_enable_device(struct pci_host_bridge *bridge, + struct pci_dev *pdev) +{ + struct imx_pcie *imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata)); + u32 sid_i, sid_m, rid = pci_dev_id(pdev); + struct device_node *target; + struct device *dev; + int err_i, err_m; + u32 sid = 0; + + dev = imx_pcie->pci->dev; + + target = NULL; + err_i = of_map_id(dev->of_node, rid, "iommu-map", "iommu-map-mask", + &target, &sid_i); + if (target) { + of_node_put(target); + } else { + /* + * "target == NULL && err_i == 0" means RID out of map range. + * Use 1:1 map RID to streamID. Hardware can't support this + * because the streamID is only 6 bits + */ + err_i = -EINVAL; + } + + target = NULL; + err_m = of_map_id(dev->of_node, rid, "msi-map", "msi-map-mask", + &target, &sid_m); + + /* + * err_m target + * 0 NULL RID out of range. Use 1:1 map RID to + * streamID, Current hardware can't + * support it, so return -EINVAL. + * != 0 NULL msi-map does not exist, use built-in MSI + * 0 != NULL Get correct streamID from RID + * != 0 != NULL Invalid combination + */ + if (!err_m && !target) + return -EINVAL; + else if (target) + of_node_put(target); /* Find streamID map entry for RID in msi-map */ + + /* + * msi-map iommu-map + * N N DWC MSI Ctrl + * Y Y ITS + SMMU, require the same SID + * Y N ITS + * N Y DWC MSI Ctrl + SMMU + */ + if (err_i && err_m) + return 0; + + if (!err_i && !err_m) { + /* + * Glue Layer + * <==========> + * ┌─────┐ ┌──────────┐ + * │ LUT │ 6-bit streamID │ │ + * │ │─────────────────►│ MSI │ + * └─────┘ 2-bit ctrl ID │ │ + * ┌───────────►│ │ + * (i.MX95) │ │ │ + * 00 PCIe0 │ │ │ + * 01 ENETC │ │ │ + * 10 PCIe1 │ │ │ + * │ └──────────┘ + * The MSI glue layer auto adds 2 bits controller ID ahead of + * streamID, so mask these 2 bits to get streamID. The + * IOMMU glue layer doesn't do that. 
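+ * (IMX95_SID_MASK below strips those two controller ID bits so that
+ * only the 6-bit streamID is compared against the iommu-map entry
+ * and programmed into the LUT.)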
+ */ + if (sid_i != (sid_m & IMX95_SID_MASK)) { + dev_err(dev, "iommu-map and msi-map entries mismatch!\n"); + return -EINVAL; + } + } + + if (!err_i) + sid = sid_i; + else if (!err_m) + sid = sid_m & IMX95_SID_MASK; + + return imx_pcie_add_lut(imx_pcie, rid, sid); +} + +static void imx_pcie_disable_device(struct pci_host_bridge *bridge, + struct pci_dev *pdev) +{ + struct imx_pcie *imx_pcie; + + imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata)); + imx_pcie_remove_lut(imx_pcie, pci_dev_id(pdev)); +} + static int imx_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); @@ -946,6 +1136,11 @@ static int imx_pcie_host_init(struct dw_pcie_rp *pp) } } + if (pp->bridge && imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT)) { + pp->bridge->enable_device = imx_pcie_enable_device; + pp->bridge->disable_device = imx_pcie_disable_device; + } + imx_pcie_assert_core_reset(imx_pcie); if (imx_pcie->drvdata->init_phy) @@ -966,7 +1161,9 @@ static int imx_pcie_host_init(struct dw_pcie_rp *pp) goto err_clk_disable; } - ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC); + ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE, + imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE ? + PHY_MODE_PCIE_EP : PHY_MODE_PCIE_RC); if (ret) { dev_err(dev, "unable to set PCIe PHY mode\n"); goto err_phy_exit; @@ -1017,31 +1214,36 @@ static void imx_pcie_host_exit(struct dw_pcie_rp *pp) regulator_disable(imx_pcie->vpcie); } -static u64 imx_pcie_cpu_addr_fixup(struct dw_pcie *pcie, u64 cpu_addr) +/* + * In old DWC implementations, PCIE_ATU_INHIBIT_PAYLOAD in iATU Ctrl2 + * register is reserved, so the generic DWC implementation of sending the + * PME_Turn_Off message using a dummy MMIO write cannot be used. + */ +static void imx_pcie_pme_turn_off(struct dw_pcie_rp *pp) { - struct imx_pcie *imx_pcie = to_imx_pcie(pcie); - struct dw_pcie_rp *pp = &pcie->pp; - struct resource_entry *entry; - - if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_CPU_ADDR_FIXUP)) - return cpu_addr; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct imx_pcie *imx_pcie = to_imx_pcie(pci); - entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM); - if (!entry) - return cpu_addr; + regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF); + regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF); - return cpu_addr - entry->offset; + usleep_range(PCIE_PME_TO_L2_TIMEOUT_US/10, PCIE_PME_TO_L2_TIMEOUT_US); } static const struct dw_pcie_host_ops imx_pcie_host_ops = { .init = imx_pcie_host_init, .deinit = imx_pcie_host_exit, + .pme_turn_off = imx_pcie_pme_turn_off, +}; + +static const struct dw_pcie_host_ops imx_pcie_host_dw_pme_ops = { + .init = imx_pcie_host_init, + .deinit = imx_pcie_host_exit, }; static const struct dw_pcie_ops dw_pcie_ops = { .start_link = imx_pcie_start_link, .stop_link = imx_pcie_stop_link, - .cpu_addr_fixup = imx_pcie_cpu_addr_fixup, }; static void imx_pcie_ep_init(struct dw_pcie_ep *ep) @@ -1082,16 +1284,27 @@ static const struct pci_epc_features imx8m_pcie_epc_features = { .align = SZ_64K, }; +static const struct pci_epc_features imx8q_pcie_epc_features = { + .linkup_notifier = false, + .msi_capable = true, + .msix_capable = false, + .bar[BAR_1] = { .type = BAR_RESERVED, }, + .bar[BAR_3] = { .type = BAR_RESERVED, }, + .bar[BAR_5] = { .type = BAR_RESERVED, }, + .align = SZ_64K, +}; + /* - * BAR# | Default BAR enable | Default BAR Type | Default BAR Size | BAR Sizing Scheme - * 
================================================================================================ - * BAR0 | Enable | 64-bit | 1 MB | Programmable Size - * BAR1 | Disable | 32-bit | 64 KB | Fixed Size - * BAR1 should be disabled if BAR0 is 64bit. - * BAR2 | Enable | 32-bit | 1 MB | Programmable Size - * BAR3 | Enable | 32-bit | 64 KB | Programmable Size - * BAR4 | Enable | 32-bit | 1M | Programmable Size - * BAR5 | Enable | 32-bit | 64 KB | Programmable Size + * | Default | Default | Default | BAR Sizing + * BAR# | Enable? | Type | Size | Scheme + * ======================================================= + * BAR0 | Enable | 64-bit | 1 MB | Programmable Size + * BAR1 | Disable | 32-bit | 64 KB | Fixed Size + * (BAR1 should be disabled if BAR0 is 64-bit) + * BAR2 | Enable | 32-bit | 1 MB | Programmable Size + * BAR3 | Enable | 32-bit | 64 KB | Programmable Size + * BAR4 | Enable | 32-bit | 1 MB | Programmable Size + * BAR5 | Enable | 32-bit | 64 KB | Programmable Size */ static const struct pci_epc_features imx95_pcie_epc_features = { .msi_capable = true, @@ -1118,7 +1331,6 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie, struct platform_device *pdev) { int ret; - unsigned int pcie_dbi2_offset; struct dw_pcie_ep *ep; struct dw_pcie *pci = imx_pcie->pci; struct dw_pcie_rp *pp = &pci->pp; @@ -1128,28 +1340,6 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie, ep = &pci->ep; ep->ops = &pcie_ep_ops; - switch (imx_pcie->drvdata->variant) { - case IMX8MQ_EP: - case IMX8MM_EP: - case IMX8MP_EP: - pcie_dbi2_offset = SZ_1M; - break; - default: - pcie_dbi2_offset = SZ_4K; - break; - } - - pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset; - - /* - * FIXME: Ideally, dbi2 base address should come from DT. But since only IMX95 is defining - * "dbi2" in DT, "dbi_base2" is set to NULL here for that platform alone so that the DWC - * core code can fetch that from DT. But once all platform DTs were fixed, this and the - * above "dbi_base2" setting should be removed. - */ - if (device_property_match_string(dev, "reg-names", "dbi2") >= 0) - pci->dbi_base2 = NULL; - if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_SUPPORT_64BIT)) dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); @@ -1176,43 +1366,6 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie, return 0; } -static void imx_pcie_pm_turnoff(struct imx_pcie *imx_pcie) -{ - struct device *dev = imx_pcie->pci->dev; - - /* Some variants have a turnoff reset in DT */ - if (imx_pcie->turnoff_reset) { - reset_control_assert(imx_pcie->turnoff_reset); - reset_control_deassert(imx_pcie->turnoff_reset); - goto pm_turnoff_sleep; - } - - /* Others poke directly at IOMUXC registers */ - switch (imx_pcie->drvdata->variant) { - case IMX6SX: - case IMX6QP: - regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX6SX_GPR12_PCIE_PM_TURN_OFF, - IMX6SX_GPR12_PCIE_PM_TURN_OFF); - regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0); - break; - default: - dev_err(dev, "PME_Turn_Off not implemented\n"); - return; - } - - /* - * Components with an upstream port must respond to - * PME_Turn_Off with PME_TO_Ack but we can't check. - * - * The standard recommends a 1-10ms timeout after which to - * proceed anyway as if acks were received. 
- */ -pm_turnoff_sleep: - usleep_range(1000, 10000); -} - static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save) { u8 offset; @@ -1236,7 +1389,6 @@ static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save) static int imx_pcie_suspend_noirq(struct device *dev) { struct imx_pcie *imx_pcie = dev_get_drvdata(dev); - struct dw_pcie_rp *pp = &imx_pcie->pci->pp; if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND)) return 0; @@ -1251,9 +1403,7 @@ static int imx_pcie_suspend_noirq(struct device *dev) imx_pcie_assert_core_reset(imx_pcie); imx_pcie->drvdata->enable_ref_clk(imx_pcie, false); } else { - imx_pcie_pm_turnoff(imx_pcie); - imx_pcie_stop_link(imx_pcie->pci); - imx_pcie_host_exit(pp); + return dw_pcie_suspend_noirq(imx_pcie->pci); } return 0; @@ -1263,7 +1413,6 @@ static int imx_pcie_resume_noirq(struct device *dev) { int ret; struct imx_pcie *imx_pcie = dev_get_drvdata(dev); - struct dw_pcie_rp *pp = &imx_pcie->pci->pp; if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND)) return 0; @@ -1275,6 +1424,7 @@ static int imx_pcie_resume_noirq(struct device *dev) ret = imx_pcie_deassert_core_reset(imx_pcie); if (ret) return ret; + /* * Using PCIE_TEST_PD seems to disable MSI and powers down the * root complex. This is why we have to setup the rc again and @@ -1283,17 +1433,12 @@ static int imx_pcie_resume_noirq(struct device *dev) ret = dw_pcie_setup_rc(&imx_pcie->pci->pp); if (ret) return ret; - imx_pcie_msi_save_restore(imx_pcie, false); } else { - ret = imx_pcie_host_init(pp); + ret = dw_pcie_resume_noirq(imx_pcie->pci); if (ret) return ret; - imx_pcie_msi_save_restore(imx_pcie, false); - dw_pcie_setup_rc(pp); - - if (imx_pcie->link_is_up) - imx_pcie_start_link(imx_pcie->pci); } + imx_pcie_msi_save_restore(imx_pcie, false); return 0; } @@ -1309,11 +1454,9 @@ static int imx_pcie_probe(struct platform_device *pdev) struct dw_pcie *pci; struct imx_pcie *imx_pcie; struct device_node *np; - struct resource *dbi_base; struct device_node *node = dev->of_node; - int ret; + int ret, domain; u16 val; - int i; imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL); if (!imx_pcie) @@ -1325,11 +1468,17 @@ static int imx_pcie_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = &dw_pcie_ops; - pci->pp.ops = &imx_pcie_host_ops; imx_pcie->pci = pci; imx_pcie->drvdata = of_device_get_match_data(dev); + mutex_init(&imx_pcie->lock); + + if (imx_pcie->drvdata->ops) + pci->pp.ops = imx_pcie->drvdata->ops; + else + pci->pp.ops = &imx_pcie_host_dw_pme_ops; + /* Find the PHY if one is defined, only imx7d uses it */ np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0); if (np) { @@ -1345,10 +1494,6 @@ static int imx_pcie_probe(struct platform_device *pdev) return PTR_ERR(imx_pcie->phy_base); } - pci->dbi_base = devm_platform_get_and_ioremap_resource(pdev, 0, &dbi_base); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); - /* Fetch GPIOs */ imx_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(imx_pcie->reset_gpiod)) @@ -1356,16 +1501,11 @@ static int imx_pcie_probe(struct platform_device *pdev) "unable to get reset gpio\n"); gpiod_set_consumer_name(imx_pcie->reset_gpiod, "PCIe reset"); - if (imx_pcie->drvdata->clks_cnt >= IMX_PCIE_MAX_CLKS) - return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n"); - - for (i = 0; i < imx_pcie->drvdata->clks_cnt; i++) - imx_pcie->clks[i].id = imx_pcie->drvdata->clk_names[i]; - /* Fetch clocks */ - ret = devm_clk_bulk_get(dev, imx_pcie->drvdata->clks_cnt, 
imx_pcie->clks); - if (ret) - return ret; + imx_pcie->num_clks = devm_clk_bulk_get_all(dev, &imx_pcie->clks); + if (imx_pcie->num_clks < 0) + return dev_err_probe(dev, imx_pcie->num_clks, + "failed to get clocks\n"); if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) { imx_pcie->phy = devm_phy_get(dev, "pcie-phy"); @@ -1391,21 +1531,16 @@ static int imx_pcie_probe(struct platform_device *pdev) switch (imx_pcie->drvdata->variant) { case IMX8MQ: case IMX8MQ_EP: - case IMX7D: - if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR) - imx_pcie->controller_id = 1; + domain = of_get_pci_domain_nr(node); + if (domain < 0 || domain > 1) + return dev_err_probe(dev, -ENODEV, "no \"linux,pci-domain\" property in devicetree\n"); + + imx_pcie->controller_id = domain; break; default: break; } - /* Grab turnoff reset */ - imx_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff"); - if (IS_ERR(imx_pcie->turnoff_reset)) { - dev_err(dev, "Failed to get TURNOFF reset control\n"); - return PTR_ERR(imx_pcie->turnoff_reset); - } - if (imx_pcie->drvdata->gpr) { /* Grab GPR config register range */ imx_pcie->iomuxc_gpr = @@ -1479,11 +1614,13 @@ static int imx_pcie_probe(struct platform_device *pdev) if (ret) return ret; + pci->use_parent_dt_ranges = true; if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) { ret = imx_add_pcie_ep(imx_pcie, pdev); if (ret < 0) return ret; } else { + pci->pp.use_atu_msg = true; ret = dw_pcie_host_init(&pci->pp); if (ret < 0) return ret; @@ -1508,12 +1645,6 @@ static void imx_pcie_shutdown(struct platform_device *pdev) imx_pcie_assert_core_reset(imx_pcie); } -static const char * const imx6q_clks[] = {"pcie_bus", "pcie", "pcie_phy"}; -static const char * const imx8mm_clks[] = {"pcie_bus", "pcie", "pcie_aux"}; -static const char * const imx8mq_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux"}; -static const char * const imx6sx_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_inbound_axi"}; -static const char * const imx8q_clks[] = {"mstr", "slv", "dbi"}; - static const struct imx_pcie_drvdata drvdata[] = { [IMX6Q] = { .variant = IMX6Q, @@ -1523,8 +1654,6 @@ static const struct imx_pcie_drvdata drvdata[] = { IMX_PCIE_FLAG_SUPPORTS_SUSPEND, .dbi_length = 0x200, .gpr = "fsl,imx6q-iomuxc-gpr", - .clk_names = imx6q_clks, - .clks_cnt = ARRAY_SIZE(imx6q_clks), .ltssm_off = IOMUXC_GPR12, .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2, .mode_off[0] = IOMUXC_GPR12, @@ -1539,8 +1668,6 @@ static const struct imx_pcie_drvdata drvdata[] = { IMX_PCIE_FLAG_IMX_SPEED_CHANGE | IMX_PCIE_FLAG_SUPPORTS_SUSPEND, .gpr = "fsl,imx6q-iomuxc-gpr", - .clk_names = imx6sx_clks, - .clks_cnt = ARRAY_SIZE(imx6sx_clks), .ltssm_off = IOMUXC_GPR12, .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2, .mode_off[0] = IOMUXC_GPR12, @@ -1548,6 +1675,7 @@ static const struct imx_pcie_drvdata drvdata[] = { .init_phy = imx6sx_pcie_init_phy, .enable_ref_clk = imx6sx_pcie_enable_ref_clk, .core_reset = imx6sx_pcie_core_reset, + .ops = &imx_pcie_host_ops, }, [IMX6QP] = { .variant = IMX6QP, @@ -1556,8 +1684,6 @@ static const struct imx_pcie_drvdata drvdata[] = { IMX_PCIE_FLAG_SUPPORTS_SUSPEND, .dbi_length = 0x200, .gpr = "fsl,imx6q-iomuxc-gpr", - .clk_names = imx6q_clks, - .clks_cnt = ARRAY_SIZE(imx6q_clks), .ltssm_off = IOMUXC_GPR12, .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2, .mode_off[0] = IOMUXC_GPR12, @@ -1565,6 +1691,7 @@ static const struct imx_pcie_drvdata drvdata[] = { .init_phy = imx_pcie_init_phy, .enable_ref_clk = imx6q_pcie_enable_ref_clk, .core_reset = imx6qp_pcie_core_reset, + .ops = &imx_pcie_host_ops, }, [IMX7D] = { 
.variant = IMX7D, @@ -1572,21 +1699,17 @@ static const struct imx_pcie_drvdata drvdata[] = { IMX_PCIE_FLAG_HAS_APP_RESET | IMX_PCIE_FLAG_HAS_PHY_RESET, .gpr = "fsl,imx7d-iomuxc-gpr", - .clk_names = imx6q_clks, - .clks_cnt = ARRAY_SIZE(imx6q_clks), .mode_off[0] = IOMUXC_GPR12, .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, - .init_phy = imx7d_pcie_init_phy, .enable_ref_clk = imx7d_pcie_enable_ref_clk, .core_reset = imx7d_pcie_core_reset, }, [IMX8MQ] = { .variant = IMX8MQ, .flags = IMX_PCIE_FLAG_HAS_APP_RESET | - IMX_PCIE_FLAG_HAS_PHY_RESET, + IMX_PCIE_FLAG_HAS_PHY_RESET | + IMX_PCIE_FLAG_SUPPORTS_SUSPEND, .gpr = "fsl,imx8mq-iomuxc-gpr", - .clk_names = imx8mq_clks, - .clks_cnt = ARRAY_SIZE(imx8mq_clks), .mode_off[0] = IOMUXC_GPR12, .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, .mode_off[1] = IOMUXC_GPR12, @@ -1600,8 +1723,6 @@ static const struct imx_pcie_drvdata drvdata[] = { IMX_PCIE_FLAG_HAS_PHYDRV | IMX_PCIE_FLAG_HAS_APP_RESET, .gpr = "fsl,imx8mm-iomuxc-gpr", - .clk_names = imx8mm_clks, - .clks_cnt = ARRAY_SIZE(imx8mm_clks), .mode_off[0] = IOMUXC_GPR12, .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, .enable_ref_clk = imx8mm_pcie_enable_ref_clk, @@ -1612,8 +1733,6 @@ static const struct imx_pcie_drvdata drvdata[] = { IMX_PCIE_FLAG_HAS_PHYDRV | IMX_PCIE_FLAG_HAS_APP_RESET, .gpr = "fsl,imx8mp-iomuxc-gpr", - .clk_names = imx8mm_clks, - .clks_cnt = ARRAY_SIZE(imx8mm_clks), .mode_off[0] = IOMUXC_GPR12, .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, .enable_ref_clk = imx8mm_pcie_enable_ref_clk, @@ -1621,15 +1740,14 @@ static const struct imx_pcie_drvdata drvdata[] = { [IMX8Q] = { .variant = IMX8Q, .flags = IMX_PCIE_FLAG_HAS_PHYDRV | - IMX_PCIE_FLAG_CPU_ADDR_FIXUP, - .clk_names = imx8q_clks, - .clks_cnt = ARRAY_SIZE(imx8q_clks), + IMX_PCIE_FLAG_CPU_ADDR_FIXUP | + IMX_PCIE_FLAG_SUPPORTS_SUSPEND, }, [IMX95] = { .variant = IMX95, - .flags = IMX_PCIE_FLAG_HAS_SERDES, - .clk_names = imx8mq_clks, - .clks_cnt = ARRAY_SIZE(imx8mq_clks), + .flags = IMX_PCIE_FLAG_HAS_SERDES | + IMX_PCIE_FLAG_HAS_LUT | + IMX_PCIE_FLAG_SUPPORTS_SUSPEND, .ltssm_off = IMX95_PE0_GEN_CTRL_3, .ltssm_mask = IMX95_PCIE_LTSSM_EN, .mode_off[0] = IMX95_PE0_GEN_CTRL_1, @@ -1642,8 +1760,6 @@ static const struct imx_pcie_drvdata drvdata[] = { IMX_PCIE_FLAG_HAS_PHY_RESET, .mode = DW_PCIE_EP_TYPE, .gpr = "fsl,imx8mq-iomuxc-gpr", - .clk_names = imx8mq_clks, - .clks_cnt = ARRAY_SIZE(imx8mq_clks), .mode_off[0] = IOMUXC_GPR12, .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, .mode_off[1] = IOMUXC_GPR12, @@ -1658,8 +1774,6 @@ static const struct imx_pcie_drvdata drvdata[] = { IMX_PCIE_FLAG_HAS_PHYDRV, .mode = DW_PCIE_EP_TYPE, .gpr = "fsl,imx8mm-iomuxc-gpr", - .clk_names = imx8mm_clks, - .clks_cnt = ARRAY_SIZE(imx8mm_clks), .mode_off[0] = IOMUXC_GPR12, .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, .epc_features = &imx8m_pcie_epc_features, @@ -1671,19 +1785,21 @@ static const struct imx_pcie_drvdata drvdata[] = { IMX_PCIE_FLAG_HAS_PHYDRV, .mode = DW_PCIE_EP_TYPE, .gpr = "fsl,imx8mp-iomuxc-gpr", - .clk_names = imx8mm_clks, - .clks_cnt = ARRAY_SIZE(imx8mm_clks), .mode_off[0] = IOMUXC_GPR12, .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, .epc_features = &imx8m_pcie_epc_features, .enable_ref_clk = imx8mm_pcie_enable_ref_clk, }, + [IMX8Q_EP] = { + .variant = IMX8Q_EP, + .flags = IMX_PCIE_FLAG_HAS_PHYDRV, + .mode = DW_PCIE_EP_TYPE, + .epc_features = &imx8q_pcie_epc_features, + }, [IMX95_EP] = { .variant = IMX95_EP, .flags = IMX_PCIE_FLAG_HAS_SERDES | IMX_PCIE_FLAG_SUPPORT_64BIT, - .clk_names = imx8mq_clks, - .clks_cnt = ARRAY_SIZE(imx8mq_clks), .ltssm_off = IMX95_PE0_GEN_CTRL_3, .ltssm_mask = 
IMX95_PCIE_LTSSM_EN, .mode_off[0] = IMX95_PE0_GEN_CTRL_1, @@ -1707,6 +1823,7 @@ static const struct of_device_id imx_pcie_of_match[] = { { .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], }, { .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], }, { .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], }, + { .compatible = "fsl,imx8q-pcie-ep", .data = &drvdata[IMX8Q_EP], }, { .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], }, {}, }; diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 63bd5003da45..76a37368ae4f 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -966,11 +966,11 @@ static const struct pci_epc_features ks_pcie_am654_epc_features = { .msix_capable = true, .bar[BAR_0] = { .type = BAR_RESERVED, }, .bar[BAR_1] = { .type = BAR_RESERVED, }, - .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, + .bar[BAR_2] = { .type = BAR_RESIZABLE, }, .bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_64K, }, .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256, }, - .bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, - .align = SZ_1M, + .bar[BAR_5] = { .type = BAR_RESIZABLE, }, + .align = SZ_64K, }; static const struct pci_epc_features* diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c index ee6f52568133..a44b5c256d6e 100644 --- a/drivers/pci/controller/dwc/pci-layerscape.c +++ b/drivers/pci/controller/dwc/pci-layerscape.c @@ -329,7 +329,6 @@ static int ls_pcie_probe(struct platform_device *pdev) struct ls_pcie *pcie; struct resource *dbi_base; u32 index[2]; - int ret; pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) @@ -355,16 +354,15 @@ static int ls_pcie_probe(struct platform_device *pdev) pcie->pf_lut_base = pci->dbi_base + pcie->drvdata->pf_lut_off; if (pcie->drvdata->scfg_support) { - pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,pcie-scfg"); + pcie->scfg = + syscon_regmap_lookup_by_phandle_args(dev->of_node, + "fsl,pcie-scfg", 1, + index); if (IS_ERR(pcie->scfg)) { dev_err(dev, "No syscfg phandle specified\n"); return PTR_ERR(pcie->scfg); } - ret = of_property_read_u32_array(dev->of_node, "fsl,pcie-scfg", index, 2); - if (ret) - return ret; - pcie->index = index[1]; } diff --git a/drivers/pci/controller/dwc/pcie-amd-mdb.c b/drivers/pci/controller/dwc/pcie-amd-mdb.c new file mode 100644 index 000000000000..4eb2a4e8189d --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-amd-mdb.c @@ -0,0 +1,476 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for AMD MDB PCIe Bridge + * + * Copyright (C) 2024-2025, Advanced Micro Devices, Inc. + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/of_device.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/resource.h> +#include <linux/types.h> + +#include "pcie-designware.h" + +#define AMD_MDB_TLP_IR_STATUS_MISC 0x4C0 +#define AMD_MDB_TLP_IR_MASK_MISC 0x4C4 +#define AMD_MDB_TLP_IR_ENABLE_MISC 0x4C8 +#define AMD_MDB_TLP_IR_DISABLE_MISC 0x4CC + +#define AMD_MDB_TLP_PCIE_INTX_MASK GENMASK(23, 16) + +#define AMD_MDB_PCIE_INTR_INTX_ASSERT(x) BIT((x) * 2) + +/* Interrupt registers definitions. 
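+ * Each value is a bit position in the AMD_MDB_TLP_IR_*_MISC
+ * interrupt registers above.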
*/ +#define AMD_MDB_PCIE_INTR_CMPL_TIMEOUT 15 +#define AMD_MDB_PCIE_INTR_INTX 16 +#define AMD_MDB_PCIE_INTR_PM_PME_RCVD 24 +#define AMD_MDB_PCIE_INTR_PME_TO_ACK_RCVD 25 +#define AMD_MDB_PCIE_INTR_MISC_CORRECTABLE 26 +#define AMD_MDB_PCIE_INTR_NONFATAL 27 +#define AMD_MDB_PCIE_INTR_FATAL 28 + +#define IMR(x) BIT(AMD_MDB_PCIE_INTR_ ##x) +#define AMD_MDB_PCIE_IMR_ALL_MASK \ + ( \ + IMR(CMPL_TIMEOUT) | \ + IMR(PM_PME_RCVD) | \ + IMR(PME_TO_ACK_RCVD) | \ + IMR(MISC_CORRECTABLE) | \ + IMR(NONFATAL) | \ + IMR(FATAL) | \ + AMD_MDB_TLP_PCIE_INTX_MASK \ + ) + +/** + * struct amd_mdb_pcie - PCIe port information + * @pci: DesignWare PCIe controller structure + * @slcr: MDB System Level Control and Status Register (SLCR) base + * @intx_domain: INTx IRQ domain pointer + * @mdb_domain: MDB IRQ domain pointer + * @intx_irq: INTx IRQ interrupt number + */ +struct amd_mdb_pcie { + struct dw_pcie pci; + void __iomem *slcr; + struct irq_domain *intx_domain; + struct irq_domain *mdb_domain; + int intx_irq; +}; + +static const struct dw_pcie_host_ops amd_mdb_pcie_host_ops = { +}; + +static void amd_mdb_intx_irq_mask(struct irq_data *data) +{ + struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data); + struct dw_pcie *pci = &pcie->pci; + struct dw_pcie_rp *port = &pci->pp; + unsigned long flags; + u32 val; + + raw_spin_lock_irqsave(&port->lock, flags); + val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK, + AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq)); + + /* + * Writing '1' to a bit in AMD_MDB_TLP_IR_DISABLE_MISC disables that + * interrupt, writing '0' has no effect. + */ + writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC); + raw_spin_unlock_irqrestore(&port->lock, flags); +} + +static void amd_mdb_intx_irq_unmask(struct irq_data *data) +{ + struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data); + struct dw_pcie *pci = &pcie->pci; + struct dw_pcie_rp *port = &pci->pp; + unsigned long flags; + u32 val; + + raw_spin_lock_irqsave(&port->lock, flags); + val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK, + AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq)); + + /* + * Writing '1' to a bit in AMD_MDB_TLP_IR_ENABLE_MISC enables that + * interrupt, writing '0' has no effect. + */ + writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC); + raw_spin_unlock_irqrestore(&port->lock, flags); +} + +static struct irq_chip amd_mdb_intx_irq_chip = { + .name = "AMD MDB INTx", + .irq_mask = amd_mdb_intx_irq_mask, + .irq_unmask = amd_mdb_intx_irq_unmask, +}; + +/** + * amd_mdb_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid + * @domain: IRQ domain + * @irq: Virtual IRQ number + * @hwirq: Hardware interrupt number + * + * Return: Always returns '0'. + */ +static int amd_mdb_pcie_intx_map(struct irq_domain *domain, + unsigned int irq, irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &amd_mdb_intx_irq_chip, + handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + irq_set_status_flags(irq, IRQ_LEVEL); + + return 0; +} + +/* INTx IRQ domain operations. 
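+ * Only .map is provided; masking is handled by the irq_mask/irq_unmask
+ * callbacks of amd_mdb_intx_irq_chip.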
*/ +static const struct irq_domain_ops amd_intx_domain_ops = { + .map = amd_mdb_pcie_intx_map, +}; + +static irqreturn_t dw_pcie_rp_intx(int irq, void *args) +{ + struct amd_mdb_pcie *pcie = args; + unsigned long val; + int i, int_status; + + val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC); + int_status = FIELD_GET(AMD_MDB_TLP_PCIE_INTX_MASK, val); + + for (i = 0; i < PCI_NUM_INTX; i++) { + if (int_status & AMD_MDB_PCIE_INTR_INTX_ASSERT(i)) + generic_handle_domain_irq(pcie->intx_domain, i); + } + + return IRQ_HANDLED; +} + +#define _IC(x, s)[AMD_MDB_PCIE_INTR_ ## x] = { __stringify(x), s } + +static const struct { + const char *sym; + const char *str; +} intr_cause[32] = { + _IC(CMPL_TIMEOUT, "Completion timeout"), + _IC(PM_PME_RCVD, "PM_PME message received"), + _IC(PME_TO_ACK_RCVD, "PME_TO_ACK message received"), + _IC(MISC_CORRECTABLE, "Correctable error message"), + _IC(NONFATAL, "Non fatal error message"), + _IC(FATAL, "Fatal error message"), +}; + +static void amd_mdb_event_irq_mask(struct irq_data *d) +{ + struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d); + struct dw_pcie *pci = &pcie->pci; + struct dw_pcie_rp *port = &pci->pp; + unsigned long flags; + u32 val; + + raw_spin_lock_irqsave(&port->lock, flags); + val = BIT(d->hwirq); + writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC); + raw_spin_unlock_irqrestore(&port->lock, flags); +} + +static void amd_mdb_event_irq_unmask(struct irq_data *d) +{ + struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d); + struct dw_pcie *pci = &pcie->pci; + struct dw_pcie_rp *port = &pci->pp; + unsigned long flags; + u32 val; + + raw_spin_lock_irqsave(&port->lock, flags); + val = BIT(d->hwirq); + writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC); + raw_spin_unlock_irqrestore(&port->lock, flags); +} + +static struct irq_chip amd_mdb_event_irq_chip = { + .name = "AMD MDB RC-Event", + .irq_mask = amd_mdb_event_irq_mask, + .irq_unmask = amd_mdb_event_irq_unmask, +}; + +static int amd_mdb_pcie_event_map(struct irq_domain *domain, + unsigned int irq, irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &amd_mdb_event_irq_chip, + handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + irq_set_status_flags(irq, IRQ_LEVEL); + + return 0; +} + +static const struct irq_domain_ops event_domain_ops = { + .map = amd_mdb_pcie_event_map, +}; + +static irqreturn_t amd_mdb_pcie_event(int irq, void *args) +{ + struct amd_mdb_pcie *pcie = args; + unsigned long val; + int i; + + val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC); + val &= ~readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_MASK_MISC); + for_each_set_bit(i, &val, 32) + generic_handle_domain_irq(pcie->mdb_domain, i); + writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC); + + return IRQ_HANDLED; +} + +static void amd_mdb_pcie_free_irq_domains(struct amd_mdb_pcie *pcie) +{ + if (pcie->intx_domain) { + irq_domain_remove(pcie->intx_domain); + pcie->intx_domain = NULL; + } + + if (pcie->mdb_domain) { + irq_domain_remove(pcie->mdb_domain); + pcie->mdb_domain = NULL; + } +} + +static int amd_mdb_pcie_init_port(struct amd_mdb_pcie *pcie) +{ + unsigned long val; + + /* Disable all TLP interrupts. */ + writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK, + pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC); + + /* Clear pending TLP interrupts. */ + val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC); + val &= AMD_MDB_PCIE_IMR_ALL_MASK; + writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC); + + /* Enable all TLP interrupts. 
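+ * Individual sources can still be masked at runtime via the irq_chip
+ * callbacks, which write the per-bit DISABLE/ENABLE registers.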
*/ + writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK, + pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC); + + return 0; +} + +/** + * amd_mdb_pcie_init_irq_domains - Initialize IRQ domain + * @pcie: PCIe port information + * @pdev: Platform device + * + * Return: Returns '0' on success and error value on failure. + */ +static int amd_mdb_pcie_init_irq_domains(struct amd_mdb_pcie *pcie, + struct platform_device *pdev) +{ + struct dw_pcie *pci = &pcie->pci; + struct dw_pcie_rp *pp = &pci->pp; + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct device_node *pcie_intc_node; + int err; + + pcie_intc_node = of_get_next_child(node, NULL); + if (!pcie_intc_node) { + dev_err(dev, "No PCIe Intc node found\n"); + return -ENODEV; + } + + pcie->mdb_domain = irq_domain_add_linear(pcie_intc_node, 32, + &event_domain_ops, pcie); + if (!pcie->mdb_domain) { + err = -ENOMEM; + dev_err(dev, "Failed to add MDB domain\n"); + goto out; + } + + irq_domain_update_bus_token(pcie->mdb_domain, DOMAIN_BUS_NEXUS); + + pcie->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, + &amd_intx_domain_ops, pcie); + if (!pcie->intx_domain) { + err = -ENOMEM; + dev_err(dev, "Failed to add INTx domain\n"); + goto mdb_out; + } + + of_node_put(pcie_intc_node); + irq_domain_update_bus_token(pcie->intx_domain, DOMAIN_BUS_WIRED); + + raw_spin_lock_init(&pp->lock); + + return 0; +mdb_out: + amd_mdb_pcie_free_irq_domains(pcie); +out: + of_node_put(pcie_intc_node); + return err; +} + +static irqreturn_t amd_mdb_pcie_intr_handler(int irq, void *args) +{ + struct amd_mdb_pcie *pcie = args; + struct device *dev; + struct irq_data *d; + + dev = pcie->pci.dev; + + /* + * In the future, error reporting will be hooked to the AER subsystem. + * Currently, the driver prints a warning message to the user. + */ + d = irq_domain_get_irq_data(pcie->mdb_domain, irq); + if (intr_cause[d->hwirq].str) + dev_warn(dev, "%s\n", intr_cause[d->hwirq].str); + else + dev_warn_once(dev, "Unknown IRQ %ld\n", d->hwirq); + + return IRQ_HANDLED; +} + +static int amd_mdb_setup_irq(struct amd_mdb_pcie *pcie, + struct platform_device *pdev) +{ + struct dw_pcie *pci = &pcie->pci; + struct dw_pcie_rp *pp = &pci->pp; + struct device *dev = &pdev->dev; + int i, irq, err; + + amd_mdb_pcie_init_port(pcie); + + pp->irq = platform_get_irq(pdev, 0); + if (pp->irq < 0) + return pp->irq; + + for (i = 0; i < ARRAY_SIZE(intr_cause); i++) { + if (!intr_cause[i].str) + continue; + + irq = irq_create_mapping(pcie->mdb_domain, i); + if (!irq) { + dev_err(dev, "Failed to map MDB domain interrupt\n"); + return -ENOMEM; + } + + err = devm_request_irq(dev, irq, amd_mdb_pcie_intr_handler, + IRQF_NO_THREAD, intr_cause[i].sym, pcie); + if (err) { + dev_err(dev, "Failed to request IRQ %d, err=%d\n", + irq, err); + return err; + } + } + + pcie->intx_irq = irq_create_mapping(pcie->mdb_domain, + AMD_MDB_PCIE_INTR_INTX); + if (!pcie->intx_irq) { + dev_err(dev, "Failed to map INTx interrupt\n"); + return -ENXIO; + } + + err = devm_request_irq(dev, pcie->intx_irq, dw_pcie_rp_intx, + IRQF_NO_THREAD, NULL, pcie); + if (err) { + dev_err(dev, "Failed to request INTx IRQ %d, err=%d\n", + irq, err); + return err; + } + + /* Plug the main event handler. 
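+ * amd_mdb_pcie_event() demultiplexes AMD_MDB_TLP_IR_STATUS_MISC and
+ * forwards each set bit to the matching mdb_domain interrupt.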
*/ + err = devm_request_irq(dev, pp->irq, amd_mdb_pcie_event, IRQF_NO_THREAD, + "amd_mdb pcie_irq", pcie); + if (err) { + dev_err(dev, "Failed to request event IRQ %d, err=%d\n", + pp->irq, err); + return err; + } + + return 0; +} + +static int amd_mdb_add_pcie_port(struct amd_mdb_pcie *pcie, + struct platform_device *pdev) +{ + struct dw_pcie *pci = &pcie->pci; + struct dw_pcie_rp *pp = &pci->pp; + struct device *dev = &pdev->dev; + int err; + + pcie->slcr = devm_platform_ioremap_resource_byname(pdev, "slcr"); + if (IS_ERR(pcie->slcr)) + return PTR_ERR(pcie->slcr); + + err = amd_mdb_pcie_init_irq_domains(pcie, pdev); + if (err) + return err; + + err = amd_mdb_setup_irq(pcie, pdev); + if (err) { + dev_err(dev, "Failed to set up interrupts, err=%d\n", err); + goto out; + } + + pp->ops = &amd_mdb_pcie_host_ops; + + err = dw_pcie_host_init(pp); + if (err) { + dev_err(dev, "Failed to initialize host, err=%d\n", err); + goto out; + } + + return 0; + +out: + amd_mdb_pcie_free_irq_domains(pcie); + return err; +} + +static int amd_mdb_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct amd_mdb_pcie *pcie; + struct dw_pcie *pci; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pci = &pcie->pci; + pci->dev = dev; + + platform_set_drvdata(pdev, pcie); + + return amd_mdb_add_pcie_port(pcie, pdev); +} + +static const struct of_device_id amd_mdb_pcie_of_match[] = { + { + .compatible = "amd,versal2-mdb-host", + }, + {}, +}; + +static struct platform_driver amd_mdb_pcie_driver = { + .driver = { + .name = "amd-mdb-pcie", + .of_match_table = amd_mdb_pcie_of_match, + .suppress_bind_attrs = true, + }, + .probe = amd_mdb_pcie_probe, +}; + +builtin_platform_driver(amd_mdb_pcie_driver); diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c index f8e7283dacd4..234c8cbcae3a 100644 --- a/drivers/pci/controller/dwc/pcie-artpec6.c +++ b/drivers/pci/controller/dwc/pcie-artpec6.c @@ -369,9 +369,22 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, return 0; } +static const struct pci_epc_features artpec6_pcie_epc_features = { + .linkup_notifier = false, + .msi_capable = true, + .msix_capable = false, +}; + +static const struct pci_epc_features * +artpec6_pcie_get_features(struct dw_pcie_ep *ep) +{ + return &artpec6_pcie_epc_features; +} + static const struct dw_pcie_ep_ops pcie_ep_ops = { .init = artpec6_pcie_ep_init, .raise_irq = artpec6_pcie_raise_irq, + .get_features = artpec6_pcie_get_features, }; static int artpec6_pcie_probe(struct platform_device *pdev) diff --git a/drivers/pci/controller/dwc/pcie-designware-debugfs.c b/drivers/pci/controller/dwc/pcie-designware-debugfs.c new file mode 100644 index 000000000000..9e6f4d00f262 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-designware-debugfs.c @@ -0,0 +1,677 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Synopsys DesignWare PCIe controller debugfs driver + * + * Copyright (C) 2025 Samsung Electronics Co., Ltd. 
+ * http://www.samsung.com + * + * Author: Shradha Todi <shradha.t@samsung.com> + */ + +#include <linux/debugfs.h> + +#include "pcie-designware.h" + +#define SD_STATUS_L1LANE_REG 0xb0 +#define PIPE_RXVALID BIT(18) +#define PIPE_DETECT_LANE BIT(17) +#define LANE_SELECT GENMASK(3, 0) + +#define ERR_INJ0_OFF 0x34 +#define EINJ_VAL_DIFF GENMASK(28, 16) +#define EINJ_VC_NUM GENMASK(14, 12) +#define EINJ_TYPE_SHIFT 8 +#define EINJ0_TYPE GENMASK(11, 8) +#define EINJ1_TYPE BIT(8) +#define EINJ2_TYPE GENMASK(9, 8) +#define EINJ3_TYPE GENMASK(10, 8) +#define EINJ4_TYPE GENMASK(10, 8) +#define EINJ5_TYPE BIT(8) +#define EINJ_COUNT GENMASK(7, 0) + +#define ERR_INJ_ENABLE_REG 0x30 + +#define RAS_DES_EVENT_COUNTER_DATA_REG 0xc + +#define RAS_DES_EVENT_COUNTER_CTRL_REG 0x8 +#define EVENT_COUNTER_GROUP_SELECT GENMASK(27, 24) +#define EVENT_COUNTER_EVENT_SELECT GENMASK(23, 16) +#define EVENT_COUNTER_LANE_SELECT GENMASK(11, 8) +#define EVENT_COUNTER_STATUS BIT(7) +#define EVENT_COUNTER_ENABLE GENMASK(4, 2) +#define PER_EVENT_ON 0x3 +#define PER_EVENT_OFF 0x1 + +#define DWC_DEBUGFS_BUF_MAX 128 + +/** + * struct dwc_pcie_rasdes_info - Stores controller common information + * @ras_cap_offset: RAS DES vendor specific extended capability offset + * @reg_event_lock: Mutex used for RAS DES shadow event registers + * + * Any parameter constant to all files of the debugfs hierarchy for a single + * controller will be stored in this struct. It is allocated and assigned to + * controller specific struct dw_pcie during initialization. + */ +struct dwc_pcie_rasdes_info { + u32 ras_cap_offset; + struct mutex reg_event_lock; +}; + +/** + * struct dwc_pcie_rasdes_priv - Stores file specific private data information + * @pci: Reference to the dw_pcie structure + * @idx: Index of specific file related information in array of structs + * + * All debugfs files will have this struct as its private data. + */ +struct dwc_pcie_rasdes_priv { + struct dw_pcie *pci; + int idx; +}; + +/** + * struct dwc_pcie_err_inj - Store details about each error injection + * supported by DWC RAS DES + * @name: Name of the error that can be injected + * @err_inj_group: Group number to which the error belongs. 
The value + * can range from 0 to 5 + * @err_inj_type: Each group can have multiple types of error + */ +struct dwc_pcie_err_inj { + const char *name; + u32 err_inj_group; + u32 err_inj_type; +}; + +static const struct dwc_pcie_err_inj err_inj_list[] = { + {"tx_lcrc", 0x0, 0x0}, + {"b16_crc_dllp", 0x0, 0x1}, + {"b16_crc_upd_fc", 0x0, 0x2}, + {"tx_ecrc", 0x0, 0x3}, + {"fcrc_tlp", 0x0, 0x4}, + {"parity_tsos", 0x0, 0x5}, + {"parity_skpos", 0x0, 0x6}, + {"rx_lcrc", 0x0, 0x8}, + {"rx_ecrc", 0x0, 0xb}, + {"tlp_err_seq", 0x1, 0x0}, + {"ack_nak_dllp_seq", 0x1, 0x1}, + {"ack_nak_dllp", 0x2, 0x0}, + {"upd_fc_dllp", 0x2, 0x1}, + {"nak_dllp", 0x2, 0x2}, + {"inv_sync_hdr_sym", 0x3, 0x0}, + {"com_pad_ts1", 0x3, 0x1}, + {"com_pad_ts2", 0x3, 0x2}, + {"com_fts", 0x3, 0x3}, + {"com_idl", 0x3, 0x4}, + {"end_edb", 0x3, 0x5}, + {"stp_sdp", 0x3, 0x6}, + {"com_skp", 0x3, 0x7}, + {"posted_tlp_hdr", 0x4, 0x0}, + {"non_post_tlp_hdr", 0x4, 0x1}, + {"cmpl_tlp_hdr", 0x4, 0x2}, + {"posted_tlp_data", 0x4, 0x4}, + {"non_post_tlp_data", 0x4, 0x5}, + {"cmpl_tlp_data", 0x4, 0x6}, + {"duplicate_tlp", 0x5, 0x0}, + {"nullified_tlp", 0x5, 0x1}, +}; + +static const u32 err_inj_type_mask[] = { + EINJ0_TYPE, + EINJ1_TYPE, + EINJ2_TYPE, + EINJ3_TYPE, + EINJ4_TYPE, + EINJ5_TYPE, +}; + +/** + * struct dwc_pcie_event_counter - Store details about each event counter + * supported in DWC RAS DES + * @name: Name of the error counter + * @group_no: Group number that the event belongs to. The value can range + * from 0 to 4 + * @event_no: Event number of the particular event. The value ranges are: + * Group 0: 0 - 10 + * Group 1: 5 - 13 + * Group 2: 0 - 7 + * Group 3: 0 - 5 + * Group 4: 0 - 1 + */ +struct dwc_pcie_event_counter { + const char *name; + u32 group_no; + u32 event_no; +}; + +static const struct dwc_pcie_event_counter event_list[] = { + {"ebuf_overflow", 0x0, 0x0}, + {"ebuf_underrun", 0x0, 0x1}, + {"decode_err", 0x0, 0x2}, + {"running_disparity_err", 0x0, 0x3}, + {"skp_os_parity_err", 0x0, 0x4}, + {"sync_header_err", 0x0, 0x5}, + {"rx_valid_deassertion", 0x0, 0x6}, + {"ctl_skp_os_parity_err", 0x0, 0x7}, + {"retimer_parity_err_1st", 0x0, 0x8}, + {"retimer_parity_err_2nd", 0x0, 0x9}, + {"margin_crc_parity_err", 0x0, 0xA}, + {"detect_ei_infer", 0x1, 0x5}, + {"receiver_err", 0x1, 0x6}, + {"rx_recovery_req", 0x1, 0x7}, + {"n_fts_timeout", 0x1, 0x8}, + {"framing_err", 0x1, 0x9}, + {"deskew_err", 0x1, 0xa}, + {"framing_err_in_l0", 0x1, 0xc}, + {"deskew_uncompleted_err", 0x1, 0xd}, + {"bad_tlp", 0x2, 0x0}, + {"lcrc_err", 0x2, 0x1}, + {"bad_dllp", 0x2, 0x2}, + {"replay_num_rollover", 0x2, 0x3}, + {"replay_timeout", 0x2, 0x4}, + {"rx_nak_dllp", 0x2, 0x5}, + {"tx_nak_dllp", 0x2, 0x6}, + {"retry_tlp", 0x2, 0x7}, + {"fc_timeout", 0x3, 0x0}, + {"poisoned_tlp", 0x3, 0x1}, + {"ecrc_error", 0x3, 0x2}, + {"unsupported_request", 0x3, 0x3}, + {"completer_abort", 0x3, 0x4}, + {"completion_timeout", 0x3, 0x5}, + {"ebuf_skp_add", 0x4, 0x0}, + {"ebuf_skp_del", 0x4, 0x1}, +}; + +static ssize_t lane_detect_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct dw_pcie *pci = file->private_data; + struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; + char debugfs_buf[DWC_DEBUGFS_BUF_MAX]; + ssize_t pos; + u32 val; + + val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG); + val = FIELD_GET(PIPE_DETECT_LANE, val); + if (val) + pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Detected\n"); + else + pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Undetected\n"); + + return 
simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos); +} + +static ssize_t lane_detect_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct dw_pcie *pci = file->private_data; + struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; + u32 lane, val; + + val = kstrtou32_from_user(buf, count, 0, &lane); + if (val) + return val; + + val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG); + val &= ~(LANE_SELECT); + val |= FIELD_PREP(LANE_SELECT, lane); + dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG, val); + + return count; +} + +static ssize_t rx_valid_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct dw_pcie *pci = file->private_data; + struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; + char debugfs_buf[DWC_DEBUGFS_BUF_MAX]; + ssize_t pos; + u32 val; + + val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG); + val = FIELD_GET(PIPE_RXVALID, val); + if (val) + pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Valid\n"); + else + pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Invalid\n"); + + return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos); +} + +static ssize_t rx_valid_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + return lane_detect_write(file, buf, count, ppos); +} + +static ssize_t err_inj_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct dwc_pcie_rasdes_priv *pdata = file->private_data; + struct dw_pcie *pci = pdata->pci; + struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; + u32 val, counter, vc_num, err_group, type_mask; + int val_diff = 0; + char *kern_buf; + + err_group = err_inj_list[pdata->idx].err_inj_group; + type_mask = err_inj_type_mask[err_group]; + + kern_buf = memdup_user_nul(buf, count); + if (IS_ERR(kern_buf)) + return PTR_ERR(kern_buf); + + if (err_group == 4) { + val = sscanf(kern_buf, "%u %d %u", &counter, &val_diff, &vc_num); + if ((val != 3) || (val_diff < -4095 || val_diff > 4095)) { + kfree(kern_buf); + return -EINVAL; + } + } else if (err_group == 1) { + val = sscanf(kern_buf, "%u %d", &counter, &val_diff); + if ((val != 2) || (val_diff < -4095 || val_diff > 4095)) { + kfree(kern_buf); + return -EINVAL; + } + } else { + val = kstrtou32(kern_buf, 0, &counter); + if (val) { + kfree(kern_buf); + return val; + } + } + + val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group)); + val &= ~(type_mask | EINJ_COUNT); + val |= ((err_inj_list[pdata->idx].err_inj_type << EINJ_TYPE_SHIFT) & type_mask); + val |= FIELD_PREP(EINJ_COUNT, counter); + + if (err_group == 1 || err_group == 4) { + val &= ~(EINJ_VAL_DIFF); + val |= FIELD_PREP(EINJ_VAL_DIFF, val_diff); + } + if (err_group == 4) { + val &= ~(EINJ_VC_NUM); + val |= FIELD_PREP(EINJ_VC_NUM, vc_num); + } + + dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group), val); + dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ_ENABLE_REG, (0x1 << err_group)); + + kfree(kern_buf); + return count; +} + +static void set_event_number(struct dwc_pcie_rasdes_priv *pdata, + struct dw_pcie *pci, struct dwc_pcie_rasdes_info *rinfo) +{ + u32 val; + + val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG); + val &= ~EVENT_COUNTER_ENABLE; + val &= ~(EVENT_COUNTER_GROUP_SELECT | EVENT_COUNTER_EVENT_SELECT); + val |= FIELD_PREP(EVENT_COUNTER_GROUP_SELECT, 
event_list[pdata->idx].group_no); + val |= FIELD_PREP(EVENT_COUNTER_EVENT_SELECT, event_list[pdata->idx].event_no); + dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val); +} + +static ssize_t counter_enable_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct dwc_pcie_rasdes_priv *pdata = file->private_data; + struct dw_pcie *pci = pdata->pci; + struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; + char debugfs_buf[DWC_DEBUGFS_BUF_MAX]; + ssize_t pos; + u32 val; + + mutex_lock(&rinfo->reg_event_lock); + set_event_number(pdata, pci, rinfo); + val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG); + mutex_unlock(&rinfo->reg_event_lock); + val = FIELD_GET(EVENT_COUNTER_STATUS, val); + if (val) + pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Enabled\n"); + else + pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Disabled\n"); + + return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos); +} + +static ssize_t counter_enable_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct dwc_pcie_rasdes_priv *pdata = file->private_data; + struct dw_pcie *pci = pdata->pci; + struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; + u32 val, enable; + + val = kstrtou32_from_user(buf, count, 0, &enable); + if (val) + return val; + + mutex_lock(&rinfo->reg_event_lock); + set_event_number(pdata, pci, rinfo); + val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG); + if (enable) + val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_ON); + else + val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_OFF); + + dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val); + + /* + * While enabling the counter, always read the status back to check if + * it is enabled or not. Return error if it is not enabled to let the + * users know that the counter is not supported on the platform. 
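+	 *
+	 * Illustrative usage from userspace (paths assumed; the
+	 * per-controller directory created by dwc_pcie_debugfs_init() is
+	 * named dwc_pcie_<dev_name>):
+	 *
+	 *   echo 1 > dwc_pcie_<dev>/rasdes_event_counter/bad_tlp/counter_enable
+	 *   cat dwc_pcie_<dev>/rasdes_event_counter/bad_tlp/counter_value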
+ */ + if (enable) { + val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + + RAS_DES_EVENT_COUNTER_CTRL_REG); + if (!FIELD_GET(EVENT_COUNTER_STATUS, val)) { + mutex_unlock(&rinfo->reg_event_lock); + return -EOPNOTSUPP; + } + } + + mutex_unlock(&rinfo->reg_event_lock); + + return count; +} + +static ssize_t counter_lane_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct dwc_pcie_rasdes_priv *pdata = file->private_data; + struct dw_pcie *pci = pdata->pci; + struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; + char debugfs_buf[DWC_DEBUGFS_BUF_MAX]; + ssize_t pos; + u32 val; + + mutex_lock(&rinfo->reg_event_lock); + set_event_number(pdata, pci, rinfo); + val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG); + mutex_unlock(&rinfo->reg_event_lock); + val = FIELD_GET(EVENT_COUNTER_LANE_SELECT, val); + pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane: %d\n", val); + + return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos); +} + +static ssize_t counter_lane_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct dwc_pcie_rasdes_priv *pdata = file->private_data; + struct dw_pcie *pci = pdata->pci; + struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; + u32 val, lane; + + val = kstrtou32_from_user(buf, count, 0, &lane); + if (val) + return val; + + mutex_lock(&rinfo->reg_event_lock); + set_event_number(pdata, pci, rinfo); + val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG); + val &= ~(EVENT_COUNTER_LANE_SELECT); + val |= FIELD_PREP(EVENT_COUNTER_LANE_SELECT, lane); + dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val); + mutex_unlock(&rinfo->reg_event_lock); + + return count; +} + +static ssize_t counter_value_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct dwc_pcie_rasdes_priv *pdata = file->private_data; + struct dw_pcie *pci = pdata->pci; + struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; + char debugfs_buf[DWC_DEBUGFS_BUF_MAX]; + ssize_t pos; + u32 val; + + mutex_lock(&rinfo->reg_event_lock); + set_event_number(pdata, pci, rinfo); + val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_DATA_REG); + mutex_unlock(&rinfo->reg_event_lock); + pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter value: %d\n", val); + + return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos); +} + +static const char *ltssm_status_string(enum dw_pcie_ltssm ltssm) +{ + const char *str; + + switch (ltssm) { +#define DW_PCIE_LTSSM_NAME(n) case n: str = #n; break + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_QUIET); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_ACT); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_ACTIVE); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_COMPLIANCE); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_CONFIG); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_PRE_DETECT_QUIET); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_WAIT); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_START); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_ACEPT); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_WAI); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_ACEPT); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_COMPLETE); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_IDLE); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_LOCK); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_SPEED); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_RCVRCFG); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_IDLE); + 
DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0S); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L123_SEND_EIDLE); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L1_IDLE); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_IDLE); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_WAKE); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_ENTRY); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_IDLE); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ENTRY); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ACTIVE); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET_ENTRY); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ0); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ1); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ2); + DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ3); + default: + str = "DW_PCIE_LTSSM_UNKNOWN"; + break; + } + + return str + strlen("DW_PCIE_LTSSM_"); +} + +static int ltssm_status_show(struct seq_file *s, void *v) +{ + struct dw_pcie *pci = s->private; + enum dw_pcie_ltssm val; + + val = dw_pcie_get_ltssm(pci); + seq_printf(s, "%s (0x%02x)\n", ltssm_status_string(val), val); + + return 0; +} + +static int ltssm_status_open(struct inode *inode, struct file *file) +{ + return single_open(file, ltssm_status_show, inode->i_private); +} + +#define dwc_debugfs_create(name) \ +debugfs_create_file(#name, 0644, rasdes_debug, pci, \ + &dbg_ ## name ## _fops) + +#define DWC_DEBUGFS_FOPS(name) \ +static const struct file_operations dbg_ ## name ## _fops = { \ + .open = simple_open, \ + .read = name ## _read, \ + .write = name ## _write \ +} + +DWC_DEBUGFS_FOPS(lane_detect); +DWC_DEBUGFS_FOPS(rx_valid); + +static const struct file_operations dwc_pcie_err_inj_ops = { + .open = simple_open, + .write = err_inj_write, +}; + +static const struct file_operations dwc_pcie_counter_enable_ops = { + .open = simple_open, + .read = counter_enable_read, + .write = counter_enable_write, +}; + +static const struct file_operations dwc_pcie_counter_lane_ops = { + .open = simple_open, + .read = counter_lane_read, + .write = counter_lane_write, +}; + +static const struct file_operations dwc_pcie_counter_value_ops = { + .open = simple_open, + .read = counter_value_read, +}; + +static const struct file_operations dwc_pcie_ltssm_status_ops = { + .open = ltssm_status_open, + .read = seq_read, +}; + +static void dwc_pcie_rasdes_debugfs_deinit(struct dw_pcie *pci) +{ + struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info; + + mutex_destroy(&rinfo->reg_event_lock); +} + +static int dwc_pcie_rasdes_debugfs_init(struct dw_pcie *pci, struct dentry *dir) +{ + struct dentry *rasdes_debug, *rasdes_err_inj; + struct dentry *rasdes_event_counter, *rasdes_events; + struct dwc_pcie_rasdes_info *rasdes_info; + struct dwc_pcie_rasdes_priv *priv_tmp; + struct device *dev = pci->dev; + int ras_cap, i, ret; + + /* + * If a given SoC has no RAS DES capability, the following call is + * bound to return an error, breaking some existing platforms. So, + * return 0 here, as this is not necessarily an error. + */ + ras_cap = dw_pcie_find_rasdes_capability(pci); + if (!ras_cap) { + dev_dbg(dev, "no RAS DES capability available\n"); + return 0; + } + + rasdes_info = devm_kzalloc(dev, sizeof(*rasdes_info), GFP_KERNEL); + if (!rasdes_info) + return -ENOMEM; + + /* Create subdirectories for Debug, Error Injection, Statistics. 
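+	 * The resulting hierarchy under the per-controller directory
+	 * (dwc_pcie_<dev_name>, created in dwc_pcie_debugfs_init()) is:
+	 *
+	 *   rasdes_debug/{lane_detect,rx_valid}
+	 *   rasdes_err_inj/<error_name>
+	 *   rasdes_event_counter/<event_name>/{counter_enable,counter_value}
+	 *   rasdes_event_counter/<event_name>/lane_select (groups 0 and 4 only)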
*/ + rasdes_debug = debugfs_create_dir("rasdes_debug", dir); + rasdes_err_inj = debugfs_create_dir("rasdes_err_inj", dir); + rasdes_event_counter = debugfs_create_dir("rasdes_event_counter", dir); + + mutex_init(&rasdes_info->reg_event_lock); + rasdes_info->ras_cap_offset = ras_cap; + pci->debugfs->rasdes_info = rasdes_info; + + /* Create debugfs files for Debug subdirectory. */ + dwc_debugfs_create(lane_detect); + dwc_debugfs_create(rx_valid); + + /* Create debugfs files for Error Injection subdirectory. */ + for (i = 0; i < ARRAY_SIZE(err_inj_list); i++) { + priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL); + if (!priv_tmp) { + ret = -ENOMEM; + goto err_deinit; + } + + priv_tmp->idx = i; + priv_tmp->pci = pci; + debugfs_create_file(err_inj_list[i].name, 0200, rasdes_err_inj, priv_tmp, + &dwc_pcie_err_inj_ops); + } + + /* Create debugfs files for Statistical Counter subdirectory. */ + for (i = 0; i < ARRAY_SIZE(event_list); i++) { + priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL); + if (!priv_tmp) { + ret = -ENOMEM; + goto err_deinit; + } + + priv_tmp->idx = i; + priv_tmp->pci = pci; + rasdes_events = debugfs_create_dir(event_list[i].name, rasdes_event_counter); + if (event_list[i].group_no == 0 || event_list[i].group_no == 4) { + debugfs_create_file("lane_select", 0644, rasdes_events, + priv_tmp, &dwc_pcie_counter_lane_ops); + } + debugfs_create_file("counter_value", 0444, rasdes_events, priv_tmp, + &dwc_pcie_counter_value_ops); + debugfs_create_file("counter_enable", 0644, rasdes_events, priv_tmp, + &dwc_pcie_counter_enable_ops); + } + + return 0; + +err_deinit: + dwc_pcie_rasdes_debugfs_deinit(pci); + return ret; +} + +static void dwc_pcie_ltssm_debugfs_init(struct dw_pcie *pci, struct dentry *dir) +{ + debugfs_create_file("ltssm_status", 0444, dir, pci, + &dwc_pcie_ltssm_status_ops); +} + +void dwc_pcie_debugfs_deinit(struct dw_pcie *pci) +{ + if (!pci->debugfs) + return; + + dwc_pcie_rasdes_debugfs_deinit(pci); + debugfs_remove_recursive(pci->debugfs->debug_dir); +} + +void dwc_pcie_debugfs_init(struct dw_pcie *pci) +{ + char dirname[DWC_DEBUGFS_BUF_MAX]; + struct device *dev = pci->dev; + struct debugfs_info *debugfs; + struct dentry *dir; + int err; + + /* Create main directory for each platform driver. */ + snprintf(dirname, DWC_DEBUGFS_BUF_MAX, "dwc_pcie_%s", dev_name(dev)); + dir = debugfs_create_dir(dirname, NULL); + debugfs = devm_kzalloc(dev, sizeof(*debugfs), GFP_KERNEL); + if (!debugfs) + return; + + debugfs->debug_dir = dir; + pci->debugfs = debugfs; + err = dwc_pcie_rasdes_debugfs_init(pci, dir); + if (err) + dev_err(dev, "failed to initialize RAS DES debugfs, err=%d\n", + err); + + dwc_pcie_ltssm_debugfs_init(pci, dir); +} diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index f3ac7d46a855..1a0bf9341542 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -102,6 +102,45 @@ static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap) return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap); } +/** + * dw_pcie_ep_hide_ext_capability - Hide a capability from the linked list + * @pci: DWC PCI device + * @prev_cap: Capability preceding the capability that should be hidden + * @cap: Capability that should be hidden + * + * Return: 0 if success, errno otherwise. 
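+ *
+ * For example, pcie-dw-rockchip.c below uses this to hide the broken ATS
+ * capability on RK3588 in endpoint mode, where ATS immediately follows the
+ * Secondary PCIe extended capability:
+ *
+ *   dw_pcie_ep_hide_ext_capability(pci, PCI_EXT_CAP_ID_SECPCI,
+ *                                  PCI_EXT_CAP_ID_ATS);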
+ */ +int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap) +{ + u16 prev_cap_offset, cap_offset; + u32 prev_cap_header, cap_header; + + prev_cap_offset = dw_pcie_find_ext_capability(pci, prev_cap); + if (!prev_cap_offset) + return -EINVAL; + + prev_cap_header = dw_pcie_readl_dbi(pci, prev_cap_offset); + cap_offset = PCI_EXT_CAP_NEXT(prev_cap_header); + cap_header = dw_pcie_readl_dbi(pci, cap_offset); + + /* cap must immediately follow prev_cap. */ + if (PCI_EXT_CAP_ID(cap_header) != cap) + return -EINVAL; + + /* Clear next ptr. */ + prev_cap_header &= ~GENMASK(31, 20); + + /* Set next ptr to next ptr of cap. */ + prev_cap_header |= cap_header & GENMASK(31, 20); + + dw_pcie_dbi_ro_wr_en(pci); + dw_pcie_writel_dbi(pci, prev_cap_offset, prev_cap_header); + dw_pcie_dbi_ro_wr_dis(pci); + + return 0; +} +EXPORT_SYMBOL_GPL(dw_pcie_ep_hide_ext_capability); + static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_header *hdr) { @@ -128,7 +167,8 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no, } static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type, - dma_addr_t cpu_addr, enum pci_barno bar) + dma_addr_t parent_bus_addr, enum pci_barno bar, + size_t size) { int ret; u32 free_win; @@ -145,7 +185,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type, } ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type, - cpu_addr, bar); + parent_bus_addr, bar, size); if (ret < 0) { dev_err(pci->dev, "Failed to program IB window\n"); return ret; @@ -180,7 +220,7 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, return ret; set_bit(free_win, ep->ob_window_map); - ep->outbound_addr[free_win] = atu->cpu_addr; + ep->outbound_addr[free_win] = atu->parent_bus_addr; return 0; } @@ -204,6 +244,125 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, ep->bar_to_atu[bar] = 0; } +static unsigned int dw_pcie_ep_get_rebar_offset(struct dw_pcie *pci, + enum pci_barno bar) +{ + u32 reg, bar_index; + unsigned int offset, nbars; + int i; + + offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR); + if (!offset) + return offset; + + reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); + nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT; + + for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) { + reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); + bar_index = reg & PCI_REBAR_CTRL_BAR_IDX; + if (bar_index == bar) + return offset; + } + + return 0; +} + +static int dw_pcie_ep_set_bar_resizable(struct dw_pcie_ep *ep, u8 func_no, + struct pci_epf_bar *epf_bar) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + enum pci_barno bar = epf_bar->barno; + size_t size = epf_bar->size; + int flags = epf_bar->flags; + u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar); + unsigned int rebar_offset; + u32 rebar_cap, rebar_ctrl; + int ret; + + rebar_offset = dw_pcie_ep_get_rebar_offset(pci, bar); + if (!rebar_offset) + return -EINVAL; + + ret = pci_epc_bar_size_to_rebar_cap(size, &rebar_cap); + if (ret) + return ret; + + dw_pcie_dbi_ro_wr_en(pci); + + /* + * A BAR mask should not be written for a resizable BAR. The BAR mask + * is automatically derived by the controller every time the "selected + * size" bits are updated, see "Figure 3-26 Resizable BAR Example for + * 32-bit Memory BAR0" in DWC EP databook 5.96a. We simply need to write + * BIT(0) to set the BAR enable bit. 
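+	 *
+	 * For reference, the "supported sizes" encoding computed by
+	 * pci_epc_bar_size_to_rebar_cap() above is one capability bit per
+	 * power-of-two size starting at 1 MB (PCI_REBAR_CAP_SIZES), e.g.
+	 * BIT(4) for 1 MB, BIT(5) for 2 MB.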
+ */ + dw_pcie_ep_writel_dbi2(ep, func_no, reg, BIT(0)); + dw_pcie_ep_writel_dbi(ep, func_no, reg, flags); + + if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { + dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0); + dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0); + } + + /* + * Bits 31:0 in PCI_REBAR_CAP define "supported sizes" bits for sizes + * 1 MB to 128 TB. Bits 31:16 in PCI_REBAR_CTRL define "supported sizes" + * bits for sizes 256 TB to 8 EB. Disallow sizes 256 TB to 8 EB. + */ + rebar_ctrl = dw_pcie_readl_dbi(pci, rebar_offset + PCI_REBAR_CTRL); + rebar_ctrl &= ~GENMASK(31, 16); + dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CTRL, rebar_ctrl); + + /* + * The "selected size" (bits 13:8) in PCI_REBAR_CTRL are automatically + * updated when writing PCI_REBAR_CAP, see "Figure 3-26 Resizable BAR + * Example for 32-bit Memory BAR0" in DWC EP databook 5.96a. + */ + dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CAP, rebar_cap); + + dw_pcie_dbi_ro_wr_dis(pci); + + return 0; +} + +static int dw_pcie_ep_set_bar_programmable(struct dw_pcie_ep *ep, u8 func_no, + struct pci_epf_bar *epf_bar) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + enum pci_barno bar = epf_bar->barno; + size_t size = epf_bar->size; + int flags = epf_bar->flags; + u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar); + + dw_pcie_dbi_ro_wr_en(pci); + + dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1)); + dw_pcie_ep_writel_dbi(ep, func_no, reg, flags); + + if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { + dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1)); + dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0); + } + + dw_pcie_dbi_ro_wr_dis(pci); + + return 0; +} + +static enum pci_epc_bar_type dw_pcie_ep_get_bar_type(struct dw_pcie_ep *ep, + enum pci_barno bar) +{ + const struct pci_epc_features *epc_features; + + if (!ep->ops->get_features) + return BAR_PROGRAMMABLE; + + epc_features = ep->ops->get_features(ep); + + return epc_features->bar[bar].type; +} + static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_bar *epf_bar) { @@ -211,9 +370,9 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct dw_pcie *pci = to_dw_pcie_from_ep(ep); enum pci_barno bar = epf_bar->barno; size_t size = epf_bar->size; + enum pci_epc_bar_type bar_type; int flags = epf_bar->flags; int ret, type; - u32 reg; /* * DWC does not allow BAR pairs to overlap, e.g. you cannot combine BARs @@ -222,32 +381,66 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1)) return -EINVAL; - reg = PCI_BASE_ADDRESS_0 + (4 * bar); + /* + * Certain EPF drivers dynamically change the physical address of a BAR + * (i.e. they call set_bar() twice, without ever calling clear_bar(), as + * calling clear_bar() would clear the BAR's PCI address assigned by the + * host). + */ + if (ep->epf_bar[bar]) { + /* + * We can only dynamically change a BAR if the new BAR size and + * BAR flags do not differ from the existing configuration. + */ + if (ep->epf_bar[bar]->barno != bar || + ep->epf_bar[bar]->size != size || + ep->epf_bar[bar]->flags != flags) + return -EINVAL; + + /* + * When dynamically changing a BAR, skip writing the BAR reg, as + * that would clear the BAR's PCI address assigned by the host. 
+ */ + goto config_atu; + } + + bar_type = dw_pcie_ep_get_bar_type(ep, bar); + switch (bar_type) { + case BAR_FIXED: + /* + * There is no need to write a BAR mask for a fixed BAR (except + * to write 1 to the LSB of the BAR mask register, to enable the + * BAR). Write the BAR mask regardless. (The fixed bits in the + * BAR mask register will be read-only anyway.) + */ + fallthrough; + case BAR_PROGRAMMABLE: + ret = dw_pcie_ep_set_bar_programmable(ep, func_no, epf_bar); + break; + case BAR_RESIZABLE: + ret = dw_pcie_ep_set_bar_resizable(ep, func_no, epf_bar); + break; + default: + ret = -EINVAL; + dev_err(pci->dev, "Invalid BAR type\n"); + break; + } + if (ret) + return ret; + +config_atu: if (!(flags & PCI_BASE_ADDRESS_SPACE)) type = PCIE_ATU_TYPE_MEM; else type = PCIE_ATU_TYPE_IO; - ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar); + ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar, + size); if (ret) return ret; - if (ep->epf_bar[bar]) - return 0; - - dw_pcie_dbi_ro_wr_en(pci); - - dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1)); - dw_pcie_ep_writel_dbi(ep, func_no, reg, flags); - - if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { - dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1)); - dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0); - } - ep->epf_bar[bar] = epf_bar; - dw_pcie_dbi_ro_wr_dis(pci); return 0; } @@ -258,7 +451,7 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr, u32 index; struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - for (index = 0; index < pci->num_ob_windows; index++) { + for_each_set_bit(index, ep->ob_window_map, pci->num_ob_windows) { if (ep->outbound_addr[index] != addr) continue; *atu_index = index; @@ -290,7 +483,8 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - ret = dw_pcie_find_index(ep, addr, &atu_index); + ret = dw_pcie_find_index(ep, addr - pci->parent_bus_offset, + &atu_index); if (ret < 0) return; @@ -309,7 +503,7 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, atu.func_no = func_no; atu.type = PCIE_ATU_TYPE_MEM; - atu.cpu_addr = addr; + atu.parent_bus_addr = addr - pci->parent_bus_offset; atu.pci_addr = pci_addr; atu.size = size; ret = dw_pcie_ep_outbound_atu(ep, &atu); @@ -642,6 +836,7 @@ void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + dwc_pcie_debugfs_deinit(pci); dw_pcie_edma_remove(pci); } EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup); @@ -666,31 +861,15 @@ void dw_pcie_ep_deinit(struct dw_pcie_ep *ep) } EXPORT_SYMBOL_GPL(dw_pcie_ep_deinit); -static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap) -{ - u32 header; - int pos = PCI_CFG_SPACE_SIZE; - - while (pos) { - header = dw_pcie_readl_dbi(pci, pos); - if (PCI_EXT_CAP_ID(header) == cap) - return pos; - - pos = PCI_EXT_CAP_NEXT(header); - if (!pos) - break; - } - - return 0; -} - static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci) { + struct dw_pcie_ep *ep = &pci->ep; unsigned int offset; unsigned int nbars; - u32 reg, i; + enum pci_barno bar; + u32 reg, i, val; - offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR); + offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR); dw_pcie_dbi_ro_wr_en(pci); @@ -703,9 +882,29 @@ static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci) * PCIe r6.0, sec 7.8.6.2 require us to support at 
least one * size in the range from 1 MB to 512 GB. Advertise support * for 1 MB BAR size only. + * + * For a BAR that has been configured via dw_pcie_ep_set_bar(), + * advertise support for only that size instead. */ - for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) - dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4)); + for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) { + /* + * While the RESBAR_CAP_REG_* fields are sticky, the + * RESBAR_CTRL_REG_BAR_SIZE field is non-sticky (it is + * sticky in certain versions of DWC PCIe, but not all). + * + * RESBAR_CTRL_REG_BAR_SIZE is updated automatically by + * the controller when RESBAR_CAP_REG is written, which + * is why RESBAR_CAP_REG is written here. + */ + val = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); + bar = val & PCI_REBAR_CTRL_BAR_IDX; + if (ep->epf_bar[bar]) + pci_epc_bar_size_to_rebar_cap(ep->epf_bar[bar]->size, &val); + else + val = BIT(4); + + dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, val); + } } dw_pcie_setup(pci); @@ -749,6 +948,7 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep) if (ret) return ret; + ret = -ENOMEM; if (!ep->ib_window_map) { ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows, GFP_KERNEL); @@ -793,7 +993,7 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep) if (ep->ops->init) ep->ops->init(ep); - ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM); + ptm_cap_base = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM); /* * PTM responder capability can be disabled only after disabling @@ -813,6 +1013,8 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep) dw_pcie_ep_init_non_sticky_registers(pci); + dwc_pcie_debugfs_init(pci); + return 0; err_remove_edma: @@ -859,26 +1061,15 @@ void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep) } EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown); -/** - * dw_pcie_ep_init - Initialize the endpoint device - * @ep: DWC EP device - * - * Initialize the endpoint device. Allocate resources and create the EPC - * device with the endpoint framework. - * - * Return: 0 if success, errno otherwise. - */ -int dw_pcie_ep_init(struct dw_pcie_ep *ep) +static int dw_pcie_ep_get_resources(struct dw_pcie_ep *ep) { - int ret; - struct resource *res; - struct pci_epc *epc; struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct device *dev = pci->dev; struct platform_device *pdev = to_platform_device(dev); struct device_node *np = dev->of_node; - - INIT_LIST_HEAD(&ep->func_list); + struct pci_epc *epc = ep->epc; + struct resource *res; + int ret; ret = dw_pcie_get_resources(pci); if (ret) @@ -891,8 +1082,37 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) ep->phys_base = res->start; ep->addr_size = resource_size(res); - if (ep->ops->pre_init) - ep->ops->pre_init(ep); + /* + * artpec6_pcie_cpu_addr_fixup() uses ep->phys_base, so call + * dw_pcie_parent_bus_offset() after setting ep->phys_base. + */ + pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "addr_space", + ep->phys_base); + + ret = of_property_read_u8(np, "max-functions", &epc->max_functions); + if (ret < 0) + epc->max_functions = 1; + + return 0; +} + +/** + * dw_pcie_ep_init - Initialize the endpoint device + * @ep: DWC EP device + * + * Initialize the endpoint device. Allocate resources and create the EPC + * device with the endpoint framework. + * + * Return: 0 if success, errno otherwise. 
+ */ +int dw_pcie_ep_init(struct dw_pcie_ep *ep) +{ + int ret; + struct pci_epc *epc; + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct device *dev = pci->dev; + + INIT_LIST_HEAD(&ep->func_list); epc = devm_pci_epc_create(dev, &epc_ops); if (IS_ERR(epc)) { @@ -903,9 +1123,12 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) ep->epc = epc; epc_set_drvdata(epc, ep); - ret = of_property_read_u8(np, "max-functions", &epc->max_functions); - if (ret < 0) - epc->max_functions = 1; + ret = dw_pcie_ep_get_resources(ep); + if (ret) + return ret; + + if (ep->ops->pre_init) + ep->ops->pre_init(ep); ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size, ep->page_size); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index d2291c3ceb8b..ecc33f6789e3 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -418,50 +418,69 @@ static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp) } } -int dw_pcie_host_init(struct dw_pcie_rp *pp) +static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct device *dev = pci->dev; - struct device_node *np = dev->of_node; struct platform_device *pdev = to_platform_device(dev); struct resource_entry *win; - struct pci_host_bridge *bridge; struct resource *res; int ret; - raw_spin_lock_init(&pp->lock); - ret = dw_pcie_get_resources(pci); if (ret) return ret; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); - if (res) { - pp->cfg0_size = resource_size(res); - pp->cfg0_base = res->start; - - pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(pp->va_cfg0_base)) - return PTR_ERR(pp->va_cfg0_base); - } else { - dev_err(dev, "Missing *config* reg space\n"); + if (!res) { + dev_err(dev, "Missing \"config\" reg space\n"); return -ENODEV; } - bridge = devm_pci_alloc_host_bridge(dev, 0); - if (!bridge) - return -ENOMEM; + pp->cfg0_size = resource_size(res); + pp->cfg0_base = res->start; - pp->bridge = bridge; + pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(pp->va_cfg0_base)) + return PTR_ERR(pp->va_cfg0_base); /* Get the I/O range from DT */ - win = resource_list_first_type(&bridge->windows, IORESOURCE_IO); + win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_IO); if (win) { pp->io_size = resource_size(win->res); pp->io_bus_addr = win->res->start - win->offset; pp->io_base = pci_pio_to_address(win->res->start); } + /* + * visconti_pcie_cpu_addr_fixup() uses pp->io_base, so we have to + * call dw_pcie_parent_bus_offset() after setting pp->io_base. + */ + pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "config", + pp->cfg0_base); + return 0; +} + +int dw_pcie_host_init(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct device *dev = pci->dev; + struct device_node *np = dev->of_node; + struct pci_host_bridge *bridge; + int ret; + + raw_spin_lock_init(&pp->lock); + + bridge = devm_pci_alloc_host_bridge(dev, 0); + if (!bridge) + return -ENOMEM; + + pp->bridge = bridge; + + ret = dw_pcie_host_get_resources(pp); + if (ret) + return ret; + /* Set default bus ops */ bridge->ops = &dw_pcie_ops; bridge->child_ops = &dw_child_pcie_ops; @@ -530,8 +549,14 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) goto err_remove_edma; } - /* Ignore errors, the link may come up later */ - dw_pcie_wait_for_link(pci); + /* + * Note: Skip the link up delay only when a Link Up IRQ is present. 
+ * If there is no Link Up IRQ, we should not bypass the delay + * because that would require users to manually rescan for devices. + */ + if (!pp->use_linkup_irq) + /* Ignore errors, the link may come up later */ + dw_pcie_wait_for_link(pci); bridge->sysdata = pp; @@ -542,6 +567,8 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) if (pp->ops->post_init) pp->ops->post_init(pp); + dwc_pcie_debugfs_init(pci); + return 0; err_stop_link: @@ -566,6 +593,8 @@ void dw_pcie_host_deinit(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + dwc_pcie_debugfs_deinit(pci); + pci_stop_root_bus(pp->bridge->bus); pci_remove_root_bus(pp->bridge->bus); @@ -610,7 +639,7 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus, type = PCIE_ATU_TYPE_CFG1; atu.type = type; - atu.cpu_addr = pp->cfg0_base; + atu.parent_bus_addr = pp->cfg0_base - pci->parent_bus_offset; atu.pci_addr = busdev; atu.size = pp->cfg0_size; @@ -635,7 +664,7 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn, if (pp->cfg0_io_shared) { atu.type = PCIE_ATU_TYPE_IO; - atu.cpu_addr = pp->io_base; + atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset; atu.pci_addr = pp->io_bus_addr; atu.size = pp->io_size; @@ -661,7 +690,7 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn, if (pp->cfg0_io_shared) { atu.type = PCIE_ATU_TYPE_IO; - atu.cpu_addr = pp->io_base; + atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset; atu.pci_addr = pp->io_bus_addr; atu.size = pp->io_size; @@ -730,7 +759,7 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp) atu.index = i; atu.type = PCIE_ATU_TYPE_MEM; - atu.cpu_addr = entry->res->start; + atu.parent_bus_addr = entry->res->start - pci->parent_bus_offset; atu.pci_addr = entry->res->start - entry->offset; /* Adjust iATU size if MSG TLP region was allocated before */ @@ -752,7 +781,7 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp) if (pci->num_ob_windows > ++i) { atu.index = i; atu.type = PCIE_ATU_TYPE_IO; - atu.cpu_addr = pp->io_base; + atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset; atu.pci_addr = pp->io_bus_addr; atu.size = pp->io_size; @@ -896,13 +925,13 @@ static int dw_pcie_pme_turn_off(struct dw_pcie *pci) atu.size = resource_size(pci->pp.msg_res); atu.index = pci->pp.msg_atu_index; - atu.cpu_addr = pci->pp.msg_res->start; + atu.parent_bus_addr = pci->pp.msg_res->start - pci->parent_bus_offset; ret = dw_pcie_prog_outbound_atu(pci, &atu); if (ret) return ret; - mem = ioremap(atu.cpu_addr, pci->region_align); + mem = ioremap(pci->pp.msg_res->start, pci->region_align); if (!mem) return -ENOMEM; @@ -918,7 +947,7 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci) { u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); u32 val; - int ret = 0; + int ret; /* * If L1SS is supported, then do not put the link into L2 as some @@ -927,25 +956,33 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci) if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1) return 0; - if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT) - return 0; - - if (pci->pp.ops->pme_turn_off) + if (pci->pp.ops->pme_turn_off) { pci->pp.ops->pme_turn_off(&pci->pp); - else + } else { ret = dw_pcie_pme_turn_off(pci); + if (ret) + return ret; + } - if (ret) - return ret; - - ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE, + ret = read_poll_timeout(dw_pcie_get_ltssm, val, + val == DW_PCIE_LTSSM_L2_IDLE || + val <= DW_PCIE_LTSSM_DETECT_WAIT, PCIE_PME_TO_L2_TIMEOUT_US/10, 
PCIE_PME_TO_L2_TIMEOUT_US, false, pci); if (ret) { + /* Only log message when LTSSM isn't in DETECT or POLL */ dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val); return ret; } + /* + * Per PCIe r6.0, sec 5.3.3.2.1, software should wait at least + * 100ns after L2/L3 Ready before turning off refclock and + * main power. This is harmless when no endpoint is connected. + */ + udelay(1); + + dw_pcie_stop_link(pci); if (pci->pp.ops->deinit) pci->pp.ops->deinit(&pci->pp); diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 6d6cbc8b5b2c..97d76d3dc066 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -16,6 +16,8 @@ #include <linux/gpio/consumer.h> #include <linux/ioport.h> #include <linux/of.h> +#include <linux/of_address.h> +#include <linux/pcie-dwc.h> #include <linux/platform_device.h> #include <linux/sizes.h> #include <linux/types.h> @@ -283,6 +285,51 @@ u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap) } EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability); +static u16 __dw_pcie_find_vsec_capability(struct dw_pcie *pci, u16 vendor_id, + u16 vsec_id) +{ + u16 vsec = 0; + u32 header; + + if (vendor_id != dw_pcie_readw_dbi(pci, PCI_VENDOR_ID)) + return 0; + + while ((vsec = dw_pcie_find_next_ext_capability(pci, vsec, + PCI_EXT_CAP_ID_VNDR))) { + header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER); + if (PCI_VNDR_HEADER_ID(header) == vsec_id) + return vsec; + } + + return 0; +} + +static u16 dw_pcie_find_vsec_capability(struct dw_pcie *pci, + const struct dwc_pcie_vsec_id *vsec_ids) +{ + const struct dwc_pcie_vsec_id *vid; + u16 vsec; + u32 header; + + for (vid = vsec_ids; vid->vendor_id; vid++) { + vsec = __dw_pcie_find_vsec_capability(pci, vid->vendor_id, + vid->vsec_id); + if (vsec) { + header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER); + if (PCI_VNDR_HEADER_REV(header) == vid->vsec_rev) + return vsec; + } + } + + return 0; +} + +u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci) +{ + return dw_pcie_find_vsec_capability(pci, dwc_pcie_rasdes_vsec_ids); +} +EXPORT_SYMBOL_GPL(dw_pcie_find_rasdes_capability); + int dw_pcie_read(void __iomem *addr, int size, u32 *val) { if (!IS_ALIGNED((uintptr_t)addr, size)) { @@ -470,25 +517,22 @@ static inline u32 dw_pcie_enable_ecrc(u32 val) int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, const struct dw_pcie_ob_atu_cfg *atu) { - u64 cpu_addr = atu->cpu_addr; + u64 parent_bus_addr = atu->parent_bus_addr; u32 retries, val; u64 limit_addr; - if (pci->ops && pci->ops->cpu_addr_fixup) - cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr); + limit_addr = parent_bus_addr + atu->size - 1; - limit_addr = cpu_addr + atu->size - 1; - - if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) || - !IS_ALIGNED(cpu_addr, pci->region_align) || + if ((limit_addr & ~pci->region_limit) != (parent_bus_addr & ~pci->region_limit) || + !IS_ALIGNED(parent_bus_addr, pci->region_align) || !IS_ALIGNED(atu->pci_addr, pci->region_align) || !atu->size) { return -EINVAL; } dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_BASE, - lower_32_bits(cpu_addr)); + lower_32_bits(parent_bus_addr)); dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_BASE, - upper_32_bits(cpu_addr)); + upper_32_bits(parent_bus_addr)); dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LIMIT, lower_32_bits(limit_addr)); @@ -502,7 +546,7 @@ int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, upper_32_bits(atu->pci_addr)); val = atu->type | 
atu->routing | PCIE_ATU_FUNC_NUM(atu->func_no); - if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) && + if (upper_32_bits(limit_addr) > upper_32_bits(parent_bus_addr) && dw_pcie_ver_is_ge(pci, 460A)) val |= PCIE_ATU_INCREASE_REGION_SIZE; if (dw_pcie_ver_is(pci, 490A)) @@ -545,13 +589,13 @@ static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg } int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type, - u64 cpu_addr, u64 pci_addr, u64 size) + u64 parent_bus_addr, u64 pci_addr, u64 size) { u64 limit_addr = pci_addr + size - 1; u32 retries, val; if ((limit_addr & ~pci->region_limit) != (pci_addr & ~pci->region_limit) || - !IS_ALIGNED(cpu_addr, pci->region_align) || + !IS_ALIGNED(parent_bus_addr, pci->region_align) || !IS_ALIGNED(pci_addr, pci->region_align) || !size) { return -EINVAL; } @@ -568,9 +612,9 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type, upper_32_bits(limit_addr)); dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET, - lower_32_bits(cpu_addr)); + lower_32_bits(parent_bus_addr)); dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET, - upper_32_bits(cpu_addr)); + upper_32_bits(parent_bus_addr)); val = type; if (upper_32_bits(limit_addr) > upper_32_bits(pci_addr) && @@ -597,17 +641,18 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type, } int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, - int type, u64 cpu_addr, u8 bar) + int type, u64 parent_bus_addr, u8 bar, size_t size) { u32 retries, val; - if (!IS_ALIGNED(cpu_addr, pci->region_align)) + if (!IS_ALIGNED(parent_bus_addr, pci->region_align) || + !IS_ALIGNED(parent_bus_addr, size)) return -EINVAL; dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET, - lower_32_bits(cpu_addr)); + lower_32_bits(parent_bus_addr)); dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET, - upper_32_bits(cpu_addr)); + upper_32_bits(parent_bus_addr)); dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type | PCIE_ATU_FUNC_NUM(func_no)); @@ -970,7 +1015,7 @@ static int dw_pcie_edma_irq_verify(struct dw_pcie *pci) { struct platform_device *pdev = to_platform_device(pci->dev); u16 ch_cnt = pci->edma.ll_wr_cnt + pci->edma.ll_rd_cnt; - char name[6]; + char name[15]; int ret; if (pci->edma.nr_irqs == 1) @@ -1104,3 +1149,63 @@ void dw_pcie_setup(struct dw_pcie *pci) dw_pcie_link_set_max_link_width(pci, pci->num_lanes); } + +resource_size_t dw_pcie_parent_bus_offset(struct dw_pcie *pci, + const char *reg_name, + resource_size_t cpu_phys_addr) +{ + struct device *dev = pci->dev; + struct device_node *np = dev->of_node; + int index; + u64 reg_addr, fixup_addr; + u64 (*fixup)(struct dw_pcie *pcie, u64 cpu_addr); + + /* Look up reg_name address on parent bus */ + index = of_property_match_string(np, "reg-names", reg_name); + + if (index < 0) { + dev_err(dev, "No %s in devicetree \"reg\" property\n", reg_name); + return 0; + } + + of_property_read_reg(np, index, ®_addr, NULL); + + fixup = pci->ops ? 
pci->ops->cpu_addr_fixup : NULL; + if (fixup) { + fixup_addr = fixup(pci, cpu_phys_addr); + if (reg_addr == fixup_addr) { + dev_info(dev, "%s reg[%d] %#010llx == %#010llx == fixup(cpu %#010llx); %ps is redundant with this devicetree\n", + reg_name, index, reg_addr, fixup_addr, + (unsigned long long) cpu_phys_addr, fixup); + } else { + dev_warn(dev, "%s reg[%d] %#010llx != %#010llx == fixup(cpu %#010llx); devicetree is broken\n", + reg_name, index, reg_addr, fixup_addr, + (unsigned long long) cpu_phys_addr); + reg_addr = fixup_addr; + } + + return cpu_phys_addr - reg_addr; + } + + if (pci->use_parent_dt_ranges) { + + /* + * This platform once had a fixup, presumably because it + * translates between CPU and PCI controller addresses. + * Log a note if devicetree didn't describe a translation. + */ + if (reg_addr == cpu_phys_addr) + dev_info(dev, "%s reg[%d] %#010llx == cpu %#010llx; no fixup was ever needed for this devicetree\n", + reg_name, index, reg_addr, + (unsigned long long) cpu_phys_addr); + } else { + if (reg_addr != cpu_phys_addr) { + dev_warn(dev, "%s reg[%d] %#010llx != cpu %#010llx; no fixup and devicetree \"ranges\" is broken, assuming no translation\n", + reg_name, index, reg_addr, + (unsigned long long) cpu_phys_addr); + return 0; + } + } + + return cpu_phys_addr - reg_addr; +} diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 347ab74ac35a..56aafdbcdaca 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -330,8 +330,40 @@ enum dw_pcie_ltssm { /* Need to align with PCIE_PORT_DEBUG0 bits 0:5 */ DW_PCIE_LTSSM_DETECT_QUIET = 0x0, DW_PCIE_LTSSM_DETECT_ACT = 0x1, + DW_PCIE_LTSSM_POLL_ACTIVE = 0x2, + DW_PCIE_LTSSM_POLL_COMPLIANCE = 0x3, + DW_PCIE_LTSSM_POLL_CONFIG = 0x4, + DW_PCIE_LTSSM_PRE_DETECT_QUIET = 0x5, + DW_PCIE_LTSSM_DETECT_WAIT = 0x6, + DW_PCIE_LTSSM_CFG_LINKWD_START = 0x7, + DW_PCIE_LTSSM_CFG_LINKWD_ACEPT = 0x8, + DW_PCIE_LTSSM_CFG_LANENUM_WAI = 0x9, + DW_PCIE_LTSSM_CFG_LANENUM_ACEPT = 0xa, + DW_PCIE_LTSSM_CFG_COMPLETE = 0xb, + DW_PCIE_LTSSM_CFG_IDLE = 0xc, + DW_PCIE_LTSSM_RCVRY_LOCK = 0xd, + DW_PCIE_LTSSM_RCVRY_SPEED = 0xe, + DW_PCIE_LTSSM_RCVRY_RCVRCFG = 0xf, + DW_PCIE_LTSSM_RCVRY_IDLE = 0x10, DW_PCIE_LTSSM_L0 = 0x11, + DW_PCIE_LTSSM_L0S = 0x12, + DW_PCIE_LTSSM_L123_SEND_EIDLE = 0x13, + DW_PCIE_LTSSM_L1_IDLE = 0x14, DW_PCIE_LTSSM_L2_IDLE = 0x15, + DW_PCIE_LTSSM_L2_WAKE = 0x16, + DW_PCIE_LTSSM_DISABLED_ENTRY = 0x17, + DW_PCIE_LTSSM_DISABLED_IDLE = 0x18, + DW_PCIE_LTSSM_DISABLED = 0x19, + DW_PCIE_LTSSM_LPBK_ENTRY = 0x1a, + DW_PCIE_LTSSM_LPBK_ACTIVE = 0x1b, + DW_PCIE_LTSSM_LPBK_EXIT = 0x1c, + DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT = 0x1d, + DW_PCIE_LTSSM_HOT_RESET_ENTRY = 0x1e, + DW_PCIE_LTSSM_HOT_RESET = 0x1f, + DW_PCIE_LTSSM_RCVRY_EQ0 = 0x20, + DW_PCIE_LTSSM_RCVRY_EQ1 = 0x21, + DW_PCIE_LTSSM_RCVRY_EQ2 = 0x22, + DW_PCIE_LTSSM_RCVRY_EQ3 = 0x23, DW_PCIE_LTSSM_UNKNOWN = 0xFFFFFFFF, }; @@ -342,7 +374,7 @@ struct dw_pcie_ob_atu_cfg { u8 func_no; u8 code; u8 routing; - u64 cpu_addr; + u64 parent_bus_addr; u64 pci_addr; u64 size; }; @@ -379,6 +411,7 @@ struct dw_pcie_rp { bool use_atu_msg; int msg_atu_index; struct resource *msg_res; + bool use_linkup_irq; }; struct dw_pcie_ep_ops { @@ -435,6 +468,11 @@ struct dw_pcie_ops { void (*stop_link)(struct dw_pcie *pcie); }; +struct debugfs_info { + struct dentry *debug_dir; + void *rasdes_info; +}; + struct dw_pcie { struct device *dev; void __iomem *dbi_base; @@ -443,6 +481,7 @@ struct dw_pcie { void __iomem *atu_base;
resource_size_t atu_phys_addr; size_t atu_size; + resource_size_t parent_bus_offset; u32 num_ib_windows; u32 num_ob_windows; u32 region_align; @@ -463,6 +502,20 @@ struct dw_pcie { struct reset_control_bulk_data core_rsts[DW_PCIE_NUM_CORE_RSTS]; struct gpio_desc *pe_rst; bool suspended; + struct debugfs_info *debugfs; + + /* + * If iATU input addresses are offset from CPU physical addresses, + * we previously required .cpu_addr_fixup() to convert them. We + * now rely on the devicetree instead. If .cpu_addr_fixup() + * exists, we compare its results with devicetree. + * + * If .cpu_addr_fixup() does not exist, we assume the offset is + * zero and warn if devicetree claims otherwise. If we know all + * devicetrees correctly describe the offset, set + * use_parent_dt_ranges to true to avoid this warning. + */ + bool use_parent_dt_ranges; }; #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) @@ -476,6 +529,7 @@ void dw_pcie_version_detect(struct dw_pcie *pci); u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap); u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap); +u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci); int dw_pcie_read(void __iomem *addr, int size, u32 *val); int dw_pcie_write(void __iomem *addr, int size, u32 val); @@ -489,17 +543,18 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci); int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, const struct dw_pcie_ob_atu_cfg *atu); int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type, - u64 cpu_addr, u64 pci_addr, u64 size); + u64 parent_bus_addr, u64 pci_addr, u64 size); int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, - int type, u64 cpu_addr, u8 bar); + int type, u64 parent_bus_addr, + u8 bar, size_t size); void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index); void dw_pcie_setup(struct dw_pcie *pci); void dw_pcie_iatu_detect(struct dw_pcie *pci); int dw_pcie_edma_detect(struct dw_pcie *pci); void dw_pcie_edma_remove(struct dw_pcie *pci); - -int dw_pcie_suspend_noirq(struct dw_pcie *pci); -int dw_pcie_resume_noirq(struct dw_pcie *pci); +resource_size_t dw_pcie_parent_bus_offset(struct dw_pcie *pci, + const char *reg_name, + resource_size_t cpu_phy_addr); static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val) { @@ -678,6 +733,8 @@ static inline enum dw_pcie_ltssm dw_pcie_get_ltssm(struct dw_pcie *pci) } #ifdef CONFIG_PCIE_DW_HOST +int dw_pcie_suspend_noirq(struct dw_pcie *pci); +int dw_pcie_resume_noirq(struct dw_pcie *pci); irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp); int dw_pcie_setup_rc(struct dw_pcie_rp *pp); int dw_pcie_host_init(struct dw_pcie_rp *pp); @@ -686,6 +743,16 @@ int dw_pcie_allocate_domains(struct dw_pcie_rp *pp); void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where); #else +static inline int dw_pcie_suspend_noirq(struct dw_pcie *pci) +{ + return 0; +} + +static inline int dw_pcie_resume_noirq(struct dw_pcie *pci) +{ + return 0; +} + static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp) { return IRQ_NONE; @@ -732,6 +799,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no, u16 interrupt_num); void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar); +int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap); struct dw_pcie_ep_func * dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no); #else @@ -789,10 +857,29 @@ 
static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar) { } +static inline int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, + u8 prev_cap, u8 cap) +{ + return 0; +} + static inline struct dw_pcie_ep_func * dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no) { return NULL; } #endif + +#ifdef CONFIG_PCIE_DW_DEBUGFS +void dwc_pcie_debugfs_init(struct dw_pcie *pci); +void dwc_pcie_debugfs_deinit(struct dw_pcie *pci); +#else +static inline void dwc_pcie_debugfs_init(struct dw_pcie *pci) +{ +} +static inline void dwc_pcie_debugfs_deinit(struct dw_pcie *pci) +{ +} +#endif + #endif /* _PCIE_DESIGNWARE_H */ diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c index 1170e1107508..c624b7ebd118 100644 --- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c +++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c @@ -240,6 +240,34 @@ static const struct dw_pcie_host_ops rockchip_pcie_host_ops = { .init = rockchip_pcie_host_init, }; +/* + * ATS does not work on RK3588 when running in EP mode. + * + * After the host has enabled ATS on the EP side, it will send an IOTLB + * invalidation request to the EP side. However, the RK3588 will never send + * a completion back and eventually the host will print an IOTLB_INV_TIMEOUT + * error, and the EP will not be operational. If we hide the ATS capability, + * things work as expected. + */ +static void rockchip_pcie_ep_hide_broken_ats_cap_rk3588(struct dw_pcie_ep *ep) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct device *dev = pci->dev; + + /* Only hide the ATS capability for RK3588 running in EP mode. */ + if (!of_device_is_compatible(dev->of_node, "rockchip,rk3588-pcie-ep")) + return; + + if (dw_pcie_ep_hide_ext_capability(pci, PCI_EXT_CAP_ID_SECPCI, + PCI_EXT_CAP_ID_ATS)) + dev_err(dev, "failed to hide ATS capability\n"); +} + +static void rockchip_pcie_ep_pre_init(struct dw_pcie_ep *ep) +{ + rockchip_pcie_ep_hide_broken_ats_cap_rk3588(ep); +} + static void rockchip_pcie_ep_init(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); @@ -272,13 +300,14 @@ static const struct pci_epc_features rockchip_pcie_epc_features_rk3568 = { .linkup_notifier = true, .msi_capable = true, .msix_capable = true, + .intx_capable = false, .align = SZ_64K, - .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, - .bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, - .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, - .bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, - .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, - .bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, + .bar[BAR_0] = { .type = BAR_RESIZABLE, }, + .bar[BAR_1] = { .type = BAR_RESIZABLE, }, + .bar[BAR_2] = { .type = BAR_RESIZABLE, }, + .bar[BAR_3] = { .type = BAR_RESIZABLE, }, + .bar[BAR_4] = { .type = BAR_RESIZABLE, }, + .bar[BAR_5] = { .type = BAR_RESIZABLE, }, }; /* @@ -292,13 +321,14 @@ static const struct pci_epc_features rockchip_pcie_epc_features_rk3588 = { .linkup_notifier = true, .msi_capable = true, .msix_capable = true, + .intx_capable = false, .align = SZ_64K, - .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, - .bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, - .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, - .bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, + .bar[BAR_0] = { .type = BAR_RESIZABLE, }, + .bar[BAR_1] = { .type = BAR_RESIZABLE, }, + .bar[BAR_2] = { .type = BAR_RESIZABLE, }, + .bar[BAR_3] 
= { .type = BAR_RESIZABLE, }, .bar[BAR_4] = { .type = BAR_RESERVED, }, - .bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, }, + .bar[BAR_5] = { .type = BAR_RESIZABLE, }, }; static const struct pci_epc_features * @@ -312,6 +342,7 @@ rockchip_pcie_get_features(struct dw_pcie_ep *ep) static const struct dw_pcie_ep_ops rockchip_pcie_ep_ops = { .init = rockchip_pcie_ep_init, + .pre_init = rockchip_pcie_ep_pre_init, .raise_irq = rockchip_pcie_raise_irq, .get_features = rockchip_pcie_get_features, }; @@ -389,6 +420,34 @@ static const struct dw_pcie_ops dw_pcie_ops = { .stop_link = rockchip_pcie_stop_link, }; +static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg) +{ + struct rockchip_pcie *rockchip = arg; + struct dw_pcie *pci = &rockchip->pci; + struct dw_pcie_rp *pp = &pci->pp; + struct device *dev = pci->dev; + u32 reg, val; + + reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC); + rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC); + + dev_dbg(dev, "PCIE_CLIENT_INTR_STATUS_MISC: %#x\n", reg); + dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip)); + + if (reg & PCIE_RDLH_LINK_UP_CHGED) { + val = rockchip_pcie_get_ltssm(rockchip); + if ((val & PCIE_LINKUP) == PCIE_LINKUP) { + dev_dbg(dev, "Received Link up event. Starting enumeration!\n"); + /* Rescan the bus to enumerate endpoint devices */ + pci_lock_rescan_remove(); + pci_rescan_bus(pp->bridge->bus); + pci_unlock_rescan_remove(); + } + } + + return IRQ_HANDLED; +} + static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg) { struct rockchip_pcie *rockchip = arg; @@ -418,14 +477,29 @@ static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg) return IRQ_HANDLED; } -static int rockchip_pcie_configure_rc(struct rockchip_pcie *rockchip) +static int rockchip_pcie_configure_rc(struct platform_device *pdev, + struct rockchip_pcie *rockchip) { + struct device *dev = &pdev->dev; struct dw_pcie_rp *pp; + int irq, ret; u32 val; if (!IS_ENABLED(CONFIG_PCIE_ROCKCHIP_DW_HOST)) return -ENODEV; + irq = platform_get_irq_byname(pdev, "sys"); + if (irq < 0) + return irq; + + ret = devm_request_threaded_irq(dev, irq, NULL, + rockchip_pcie_rc_sys_irq_thread, + IRQF_ONESHOT, "pcie-sys-rc", rockchip); + if (ret) { + dev_err(dev, "failed to request PCIe sys IRQ\n"); + return ret; + } + /* LTSSM enable control mode */ val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE); rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL); @@ -435,8 +509,19 @@ static int rockchip_pcie_configure_rc(struct rockchip_pcie *rockchip) pp = &rockchip->pci.pp; pp->ops = &rockchip_pcie_host_ops; + pp->use_linkup_irq = true; - return dw_pcie_host_init(pp); + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + return ret; + } + + /* unmask DLL up/down indicator */ + val = HIWORD_UPDATE(PCIE_RDLH_LINK_UP_CHGED, 0); + rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC); + + return ret; } static int rockchip_pcie_configure_ep(struct platform_device *pdev, @@ -450,14 +535,12 @@ static int rockchip_pcie_configure_ep(struct platform_device *pdev, return -ENODEV; irq = platform_get_irq_byname(pdev, "sys"); - if (irq < 0) { - dev_err(dev, "missing sys IRQ resource\n"); + if (irq < 0) return irq; - } ret = devm_request_threaded_irq(dev, irq, NULL, rockchip_pcie_ep_sys_irq_thread, - IRQF_ONESHOT, "pcie-sys", rockchip); + IRQF_ONESHOT, "pcie-sys-ep", rockchip); if (ret) { dev_err(dev, "failed to request PCIe sys IRQ\n"); return ret; 
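The HIWORD_UPDATE() writes above depend on the Rockchip "hiword mask" register layout, in which the upper 16 bits of the written value act as a write-enable mask for the lower 16 bits. A minimal sketch of that idiom, assuming the usual Rockchip convention (the helper names are illustrative, not part of the driver):

#include <linux/io.h>
#include <linux/types.h>

/*
 * Hiword-mask write: bits [31:16] select which of bits [15:0] are
 * actually updated, so a single field can be changed without a
 * read-modify-write cycle.
 */
static inline u32 example_hiword_update(u32 mask, u32 val)
{
	return (mask << 16) | (val & mask);
}

/* Writing val = 0 for a mask bit clears it, i.e. unmasks that IRQ. */
static void example_unmask_irq(void __iomem *apb_base, u32 bit, u32 reg)
{
	writel(example_hiword_update(bit, 0), apb_base + reg);
}

This is why the interrupt unmask above needs only a single register write, with no locking around a read-modify-write sequence.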
@@ -491,7 +574,8 @@ static int rockchip_pcie_configure_ep(struct platform_device *pdev, pci_epc_init_notify(rockchip->pci.ep.epc); /* unmask DLL up/down indicator and hot reset/link-down reset */ - rockchip_pcie_writel_apb(rockchip, 0x60000, PCIE_CLIENT_INTR_MASK_MISC); + val = HIWORD_UPDATE(PCIE_RDLH_LINK_UP_CHGED | PCIE_LINK_REQ_RST_NOT_INT, 0); + rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC); return ret; } @@ -553,7 +637,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev) switch (data->mode) { case DW_PCIE_RC_TYPE: - ret = rockchip_pcie_configure_rc(rockchip); + ret = rockchip_pcie_configure_rc(pdev, rockchip); if (ret) goto deinit_clk; break; diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c index 615a0e3e6d7e..1f2f4c28a949 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -409,16 +409,21 @@ static int histb_pcie_probe(struct platform_device *pdev) ret = histb_pcie_host_enable(pp); if (ret) { dev_err(dev, "failed to enable host\n"); - return ret; + goto err_exit_phy; } ret = dw_pcie_host_init(pp); if (ret) { dev_err(dev, "failed to initialize host\n"); - return ret; + goto err_exit_phy; } return 0; + +err_exit_phy: + phy_exit(hipcie->phy); + + return ret; } static void histb_pcie_remove(struct platform_device *pdev) @@ -427,8 +432,7 @@ static void histb_pcie_remove(struct platform_device *pdev) histb_pcie_host_disable(hipcie); - if (hipcie->phy) - phy_exit(hipcie->phy); + phy_exit(hipcie->phy); } static const struct of_device_id histb_pcie_of_match[] = { diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c index 9b53b8f6f268..c21906eced61 100644 --- a/drivers/pci/controller/dwc/pcie-intel-gw.c +++ b/drivers/pci/controller/dwc/pcie-intel-gw.c @@ -57,7 +57,6 @@ PCIE_APP_IRN_INTA | PCIE_APP_IRN_INTB | \ PCIE_APP_IRN_INTC | PCIE_APP_IRN_INTD) -#define BUS_IATU_OFFSET SZ_256M #define RESET_INTERVAL_MS 100 struct intel_pcie { @@ -381,13 +380,7 @@ static int intel_pcie_rc_init(struct dw_pcie_rp *pp) return intel_pcie_host_setup(pcie); } -static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr) -{ - return cpu_addr + BUS_IATU_OFFSET; -} - static const struct dw_pcie_ops intel_pcie_ops = { - .cpu_addr_fixup = intel_pcie_cpu_addr, }; static const struct dw_pcie_host_ops intel_pcie_dw_ops = { @@ -409,6 +402,7 @@ static int intel_pcie_probe(struct platform_device *pdev) platform_set_drvdata(pdev, pcie); pci = &pcie->pci; pci->dev = dev; + pci->use_parent_dt_ranges = true; pp = &pci->pp; ret = intel_pcie_get_resources(pdev); diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index 1b2088acb538..d0e6a3811b00 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -216,10 +216,9 @@ static int hi3660_pcie_phy_start(struct hi3660_pcie_phy *phy) usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX); reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_STATUS0); - if (reg_val & PIPE_CLK_STABLE) { - dev_err(dev, "PIPE clk is not stable\n"); - return -EINVAL; - } + if (reg_val & PIPE_CLK_STABLE) + return dev_err_probe(dev, -ETIMEDOUT, + "PIPE clk is not stable\n"); return 0; } @@ -371,10 +370,9 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie, if (ret < 0) return 0; - if (ret > MAX_PCI_SLOTS) { - dev_err(dev, "Too many GPIO clock requests!\n"); - return -EINVAL; - } + if (ret > MAX_PCI_SLOTS) + return dev_err_probe(dev, 
-EINVAL, + "Too many GPIO clock requests!\n"); pcie->n_gpio_clkreq = ret; @@ -420,17 +418,16 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie, "unable to get a valid reset gpio\n"); } - if (pcie->num_slots + 1 >= MAX_PCI_SLOTS) { - dev_err(dev, "Too many PCI slots!\n"); - return -EINVAL; - } + if (pcie->num_slots + 1 >= MAX_PCI_SLOTS) + return dev_err_probe(dev, -EINVAL, + "Too many PCI slots!\n"); + pcie->num_slots++; ret = of_pci_get_devfn(child); - if (ret < 0) { - dev_err(dev, "failed to parse devfn: %d\n", ret); - return ret; - } + if (ret < 0) + return dev_err_probe(dev, ret, + "failed to parse devfn\n"); slot = PCI_SLOT(ret); @@ -452,7 +449,7 @@ static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie, struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *child, *node = dev->of_node; + struct device_node *node = dev->of_node; void __iomem *apb_base; int ret; @@ -477,17 +474,13 @@ static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie, return ret; /* Parse OF children */ - for_each_available_child_of_node(node, child) { + for_each_available_child_of_node_scoped(node, child) { ret = kirin_pcie_parse_port(kirin_pcie, pdev, child); if (ret) - goto put_node; + return ret; } return 0; - -put_node: - of_node_put(child); - return ret; } static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie, @@ -729,16 +722,9 @@ static int kirin_pcie_probe(struct platform_device *pdev) struct dw_pcie *pci; int ret; - if (!dev->of_node) { - dev_err(dev, "NULL node\n"); - return -EINVAL; - } - data = of_device_get_match_data(dev); - if (!data) { - dev_err(dev, "OF data missing\n"); - return -EINVAL; - } + if (!data) + return dev_err_probe(dev, -EINVAL, "OF data missing\n"); kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL); if (!kirin_pcie) diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c index c08f64d7a825..46b1c6d19974 100644 --- a/drivers/pci/controller/dwc/pcie-qcom-ep.c +++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c @@ -48,7 +48,7 @@ #define PARF_DBI_BASE_ADDR_HI 0x354 #define PARF_SLV_ADDR_SPACE_SIZE 0x358 #define PARF_SLV_ADDR_SPACE_SIZE_HI 0x35c -#define PARF_NO_SNOOP_OVERIDE 0x3d4 +#define PARF_NO_SNOOP_OVERRIDE 0x3d4 #define PARF_ATU_BASE_ADDR 0x634 #define PARF_ATU_BASE_ADDR_HI 0x638 #define PARF_SRIS_MODE 0x644 @@ -89,9 +89,9 @@ #define PARF_DEBUG_INT_CFG_BUS_MASTER_EN BIT(2) #define PARF_DEBUG_INT_RADM_PM_TURNOFF BIT(3) -/* PARF_NO_SNOOP_OVERIDE register fields */ -#define WR_NO_SNOOP_OVERIDE_EN BIT(1) -#define RD_NO_SNOOP_OVERIDE_EN BIT(3) +/* PARF_NO_SNOOP_OVERRIDE register fields */ +#define WR_NO_SNOOP_OVERRIDE_EN BIT(1) +#define RD_NO_SNOOP_OVERRIDE_EN BIT(3) /* PARF_DEVICE_TYPE register fields */ #define PARF_DEVICE_TYPE_EP 0x0 @@ -529,8 +529,8 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci) writel_relaxed(val, pcie_ep->parf + PARF_LTSSM); if (pcie_ep->cfg && pcie_ep->cfg->override_no_snoop) - writel_relaxed(WR_NO_SNOOP_OVERIDE_EN | RD_NO_SNOOP_OVERIDE_EN, - pcie_ep->parf + PARF_NO_SNOOP_OVERIDE); + writel_relaxed(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN, + pcie_ep->parf + PARF_NO_SNOOP_OVERRIDE); return 0; @@ -825,6 +825,10 @@ static const struct pci_epc_features qcom_pcie_epc_features = { .msi_capable = true, .msix_capable = false, .align = SZ_4K, + .bar[BAR_0] = { .only_64bit = true, }, + .bar[BAR_1] = { .type = BAR_RESERVED, }, + .bar[BAR_2] = { .only_64bit = true, }, + .bar[BAR_3] = { .type = BAR_RESERVED, }, }; 
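The endpoint BAR table just added follows the standard pairing rule for 64-bit BARs: a BAR flagged only_64bit consumes the next BAR register as its upper half, which is why BAR_1 and BAR_3 are marked BAR_RESERVED here. A sketch of how an endpoint function driver might walk such a table, assuming the struct pci_epc_features layout used in this patch (the helper is illustrative, not an in-tree API):

#include <linux/errno.h>
#include <linux/pci-epc.h>

/* Return the first BAR an EPF driver may claim, or a negative errno. */
static int example_first_usable_bar(const struct pci_epc_features *feat)
{
	int bar;

	for (bar = BAR_0; bar <= BAR_5; bar++) {
		/* Reserved slots include the upper halves of 64-bit BARs. */
		if (feat->bar[bar].type == BAR_RESERVED)
			continue;
		return bar;
	}

	return -ENOENT;
}

With the table above this returns BAR_0, and because BAR_0 is only_64bit, claiming it implicitly occupies BAR_1 as well.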
static const struct pci_epc_features * @@ -933,6 +937,7 @@ static const struct of_device_id qcom_pcie_ep_match[] = { { .compatible = "qcom,sa8775p-pcie-ep", .data = &cfg_1_34_0}, { .compatible = "qcom,sdx55-pcie-ep", }, { .compatible = "qcom,sm8450-pcie-ep", }, + { .compatible = "qcom,sar2130p-pcie-ep", }, { } }; MODULE_DEVICE_TABLE(of, qcom_pcie_ep_match); diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index dc102d8bd58c..dc98ae63362d 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -61,7 +61,7 @@ #define PARF_DBI_BASE_ADDR_V2_HI 0x354 #define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358 #define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c -#define PARF_NO_SNOOP_OVERIDE 0x3d4 +#define PARF_NO_SNOOP_OVERRIDE 0x3d4 #define PARF_ATU_BASE_ADDR 0x634 #define PARF_ATU_BASE_ADDR_HI 0x638 #define PARF_DEVICE_TYPE 0x1000 @@ -135,9 +135,9 @@ #define PARF_INT_ALL_LINK_UP BIT(13) #define PARF_INT_MSI_DEV_0_7 GENMASK(30, 23) -/* PARF_NO_SNOOP_OVERIDE register fields */ -#define WR_NO_SNOOP_OVERIDE_EN BIT(1) -#define RD_NO_SNOOP_OVERIDE_EN BIT(3) +/* PARF_NO_SNOOP_OVERRIDE register fields */ +#define WR_NO_SNOOP_OVERRIDE_EN BIT(1) +#define RD_NO_SNOOP_OVERRIDE_EN BIT(3) /* PARF_DEVICE_TYPE register fields */ #define DEVICE_TYPE_RC 0x4 @@ -1007,8 +1007,8 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie) const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg; if (pcie_cfg->override_no_snoop) - writel(WR_NO_SNOOP_OVERIDE_EN | RD_NO_SNOOP_OVERIDE_EN, - pcie->parf + PARF_NO_SNOOP_OVERIDE); + writel(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN, + pcie->parf + PARF_NO_SNOOP_OVERRIDE); qcom_pcie_clear_aspm_l0s(pcie->pci); qcom_pcie_clear_hpc(pcie->pci); @@ -1569,6 +1569,8 @@ static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data) pci_lock_rescan_remove(); pci_rescan_bus(pp->bridge->bus); pci_unlock_rescan_remove(); + + qcom_pcie_icc_opp_update(pcie); } else { dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n", status); @@ -1703,6 +1705,10 @@ static int qcom_pcie_probe(struct platform_device *pdev) platform_set_drvdata(pdev, pcie); + irq = platform_get_irq_byname_optional(pdev, "global"); + if (irq > 0) + pp->use_linkup_irq = true; + ret = dw_pcie_host_init(pp); if (ret) { dev_err(dev, "cannot initialize host\n"); @@ -1716,7 +1722,6 @@ static int qcom_pcie_probe(struct platform_device *pdev) goto err_host_deinit; } - irq = platform_get_irq_byname_optional(pdev, "global"); if (irq > 0) { ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, qcom_pcie_global_irq_thread, diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c index cf5f59a745b3..f441bfd6f96a 100644 --- a/drivers/pci/controller/pci-host-common.c +++ b/drivers/pci/controller/pci-host-common.c @@ -75,6 +75,8 @@ int pci_host_common_probe(struct platform_device *pdev) bridge->sysdata = cfg; bridge->ops = (struct pci_ops *)&ops->pci_ops; + bridge->enable_device = ops->enable_device; + bridge->disable_device = ops->disable_device; bridge->msi_domain = true; return pci_host_probe(bridge); diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index cdd5be16021d..ac27bda5ba26 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -1356,7 +1356,7 @@ static struct pci_ops hv_pcifront_ops = { * * If the PF driver wishes to initiate communication, it can "invalidate" one or * more of the first 64 blocks. 
This invalidation is delivered via a callback - * supplied by the VF driver by this driver. + * supplied to the VF driver by this driver. * * No protocol is implied, except that supplied by the PF and VF drivers. */ @@ -1757,8 +1757,7 @@ static int hv_compose_multi_msi_req_get_cpu(void) spin_lock_irqsave(&multi_msi_cpu_lock, flags); - cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids, - false); + cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask); cpu = cpu_next; spin_unlock_irqrestore(&multi_msi_cpu_lock, flags); @@ -2053,6 +2052,7 @@ static struct irq_chip hv_msi_irq_chip = { .irq_set_affinity = irq_chip_set_affinity_parent, #ifdef CONFIG_X86 .irq_ack = irq_chip_ack_parent, + .flags = IRQCHIP_MOVE_DEFERRED, #elif defined(CONFIG_ARM64) .irq_eoi = irq_chip_eoi_parent, #endif diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index 46d3afe1d308..b0e3bce10aa4 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -1422,7 +1422,7 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port) } /* - * devm_of_pci_get_host_bridge_resources() only sets up translateable resources, + * devm_of_pci_get_host_bridge_resources() only sets up translatable resources, * so we need extra resource setup parsing our special DT properties encoding * the MEM and IO apertures. */ @@ -1715,6 +1715,7 @@ static const struct of_device_id mvebu_pcie_of_match_table[] = { { .compatible = "marvell,kirkwood-pcie", }, {}, }; +MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table); static const struct dev_pm_ops mvebu_pcie_pm_ops = { NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume) diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index b3cdbc5927de..d2f88997283a 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -2106,47 +2106,39 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask) static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) { struct device *dev = pcie->dev; - struct device_node *np = dev->of_node, *port; + struct device_node *np = dev->of_node; const struct tegra_pcie_soc *soc = pcie->soc; u32 lanes = 0, mask = 0; unsigned int lane = 0; int err; /* parse root ports */ - for_each_child_of_node(np, port) { + for_each_child_of_node_scoped(np, port) { struct tegra_pcie_port *rp; unsigned int index; u32 value; char *label; err = of_pci_get_devfn(port); - if (err < 0) { - dev_err(dev, "failed to parse address: %d\n", err); - goto err_node_put; - } + if (err < 0) + return dev_err_probe(dev, err, "failed to parse address\n"); index = PCI_SLOT(err); - if (index < 1 || index > soc->num_ports) { - dev_err(dev, "invalid port number: %d\n", index); - err = -EINVAL; - goto err_node_put; - } + if (index < 1 || index > soc->num_ports) + return dev_err_probe(dev, -EINVAL, + "invalid port number: %d\n", index); index--; err = of_property_read_u32(port, "nvidia,num-lanes", &value); - if (err < 0) { - dev_err(dev, "failed to parse # of lanes: %d\n", - err); - goto err_node_put; - } + if (err < 0) + return dev_err_probe(dev, err, + "failed to parse # of lanes\n"); - if (value > 16) { - dev_err(dev, "invalid # of lanes: %u\n", value); - err = -EINVAL; - goto err_node_put; - } + if (value > 16) + return dev_err_probe(dev, -EINVAL, + "invalid # of lanes: %u\n", value); lanes |= value << (index << 3); @@ -2159,16 +2151,12 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) lane += value; rp = devm_kzalloc(dev, 
sizeof(*rp), GFP_KERNEL); - if (!rp) { - err = -ENOMEM; - goto err_node_put; - } + if (!rp) + return -ENOMEM; err = of_address_to_resource(port, 0, &rp->regs); - if (err < 0) { - dev_err(dev, "failed to parse address: %d\n", err); - goto err_node_put; - } + if (err < 0) + return dev_err_probe(dev, err, "failed to parse address\n"); INIT_LIST_HEAD(&rp->list); rp->index = index; @@ -2177,16 +2165,12 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) rp->np = port; rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs); - if (IS_ERR(rp->base)) { - err = PTR_ERR(rp->base); - goto err_node_put; - } + if (IS_ERR(rp->base)) + return PTR_ERR(rp->base); label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index); - if (!label) { - err = -ENOMEM; - goto err_node_put; - } + if (!label) + return -ENOMEM; /* * Returns -ENOENT if reset-gpios property is not populated @@ -2199,34 +2183,26 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) GPIOD_OUT_LOW, label); if (IS_ERR(rp->reset_gpio)) { - if (PTR_ERR(rp->reset_gpio) == -ENOENT) { + if (PTR_ERR(rp->reset_gpio) == -ENOENT) rp->reset_gpio = NULL; - } else { - dev_err(dev, "failed to get reset GPIO: %ld\n", - PTR_ERR(rp->reset_gpio)); - err = PTR_ERR(rp->reset_gpio); - goto err_node_put; - } + else + return dev_err_probe(dev, PTR_ERR(rp->reset_gpio), + "failed to get reset GPIO\n"); } list_add_tail(&rp->list, &pcie->ports); } err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config); - if (err < 0) { - dev_err(dev, "invalid lane configuration\n"); - return err; - } + if (err < 0) + return dev_err_probe(dev, err, + "invalid lane configuration\n"); err = tegra_pcie_get_regulators(pcie, mask); if (err < 0) return err; return 0; - -err_node_put: - of_node_put(port); - return err; } /* diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c index b5bd10a62adb..08161065a89c 100644 --- a/drivers/pci/controller/pci-thunder-ecam.c +++ b/drivers/pci/controller/pci-thunder-ecam.c @@ -204,7 +204,7 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn, v = readl(addr); if (v & 0xff00) - pr_err("Bad MSIX cap header: %08x\n", v); + pr_err("Bad MSI-X cap header: %08x\n", v); v |= 0xbc00; /* next capability is EA at 0xbc */ set_val(v, where, size, val); return PCIBIOS_SUCCESSFUL; diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c index 88c0977bc41a..7bce327897c9 100644 --- a/drivers/pci/controller/pci-xgene-msi.c +++ b/drivers/pci/controller/pci-xgene-msi.c @@ -154,7 +154,7 @@ static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain * the expected behaviour of .set_affinity for each MSI interrupt, the 16 * MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs - * for each core). The MSI vector is moved fom 1 MSI GIC IRQ to another + * for each core). The MSI vector is moved from 1 MSI GIC IRQ to another * MSI GIC IRQ to steer its MSI interrupt to correct X-Gene v1 core. As a * consequence, the total MSI vectors that X-Gene v1 supports will be * reduced to 256 (2048/8) vectors.
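The budget described in that comment is worth making concrete. A sketch of the arithmetic, with the constants taken from the comment (the names are illustrative):

/* Totals quoted in the X-Gene v1 comment above. */
#define EXAMPLE_MSI_VECTORS	2048	/* MSI vectors implemented */
#define EXAMPLE_MSI_GIC_IRQS	16	/* GIC IRQs available for MSI */
#define EXAMPLE_CORES		8	/* cores, i.e. 2 GIC IRQs each */

/*
 * Statically pinning 2 of the 16 GIC IRQs to each of the 8 cores
 * leaves each vector usable on only one core's share of the IRQ
 * space, so the usable total drops to 2048 / 8 = 256.
 */
static unsigned int example_usable_vectors(void)
{
	return EXAMPLE_MSI_VECTORS / EXAMPLE_CORES;
}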
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c index eb55a7f8573a..70409e71a18f 100644 --- a/drivers/pci/controller/pcie-altera.c +++ b/drivers/pci/controller/pcie-altera.c @@ -6,6 +6,7 @@ * Description: Altera PCIe host controller driver */ +#include <linux/bitfield.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/irqchip/chained_irq.h> @@ -77,9 +78,25 @@ #define S10_TLP_FMTTYPE_CFGWR0 0x45 #define S10_TLP_FMTTYPE_CFGWR1 0x44 +#define AGLX_RP_CFG_ADDR(pcie, reg) (((pcie)->hip_base) + (reg)) +#define AGLX_RP_SECONDARY(pcie) \ + readb(AGLX_RP_CFG_ADDR(pcie, PCI_SECONDARY_BUS)) + +#define AGLX_BDF_REG 0x00002004 +#define AGLX_ROOT_PORT_IRQ_STATUS 0x14c +#define AGLX_ROOT_PORT_IRQ_ENABLE 0x150 +#define CFG_AER BIT(4) + +#define AGLX_CFG_TARGET GENMASK(13, 12) +#define AGLX_CFG_TARGET_TYPE0 0 +#define AGLX_CFG_TARGET_TYPE1 1 +#define AGLX_CFG_TARGET_LOCAL_2000 2 +#define AGLX_CFG_TARGET_LOCAL_3000 3 + enum altera_pcie_version { ALTERA_PCIE_V1 = 0, ALTERA_PCIE_V2, + ALTERA_PCIE_V3, }; struct altera_pcie { @@ -102,6 +119,11 @@ struct altera_pcie_ops { int size, u32 *value); int (*rp_write_cfg)(struct altera_pcie *pcie, u8 busno, int where, int size, u32 value); + int (*ep_read_cfg)(struct altera_pcie *pcie, u8 busno, + unsigned int devfn, int where, int size, u32 *value); + int (*ep_write_cfg)(struct altera_pcie *pcie, u8 busno, + unsigned int devfn, int where, int size, u32 value); + void (*rp_isr)(struct irq_desc *desc); }; struct altera_pcie_data { @@ -112,6 +134,9 @@ struct altera_pcie_data { u32 cfgrd1; u32 cfgwr0; u32 cfgwr1; + u32 port_conf_offset; + u32 port_irq_status_offset; + u32 port_irq_enable_offset; }; struct tlp_rp_regpair_t { @@ -131,6 +156,28 @@ static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg) return readl_relaxed(pcie->cra_base + reg); } +static inline void cra_writew(struct altera_pcie *pcie, const u32 value, + const u32 reg) +{ + writew_relaxed(value, pcie->cra_base + reg); +} + +static inline u32 cra_readw(struct altera_pcie *pcie, const u32 reg) +{ + return readw_relaxed(pcie->cra_base + reg); +} + +static inline void cra_writeb(struct altera_pcie *pcie, const u32 value, + const u32 reg) +{ + writeb_relaxed(value, pcie->cra_base + reg); +} + +static inline u32 cra_readb(struct altera_pcie *pcie, const u32 reg) +{ + return readb_relaxed(pcie->cra_base + reg); +} + static bool altera_pcie_link_up(struct altera_pcie *pcie) { return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0); @@ -145,11 +192,20 @@ static bool s10_altera_pcie_link_up(struct altera_pcie *pcie) return !!(readw(addr) & PCI_EXP_LNKSTA_DLLLA); } +static bool aglx_altera_pcie_link_up(struct altera_pcie *pcie) +{ + void __iomem *addr = AGLX_RP_CFG_ADDR(pcie, + pcie->pcie_data->cap_offset + + PCI_EXP_LNKSTA); + + return (readw_relaxed(addr) & PCI_EXP_LNKSTA_DLLLA); +} + /* * Altera PCIe port uses BAR0 of RC's configuration space as the translation * from PCI bus to native BUS. Entire DDR region is mapped into PCIe space * using these registers, so it can be reached by DMA from EP devices. - * This BAR0 will also access to MSI vector when receiving MSI/MSIX interrupt + * This BAR0 will also access to MSI vector when receiving MSI/MSI-X interrupt * from EP devices, eventually trigger interrupt to GIC. The BAR0 of bridge * should be hidden during enumeration to avoid the sizing and resource * allocation by PCIe core. 
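The AGLX_CFG_TARGET field defined above (bits [13:12] of the register offset) steers each config access once the BDF has been latched into AGLX_BDF_REG; the aglx_ep_read_cfg()/aglx_ep_write_cfg() helpers later in this patch use it exactly this way. A sketch of the offset encoding, assuming the macros added above (the helper name is illustrative):

#include <linux/bitfield.h>
#include <linux/types.h>

/*
 * Devices on the Root Port's secondary bus are reached with Type 0
 * config TLPs; buses beyond it need Type 1 so that an intermediate
 * bridge forwards the access.
 */
static u32 example_aglx_cfg_offset(u8 busno, u8 secondary_bus, int where)
{
	u32 offset = where;

	if (busno > secondary_bus)
		offset |= FIELD_PREP(AGLX_CFG_TARGET, AGLX_CFG_TARGET_TYPE1);

	/* Leaving the field at 0 selects AGLX_CFG_TARGET_TYPE0. */
	return offset;
}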
@@ -425,6 +481,103 @@ static int s10_rp_write_cfg(struct altera_pcie *pcie, u8 busno, return PCIBIOS_SUCCESSFUL; } +static int aglx_rp_read_cfg(struct altera_pcie *pcie, int where, + int size, u32 *value) +{ + void __iomem *addr = AGLX_RP_CFG_ADDR(pcie, where); + + switch (size) { + case 1: + *value = readb_relaxed(addr); + break; + case 2: + *value = readw_relaxed(addr); + break; + default: + *value = readl_relaxed(addr); + break; + } + + /* Interrupt PIN not programmed in hardware, set to INTA. */ + if (where == PCI_INTERRUPT_PIN && size == 1 && !(*value)) + *value = 0x01; + else if (where == PCI_INTERRUPT_LINE && !(*value & 0xff00)) + *value |= 0x0100; + + return PCIBIOS_SUCCESSFUL; +} + +static int aglx_rp_write_cfg(struct altera_pcie *pcie, u8 busno, + int where, int size, u32 value) +{ + void __iomem *addr = AGLX_RP_CFG_ADDR(pcie, where); + + switch (size) { + case 1: + writeb_relaxed(value, addr); + break; + case 2: + writew_relaxed(value, addr); + break; + default: + writel_relaxed(value, addr); + break; + } + + /* + * Monitor changes to PCI_PRIMARY_BUS register on Root Port + * and update local copy of root bus number accordingly. + */ + if (busno == pcie->root_bus_nr && where == PCI_PRIMARY_BUS) + pcie->root_bus_nr = value & 0xff; + + return PCIBIOS_SUCCESSFUL; +} + +static int aglx_ep_write_cfg(struct altera_pcie *pcie, u8 busno, + unsigned int devfn, int where, int size, u32 value) +{ + cra_writel(pcie, ((busno << 8) | devfn), AGLX_BDF_REG); + if (busno > AGLX_RP_SECONDARY(pcie)) + where |= FIELD_PREP(AGLX_CFG_TARGET, AGLX_CFG_TARGET_TYPE1); + + switch (size) { + case 1: + cra_writeb(pcie, value, where); + break; + case 2: + cra_writew(pcie, value, where); + break; + default: + cra_writel(pcie, value, where); + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int aglx_ep_read_cfg(struct altera_pcie *pcie, u8 busno, + unsigned int devfn, int where, int size, u32 *value) +{ + cra_writel(pcie, ((busno << 8) | devfn), AGLX_BDF_REG); + if (busno > AGLX_RP_SECONDARY(pcie)) + where |= FIELD_PREP(AGLX_CFG_TARGET, AGLX_CFG_TARGET_TYPE1); + + switch (size) { + case 1: + *value = cra_readb(pcie, where); + break; + case 2: + *value = cra_readw(pcie, where); + break; + default: + *value = cra_readl(pcie, where); + break; + } + + return PCIBIOS_SUCCESSFUL; +} + static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno, unsigned int devfn, int where, int size, u32 *value) @@ -437,6 +590,10 @@ static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno, return pcie->pcie_data->ops->rp_read_cfg(pcie, where, size, value); + if (pcie->pcie_data->ops->ep_read_cfg) + return pcie->pcie_data->ops->ep_read_cfg(pcie, busno, devfn, + where, size, value); + switch (size) { case 1: byte_en = 1 << (where & 3); @@ -481,6 +638,10 @@ static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno, return pcie->pcie_data->ops->rp_write_cfg(pcie, busno, where, size, value); + if (pcie->pcie_data->ops->ep_write_cfg) + return pcie->pcie_data->ops->ep_write_cfg(pcie, busno, devfn, + where, size, value); + switch (size) { case 1: data32 = (value & 0xff) << shift; @@ -659,7 +820,32 @@ static void altera_pcie_isr(struct irq_desc *desc) dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n", bit); } } + chained_irq_exit(chip, desc); +} + +static void aglx_isr(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct altera_pcie *pcie; + struct device *dev; + u32 status; + int ret; + + chained_irq_enter(chip, desc); + pcie = irq_desc_get_handler_data(desc); + dev = 
&pcie->pdev->dev; + + status = readl(pcie->hip_base + pcie->pcie_data->port_conf_offset + + pcie->pcie_data->port_irq_status_offset); + if (status & CFG_AER) { + writel(CFG_AER, (pcie->hip_base + pcie->pcie_data->port_conf_offset + + pcie->pcie_data->port_irq_status_offset)); + + ret = generic_handle_domain_irq(pcie->irq_domain, 0); + if (ret) + dev_err_ratelimited(dev, "unexpected IRQ %d\n", pcie->irq); + } chained_irq_exit(chip, desc); } @@ -694,9 +880,9 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie) if (IS_ERR(pcie->cra_base)) return PTR_ERR(pcie->cra_base); - if (pcie->pcie_data->version == ALTERA_PCIE_V2) { - pcie->hip_base = - devm_platform_ioremap_resource_byname(pdev, "Hip"); + if (pcie->pcie_data->version == ALTERA_PCIE_V2 || + pcie->pcie_data->version == ALTERA_PCIE_V3) { + pcie->hip_base = devm_platform_ioremap_resource_byname(pdev, "Hip"); if (IS_ERR(pcie->hip_base)) return PTR_ERR(pcie->hip_base); } @@ -706,7 +892,7 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie) if (pcie->irq < 0) return pcie->irq; - irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie); + irq_set_chained_handler_and_data(pcie->irq, pcie->pcie_data->ops->rp_isr, pcie); return 0; } @@ -719,6 +905,7 @@ static const struct altera_pcie_ops altera_pcie_ops_1_0 = { .tlp_read_pkt = tlp_read_packet, .tlp_write_pkt = tlp_write_packet, .get_link_status = altera_pcie_link_up, + .rp_isr = altera_pcie_isr, }; static const struct altera_pcie_ops altera_pcie_ops_2_0 = { @@ -727,6 +914,16 @@ static const struct altera_pcie_ops altera_pcie_ops_2_0 = { .get_link_status = s10_altera_pcie_link_up, .rp_read_cfg = s10_rp_read_cfg, .rp_write_cfg = s10_rp_write_cfg, + .rp_isr = altera_pcie_isr, +}; + +static const struct altera_pcie_ops altera_pcie_ops_3_0 = { + .rp_read_cfg = aglx_rp_read_cfg, + .rp_write_cfg = aglx_rp_write_cfg, + .get_link_status = aglx_altera_pcie_link_up, + .ep_read_cfg = aglx_ep_read_cfg, + .ep_write_cfg = aglx_ep_write_cfg, + .rp_isr = aglx_isr, }; static const struct altera_pcie_data altera_pcie_1_0_data = { @@ -749,11 +946,44 @@ static const struct altera_pcie_data altera_pcie_2_0_data = { .cfgwr1 = S10_TLP_FMTTYPE_CFGWR1, }; +static const struct altera_pcie_data altera_pcie_3_0_f_tile_data = { + .ops = &altera_pcie_ops_3_0, + .version = ALTERA_PCIE_V3, + .cap_offset = 0x70, + .port_conf_offset = 0x14000, + .port_irq_status_offset = AGLX_ROOT_PORT_IRQ_STATUS, + .port_irq_enable_offset = AGLX_ROOT_PORT_IRQ_ENABLE, +}; + +static const struct altera_pcie_data altera_pcie_3_0_p_tile_data = { + .ops = &altera_pcie_ops_3_0, + .version = ALTERA_PCIE_V3, + .cap_offset = 0x70, + .port_conf_offset = 0x104000, + .port_irq_status_offset = AGLX_ROOT_PORT_IRQ_STATUS, + .port_irq_enable_offset = AGLX_ROOT_PORT_IRQ_ENABLE, +}; + +static const struct altera_pcie_data altera_pcie_3_0_r_tile_data = { + .ops = &altera_pcie_ops_3_0, + .version = ALTERA_PCIE_V3, + .cap_offset = 0x70, + .port_conf_offset = 0x1300, + .port_irq_status_offset = 0x0, + .port_irq_enable_offset = 0x4, +}; + static const struct of_device_id altera_pcie_of_match[] = { {.compatible = "altr,pcie-root-port-1.0", .data = &altera_pcie_1_0_data }, {.compatible = "altr,pcie-root-port-2.0", .data = &altera_pcie_2_0_data }, + {.compatible = "altr,pcie-root-port-3.0-f-tile", + .data = &altera_pcie_3_0_f_tile_data }, + {.compatible = "altr,pcie-root-port-3.0-p-tile", + .data = &altera_pcie_3_0_p_tile_data }, + {.compatible = "altr,pcie-root-port-3.0-r-tile", + .data = &altera_pcie_3_0_r_tile_data }, {}, }; @@ -791,11 +1021,18 
@@ static int altera_pcie_probe(struct platform_device *pdev) return ret; } - /* clear all interrupts */ - cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS); - /* enable all interrupts */ - cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE); - altera_pcie_host_init(pcie); + if (pcie->pcie_data->version == ALTERA_PCIE_V1 || + pcie->pcie_data->version == ALTERA_PCIE_V2) { + /* clear all interrupts */ + cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS); + /* enable all interrupts */ + cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE); + altera_pcie_host_init(pcie); + } else if (pcie->pcie_data->version == ALTERA_PCIE_V3) { + writel(CFG_AER, + pcie->hip_base + pcie->pcie_data->port_conf_offset + + pcie->pcie_data->port_irq_enable_offset); + } bridge->sysdata = pcie; bridge->busnr = pcie->root_bus_nr; diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c index fefab2758a06..18e11b9a7f46 100644 --- a/drivers/pci/controller/pcie-apple.c +++ b/drivers/pci/controller/pcie-apple.c @@ -26,7 +26,6 @@ #include <linux/list.h> #include <linux/module.h> #include <linux/msi.h> -#include <linux/notifier.h> #include <linux/of_irq.h> #include <linux/pci-ecam.h> @@ -667,12 +666,16 @@ static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev) return NULL; } -static int apple_pcie_add_device(struct apple_pcie_port *port, - struct pci_dev *pdev) +static int apple_pcie_enable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev) { u32 sid, rid = pci_dev_id(pdev); + struct apple_pcie_port *port; int idx, err; + port = apple_pcie_get_port(pdev); + if (!port) + return 0; + dev_dbg(&pdev->dev, "added to bus %s, index %d\n", pci_name(pdev->bus->self), port->idx); @@ -698,12 +701,16 @@ static int apple_pcie_add_device(struct apple_pcie_port *port, return idx >= 0 ? 0 : -ENOSPC; } -static void apple_pcie_release_device(struct apple_pcie_port *port, - struct pci_dev *pdev) +static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev) { + struct apple_pcie_port *port; u32 rid = pci_dev_id(pdev); int idx; + port = apple_pcie_get_port(pdev); + if (!port) + return; + mutex_lock(&port->pcie->lock); for_each_set_bit(idx, port->sid_map, port->sid_map_sz) { @@ -721,50 +728,10 @@ static void apple_pcie_release_device(struct apple_pcie_port *port, mutex_unlock(&port->pcie->lock); } -static int apple_pcie_bus_notifier(struct notifier_block *nb, - unsigned long action, - void *data) -{ - struct device *dev = data; - struct pci_dev *pdev = to_pci_dev(dev); - struct apple_pcie_port *port; - int err; - - /* - * This is a bit ugly. We assume that if we get notified for - * any PCI device, we must be in charge of it, and that there - * is no other PCI controller in the whole system. It probably - * holds for now, but who knows for how long? 
- */ - port = apple_pcie_get_port(pdev); - if (!port) - return NOTIFY_DONE; - - switch (action) { - case BUS_NOTIFY_ADD_DEVICE: - err = apple_pcie_add_device(port, pdev); - if (err) - return notifier_from_errno(err); - break; - case BUS_NOTIFY_DEL_DEVICE: - apple_pcie_release_device(port, pdev); - break; - default: - return NOTIFY_DONE; - } - - return NOTIFY_OK; -} - -static struct notifier_block apple_pcie_nb = { - .notifier_call = apple_pcie_bus_notifier, -}; - static int apple_pcie_init(struct pci_config_window *cfg) { struct device *dev = cfg->parent; struct platform_device *platform = to_platform_device(dev); - struct device_node *of_port; struct apple_pcie *pcie; int ret; @@ -787,11 +754,10 @@ static int apple_pcie_init(struct pci_config_window *cfg) if (ret) return ret; - for_each_child_of_node(dev->of_node, of_port) { + for_each_child_of_node_scoped(dev->of_node, of_port) { ret = apple_pcie_setup_port(pcie, of_port); if (ret) { dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret); - of_node_put(of_port); return ret; } } @@ -799,23 +765,10 @@ static int apple_pcie_init(struct pci_config_window *cfg) return 0; } -static int apple_pcie_probe(struct platform_device *pdev) -{ - int ret; - - ret = bus_register_notifier(&pci_bus_type, &apple_pcie_nb); - if (ret) - return ret; - - ret = pci_host_common_probe(pdev); - if (ret) - bus_unregister_notifier(&pci_bus_type, &apple_pcie_nb); - - return ret; -} - static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = { .init = apple_pcie_init, + .enable_device = apple_pcie_enable_device, + .disable_device = apple_pcie_disable_device, .pci_ops = { .map_bus = pci_ecam_map_bus, .read = pci_generic_config_read, @@ -830,7 +783,7 @@ static const struct of_device_id apple_pcie_of_match[] = { MODULE_DEVICE_TABLE(of, apple_pcie_of_match); static struct platform_driver apple_pcie_driver = { - .probe = apple_pcie_probe, + .probe = pci_host_common_probe, .driver = { .name = "pcie-apple", .of_match_table = apple_pcie_of_match, diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index e733a27dc8df..e19628e13898 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -40,7 +40,7 @@ /* Broadcom STB PCIe Register Offsets */ #define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1 0x0188 #define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK 0xc -#define PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN 0x0 +#define PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN 0x0 #define PCIE_RC_CFG_PRIV1_ID_VAL3 0x043c #define PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK 0xffffff @@ -55,6 +55,10 @@ #define PCIE_RC_DL_MDIO_WR_DATA 0x1104 #define PCIE_RC_DL_MDIO_RD_DATA 0x1108 +#define PCIE_RC_PL_PHY_CTL_15 0x184c +#define PCIE_RC_PL_PHY_CTL_15_DIS_PLL_PD_MASK 0x400000 +#define PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK 0xff + #define PCIE_MISC_MISC_CTRL 0x4008 #define PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK 0x80 #define PCIE_MISC_MISC_CTRL_PCIE_RCB_MPS_MODE_MASK 0x400 @@ -146,9 +150,6 @@ #define MSI_INT_MASK_SET 0x10 #define MSI_INT_MASK_CLR 0x14 -#define PCIE_EXT_CFG_DATA 0x8000 -#define PCIE_EXT_CFG_INDEX 0x9000 - #define PCIE_RGR1_SW_INIT_1_PERST_MASK 0x1 #define PCIE_RGR1_SW_INIT_1_PERST_SHIFT 0x0 @@ -174,8 +175,9 @@ #define MDIO_PORT0 0x0 #define MDIO_DATA_MASK 0x7fffffff #define MDIO_PORT_MASK 0xf0000 +#define MDIO_PORT_EXT_MASK 0x200000 #define MDIO_REGAD_MASK 0xffff -#define MDIO_CMD_MASK 0xfff00000 +#define MDIO_CMD_MASK 0x00100000 #define MDIO_CMD_READ 0x1 #define 
MDIO_CMD_WRITE 0x0 #define MDIO_DATA_DONE_MASK 0x80000000 @@ -191,11 +193,11 @@ #define SSC_STATUS_PLL_LOCK_MASK 0x800 #define PCIE_BRCM_MAX_MEMC 3 -#define IDX_ADDR(pcie) ((pcie)->reg_offsets[EXT_CFG_INDEX]) -#define DATA_ADDR(pcie) ((pcie)->reg_offsets[EXT_CFG_DATA]) -#define PCIE_RGR1_SW_INIT_1(pcie) ((pcie)->reg_offsets[RGR1_SW_INIT_1]) -#define HARD_DEBUG(pcie) ((pcie)->reg_offsets[PCIE_HARD_DEBUG]) -#define INTR2_CPU_BASE(pcie) ((pcie)->reg_offsets[PCIE_INTR2_CPU_BASE]) +#define IDX_ADDR(pcie) ((pcie)->cfg->offsets[EXT_CFG_INDEX]) +#define DATA_ADDR(pcie) ((pcie)->cfg->offsets[EXT_CFG_DATA]) +#define PCIE_RGR1_SW_INIT_1(pcie) ((pcie)->cfg->offsets[RGR1_SW_INIT_1]) +#define HARD_DEBUG(pcie) ((pcie)->cfg->offsets[PCIE_HARD_DEBUG]) +#define INTR2_CPU_BASE(pcie) ((pcie)->cfg->offsets[PCIE_INTR2_CPU_BASE]) /* Rescal registers */ #define PCIE_DVT_PMU_PCIE_PHY_CTRL 0xc700 @@ -234,13 +236,24 @@ struct inbound_win { u64 cpu_addr; }; +/* + * The RESCAL block is tied to PCIe controller #1, regardless of the number of + * controllers, and turning off PCIe controller #1 prevents access to the RESCAL + * register blocks, therefore no other controller can access this register + * space, and depending upon the bus fabric we may get a timeout (UBUS/GISB), + * or a hang (AXI). + */ +#define CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN BIT(0) + struct pcie_cfg_data { const int *offsets; const enum pcie_soc_base soc_base; const bool has_phy; + const u32 quirks; u8 num_inbound_wins; int (*perst_set)(struct brcm_pcie *pcie, u32 val); int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val); + int (*post_setup)(struct brcm_pcie *pcie); }; struct subdev_regulators { @@ -276,8 +289,6 @@ struct brcm_pcie { int gen; u64 msi_target_addr; struct brcm_msi *msi; - const int *reg_offsets; - enum pcie_soc_base soc_base; struct reset_control *rescal; struct reset_control *perst_reset; struct reset_control *bridge_reset; @@ -285,17 +296,14 @@ struct brcm_pcie { int num_memc; u64 memc_size[PCIE_BRCM_MAX_MEMC]; u32 hw_rev; - int (*perst_set)(struct brcm_pcie *pcie, u32 val); - int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val); struct subdev_regulators *sr; bool ep_wakeup_capable; - bool has_phy; - u8 num_inbound_wins; + const struct pcie_cfg_data *cfg; }; static inline bool is_bmips(const struct brcm_pcie *pcie) { - return pcie->soc_base == BCM7435 || pcie->soc_base == BCM7425; + return pcie->cfg->soc_base == BCM7435 || pcie->cfg->soc_base == BCM7425; } /* @@ -309,8 +317,8 @@ static int brcm_pcie_encode_ibar_size(u64 size) if (log2_in >= 12 && log2_in <= 15) /* Covers 4KB to 32KB (inclusive) */ return (log2_in - 12) + 0x1c; - else if (log2_in >= 16 && log2_in <= 35) - /* Covers 64KB to 32GB, (inclusive) */ + else if (log2_in >= 16 && log2_in <= 36) + /* Covers 64KB to 64GB, (inclusive) */ return log2_in - 15; /* Something is awry so disable */ return 0; @@ -320,6 +328,7 @@ static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd) { u32 pkt = 0; + pkt |= FIELD_PREP(MDIO_PORT_EXT_MASK, port >> 4); pkt |= FIELD_PREP(MDIO_PORT_MASK, port); pkt |= FIELD_PREP(MDIO_REGAD_MASK, regad); pkt |= FIELD_PREP(MDIO_CMD_MASK, cmd); @@ -403,12 +412,12 @@ static int brcm_pcie_set_ssc(struct brcm_pcie *pcie) static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen) { u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2); - u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP); + u32 lnkcap = readl(pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); - lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen; - 
writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP); + u32p_replace_bits(&lnkcap, gen, PCI_EXP_LNKCAP_SLS); + writel(lnkcap, pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); - lnkctl2 = (lnkctl2 & ~0xf) | gen; + u16p_replace_bits(&lnkctl2, gen, PCI_EXP_LNKCTL2_TLS); writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2); } @@ -550,7 +559,7 @@ static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, return hwirq; for (i = 0; i < nr_irqs; i++) - irq_domain_set_info(domain, virq + i, hwirq + i, + irq_domain_set_info(domain, virq + i, (irq_hw_number_t)hwirq + i, &brcm_msi_bottom_irq_chip, domain->host_data, handle_edge_irq, NULL, NULL); return 0; @@ -717,8 +726,8 @@ static void __iomem *brcm_pcie_map_bus(struct pci_bus *bus, /* For devices, write to the config space index register */ idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0); - writel(idx, pcie->base + PCIE_EXT_CFG_INDEX); - return base + PCIE_EXT_CFG_DATA + PCIE_ECAM_REG(where); + writel(idx, base + IDX_ADDR(pcie)); + return base + DATA_ADDR(pcie) + PCIE_ECAM_REG(where); } static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus, @@ -821,6 +830,39 @@ static int brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val) return 0; } +static int brcm_pcie_post_setup_bcm2712(struct brcm_pcie *pcie) +{ + static const u16 data[] = { 0x50b9, 0xbda1, 0x0094, 0x97b4, 0x5030, + 0x5030, 0x0007 }; + static const u8 regs[] = { 0x16, 0x17, 0x18, 0x19, 0x1b, 0x1c, 0x1e }; + int ret, i; + u32 tmp; + + /* Allow a 54MHz (xosc) refclk source */ + ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET, 0x1600); + if (ret < 0) + return ret; + + for (i = 0; i < ARRAY_SIZE(regs); i++) { + ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, regs[i], data[i]); + if (ret < 0) + return ret; + } + + usleep_range(100, 200); + + /* + * Set L1SS sub-state timers to avoid lengthy state transitions, + * PM clock period is 18.52ns (1/54MHz, round down). + */ + tmp = readl(pcie->base + PCIE_RC_PL_PHY_CTL_15); + tmp &= ~PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK; + tmp |= 0x12; + writel(tmp, pcie->base + PCIE_RC_PL_PHY_CTL_15); + + return 0; +} + static void add_inbound_win(struct inbound_win *b, u8 *count, u64 size, u64 cpu_addr, u64 pci_offset) { @@ -855,7 +897,7 @@ static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie, * security considerations, and is not implemented in our modern * SoCs. */ - if (pcie->soc_base != BCM7712) + if (pcie->cfg->soc_base != BCM7712) add_inbound_win(b++, &n, 0, 0, 0); resource_list_for_each_entry(entry, &bridge->dma_ranges) { @@ -872,10 +914,10 @@ static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie, * That being said, each BARs size must still be a power of * two. */ - if (pcie->soc_base == BCM7712) + if (pcie->cfg->soc_base == BCM7712) add_inbound_win(b++, &n, size, cpu_start, pcie_start); - if (n > pcie->num_inbound_wins) + if (n > pcie->cfg->num_inbound_wins) break; } @@ -889,7 +931,7 @@ static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie, * that enables multiple memory controllers. As such, it can return * now w/o doing special configuration. */ - if (pcie->soc_base == BCM7712) + if (pcie->cfg->soc_base == BCM7712) return n; ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1, @@ -1012,7 +1054,7 @@ static void set_inbound_win_registers(struct brcm_pcie *pcie, * 7712: * All of their BARs need to be set. 
*/ - if (pcie->soc_base == BCM7712) { + if (pcie->cfg->soc_base == BCM7712) { /* BUS remap register settings */ reg_offset = brcm_ubus_reg_offset(i); tmp = lower_32_bits(cpu_addr) & ~0xfff; @@ -1036,15 +1078,15 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) int memc, ret; /* Reset the bridge */ - ret = pcie->bridge_sw_init_set(pcie, 1); + ret = pcie->cfg->bridge_sw_init_set(pcie, 1); if (ret) return ret; /* Ensure that PERST# is asserted; some bootloaders may deassert it. */ - if (pcie->soc_base == BCM2711) { - ret = pcie->perst_set(pcie, 1); + if (pcie->cfg->soc_base == BCM2711) { + ret = pcie->cfg->perst_set(pcie, 1); if (ret) { - pcie->bridge_sw_init_set(pcie, 0); + pcie->cfg->bridge_sw_init_set(pcie, 0); return ret; } } @@ -1052,7 +1094,7 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) usleep_range(100, 200); /* Take the bridge out of reset */ - ret = pcie->bridge_sw_init_set(pcie, 0); + ret = pcie->cfg->bridge_sw_init_set(pcie, 0); if (ret) return ret; @@ -1072,9 +1114,9 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) */ if (is_bmips(pcie)) burst = 0x1; /* 256 bytes */ - else if (pcie->soc_base == BCM2711) + else if (pcie->cfg->soc_base == BCM2711) burst = 0x0; /* 128 bytes */ - else if (pcie->soc_base == BCM7278) + else if (pcie->cfg->soc_base == BCM7278) burst = 0x3; /* 512 bytes */ else burst = 0x2; /* 512 bytes */ @@ -1180,10 +1222,16 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) /* PCIe->SCB endian mode for inbound window */ tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1); - u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN, + u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN, PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK); writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1); + if (pcie->cfg->post_setup) { + ret = pcie->cfg->post_setup(pcie); + if (ret < 0) + return ret; + } + return 0; } @@ -1199,7 +1247,7 @@ static void brcm_extend_rbus_timeout(struct brcm_pcie *pcie) u32 timeout_us = 4000000; /* 4 seconds, our setting for L1SS */ /* 7712 does not have this (RGR1) timer */ - if (pcie->soc_base == BCM7712) + if (pcie->cfg->soc_base == BCM7712) return; /* Each unit in timeout register is 1/216,000,000 seconds */ @@ -1276,8 +1324,12 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie) bool ssc_good = false; int ret, i; + /* Limit the generation if specified */ + if (pcie->gen) + brcm_pcie_set_gen(pcie, pcie->gen); + /* Unassert the fundamental reset */ - ret = pcie->perst_set(pcie, 0); + ret = pcie->cfg->perst_set(pcie, 0); if (ret) return ret; @@ -1302,9 +1354,6 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie) brcm_config_clkreq(pcie); - if (pcie->gen) - brcm_pcie_set_gen(pcie, pcie->gen); - if (pcie->ssc) { ret = brcm_pcie_set_ssc(pcie); if (ret == 0) @@ -1367,7 +1416,8 @@ static int brcm_pcie_add_bus(struct pci_bus *bus) ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies); if (ret) { - dev_info(dev, "No regulators for downstream device\n"); + dev_info(dev, "Did not get regulators, err=%d\n", ret); + pcie->sr = NULL; goto no_regulators; } @@ -1390,7 +1440,7 @@ static void brcm_pcie_remove_bus(struct pci_bus *bus) struct subdev_regulators *sr = pcie->sr; struct device *dev = &bus->dev; - if (!sr) + if (!sr || !bus->parent || !pci_is_root_bus(bus->parent)) return; if (regulator_bulk_disable(sr->num_supplies, sr->supplies)) @@ -1463,12 +1513,12 @@ static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start) static inline int brcm_phy_start(struct 
brcm_pcie *pcie) { - return pcie->has_phy ? brcm_phy_cntl(pcie, 1) : 0; + return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 1) : 0; } static inline int brcm_phy_stop(struct brcm_pcie *pcie) { - return pcie->has_phy ? brcm_phy_cntl(pcie, 0) : 0; + return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 0) : 0; } static int brcm_pcie_turn_off(struct brcm_pcie *pcie) @@ -1479,7 +1529,7 @@ static int brcm_pcie_turn_off(struct brcm_pcie *pcie) if (brcm_pcie_link_up(pcie)) brcm_pcie_enter_l23(pcie); /* Assert fundamental reset */ - ret = pcie->perst_set(pcie, 1); + ret = pcie->cfg->perst_set(pcie, 1); if (ret) return ret; @@ -1493,8 +1543,9 @@ static int brcm_pcie_turn_off(struct brcm_pcie *pcie) u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK); writel(tmp, base + HARD_DEBUG(pcie)); - /* Shutdown PCIe bridge */ - ret = pcie->bridge_sw_init_set(pcie, 1); + if (!(pcie->cfg->quirks & CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN)) + /* Shutdown PCIe bridge */ + ret = pcie->cfg->bridge_sw_init_set(pcie, 1); return ret; } @@ -1582,7 +1633,7 @@ static int brcm_pcie_resume_noirq(struct device *dev) goto err_reset; /* Take bridge out of reset so we can access the SERDES reg */ - pcie->bridge_sw_init_set(pcie, 0); + pcie->cfg->bridge_sw_init_set(pcie, 0); /* SERDES_IDDQ = 0 */ tmp = readl(base + HARD_DEBUG(pcie)); @@ -1660,7 +1711,7 @@ static void brcm_pcie_remove(struct platform_device *pdev) static const int pcie_offsets[] = { [RGR1_SW_INIT_1] = 0x9210, [EXT_CFG_INDEX] = 0x9000, - [EXT_CFG_DATA] = 0x9004, + [EXT_CFG_DATA] = 0x8000, [PCIE_HARD_DEBUG] = 0x4204, [PCIE_INTR2_CPU_BASE] = 0x4300, }; @@ -1668,7 +1719,7 @@ static const int pcie_offsets[] = { static const int pcie_offsets_bcm7278[] = { [RGR1_SW_INIT_1] = 0xc010, [EXT_CFG_INDEX] = 0x9000, - [EXT_CFG_DATA] = 0x9004, + [EXT_CFG_DATA] = 0x8000, [PCIE_HARD_DEBUG] = 0x4204, [PCIE_INTR2_CPU_BASE] = 0x4300, }; @@ -1682,8 +1733,9 @@ static const int pcie_offsets_bcm7425[] = { }; static const int pcie_offsets_bcm7712[] = { + [RGR1_SW_INIT_1] = 0x9210, [EXT_CFG_INDEX] = 0x9000, - [EXT_CFG_DATA] = 0x9004, + [EXT_CFG_DATA] = 0x8000, [PCIE_HARD_DEBUG] = 0x4304, [PCIE_INTR2_CPU_BASE] = 0x4400, }; @@ -1704,6 +1756,16 @@ static const struct pcie_cfg_data bcm2711_cfg = { .num_inbound_wins = 3, }; +static const struct pcie_cfg_data bcm2712_cfg = { + .offsets = pcie_offsets_bcm7712, + .soc_base = BCM7712, + .perst_set = brcm_pcie_perst_set_7278, + .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, + .post_setup = brcm_pcie_post_setup_bcm2712, + .quirks = CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN, + .num_inbound_wins = 10, +}; + static const struct pcie_cfg_data bcm4908_cfg = { .offsets = pcie_offsets, .soc_base = BCM4908, @@ -1755,6 +1817,7 @@ static const struct pcie_cfg_data bcm7712_cfg = { static const struct of_device_id brcm_pcie_match[] = { { .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg }, + { .compatible = "brcm,bcm2712-pcie", .data = &bcm2712_cfg }, { .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg }, { .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg }, { .compatible = "brcm,bcm7216-pcie", .data = &bcm7216_cfg }, @@ -1784,7 +1847,7 @@ static struct pci_ops brcm7425_pcie_ops = { static int brcm_pcie_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node, *msi_np; + struct device_node *np = pdev->dev.of_node; struct pci_host_bridge *bridge; const struct pcie_cfg_data *data; struct brcm_pcie *pcie; @@ -1803,12 +1866,7 @@ static int brcm_pcie_probe(struct platform_device *pdev) pcie = 
pci_host_bridge_priv(bridge); pcie->dev = &pdev->dev; pcie->np = np; - pcie->reg_offsets = data->offsets; - pcie->soc_base = data->soc_base; - pcie->perst_set = data->perst_set; - pcie->bridge_sw_init_set = data->bridge_sw_init_set; - pcie->has_phy = data->has_phy; - pcie->num_inbound_wins = data->num_inbound_wins; + pcie->cfg = data; pcie->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pcie->base)) @@ -1843,7 +1901,7 @@ static int brcm_pcie_probe(struct platform_device *pdev) if (ret) return dev_err_probe(&pdev->dev, ret, "could not enable clock\n"); - pcie->bridge_sw_init_set(pcie, 0); + pcie->cfg->bridge_sw_init_set(pcie, 0); if (pcie->swinit_reset) { ret = reset_control_assert(pcie->swinit_reset); @@ -1882,22 +1940,29 @@ static int brcm_pcie_probe(struct platform_device *pdev) goto fail; pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION); - if (pcie->soc_base == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) { + if (pcie->cfg->soc_base == BCM4908 && + pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) { dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n"); ret = -ENODEV; goto fail; } - msi_np = of_parse_phandle(pcie->np, "msi-parent", 0); - if (pci_msi_enabled() && msi_np == pcie->np) { - ret = brcm_pcie_enable_msi(pcie); + if (pci_msi_enabled()) { + struct device_node *msi_np = of_parse_phandle(pcie->np, "msi-parent", 0); + + if (msi_np == pcie->np) + ret = brcm_pcie_enable_msi(pcie); + + of_node_put(msi_np); + if (ret) { dev_err(pcie->dev, "probe of internal MSI failed"); goto fail; } } - bridge->ops = pcie->soc_base == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops; + bridge->ops = pcie->cfg->soc_base == BCM7425 ? + &brcm7425_pcie_ops : &brcm_pcie_ops; bridge->sysdata = pcie; platform_set_drvdata(pdev, pcie); @@ -1940,3 +2005,4 @@ module_platform_driver(brcm_pcie_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Broadcom STB PCIe RC driver"); MODULE_AUTHOR("Broadcom"); +MODULE_SOFTDEP("pre: irq_bcm2712_mip"); diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c index be52e3a123ab..9d52504acae4 100644 --- a/drivers/pci/controller/pcie-mediatek-gen3.c +++ b/drivers/pci/controller/pcie-mediatek-gen3.c @@ -15,6 +15,7 @@ #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> +#include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/msi.h> #include <linux/of_device.h> @@ -24,6 +25,7 @@ #include <linux/platform_device.h> #include <linux/pm_domain.h> #include <linux/pm_runtime.h> +#include <linux/regmap.h> #include <linux/reset.h> #include "../pci.h" @@ -125,6 +127,8 @@ #define MAX_NUM_PHY_RESETS 3 +#define PCIE_MTK_RESET_TIME_US 10 + /* Time in ms needed to complete PCIe reset on EN7581 SoC */ #define PCIE_EN7581_RESET_TIME_MS 100 @@ -133,10 +137,18 @@ struct mtk_gen3_pcie; #define PCIE_CONF_LINK2_CTL_STS (PCIE_CFG_OFFSET_ADDR + 0xb0) #define PCIE_CONF_LINK2_LCR2_LINK_SPEED GENMASK(3, 0) +enum mtk_gen3_pcie_flags { + SKIP_PCIE_RSTB = BIT(0), /* Skip PERST# assertion during device + * probing or suspend/resume phase to + * avoid hw bugs/issues. + */ +}; + /** * struct mtk_gen3_pcie_pdata - differentiate between host generations * @power_up: pcie power_up callback * @phy_resets: phy reset lines SoC data. + * @flags: pcie device flags. 
*/ struct mtk_gen3_pcie_pdata { int (*power_up)(struct mtk_gen3_pcie *pcie); @@ -144,6 +156,7 @@ struct mtk_gen3_pcie_pdata { const char *id[MAX_NUM_PHY_RESETS]; int num_resets; } phy_resets; + u32 flags; }; /** @@ -341,7 +354,8 @@ static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie, dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n", range_type, *num, (unsigned long long)cpu_addr, - (unsigned long long)pci_addr, (unsigned long long)table_size); + (unsigned long long)pci_addr, + (unsigned long long)table_size); cpu_addr += table_size; pci_addr += table_size; @@ -438,22 +452,33 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie) val |= PCIE_DISABLE_DVFSRC_VLT_REQ; writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG); - /* Assert all reset signals */ - val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG); - val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB; - writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG); - /* - * Described in PCIe CEM specification sections 2.2 (PERST# Signal) - * and 2.2.1 (Initial Power-Up (G3 to S0)). - * The deassertion of PERST# should be delayed 100ms (TPVPERL) - * for the power and clock to become stable. + * Airoha EN7581 has a hw bug asserting/releasing PCIE_PE_RSTB signal + * causing occasional PCIe link down. In order to overcome the issue, + * PCIE_RSTB signals are not asserted/released at this stage and the + * PCIe block is reset using en7523_reset_assert() and + * en7581_pci_enable(). */ - msleep(100); - - /* De-assert reset signals */ - val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB); - writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG); + if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) { + /* Assert all reset signals */ + val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG); + val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | + PCIE_PE_RSTB; + writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG); + + /* + * Described in PCIe CEM specification revision 6.0. + * + * The deassertion of PERST# should be delayed 100ms (TPVPERL) + * for the power and clock to become stable. 
+ */ + msleep(PCIE_T_PVPERL_MS); + + /* De-assert reset signals */ + val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | + PCIE_PE_RSTB); + writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG); + } /* Check if the link is up or not */ err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val, @@ -865,7 +890,8 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie) for (i = 0; i < num_resets; i++) pcie->phy_resets[i].id = pcie->soc->phy_resets.id[i]; - ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets, pcie->phy_resets); + ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets, + pcie->phy_resets); if (ret) { dev_err(dev, "failed to get PHY bulk reset\n"); return ret; @@ -895,29 +921,62 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie) return pcie->num_clks; } - ret = of_property_read_u32(dev->of_node, "num-lanes", &num_lanes); - if (ret == 0) { - if (num_lanes == 0 || num_lanes > 16 || (num_lanes != 1 && num_lanes % 2)) + ret = of_property_read_u32(dev->of_node, "num-lanes", &num_lanes); + if (ret == 0) { + if (num_lanes == 0 || num_lanes > 16 || + (num_lanes != 1 && num_lanes % 2)) dev_warn(dev, "invalid num-lanes, using controller defaults\n"); - else + else pcie->num_lanes = num_lanes; - } + } return 0; } static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie) { + struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); struct device *dev = pcie->dev; + struct resource_entry *entry; + struct regmap *pbus_regmap; + u32 val, args[2], size; + resource_size_t addr; int err; - u32 val; /* - * Wait for the time needed to complete the bulk assert in - * mtk_pcie_setup for EN7581 SoC. + * The controller may have been left out of reset by the bootloader + * so make sure that we get a clean start by asserting resets here. + */ + reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, + pcie->phy_resets); + + /* Wait for the time needed to complete the reset lines assert. */ + msleep(PCIE_EN7581_RESET_TIME_MS); + + /* + * Configure PBus base address and base address mask to allow the + * hw to detect if a given address is accessible on PCIe controller. */ - mdelay(PCIE_EN7581_RESET_TIME_MS); + pbus_regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node, + "mediatek,pbus-csr", + ARRAY_SIZE(args), + args); + if (IS_ERR(pbus_regmap)) + return PTR_ERR(pbus_regmap); + + entry = resource_list_first_type(&host->windows, IORESOURCE_MEM); + if (!entry) + return -ENODEV; + + addr = entry->res->start - entry->offset; + regmap_write(pbus_regmap, args[0], lower_32_bits(addr)); + size = lower_32_bits(resource_size(entry->res)); + regmap_write(pbus_regmap, args[1], GENMASK(31, __fls(size))); + /* + * Unlike the other MediaTek Gen3 controllers, the Airoha EN7581 + * requires PHY initialization and power-on before PHY reset deassert. + */ err = phy_init(pcie->phy); if (err) { dev_err(dev, "failed to initialize PHY\n"); @@ -930,7 +989,8 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie) goto err_phy_on; } - err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); + err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, + pcie->phy_resets); if (err) { dev_err(dev, "failed to deassert PHYs\n"); goto err_phy_deassert; @@ -940,17 +1000,11 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie) * Wait for the time needed to complete the bulk de-assert above. * This time is specific for EN7581 SoC. 
*/ - mdelay(PCIE_EN7581_RESET_TIME_MS); + msleep(PCIE_EN7581_RESET_TIME_MS); pm_runtime_enable(dev); pm_runtime_get_sync(dev); - err = clk_bulk_prepare(pcie->num_clks, pcie->clks); - if (err) { - dev_err(dev, "failed to prepare clock\n"); - goto err_clk_prepare; - } - val = FIELD_PREP(PCIE_VAL_LN0_DOWNSTREAM, 0x47) | FIELD_PREP(PCIE_VAL_LN1_DOWNSTREAM, 0x47) | FIELD_PREP(PCIE_VAL_LN0_UPSTREAM, 0x41) | @@ -963,20 +1017,26 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie) FIELD_PREP(PCIE_K_FINETUNE_MAX, 0xf); writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG); - err = clk_bulk_enable(pcie->num_clks, pcie->clks); + err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks); if (err) { dev_err(dev, "failed to prepare clock\n"); - goto err_clk_enable; + goto err_clk_prepare_enable; } + /* + * Airoha EN7581 performs PCIe reset via clk callbacks since it has a + * hw issue with PCIE_PE_RSTB signal. Add wait for the time needed to + * complete the PCIe reset. + */ + msleep(PCIE_T_PVPERL_MS); + return 0; -err_clk_enable: - clk_bulk_unprepare(pcie->num_clks, pcie->clks); -err_clk_prepare: +err_clk_prepare_enable: pm_runtime_put_sync(dev); pm_runtime_disable(dev); - reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); + reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, + pcie->phy_resets); err_phy_deassert: phy_power_off(pcie->phy); err_phy_on: @@ -990,8 +1050,18 @@ static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie) struct device *dev = pcie->dev; int err; + /* + * The controller may have been left out of reset by the bootloader + * so make sure that we get a clean start by asserting resets here. + */ + reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, + pcie->phy_resets); + reset_control_assert(pcie->mac_reset); + usleep_range(PCIE_MTK_RESET_TIME_US, 2 * PCIE_MTK_RESET_TIME_US); + /* PHY power on and enable pipe clock */ - err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); + err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, + pcie->phy_resets); if (err) { dev_err(dev, "failed to deassert PHYs\n"); return err; @@ -1031,7 +1101,8 @@ err_clk_init: err_phy_on: phy_exit(pcie->phy); err_phy_init: - reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); + reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, + pcie->phy_resets); return err; } @@ -1046,7 +1117,8 @@ static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie) phy_power_off(pcie->phy); phy_exit(pcie->phy); - reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); + reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, + pcie->phy_resets); } static int mtk_pcie_get_controller_max_link_speed(struct mtk_gen3_pcie *pcie) @@ -1073,15 +1145,8 @@ static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie) * Deassert the line in order to avoid unbalance in deassert_count * counter since the bulk is shared. */ - reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); - /* - * The controller may have been left out of reset by the bootloader - * so make sure that we get a clean start by asserting resets here. 
- */ - reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); - - reset_control_assert(pcie->mac_reset); - usleep_range(10, 20); + reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, + pcie->phy_resets); /* Don't touch the hardware registers before power up */ err = pcie->soc->power_up(pcie); @@ -1231,10 +1296,12 @@ static int mtk_pcie_suspend_noirq(struct device *dev) return err; } - /* Pull down the PERST# pin */ - val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG); - val |= PCIE_PE_RSTB; - writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG); + if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) { + /* Assert the PERST# pin */ + val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG); + val |= PCIE_PE_RSTB; + writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG); + } dev_dbg(pcie->dev, "entered L2 states successfully"); @@ -1285,6 +1352,7 @@ static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = { .id[2] = "phy-lane2", .num_resets = 3, }, + .flags = SKIP_PCIE_RSTB, }; static const struct of_device_id mtk_pcie_of_match[] = { @@ -1301,6 +1369,7 @@ static struct platform_driver mtk_pcie_driver = { .name = "mtk-pcie-gen3", .of_match_table = mtk_pcie_of_match, .pm = &mtk_pcie_pm_ops, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, }; diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index 3bcfc4e58ba2..811a8b4acd50 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -1041,24 +1041,22 @@ err_free_ck: static int mtk_pcie_setup(struct mtk_pcie *pcie) { struct device *dev = pcie->dev; - struct device_node *node = dev->of_node, *child; + struct device_node *node = dev->of_node; struct mtk_pcie_port *port, *tmp; int err, slot; slot = of_get_pci_domain_nr(dev->of_node); if (slot < 0) { - for_each_available_child_of_node(node, child) { + for_each_available_child_of_node_scoped(node, child) { err = of_pci_get_devfn(child); - if (err < 0) { - dev_err(dev, "failed to get devfn: %d\n", err); - goto error_put_node; - } + if (err < 0) + return dev_err_probe(dev, err, "failed to get devfn\n"); slot = PCI_SLOT(err); err = mtk_pcie_parse_port(pcie, child, slot); if (err) - goto error_put_node; + return err; } } else { err = mtk_pcie_parse_port(pcie, node, slot); @@ -1079,9 +1077,6 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie) mtk_pcie_subsys_powerdown(pcie); return 0; -error_put_node: - of_node_put(child); - return err; } static int mtk_pcie_probe(struct platform_device *pdev) diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c index 776caa0b1011..01ead2f92e87 100644 --- a/drivers/pci/controller/pcie-mt7621.c +++ b/drivers/pci/controller/pcie-mt7621.c @@ -258,30 +258,25 @@ static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie) { struct device *dev = pcie->dev; struct platform_device *pdev = to_platform_device(dev); - struct device_node *node = dev->of_node, *child; + struct device_node *node = dev->of_node; int err; pcie->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pcie->base)) return PTR_ERR(pcie->base); - for_each_available_child_of_node(node, child) { + for_each_available_child_of_node_scoped(node, child) { int slot; err = of_pci_get_devfn(child); - if (err < 0) { - of_node_put(child); - dev_err(dev, "failed to parse devfn: %d\n", err); - return err; - } + if (err < 0) + return dev_err_probe(dev, err, "failed to parse devfn\n"); slot = PCI_SLOT(err); err = mt7621_pcie_parse_port(pcie, child, slot); - if (err) { - of_node_put(child); 
+ if (err) return err; - } } return 0; diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c index 047e2cef5afc..c5e0d025bc43 100644 --- a/drivers/pci/controller/pcie-rcar-ep.c +++ b/drivers/pci/controller/pcie-rcar-ep.c @@ -107,7 +107,7 @@ static int rcar_pcie_parse_outbound_ranges(struct rcar_pcie_endpoint *ep, } if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), - outbound_name)) { + res->name)) { dev_err(pcie->dev, "Cannot request memory region %s.\n", outbound_name); return -EIO; diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index 7c92eada04af..c32b803a47c7 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -178,8 +178,8 @@ static int rcar_pcie_config_access(struct rcar_pcie_host *host, * space, it's generally only accessible when in endpoint mode. * When in root complex mode, the controller is unable to target * itself with either type 0 or type 1 accesses, and indeed, any - * controller initiated target transfer to its own config space - * result in a completer abort. + * controller-initiated target transfer to its own config space + * results in a completer abort. * * Each channel effectively only supports a single device, but as * the same channel <-> device access works for any PCI_SLOT() @@ -775,7 +775,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host) if (err) return err; - /* Two irqs are for MSI, but they are also used for non-MSI irqs */ + /* Two IRQs are for MSI, but they are also used for non-MSI IRQs */ err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq, IRQF_SHARED | IRQF_NO_THREAD, rcar_msi_bottom_chip.name, host); @@ -792,7 +792,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host) goto err; } - /* disable all MSIs */ + /* Disable all MSIs */ rcar_pci_write_reg(pcie, 0, PCIEMSIIER); /* @@ -892,6 +892,7 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie, dev_err(pcie->dev, "Failed to map inbound regions!\n"); return -EINVAL; } + /* * If the size of the range is larger than the alignment of * the start address, we have to use multiple entries to @@ -903,6 +904,7 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie, size = min(size, alignment); } + /* Hardware supports max 4GiB inbound region */ size = min(size, 1ULL << 32); diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c index 1064b7b06cef..85ea36df2f59 100644 --- a/drivers/pci/controller/pcie-rockchip-ep.c +++ b/drivers/pci/controller/pcie-rockchip-ep.c @@ -40,6 +40,10 @@ * @irq_pci_fn: the latest PCI function that has updated the mapping of * the MSI/INTX IRQ dedicated outbound region. * @irq_pending: bitmask of asserted INTX IRQs. + * @perst_irq: IRQ used for the PERST# signal. + * @perst_asserted: True if the PERST# signal was asserted. + * @link_up: True if the PCI link is up. + * @link_training: Work item to execute PCI link training. 
*/ struct rockchip_pcie_ep { struct rockchip_pcie rockchip; @@ -784,6 +788,7 @@ static int rockchip_pcie_ep_init_ob_mem(struct rockchip_pcie_ep *ep) SZ_1M); if (!ep->irq_cpu_addr) { dev_err(dev, "failed to reserve memory space for MSI\n"); + err = -ENOMEM; goto err_epc_mem_exit; } diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index 5adac6adc046..6a46be17aa91 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -367,7 +367,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) } } - rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID, + rockchip_pcie_write(rockchip, PCI_VENDOR_ID_ROCKCHIP, PCIE_CORE_CONFIG_VENDOR); rockchip_pcie_write(rockchip, PCI_CLASS_BRIDGE_PCI_NORMAL << 8, diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c index b9ade7632e11..0f88da378805 100644 --- a/drivers/pci/controller/pcie-rockchip.c +++ b/drivers/pci/controller/pcie-rockchip.c @@ -30,7 +30,7 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) struct platform_device *pdev = to_platform_device(dev); struct device_node *node = dev->of_node; struct resource *regs; - int err; + int err, i; if (rockchip->is_rc) { regs = platform_get_resource_byname(pdev, @@ -69,55 +69,23 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) if (rockchip->link_gen < 0 || rockchip->link_gen > 2) rockchip->link_gen = 2; - rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core"); - if (IS_ERR(rockchip->core_rst)) { - if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER) - dev_err(dev, "missing core reset property in node\n"); - return PTR_ERR(rockchip->core_rst); - } - - rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt"); - if (IS_ERR(rockchip->mgmt_rst)) { - if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER) - dev_err(dev, "missing mgmt reset property in node\n"); - return PTR_ERR(rockchip->mgmt_rst); - } - - rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev, - "mgmt-sticky"); - if (IS_ERR(rockchip->mgmt_sticky_rst)) { - if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER) - dev_err(dev, "missing mgmt-sticky reset property in node\n"); - return PTR_ERR(rockchip->mgmt_sticky_rst); - } + for (i = 0; i < ROCKCHIP_NUM_PM_RSTS; i++) + rockchip->pm_rsts[i].id = rockchip_pci_pm_rsts[i]; - rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe"); - if (IS_ERR(rockchip->pipe_rst)) { - if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER) - dev_err(dev, "missing pipe reset property in node\n"); - return PTR_ERR(rockchip->pipe_rst); - } - - rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm"); - if (IS_ERR(rockchip->pm_rst)) { - if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER) - dev_err(dev, "missing pm reset property in node\n"); - return PTR_ERR(rockchip->pm_rst); - } + err = devm_reset_control_bulk_get_exclusive(dev, + ROCKCHIP_NUM_PM_RSTS, + rockchip->pm_rsts); + if (err) + return dev_err_probe(dev, err, "Cannot get the PM reset\n"); - rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk"); - if (IS_ERR(rockchip->pclk_rst)) { - if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER) - dev_err(dev, "missing pclk reset property in node\n"); - return PTR_ERR(rockchip->pclk_rst); - } + for (i = 0; i < ROCKCHIP_NUM_CORE_RSTS; i++) + rockchip->core_rsts[i].id = rockchip_pci_core_rsts[i]; - rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk"); - if (IS_ERR(rockchip->aclk_rst)) { - if 
(PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER) - dev_err(dev, "missing aclk reset property in node\n"); - return PTR_ERR(rockchip->aclk_rst); - } + err = devm_reset_control_bulk_get_exclusive(dev, + ROCKCHIP_NUM_CORE_RSTS, + rockchip->core_rsts); + if (err) + return dev_err_probe(dev, err, "Cannot get the Core resets\n"); if (rockchip->is_rc) rockchip->perst_gpio = devm_gpiod_get_optional(dev, "ep", @@ -129,29 +97,10 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) return dev_err_probe(dev, PTR_ERR(rockchip->perst_gpio), "failed to get PERST# GPIO\n"); - rockchip->aclk_pcie = devm_clk_get(dev, "aclk"); - if (IS_ERR(rockchip->aclk_pcie)) { - dev_err(dev, "aclk clock not found\n"); - return PTR_ERR(rockchip->aclk_pcie); - } - - rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf"); - if (IS_ERR(rockchip->aclk_perf_pcie)) { - dev_err(dev, "aclk_perf clock not found\n"); - return PTR_ERR(rockchip->aclk_perf_pcie); - } - - rockchip->hclk_pcie = devm_clk_get(dev, "hclk"); - if (IS_ERR(rockchip->hclk_pcie)) { - dev_err(dev, "hclk clock not found\n"); - return PTR_ERR(rockchip->hclk_pcie); - } - - rockchip->clk_pcie_pm = devm_clk_get(dev, "pm"); - if (IS_ERR(rockchip->clk_pcie_pm)) { - dev_err(dev, "pm clock not found\n"); - return PTR_ERR(rockchip->clk_pcie_pm); - } + rockchip->num_clks = devm_clk_bulk_get_all(dev, &rockchip->clks); + if (rockchip->num_clks < 0) + return dev_err_probe(dev, rockchip->num_clks, + "failed to get clocks\n"); return 0; } @@ -169,23 +118,10 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) int err, i; u32 regs; - err = reset_control_assert(rockchip->aclk_rst); - if (err) { - dev_err(dev, "assert aclk_rst err %d\n", err); - return err; - } - - err = reset_control_assert(rockchip->pclk_rst); - if (err) { - dev_err(dev, "assert pclk_rst err %d\n", err); - return err; - } - - err = reset_control_assert(rockchip->pm_rst); - if (err) { - dev_err(dev, "assert pm_rst err %d\n", err); - return err; - } + err = reset_control_bulk_assert(ROCKCHIP_NUM_PM_RSTS, + rockchip->pm_rsts); + if (err) + return dev_err_probe(dev, err, "Couldn't assert PM resets\n"); for (i = 0; i < MAX_LANE_NUM; i++) { err = phy_init(rockchip->phys[i]); @@ -195,47 +131,19 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) } } - err = reset_control_assert(rockchip->core_rst); - if (err) { - dev_err(dev, "assert core_rst err %d\n", err); - goto err_exit_phy; - } - - err = reset_control_assert(rockchip->mgmt_rst); - if (err) { - dev_err(dev, "assert mgmt_rst err %d\n", err); - goto err_exit_phy; - } - - err = reset_control_assert(rockchip->mgmt_sticky_rst); - if (err) { - dev_err(dev, "assert mgmt_sticky_rst err %d\n", err); - goto err_exit_phy; - } - - err = reset_control_assert(rockchip->pipe_rst); + err = reset_control_bulk_assert(ROCKCHIP_NUM_CORE_RSTS, + rockchip->core_rsts); if (err) { - dev_err(dev, "assert pipe_rst err %d\n", err); + dev_err_probe(dev, err, "Couldn't assert Core resets\n"); goto err_exit_phy; } udelay(10); - err = reset_control_deassert(rockchip->pm_rst); - if (err) { - dev_err(dev, "deassert pm_rst err %d\n", err); - goto err_exit_phy; - } - - err = reset_control_deassert(rockchip->aclk_rst); - if (err) { - dev_err(dev, "deassert aclk_rst err %d\n", err); - goto err_exit_phy; - } - - err = reset_control_deassert(rockchip->pclk_rst); + err = reset_control_bulk_deassert(ROCKCHIP_NUM_PM_RSTS, + rockchip->pm_rsts); if (err) { - dev_err(dev, "deassert pclk_rst err %d\n", err); + dev_err(dev, "Couldn't deassert PM resets %d\n", err); goto 
err_exit_phy; } @@ -275,31 +183,10 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) goto err_power_off_phy; } - /* - * Please don't reorder the deassert sequence of the following - * four reset pins. - */ - err = reset_control_deassert(rockchip->mgmt_sticky_rst); - if (err) { - dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err); - goto err_power_off_phy; - } - - err = reset_control_deassert(rockchip->core_rst); - if (err) { - dev_err(dev, "deassert core_rst err %d\n", err); - goto err_power_off_phy; - } - - err = reset_control_deassert(rockchip->mgmt_rst); - if (err) { - dev_err(dev, "deassert mgmt_rst err %d\n", err); - goto err_power_off_phy; - } - - err = reset_control_deassert(rockchip->pipe_rst); + err = reset_control_bulk_deassert(ROCKCHIP_NUM_CORE_RSTS, + rockchip->core_rsts); if (err) { - dev_err(dev, "deassert pipe_rst err %d\n", err); + dev_err(dev, "Couldn't deassert Core reset %d\n", err); goto err_power_off_phy; } @@ -375,50 +262,18 @@ int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip) struct device *dev = rockchip->dev; int err; - err = clk_prepare_enable(rockchip->aclk_pcie); - if (err) { - dev_err(dev, "unable to enable aclk_pcie clock\n"); - return err; - } - - err = clk_prepare_enable(rockchip->aclk_perf_pcie); - if (err) { - dev_err(dev, "unable to enable aclk_perf_pcie clock\n"); - goto err_aclk_perf_pcie; - } - - err = clk_prepare_enable(rockchip->hclk_pcie); - if (err) { - dev_err(dev, "unable to enable hclk_pcie clock\n"); - goto err_hclk_pcie; - } - - err = clk_prepare_enable(rockchip->clk_pcie_pm); - if (err) { - dev_err(dev, "unable to enable clk_pcie_pm clock\n"); - goto err_clk_pcie_pm; - } + err = clk_bulk_prepare_enable(rockchip->num_clks, rockchip->clks); + if (err) + return dev_err_probe(dev, err, "failed to enable clocks\n"); return 0; - -err_clk_pcie_pm: - clk_disable_unprepare(rockchip->hclk_pcie); -err_hclk_pcie: - clk_disable_unprepare(rockchip->aclk_perf_pcie); -err_aclk_perf_pcie: - clk_disable_unprepare(rockchip->aclk_pcie); - return err; } EXPORT_SYMBOL_GPL(rockchip_pcie_enable_clocks); -void rockchip_pcie_disable_clocks(void *data) +void rockchip_pcie_disable_clocks(struct rockchip_pcie *rockchip) { - struct rockchip_pcie *rockchip = data; - clk_disable_unprepare(rockchip->clk_pcie_pm); - clk_disable_unprepare(rockchip->hclk_pcie); - clk_disable_unprepare(rockchip->aclk_perf_pcie); - clk_disable_unprepare(rockchip->aclk_pcie); + clk_bulk_disable_unprepare(rockchip->num_clks, rockchip->clks); } EXPORT_SYMBOL_GPL(rockchip_pcie_disable_clocks); diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h index a51b087ce878..14954f43e5e9 100644 --- a/drivers/pci/controller/pcie-rockchip.h +++ b/drivers/pci/controller/pcie-rockchip.h @@ -11,9 +11,11 @@ #ifndef _PCIE_ROCKCHIP_H #define _PCIE_ROCKCHIP_H +#include <linux/clk.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/pci-ecam.h> +#include <linux/reset.h> /* * The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16 @@ -198,7 +200,6 @@ #define AXI_WRAPPER_NOR_MSG 0xc #define PCIE_RC_SEND_PME_OFF 0x11960 -#define ROCKCHIP_VENDOR_ID 0x1d87 #define PCIE_LINK_IS_L2(x) \ (((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2) #define PCIE_LINK_TRAINING_DONE(x) \ @@ -309,22 +310,31 @@ (((c) << ((b) * 8 + 5)) & \ ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)) +#define ROCKCHIP_NUM_PM_RSTS ARRAY_SIZE(rockchip_pci_pm_rsts) +#define ROCKCHIP_NUM_CORE_RSTS ARRAY_SIZE(rockchip_pci_core_rsts) + 
+static const char * const rockchip_pci_pm_rsts[] = { + "pm", + "pclk", + "aclk", +}; + +static const char * const rockchip_pci_core_rsts[] = { + "mgmt-sticky", + "core", + "mgmt", + "pipe", +}; + struct rockchip_pcie { void __iomem *reg_base; /* DT axi-base */ void __iomem *apb_base; /* DT apb-base */ bool legacy_phy; struct phy *phys[MAX_LANE_NUM]; - struct reset_control *core_rst; - struct reset_control *mgmt_rst; - struct reset_control *mgmt_sticky_rst; - struct reset_control *pipe_rst; - struct reset_control *pm_rst; - struct reset_control *aclk_rst; - struct reset_control *pclk_rst; - struct clk *aclk_pcie; - struct clk *aclk_perf_pcie; - struct clk *hclk_pcie; - struct clk *clk_pcie_pm; + struct reset_control_bulk_data pm_rsts[ROCKCHIP_NUM_PM_RSTS]; + struct reset_control_bulk_data core_rsts[ROCKCHIP_NUM_CORE_RSTS]; + struct clk_bulk_data *clks; + int num_clks; struct regulator *vpcie12v; /* 12V power supply */ struct regulator *vpcie3v3; /* 3.3V power supply */ struct regulator *vpcie1v8; /* 1.8V power supply */ @@ -358,7 +368,7 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip); int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip); void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip); int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip); -void rockchip_pcie_disable_clocks(void *data); +void rockchip_pcie_disable_clocks(struct rockchip_pcie *rockchip); void rockchip_pcie_cfg_configuration_accesses( struct rockchip_pcie *rockchip, u32 type); diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c index a0f5e1d67b04..13ca493d22bd 100644 --- a/drivers/pci/controller/pcie-xilinx-cpm.c +++ b/drivers/pci/controller/pcie-xilinx-cpm.c @@ -30,11 +30,14 @@ #define XILINX_CPM_PCIE_REG_IDRN_MASK 0x00000E3C #define XILINX_CPM_PCIE_MISC_IR_STATUS 0x00000340 #define XILINX_CPM_PCIE_MISC_IR_ENABLE 0x00000348 -#define XILINX_CPM_PCIE_MISC_IR_LOCAL BIT(1) +#define XILINX_CPM_PCIE0_MISC_IR_LOCAL BIT(1) +#define XILINX_CPM_PCIE1_MISC_IR_LOCAL BIT(2) -#define XILINX_CPM_PCIE_IR_STATUS 0x000002A0 -#define XILINX_CPM_PCIE_IR_ENABLE 0x000002A8 -#define XILINX_CPM_PCIE_IR_LOCAL BIT(0) +#define XILINX_CPM_PCIE0_IR_STATUS 0x000002A0 +#define XILINX_CPM_PCIE1_IR_STATUS 0x000002B4 +#define XILINX_CPM_PCIE0_IR_ENABLE 0x000002A8 +#define XILINX_CPM_PCIE1_IR_ENABLE 0x000002BC +#define XILINX_CPM_PCIE_IR_LOCAL BIT(0) #define IMR(x) BIT(XILINX_PCIE_INTR_ ##x) @@ -80,14 +83,22 @@ enum xilinx_cpm_version { CPM, CPM5, + CPM5_HOST1, + CPM5NC_HOST, }; /** * struct xilinx_cpm_variant - CPM variant information * @version: CPM version + * @ir_status: Offset for the error interrupt status register + * @ir_enable: Offset for the CPM5 local error interrupt enable register + * @ir_misc_value: A bitmask for the miscellaneous interrupt status */ struct xilinx_cpm_variant { enum xilinx_cpm_version version; + u32 ir_status; + u32 ir_enable; + u32 ir_misc_value; }; /** @@ -269,6 +280,7 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc) { struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); + const struct xilinx_cpm_variant *variant = port->variant; unsigned long val; int i; @@ -279,11 +291,11 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc) generic_handle_domain_irq(port->cpm_domain, i); pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR); - if (port->variant->version == CPM5) { - val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_IR_STATUS); + if (variant->ir_status) 
{ + val = readl_relaxed(port->cpm_base + variant->ir_status); if (val) writel_relaxed(val, port->cpm_base + - XILINX_CPM_PCIE_IR_STATUS); + variant->ir_status); } /* @@ -465,6 +477,11 @@ static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port) */ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port) { + const struct xilinx_cpm_variant *variant = port->variant; + + if (variant->version == CPM5NC_HOST) + return; + if (cpm_pcie_link_up(port)) dev_info(port->dev, "PCIe Link is UP\n"); else @@ -483,15 +500,15 @@ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port) * XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to * CPM SLCR block. */ - writel(XILINX_CPM_PCIE_MISC_IR_LOCAL, + writel(variant->ir_misc_value, port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE); - if (port->variant->version == CPM5) { + if (variant->ir_enable) { writel(XILINX_CPM_PCIE_IR_LOCAL, - port->cpm_base + XILINX_CPM_PCIE_IR_ENABLE); + port->cpm_base + variant->ir_enable); } - /* Enable the Bridge enable bit */ + /* Set Bridge enable bit */ pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) | XILINX_CPM_PCIE_REG_RPSC_BEN, XILINX_CPM_PCIE_REG_RPSC); @@ -525,7 +542,8 @@ static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port, if (IS_ERR(port->cfg)) return PTR_ERR(port->cfg); - if (port->variant->version == CPM5) { + if (port->variant->version == CPM5 || + port->variant->version == CPM5_HOST1) { port->reg_base = devm_platform_ioremap_resource_byname(pdev, "cpm_csr"); if (IS_ERR(port->reg_base)) @@ -565,28 +583,34 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev) port->dev = dev; - err = xilinx_cpm_pcie_init_irq_domain(port); - if (err) - return err; + port->variant = of_device_get_match_data(dev); - bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); - if (!bus) - return -ENODEV; + if (port->variant->version != CPM5NC_HOST) { + err = xilinx_cpm_pcie_init_irq_domain(port); + if (err) + return err; + } - port->variant = of_device_get_match_data(dev); + bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); + if (!bus) { + err = -ENODEV; + goto err_free_irq_domains; + } err = xilinx_cpm_pcie_parse_dt(port, bus->res); if (err) { dev_err(dev, "Parsing DT failed\n"); - goto err_parse_dt; + goto err_free_irq_domains; } xilinx_cpm_pcie_init_port(port); - err = xilinx_cpm_setup_irq(port); - if (err) { - dev_err(dev, "Failed to set up interrupts\n"); - goto err_setup_irq; + if (port->variant->version != CPM5NC_HOST) { + err = xilinx_cpm_setup_irq(port); + if (err) { + dev_err(dev, "Failed to set up interrupts\n"); + goto err_setup_irq; + } } bridge->sysdata = port->cfg; @@ -599,20 +623,37 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev) return 0; err_host_bridge: - xilinx_cpm_free_interrupts(port); + if (port->variant->version != CPM5NC_HOST) + xilinx_cpm_free_interrupts(port); err_setup_irq: pci_ecam_free(port->cfg); -err_parse_dt: - xilinx_cpm_free_irq_domains(port); +err_free_irq_domains: + if (port->variant->version != CPM5NC_HOST) + xilinx_cpm_free_irq_domains(port); return err; } static const struct xilinx_cpm_variant cpm_host = { .version = CPM, + .ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL, }; static const struct xilinx_cpm_variant cpm5_host = { .version = CPM5, + .ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL, + .ir_status = XILINX_CPM_PCIE0_IR_STATUS, + .ir_enable = XILINX_CPM_PCIE0_IR_ENABLE, +}; + +static const struct xilinx_cpm_variant cpm5_host1 = { + .version = CPM5_HOST1, + .ir_misc_value = 
XILINX_CPM_PCIE1_MISC_IR_LOCAL, + .ir_status = XILINX_CPM_PCIE1_IR_STATUS, + .ir_enable = XILINX_CPM_PCIE1_IR_ENABLE, +}; + +static const struct xilinx_cpm_variant cpm5n_host = { + .version = CPM5NC_HOST, }; static const struct of_device_id xilinx_cpm_pcie_of_match[] = { @@ -624,6 +665,14 @@ static const struct of_device_id xilinx_cpm_pcie_of_match[] = { .compatible = "xlnx,versal-cpm5-host", .data = &cpm5_host, }, + { + .compatible = "xlnx,versal-cpm5-host1", + .data = &cpm5_host1, + }, + { + .compatible = "xlnx,versal-cpm5nc-host", + .data = &cpm5n_host, + }, {} }; diff --git a/drivers/pci/controller/plda/pcie-microchip-host.c b/drivers/pci/controller/plda/pcie-microchip-host.c index 6630cacef301..3fdfffdf0270 100644 --- a/drivers/pci/controller/plda/pcie-microchip-host.c +++ b/drivers/pci/controller/plda/pcie-microchip-host.c @@ -7,20 +7,27 @@ * Author: Daire McNamara <daire.mcnamara@microchip.com> */ +#include <linux/align.h> +#include <linux/bits.h> #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> +#include <linux/log2.h> #include <linux/module.h> #include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/pci-ecam.h> #include <linux/platform_device.h> +#include <linux/wordpart.h> #include "../../pci.h" #include "pcie-plda.h" +#define MC_MAX_NUM_INBOUND_WINDOWS 8 +#define MPFS_NC_BOUNCE_ADDR 0x80000000 + /* PCIe Bridge Phy and Controller Phy offsets */ #define MC_PCIE1_BRIDGE_ADDR 0x00008000u #define MC_PCIE1_CTRL_ADDR 0x0000a000u @@ -607,6 +614,91 @@ static void mc_disable_interrupts(struct mc_pcie *port) writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_HOST); } +static void mc_pcie_setup_inbound_atr(struct mc_pcie *port, int window_index, + u64 axi_addr, u64 pcie_addr, u64 size) +{ + u32 table_offset = window_index * ATR_ENTRY_SIZE; + void __iomem *table_addr = port->bridge_base_addr + table_offset; + u32 atr_sz; + u32 val; + + atr_sz = ilog2(size) - 1; + + val = ALIGN_DOWN(lower_32_bits(pcie_addr), SZ_4K); + val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz); + val |= ATR_IMPL_ENABLE; + + writel(val, table_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM); + + writel(upper_32_bits(pcie_addr), table_addr + ATR0_PCIE_WIN0_SRC_ADDR); + + writel(lower_32_bits(axi_addr), table_addr + ATR0_PCIE_WIN0_TRSL_ADDR_LSB); + writel(upper_32_bits(axi_addr), table_addr + ATR0_PCIE_WIN0_TRSL_ADDR_UDW); + + writel(TRSL_ID_AXI4_MASTER_0, table_addr + ATR0_PCIE_WIN0_TRSL_PARAM); +} + +static int mc_pcie_setup_inbound_ranges(struct platform_device *pdev, + struct mc_pcie *port) +{ + struct device *dev = &pdev->dev; + struct device_node *dn = dev->of_node; + struct of_range_parser parser; + struct of_range range; + int atr_index = 0; + + /* + * MPFS PCIe Root Port is 32-bit only, behind a Fabric Interface + * Controller FPGA logic block which contains the AXI-S interface. + * + * From the point of view of the PCIe Root Port, there are only two + * supported Root Port configurations: + * + * Configuration 1: for use with fully coherent designs; supports a + * window from 0x0 (CPU space) to specified PCIe space. + * + * Configuration 2: for use with non-coherent designs; supports two + * 1 GB windows to CPU space; one mapping CPU space 0 to PCIe space + * 0x80000000 and a second mapping CPU space 0x40000000 to PCIe + * space 0xc0000000. This cfg needs two windows because of how the + * MSI space is allocated in the AXI-S range on MPFS. 
+ * + * The FIC interface outside the PCIe block *must* complete the + * inbound address translation as per MCHP MPFS FPGA design + * guidelines. + */ + if (device_property_read_bool(dev, "dma-noncoherent")) { + /* + * Always need same two tables in this case. Need two tables + * due to hardware interactions between address and size. + */ + mc_pcie_setup_inbound_atr(port, 0, 0, + MPFS_NC_BOUNCE_ADDR, SZ_1G); + mc_pcie_setup_inbound_atr(port, 1, SZ_1G, + MPFS_NC_BOUNCE_ADDR + SZ_1G, SZ_1G); + } else { + /* Find any DMA ranges */ + if (of_pci_dma_range_parser_init(&parser, dn)) { + /* No DMA range property - setup default */ + mc_pcie_setup_inbound_atr(port, 0, 0, 0, SZ_4G); + return 0; + } + + for_each_of_range(&parser, &range) { + if (atr_index >= MC_MAX_NUM_INBOUND_WINDOWS) { + dev_err(dev, "too many inbound ranges; %d available tables\n", + MC_MAX_NUM_INBOUND_WINDOWS); + return -EINVAL; + } + mc_pcie_setup_inbound_atr(port, atr_index, 0, + range.pci_addr, range.size); + atr_index++; + } + } + + return 0; +} + static int mc_platform_init(struct pci_config_window *cfg) { struct device *dev = cfg->parent; @@ -627,6 +719,10 @@ static int mc_platform_init(struct pci_config_window *cfg) if (ret) return ret; + ret = mc_pcie_setup_inbound_ranges(pdev, port); + if (ret) + return ret; + port->plda.event_ops = &mc_event_ops; port->plda.event_irq_chip = &mc_event_irq_chip; port->plda.events_bitmap = GENMASK(NUM_EVENTS - 1, 0); diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c index 8533dc618d45..4153214ca410 100644 --- a/drivers/pci/controller/plda/pcie-plda-host.c +++ b/drivers/pci/controller/plda/pcie-plda-host.c @@ -8,11 +8,14 @@ * Author: Daire McNamara <daire.mcnamara@microchip.com> */ +#include <linux/align.h> +#include <linux/bitfield.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/msi.h> #include <linux/pci_regs.h> #include <linux/pci-ecam.h> +#include <linux/wordpart.h> #include "pcie-plda.h" @@ -502,8 +505,9 @@ void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index, writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) + ATR0_AXI4_SLV0_TRSL_PARAM); - val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) | - ATR_IMPL_ENABLE; + val = ALIGN_DOWN(lower_32_bits(axi_addr), SZ_4K); + val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz); + val |= ATR_IMPL_ENABLE; writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) + ATR0_AXI4_SLV0_SRCADDR_PARAM); @@ -518,13 +522,20 @@ void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index, val = upper_32_bits(pci_addr); writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) + ATR0_AXI4_SLV0_TRSL_ADDR_UDW); +} +EXPORT_SYMBOL_GPL(plda_pcie_setup_window); + +void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port) +{ + void __iomem *bridge_base_addr = port->bridge_addr; + u32 val; val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM); val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT); writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM); writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR); } -EXPORT_SYMBOL_GPL(plda_pcie_setup_window); +EXPORT_SYMBOL_GPL(plda_pcie_setup_inbound_address_translation); int plda_pcie_setup_iomems(struct pci_host_bridge *bridge, struct plda_pcie_rp *port) diff --git a/drivers/pci/controller/plda/pcie-plda.h b/drivers/pci/controller/plda/pcie-plda.h index 0e7dc0d8e5ba..61ece26065ea 100644 --- a/drivers/pci/controller/plda/pcie-plda.h +++ 
b/drivers/pci/controller/plda/pcie-plda.h @@ -89,14 +89,15 @@ /* PCIe AXI slave table init defines */ #define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u -#define ATR_SIZE_SHIFT 1 -#define ATR_IMPL_ENABLE 1 +#define ATR_SIZE_MASK GENMASK(6, 1) +#define ATR_IMPL_ENABLE BIT(0) #define ATR0_AXI4_SLV0_SRC_ADDR 0x804u #define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u #define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu #define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u #define PCIE_TX_RX_INTERFACE 0x00000000u #define PCIE_CONFIG_INTERFACE 0x00000001u +#define TRSL_ID_AXI4_MASTER_0 0x00000004u #define CONFIG_SPACE_ADDR_OFFSET 0x1000u @@ -204,6 +205,7 @@ int plda_init_interrupts(struct platform_device *pdev, void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index, phys_addr_t axi_addr, phys_addr_t pci_addr, size_t size); +void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port); int plda_pcie_setup_iomems(struct pci_host_bridge *bridge, struct plda_pcie_rp *port); int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops, diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index 9d9596947350..8df064b62a2f 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -17,6 +17,8 @@ #include <linux/rculist.h> #include <linux/rcupdate.h> +#include <xen/xen.h> + #include <asm/irqdomain.h> #define VMD_CFGBAR 0 @@ -125,7 +127,7 @@ struct vmd_irq_list { struct vmd_dev { struct pci_dev *dev; - spinlock_t cfg_lock; + raw_spinlock_t cfg_lock; void __iomem *cfgbar; int msix_count; @@ -391,7 +393,7 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg, if (!addr) return -EFAULT; - spin_lock_irqsave(&vmd->cfg_lock, flags); + raw_spin_lock_irqsave(&vmd->cfg_lock, flags); switch (len) { case 1: *value = readb(addr); @@ -406,7 +408,7 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg, ret = -EINVAL; break; } - spin_unlock_irqrestore(&vmd->cfg_lock, flags); + raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags); return ret; } @@ -426,7 +428,7 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg, if (!addr) return -EFAULT; - spin_lock_irqsave(&vmd->cfg_lock, flags); + raw_spin_lock_irqsave(&vmd->cfg_lock, flags); switch (len) { case 1: writeb(value, addr); @@ -444,7 +446,7 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg, ret = -EINVAL; break; } - spin_unlock_irqrestore(&vmd->cfg_lock, flags); + raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags); return ret; } @@ -970,6 +972,24 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) struct vmd_dev *vmd; int err; + if (xen_domain()) { + /* + * Xen doesn't have knowledge about devices in the VMD bus + * because the config space of devices behind the VMD bridge is + * not known to Xen, and hence Xen cannot discover or configure + * them in any way. + * + * Bypass of MSI remapping won't work in that case as direct + * write by Linux to the MSI entries won't result in functional + * interrupts, as Xen is the entity that manages the host + * interrupt controller and must configure interrupts. However + * multiplexing of interrupts by the VMD bridge will work under + * Xen, so force the usage of that mode which must always be + * supported by VMD bridges. 
+	 */
+	features &= ~VMD_FEAT_CAN_BYPASS_MSI_REMAP;
+	}
+
 	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
 		return -ENOMEM;
 
@@ -1009,7 +1029,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	if (features & VMD_FEAT_OFFSET_FIRST_VECTOR)
 		vmd->first_vec = 1;
 
-	spin_lock_init(&vmd->cfg_lock);
+	raw_spin_lock_init(&vmd->cfg_lock);
 	pci_set_drvdata(dev, vmd);
 	err = vmd_enable_domain(vmd, features);
 	if (err)
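
On the pci-brcmstb.c hunks above: six per-SoC fields copied at probe time collapse into a single pointer to the const OF match data, and every call site dereferences pcie->cfg instead. A minimal sketch of the pattern, using hypothetical foo_* names rather than the driver's real structs:

#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_pcie;

struct foo_cfg {			/* one const instance per SoC */
	unsigned int soc_base;
	void (*bridge_sw_init_set)(struct foo_pcie *pcie, u32 val);
};

struct foo_pcie {
	struct device *dev;
	const struct foo_cfg *cfg;	/* borrowed from the match table */
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_pcie *pcie;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	/* one assignment instead of copying each field */
	pcie->cfg = of_device_get_match_data(&pdev->dev);
	if (!pcie->cfg)
		return -EINVAL;

	pcie->dev = &pdev->dev;
	pcie->cfg->bridge_sw_init_set(pcie, 0);

	return 0;
}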
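
On the pcie-mediatek-gen3.c PERST# hunk: the sequence is now gated by per-SoC flags, and the 100 ms delay is spelled PCIE_T_PVPERL_MS (the TPVPERL value defined in drivers/pci/pci.h) instead of a bare constant. The shape of the quirk-flag pattern, reduced to a sketch with hypothetical names:

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/types.h>

#define FOO_T_PVPERL_MS		100	/* TPVPERL from the PCIe CEM spec */

enum foo_soc_flags {
	FOO_SKIP_PERST	= BIT(0),	/* SoC handles PERST# elsewhere,
					 * e.g. EN7581 via clk callbacks */
};

struct foo_soc {
	u32 flags;
};

static void foo_toggle_perst(const struct foo_soc *soc,
			     void (*set_perst)(bool asserted))
{
	if (soc->flags & FOO_SKIP_PERST)
		return;

	set_perst(true);
	/* power and refclk must be stable before PERST# deasserts */
	msleep(FOO_T_PVPERL_MS);
	set_perst(false);
}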
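
On the EN7581 PBus hunk above: the mask written through the syscon regmap is GENMASK(31, __fls(size)), i.e. presumably only the address bits above the window size take part in the fabric's address match. A worked example, assuming the power-of-two window size the code relies on; the match semantics in the comment are an inference about the hardware, not taken from the patch:

#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/types.h>

/*
 * size = 0x20000000 (512 MiB): __fls(size) = 29,
 * GENMASK(31, 29) = 0xe0000000.
 *
 * An address A would then fall in the window when
 * (A & mask) == (base & mask).
 */
static u32 foo_pbus_mask(u32 size)
{
	return GENMASK(31, __fls(size));
}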
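
On the pcie-mediatek.c and pcie-mt7621.c hunks: for_each_available_child_of_node_scoped() is what allows deleting the of_node_put() calls in the error paths, because the scoped variant drops the child reference automatically when the loop variable goes out of scope. Sketch, with a hypothetical parse function:

#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>

static int foo_parse_ports(struct device *dev)
{
	for_each_available_child_of_node_scoped(dev->of_node, child) {
		int ret = of_pci_get_devfn(child);

		/* early return is safe: no manual of_node_put(child) */
		if (ret < 0)
			return dev_err_probe(dev, ret,
					     "failed to get devfn\n");

		/* ... set up the port at PCI_SLOT(ret) ... */
	}

	return 0;
}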
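
On the one-line pcie-rockchip-ep.c fix above (err = -ENOMEM): jumping to an unwind label while err still holds the value of the last successful call makes the function report success, or return uninitialized garbage, on a failed allocation. The bug class in miniature, with hypothetical names:

#include <linux/errno.h>
#include <linux/sizes.h>
#include <linux/slab.h>

struct foo {
	void *win;
	void *irq_addr;
};

static int foo_init_mem(struct foo *f)
{
	int err;

	f->win = kzalloc(SZ_4K, GFP_KERNEL);
	if (!f->win)
		return -ENOMEM;

	f->irq_addr = kzalloc(SZ_4K, GFP_KERNEL);
	if (!f->irq_addr) {
		err = -ENOMEM;	/* the missing assignment: without it,
				 * 'err' is uninitialized on this path */
		goto err_free_win;
	}

	return 0;

err_free_win:
	kfree(f->win);
	return err;
}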
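
On the pcie-rockchip.c reset conversion: seven individual devm_reset_control_get_exclusive() calls become two bulk gets, and dev_err_probe() absorbs the open-coded -EPROBE_DEFER filtering. The bulk helpers operate on the array in order, so the rule in the deleted "don't reorder the deassert sequence" comment appears to be carried by the ordering of rockchip_pci_core_rsts itself. The pattern in miniature, with hypothetical names:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/reset.h>

static const char * const foo_core_rsts[] = {
	"mgmt-sticky", "core", "mgmt", "pipe",	/* deassert order matters */
};

struct foo {
	struct reset_control_bulk_data core_rsts[ARRAY_SIZE(foo_core_rsts)];
};

static int foo_get_resets(struct device *dev, struct foo *f)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(foo_core_rsts); i++)
		f->core_rsts[i].id = foo_core_rsts[i];

	err = devm_reset_control_bulk_get_exclusive(dev,
						    ARRAY_SIZE(foo_core_rsts),
						    f->core_rsts);
	if (err)
		/* silent on -EPROBE_DEFER, logs otherwise; returns err */
		return dev_err_probe(dev, err, "cannot get core resets\n");

	return 0;
}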
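
Likewise for the clocks in pcie-rockchip.c: devm_clk_bulk_get_all() discovers every clock attached to the node, removing the hard-coded aclk/aclk-perf/hclk/pm names, and prepare+enable collapse into one call with a single teardown counterpart. Sketch, hypothetical names again:

#include <linux/clk.h>
#include <linux/device.h>

struct foo {
	struct clk_bulk_data *clks;
	int num_clks;
};

static int foo_clks_on(struct device *dev, struct foo *f)
{
	f->num_clks = devm_clk_bulk_get_all(dev, &f->clks);
	if (f->num_clks < 0)
		return dev_err_probe(dev, f->num_clks,
				     "failed to get clocks\n");

	return clk_bulk_prepare_enable(f->num_clks, f->clks);
}

static void foo_clks_off(struct foo *f)
{
	/* single counterpart to clk_bulk_prepare_enable() */
	clk_bulk_disable_unprepare(f->num_clks, f->clks);
}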
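
On the pcie-xilinx-cpm.c rework: per-variant register offsets move into struct xilinx_cpm_variant, so handlers test the data (if (variant->ir_status)) instead of comparing version enums at each site; a zero offset means the register does not exist on that variant. Reduced to a sketch with hypothetical names:

#include <linux/io.h>
#include <linux/types.h>

struct foo_variant {
	u32 ir_status;		/* 0: variant has no such register */
};

static void foo_ack_local_errors(void __iomem *base,
				 const struct foo_variant *variant)
{
	u32 val;

	if (!variant->ir_status)	/* replaces 'version == CPM5' tests */
		return;

	val = readl_relaxed(base + variant->ir_status);
	if (val)			/* write-1-to-clear */
		writel_relaxed(val, base + variant->ir_status);
}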
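
On the pcie-microchip-host.c non-coherent configuration described above: the two fixed ATR tables implement CPU 0x0-0x3fffffff -> PCIe 0x80000000 and CPU 0x40000000-0x7fffffff -> PCIe 0xc0000000, matching the two mc_pcie_setup_inbound_atr() calls. The same mapping expressed as data, with a hypothetical table type for illustration:

#include <linux/sizes.h>
#include <linux/types.h>

struct foo_ib_win {
	u64 axi_addr;		/* CPU/AXI-side base */
	u64 pcie_addr;		/* PCIe-side base */
	u64 size;
};

/* MPFS_NC_BOUNCE_ADDR is 0x80000000 in the hunk above */
static const struct foo_ib_win mpfs_nc_wins[] = {
	{ .axi_addr = 0,     .pcie_addr = 0x80000000, .size = SZ_1G },
	{ .axi_addr = SZ_1G, .pcie_addr = 0xc0000000, .size = SZ_1G },
};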
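
On the pcie-plda.h and pcie-plda-host.c hunks: ATR_SIZE_SHIFT and the bare enable constant become proper bitfields, which makes the register layout explicit: bit 0 enables the entry, bits 6:1 hold the window size encoded as log2(size) - 1, and the remaining bits carry the 4K-aligned low address. A worked sketch of the encoding (field names as quoted in the diff; the helper itself is hypothetical):

#include <linux/align.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/log2.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <linux/wordpart.h>

#define ATR_SIZE_MASK	GENMASK(6, 1)
#define ATR_IMPL_ENABLE	BIT(0)

static u32 foo_atr_srcaddr_param(u64 src_addr, u64 size)
{
	u32 atr_sz = ilog2(size) - 1;	/* SZ_1G: ilog2() = 30, field = 29 */
	u32 val;

	/* the low address bits share the register with the fields */
	val = ALIGN_DOWN(lower_32_bits(src_addr), SZ_4K);
	val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
	val |= ATR_IMPL_ENABLE;

	return val;
}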
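
On the vmd.c locking hunks: cfg_lock becomes a raw_spinlock_t. The likely motivation is PREEMPT_RT, where a plain spinlock_t turns into a sleeping lock that config-space accessors reached from non-preemptible context cannot take; raw_spinlock_t always busy-waits. The call sites change only by the raw_ prefix, as in this hypothetical accessor:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RAW_SPINLOCK(foo_cfg_lock);

static u32 foo_cfg_read(void __iomem *addr)
{
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&foo_cfg_lock, flags);
	val = readl(addr);
	raw_spin_unlock_irqrestore(&foo_cfg_lock, flags);

	return val;
}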