Diffstat (limited to 'drivers/pci/controller')
63 files changed, 2159 insertions, 1343 deletions
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index adddf21fa381..f18c3725ef80 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -99,6 +99,14 @@ config PCIE_XILINX Say 'Y' here if you want kernel to support the Xilinx AXI PCIe Host Bridge driver. +config PCIE_XILINX_CPM + bool "Xilinx Versal CPM host bridge support" + depends on ARCH_ZYNQMP || COMPILE_TEST + select PCI_HOST_COMMON + help + Say 'Y' here if you want kernel support for the + Xilinx Versal CPM host bridge. + config PCI_XGENE bool "X-Gene PCIe controller" depends on ARM64 || COMPILE_TEST diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile index efd9733ead26..bcdbf49ab1e4 100644 --- a/drivers/pci/controller/Makefile +++ b/drivers/pci/controller/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o +obj-$(CONFIG_PCIE_XILINX_CPM) += pcie-xilinx-cpm.o obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig index b76b3cf55ce5..5d30564190e1 100644 --- a/drivers/pci/controller/cadence/Kconfig +++ b/drivers/pci/controller/cadence/Kconfig @@ -42,4 +42,27 @@ config PCIE_CADENCE_PLAT_EP endpoint mode. This PCIe controller may be embedded into many different vendors SoCs. +config PCI_J721E + bool + +config PCI_J721E_HOST + bool "TI J721E PCIe platform host controller" + depends on OF + select PCIE_CADENCE_HOST + select PCI_J721E + help + Say Y here if you want to support the TI J721E PCIe platform + controller in host mode. TI J721E PCIe controller uses Cadence PCIe + core. + +config PCI_J721E_EP + bool "TI J721E PCIe platform endpoint controller" + depends on OF + depends on PCI_ENDPOINT + select PCIE_CADENCE_EP + select PCI_J721E + help + Say Y here if you want to support the TI J721E PCIe platform + controller in endpoint mode. TI J721E PCIe controller uses Cadence PCIe + core. 
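The new PCI_J721E_HOST and PCI_J721E_EP options above enable the same glue driver in two roles, each selecting the corresponding Cadence core. As an aside, a minimal sketch (not the actual driver, with placeholder "example,*" compatibles and stub init functions) of the pattern pci-j721e.c uses further down — picking Root Complex or Endpoint mode from the devicetree compatible via of_device_id match data:

/* Sketch only: placeholder names, stubbed bring-up paths. */
#include <linux/of_device.h>
#include <linux/platform_device.h>

enum example_pcie_mode {
	EXAMPLE_PCIE_RC,
	EXAMPLE_PCIE_EP,
};

struct example_pcie_data {
	enum example_pcie_mode mode;
};

static const struct example_pcie_data example_rc_data = { .mode = EXAMPLE_PCIE_RC };
static const struct example_pcie_data example_ep_data = { .mode = EXAMPLE_PCIE_EP };

static const struct of_device_id example_pcie_of_match[] = {
	{ .compatible = "example,pcie-host", .data = &example_rc_data },
	{ .compatible = "example,pcie-ep",   .data = &example_ep_data },
	{ },
};

/* Stubs standing in for host/endpoint bring-up. */
static int example_pcie_host_init(struct platform_device *pdev) { return 0; }
static int example_pcie_ep_init(struct platform_device *pdev) { return 0; }

static int example_pcie_probe(struct platform_device *pdev)
{
	const struct example_pcie_data *data;

	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -EINVAL;

	/* Branch into host or endpoint bring-up based on the match data. */
	if (data->mode == EXAMPLE_PCIE_RC)
		return example_pcie_host_init(pdev);

	return example_pcie_ep_init(pdev);
}

static struct platform_driver example_pcie_driver = {
	.probe = example_pcie_probe,
	.driver = {
		.name = "example-pcie",
		.of_match_table = example_pcie_of_match,
	},
};
builtin_platform_driver(example_pcie_driver);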
endmenu diff --git a/drivers/pci/controller/cadence/Makefile b/drivers/pci/controller/cadence/Makefile index 232a3f20876a..9bac5fb2f13d 100644 --- a/drivers/pci/controller/cadence/Makefile +++ b/drivers/pci/controller/cadence/Makefile @@ -3,3 +3,4 @@ obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o obj-$(CONFIG_PCIE_CADENCE_PLAT) += pcie-cadence-plat.o +obj-$(CONFIG_PCI_J721E) += pci-j721e.o diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c new file mode 100644 index 000000000000..586b9d69fa5e --- /dev/null +++ b/drivers/pci/controller/cadence/pci-j721e.c @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * pci-j721e - PCIe controller driver for TI's J721E SoCs + * + * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com + * Author: Kishon Vijay Abraham I <kishon@ti.com> + */ + +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/io.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/mfd/syscon.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/pci.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> + +#include "../../pci.h" +#include "pcie-cadence.h" + +#define ENABLE_REG_SYS_2 0x108 +#define STATUS_REG_SYS_2 0x508 +#define STATUS_CLR_REG_SYS_2 0x708 +#define LINK_DOWN BIT(1) + +#define J721E_PCIE_USER_CMD_STATUS 0x4 +#define LINK_TRAINING_ENABLE BIT(0) + +#define J721E_PCIE_USER_LINKSTATUS 0x14 +#define LINK_STATUS GENMASK(1, 0) + +enum link_status { + NO_RECEIVERS_DETECTED, + LINK_TRAINING_IN_PROGRESS, + LINK_UP_DL_IN_PROGRESS, + LINK_UP_DL_COMPLETED, +}; + +#define J721E_MODE_RC BIT(7) +#define LANE_COUNT_MASK BIT(8) +#define LANE_COUNT(n) ((n) << 8) + +#define GENERATION_SEL_MASK GENMASK(1, 0) + +#define MAX_LANES 2 + +struct j721e_pcie { + struct device *dev; + u32 mode; + u32 num_lanes; + struct cdns_pcie *cdns_pcie; + void __iomem *user_cfg_base; + void __iomem *intd_cfg_base; +}; + +enum j721e_pcie_mode { + PCI_MODE_RC, + PCI_MODE_EP, +}; + +struct j721e_pcie_data { + enum j721e_pcie_mode mode; +}; + +static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset) +{ + return readl(pcie->user_cfg_base + offset); +} + +static inline void j721e_pcie_user_writel(struct j721e_pcie *pcie, u32 offset, + u32 value) +{ + writel(value, pcie->user_cfg_base + offset); +} + +static inline u32 j721e_pcie_intd_readl(struct j721e_pcie *pcie, u32 offset) +{ + return readl(pcie->intd_cfg_base + offset); +} + +static inline void j721e_pcie_intd_writel(struct j721e_pcie *pcie, u32 offset, + u32 value) +{ + writel(value, pcie->intd_cfg_base + offset); +} + +static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv) +{ + struct j721e_pcie *pcie = priv; + struct device *dev = pcie->dev; + u32 reg; + + reg = j721e_pcie_intd_readl(pcie, STATUS_REG_SYS_2); + if (!(reg & LINK_DOWN)) + return IRQ_NONE; + + dev_err(dev, "LINK DOWN!\n"); + + j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, LINK_DOWN); + return IRQ_HANDLED; +} + +static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie) +{ + u32 reg; + + reg = j721e_pcie_intd_readl(pcie, ENABLE_REG_SYS_2); + reg |= LINK_DOWN; + j721e_pcie_intd_writel(pcie, ENABLE_REG_SYS_2, reg); +} + +static int j721e_pcie_start_link(struct cdns_pcie *cdns_pcie) +{ + struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev); + u32 reg; + + reg = j721e_pcie_user_readl(pcie, 
J721E_PCIE_USER_CMD_STATUS); + reg |= LINK_TRAINING_ENABLE; + j721e_pcie_user_writel(pcie, J721E_PCIE_USER_CMD_STATUS, reg); + + return 0; +} + +static void j721e_pcie_stop_link(struct cdns_pcie *cdns_pcie) +{ + struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev); + u32 reg; + + reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_CMD_STATUS); + reg &= ~LINK_TRAINING_ENABLE; + j721e_pcie_user_writel(pcie, J721E_PCIE_USER_CMD_STATUS, reg); +} + +static bool j721e_pcie_link_up(struct cdns_pcie *cdns_pcie) +{ + struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev); + u32 reg; + + reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_LINKSTATUS); + reg &= LINK_STATUS; + if (reg == LINK_UP_DL_COMPLETED) + return true; + + return false; +} + +static const struct cdns_pcie_ops j721e_pcie_ops = { + .start_link = j721e_pcie_start_link, + .stop_link = j721e_pcie_stop_link, + .link_up = j721e_pcie_link_up, +}; + +static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon) +{ + struct device *dev = pcie->dev; + u32 mask = J721E_MODE_RC; + u32 mode = pcie->mode; + u32 val = 0; + int ret = 0; + + if (mode == PCI_MODE_RC) + val = J721E_MODE_RC; + + ret = regmap_update_bits(syscon, 0, mask, val); + if (ret) + dev_err(dev, "failed to set pcie mode\n"); + + return ret; +} + +static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie, + struct regmap *syscon) +{ + struct device *dev = pcie->dev; + struct device_node *np = dev->of_node; + int link_speed; + u32 val = 0; + int ret; + + link_speed = of_pci_get_max_link_speed(np); + if (link_speed < 2) + link_speed = 2; + + val = link_speed - 1; + ret = regmap_update_bits(syscon, 0, GENERATION_SEL_MASK, val); + if (ret) + dev_err(dev, "failed to set link speed\n"); + + return ret; +} + +static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie, + struct regmap *syscon) +{ + struct device *dev = pcie->dev; + u32 lanes = pcie->num_lanes; + u32 val = 0; + int ret; + + val = LANE_COUNT(lanes - 1); + ret = regmap_update_bits(syscon, 0, LANE_COUNT_MASK, val); + if (ret) + dev_err(dev, "failed to set link count\n"); + + return ret; +} + +static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie) +{ + struct device *dev = pcie->dev; + struct device_node *node = dev->of_node; + struct regmap *syscon; + int ret; + + syscon = syscon_regmap_lookup_by_phandle(node, "ti,syscon-pcie-ctrl"); + if (IS_ERR(syscon)) { + dev_err(dev, "Unable to get ti,syscon-pcie-ctrl regmap\n"); + return PTR_ERR(syscon); + } + + ret = j721e_pcie_set_mode(pcie, syscon); + if (ret < 0) { + dev_err(dev, "Failed to set pci mode\n"); + return ret; + } + + ret = j721e_pcie_set_link_speed(pcie, syscon); + if (ret < 0) { + dev_err(dev, "Failed to set link speed\n"); + return ret; + } + + ret = j721e_pcie_set_lane_count(pcie, syscon); + if (ret < 0) { + dev_err(dev, "Failed to set num-lanes\n"); + return ret; + } + + return 0; +} + +static int cdns_ti_pcie_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *value) +{ + if (pci_is_root_bus(bus)) + return pci_generic_config_read32(bus, devfn, where, size, + value); + + return pci_generic_config_read(bus, devfn, where, size, value); +} + +static int cdns_ti_pcie_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 value) +{ + if (pci_is_root_bus(bus)) + return pci_generic_config_write32(bus, devfn, where, size, + value); + + return pci_generic_config_write(bus, devfn, where, size, value); +} + +static struct pci_ops cdns_ti_pcie_host_ops = { + .map_bus = cdns_pci_map_bus, + .read 
= cdns_ti_pcie_config_read, + .write = cdns_ti_pcie_config_write, +}; + +static const struct j721e_pcie_data j721e_pcie_rc_data = { + .mode = PCI_MODE_RC, +}; + +static const struct j721e_pcie_data j721e_pcie_ep_data = { + .mode = PCI_MODE_EP, +}; + +static const struct of_device_id of_j721e_pcie_match[] = { + { + .compatible = "ti,j721e-pcie-host", + .data = &j721e_pcie_rc_data, + }, + { + .compatible = "ti,j721e-pcie-ep", + .data = &j721e_pcie_ep_data, + }, + {}, +}; + +static int j721e_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct pci_host_bridge *bridge; + struct j721e_pcie_data *data; + struct cdns_pcie *cdns_pcie; + struct j721e_pcie *pcie; + struct cdns_pcie_rc *rc; + struct cdns_pcie_ep *ep; + struct gpio_desc *gpiod; + void __iomem *base; + u32 num_lanes; + u32 mode; + int ret; + int irq; + + data = (struct j721e_pcie_data *)of_device_get_match_data(dev); + if (!data) + return -EINVAL; + + mode = (u32)data->mode; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pcie->dev = dev; + pcie->mode = mode; + + base = devm_platform_ioremap_resource_byname(pdev, "intd_cfg"); + if (IS_ERR(base)) + return PTR_ERR(base); + pcie->intd_cfg_base = base; + + base = devm_platform_ioremap_resource_byname(pdev, "user_cfg"); + if (IS_ERR(base)) + return PTR_ERR(base); + pcie->user_cfg_base = base; + + ret = of_property_read_u32(node, "num-lanes", &num_lanes); + if (ret || num_lanes > MAX_LANES) + num_lanes = 1; + pcie->num_lanes = num_lanes; + + if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48))) + return -EINVAL; + + irq = platform_get_irq_byname(pdev, "link_state"); + if (irq < 0) + return irq; + + dev_set_drvdata(dev, pcie); + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync failed\n"); + goto err_get_sync; + } + + ret = j721e_pcie_ctrl_init(pcie); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync failed\n"); + goto err_get_sync; + } + + ret = devm_request_irq(dev, irq, j721e_pcie_link_irq_handler, 0, + "j721e-pcie-link-down-irq", pcie); + if (ret < 0) { + dev_err(dev, "failed to request link state IRQ %d\n", irq); + goto err_get_sync; + } + + j721e_pcie_config_link_irq(pcie); + + switch (mode) { + case PCI_MODE_RC: + if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)) { + ret = -ENODEV; + goto err_get_sync; + } + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc)); + if (!bridge) { + ret = -ENOMEM; + goto err_get_sync; + } + + bridge->ops = &cdns_ti_pcie_host_ops; + rc = pci_host_bridge_priv(bridge); + + cdns_pcie = &rc->pcie; + cdns_pcie->dev = dev; + cdns_pcie->ops = &j721e_pcie_ops; + pcie->cdns_pcie = cdns_pcie; + + gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(gpiod)) { + ret = PTR_ERR(gpiod); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to get reset GPIO\n"); + goto err_get_sync; + } + + ret = cdns_pcie_init_phy(dev, cdns_pcie); + if (ret) { + dev_err(dev, "Failed to init phy\n"); + goto err_get_sync; + } + + /* + * "Power Sequencing and Reset Signal Timings" table in + * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 3.0 + * indicates PERST# should be deasserted after minimum of 100us + * once REFCLK is stable. The REFCLK to the connector in RC + * mode is selected while enabling the PHY. So deassert PERST# + * after 100 us. 
+ */ + if (gpiod) { + usleep_range(100, 200); + gpiod_set_value_cansleep(gpiod, 1); + } + + ret = cdns_pcie_host_setup(rc); + if (ret < 0) + goto err_pcie_setup; + + break; + case PCI_MODE_EP: + if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP)) { + ret = -ENODEV; + goto err_get_sync; + } + + ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); + if (!ep) { + ret = -ENOMEM; + goto err_get_sync; + } + + cdns_pcie = &ep->pcie; + cdns_pcie->dev = dev; + cdns_pcie->ops = &j721e_pcie_ops; + pcie->cdns_pcie = cdns_pcie; + + ret = cdns_pcie_init_phy(dev, cdns_pcie); + if (ret) { + dev_err(dev, "Failed to init phy\n"); + goto err_get_sync; + } + + ret = cdns_pcie_ep_setup(ep); + if (ret < 0) + goto err_pcie_setup; + + break; + default: + dev_err(dev, "INVALID device type %d\n", mode); + } + + return 0; + +err_pcie_setup: + cdns_pcie_disable_phy(cdns_pcie); + +err_get_sync: + pm_runtime_put(dev); + pm_runtime_disable(dev); + + return ret; +} + +static int j721e_pcie_remove(struct platform_device *pdev) +{ + struct j721e_pcie *pcie = platform_get_drvdata(pdev); + struct cdns_pcie *cdns_pcie = pcie->cdns_pcie; + struct device *dev = &pdev->dev; + + cdns_pcie_disable_phy(cdns_pcie); + pm_runtime_put(dev); + pm_runtime_disable(dev); + + return 0; +} + +static struct platform_driver j721e_pcie_driver = { + .probe = j721e_pcie_probe, + .remove = j721e_pcie_remove, + .driver = { + .name = "j721e-pcie", + .of_match_table = of_j721e_pcie_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(j721e_pcie_driver); diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c index 1c15c8352125..254a3e1eff50 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-ep.c +++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c @@ -8,7 +8,6 @@ #include <linux/of.h> #include <linux/pci-epc.h> #include <linux/platform_device.h> -#include <linux/pm_runtime.h> #include <linux/sizes.h> #include "pcie-cadence.h" @@ -52,6 +51,7 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, struct pci_epf_bar *epf_bar) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); + struct cdns_pcie_epf *epf = &ep->epf[fn]; struct cdns_pcie *pcie = &ep->pcie; dma_addr_t bar_phys = epf_bar->phys_addr; enum pci_barno bar = epf_bar->barno; @@ -112,6 +112,8 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl)); cdns_pcie_writel(pcie, reg, cfg); + epf->epf_bar[bar] = epf_bar; + return 0; } @@ -119,6 +121,7 @@ static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, struct pci_epf_bar *epf_bar) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); + struct cdns_pcie_epf *epf = &ep->epf[fn]; struct cdns_pcie *pcie = &ep->pcie; enum pci_barno bar = epf_bar->barno; u32 reg, cfg, b, ctrl; @@ -140,6 +143,8 @@ static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0); + + epf->epf_bar[bar] = NULL; } static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr, @@ -156,7 +161,7 @@ static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr, return -EINVAL; } - cdns_pcie_set_outbound_region(pcie, fn, r, false, addr, pci_addr, size); + cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size); set_bit(r, &ep->ob_region_map); ep->ob_addr[r] = addr; @@ -225,10 +230,55 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn) return mme; } 
+static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no) +{ + struct cdns_pcie_ep *ep = epc_get_drvdata(epc); + struct cdns_pcie *pcie = &ep->pcie; + u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET; + u32 val, reg; + + reg = cap + PCI_MSIX_FLAGS; + val = cdns_pcie_ep_fn_readw(pcie, func_no, reg); + if (!(val & PCI_MSIX_FLAGS_ENABLE)) + return -EINVAL; + + val &= PCI_MSIX_FLAGS_QSIZE; + + return val; +} + +static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u16 interrupts, + enum pci_barno bir, u32 offset) +{ + struct cdns_pcie_ep *ep = epc_get_drvdata(epc); + struct cdns_pcie *pcie = &ep->pcie; + u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET; + u32 val, reg; + + reg = cap + PCI_MSIX_FLAGS; + val = cdns_pcie_ep_fn_readw(pcie, fn, reg); + val &= ~PCI_MSIX_FLAGS_QSIZE; + val |= interrupts; + cdns_pcie_ep_fn_writew(pcie, fn, reg, val); + + /* Set MSIX BAR and offset */ + reg = cap + PCI_MSIX_TABLE; + val = offset | bir; + cdns_pcie_ep_fn_writel(pcie, fn, reg, val); + + /* Set PBA BAR and offset. BAR must match MSIX BAR */ + reg = cap + PCI_MSIX_PBA; + val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir; + cdns_pcie_ep_fn_writel(pcie, fn, reg, val); + + return 0; +} + static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx, bool is_asserted) { struct cdns_pcie *pcie = &ep->pcie; + unsigned long flags; u32 offset; u16 status; u8 msg_code; @@ -239,7 +289,7 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY || ep->irq_pci_fn != fn)) { /* First region was reserved for IRQ writes. */ - cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, 0, + cdns_pcie_set_outbound_region_for_normal_msg(pcie, 0, fn, 0, ep->irq_phys_addr); ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY; ep->irq_pci_fn = fn; @@ -253,11 +303,13 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, msg_code = MSG_CODE_DEASSERT_INTA + intx; } + spin_lock_irqsave(&ep->lock, flags); status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS); if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) { status ^= PCI_STATUS_INTERRUPT; cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status); } + spin_unlock_irqrestore(&ep->lock, flags); offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) | CDNS_PCIE_NORMAL_MSG_CODE(msg_code) | @@ -318,7 +370,7 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) || ep->irq_pci_fn != fn)) { /* First region was reserved for IRQ writes. */ - cdns_pcie_set_outbound_region(pcie, fn, 0, + cdns_pcie_set_outbound_region(pcie, 0, fn, 0, false, ep->irq_phys_addr, pci_addr & ~pci_addr_mask, @@ -331,6 +383,51 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, return 0; } +static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, + u16 interrupt_num) +{ + u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET; + u32 tbl_offset, msg_data, reg; + struct cdns_pcie *pcie = &ep->pcie; + struct pci_epf_msix_tbl *msix_tbl; + struct cdns_pcie_epf *epf; + u64 pci_addr_mask = 0xff; + u64 msg_addr; + u16 flags; + u8 bir; + + /* Check whether the MSI-X feature has been enabled by the PCI host. 
*/ + flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS); + if (!(flags & PCI_MSIX_FLAGS_ENABLE)) + return -EINVAL; + + reg = cap + PCI_MSIX_TABLE; + tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg); + bir = tbl_offset & PCI_MSIX_TABLE_BIR; + tbl_offset &= PCI_MSIX_TABLE_OFFSET; + + epf = &ep->epf[fn]; + msix_tbl = epf->epf_bar[bir]->addr + tbl_offset; + msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr; + msg_data = msix_tbl[(interrupt_num - 1)].msg_data; + + /* Set the outbound region if needed. */ + if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) || + ep->irq_pci_fn != fn) { + /* First region was reserved for IRQ writes. */ + cdns_pcie_set_outbound_region(pcie, 0, fn, 0, + false, + ep->irq_phys_addr, + msg_addr & ~pci_addr_mask, + pci_addr_mask + 1); + ep->irq_pci_addr = (msg_addr & ~pci_addr_mask); + ep->irq_pci_fn = fn; + } + writel(msg_data, ep->irq_cpu_addr + (msg_addr & pci_addr_mask)); + + return 0; +} + static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, enum pci_epc_irq_type type, u16 interrupt_num) @@ -344,6 +441,9 @@ static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, case PCI_EPC_IRQ_MSI: return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num); + case PCI_EPC_IRQ_MSIX: + return cdns_pcie_ep_send_msix_irq(ep, fn, interrupt_num); + default: break; } @@ -355,8 +455,10 @@ static int cdns_pcie_ep_start(struct pci_epc *epc) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); struct cdns_pcie *pcie = &ep->pcie; + struct device *dev = pcie->dev; struct pci_epf *epf; u32 cfg; + int ret; /* * BIT(0) is hardwired to 1, hence function 0 is always enabled @@ -367,13 +469,19 @@ static int cdns_pcie_ep_start(struct pci_epc *epc) cfg |= BIT(epf->func_no); cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg); + ret = cdns_pcie_start_link(pcie); + if (ret) { + dev_err(dev, "Failed to start link\n"); + return ret; + } + return 0; } static const struct pci_epc_features cdns_pcie_epc_features = { .linkup_notifier = false, .msi_capable = true, - .msix_capable = false, + .msix_capable = true, }; static const struct pci_epc_features* @@ -390,6 +498,8 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = { .unmap_addr = cdns_pcie_ep_unmap_addr, .set_msi = cdns_pcie_ep_set_msi, .get_msi = cdns_pcie_ep_get_msi, + .set_msix = cdns_pcie_ep_set_msix, + .get_msix = cdns_pcie_ep_get_msix, .raise_irq = cdns_pcie_ep_raise_irq, .start = cdns_pcie_ep_start, .get_features = cdns_pcie_ep_get_features, @@ -408,8 +518,7 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep) pcie->is_rc = false; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg"); - pcie->reg_base = devm_ioremap_resource(dev, res); + pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg"); if (IS_ERR(pcie->reg_base)) { dev_err(dev, "missing \"reg\"\n"); return PTR_ERR(pcie->reg_base); @@ -440,8 +549,7 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep) epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops); if (IS_ERR(epc)) { dev_err(dev, "failed to create epc device\n"); - ret = PTR_ERR(epc); - goto err_init; + return PTR_ERR(epc); } epc_set_drvdata(epc, ep); @@ -449,11 +557,16 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep) if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0) epc->max_functions = 1; + ep->epf = devm_kcalloc(dev, epc->max_functions, sizeof(*ep->epf), + GFP_KERNEL); + if (!ep->epf) + return -ENOMEM; + ret = pci_epc_mem_init(epc, pcie->mem_res->start, resource_size(pcie->mem_res), PAGE_SIZE); if (ret < 0) { dev_err(dev, "failed to initialize the memory 
space\n"); - goto err_init; + return ret; } ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr, @@ -466,14 +579,12 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep) ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE; /* Reserve region 0 for IRQs */ set_bit(0, &ep->ob_region_map); + spin_lock_init(&ep->lock); return 0; free_epc_mem: pci_epc_mem_exit(epc); - err_init: - pm_runtime_put_sync(dev); - return ret; } diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c index 8c2543f28ba0..4550e0d469ca 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-host.c +++ b/drivers/pci/controller/cadence/pcie-cadence-host.c @@ -3,16 +3,28 @@ // Cadence PCIe host controller driver. // Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com> +#include <linux/delay.h> #include <linux/kernel.h> +#include <linux/list_sort.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/platform_device.h> -#include <linux/pm_runtime.h> #include "pcie-cadence.h" -static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, - int where) +static u64 bar_max_size[] = { + [RP_BAR0] = _ULL(128 * SZ_2G), + [RP_BAR1] = SZ_2G, + [RP_NO_BAR] = _BITULL(63), +}; + +static u8 bar_aperture_mask[] = { + [RP_BAR0] = 0x1F, + [RP_BAR1] = 0xF, +}; + +void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, + int where) { struct pci_host_bridge *bridge = pci_find_host_bridge(bus); struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge); @@ -20,7 +32,7 @@ static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, unsigned int busn = bus->number; u32 addr0, desc0; - if (busn == rc->bus_range->start) { + if (pci_is_root_bus(bus)) { /* * Only the root port (devfn == 0) is connected to this bus. * All other PCI devices are behind some bridge hence on another @@ -50,7 +62,7 @@ static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, * The bus number was already set once for all in desc1 by * cdns_pcie_host_init_address_translation(). 
*/ - if (busn == rc->bus_range->start + 1) + if (busn == bridge->busnr + 1) desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0; else desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1; @@ -70,6 +82,7 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) { struct cdns_pcie *pcie = &rc->pcie; u32 value, ctrl; + u32 id; /* * Set the root complex BAR configuration register: @@ -89,8 +102,12 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value); /* Set root port configuration space */ - if (rc->vendor_id != 0xffff) - cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id); + if (rc->vendor_id != 0xffff) { + id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) | + CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id); + cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id); + } + if (rc->device_id != 0xffff) cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id); @@ -101,19 +118,230 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) return 0; } -static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc) +static int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc, + enum cdns_pcie_rp_bar bar, + u64 cpu_addr, u64 size, + unsigned long flags) +{ + struct cdns_pcie *pcie = &rc->pcie; + u32 addr0, addr1, aperture, value; + + if (!rc->avail_ib_bar[bar]) + return -EBUSY; + + rc->avail_ib_bar[bar] = false; + + aperture = ilog2(size); + addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(aperture) | + (lower_32_bits(cpu_addr) & GENMASK(31, 8)); + addr1 = upper_32_bits(cpu_addr); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), addr0); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), addr1); + + if (bar == RP_NO_BAR) + return 0; + + value = cdns_pcie_readl(pcie, CDNS_PCIE_LM_RC_BAR_CFG); + value &= ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) | + LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) | + LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) | + LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) | + LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2)); + if (size + cpu_addr >= SZ_4G) { + if (!(flags & IORESOURCE_PREFETCH)) + value |= LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar); + value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar); + } else { + if (!(flags & IORESOURCE_PREFETCH)) + value |= LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar); + value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar); + } + + value |= LM_RC_BAR_CFG_APERTURE(bar, aperture); + cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value); + + return 0; +} + +static enum cdns_pcie_rp_bar +cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc, u64 size) +{ + enum cdns_pcie_rp_bar bar, sel_bar; + + sel_bar = RP_BAR_UNDEFINED; + for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) { + if (!rc->avail_ib_bar[bar]) + continue; + + if (size <= bar_max_size[bar]) { + if (sel_bar == RP_BAR_UNDEFINED) { + sel_bar = bar; + continue; + } + + if (bar_max_size[bar] < bar_max_size[sel_bar]) + sel_bar = bar; + } + } + + return sel_bar; +} + +static enum cdns_pcie_rp_bar +cdns_pcie_host_find_max_bar(struct cdns_pcie_rc *rc, u64 size) +{ + enum cdns_pcie_rp_bar bar, sel_bar; + + sel_bar = RP_BAR_UNDEFINED; + for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) { + if (!rc->avail_ib_bar[bar]) + continue; + + if (size >= bar_max_size[bar]) { + if (sel_bar == RP_BAR_UNDEFINED) { + sel_bar = bar; + continue; + } + + if (bar_max_size[bar] > bar_max_size[sel_bar]) + sel_bar = bar; + } + } + + return sel_bar; +} + +static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc, + struct resource_entry *entry) +{ + u64 cpu_addr, pci_addr, 
size, winsize; + struct cdns_pcie *pcie = &rc->pcie; + struct device *dev = pcie->dev; + enum cdns_pcie_rp_bar bar; + unsigned long flags; + int ret; + + cpu_addr = entry->res->start; + pci_addr = entry->res->start - entry->offset; + flags = entry->res->flags; + size = resource_size(entry->res); + + if (entry->offset) { + dev_err(dev, "PCI addr: %llx must be equal to CPU addr: %llx\n", + pci_addr, cpu_addr); + return -EINVAL; + } + + while (size > 0) { + /* + * Try to find a minimum BAR whose size is greater than + * or equal to the remaining resource_entry size. This will + * fail if the size of each of the available BARs is less than + * the remaining resource_entry size. + * If a minimum BAR is found, IB ATU will be configured and + * exited. + */ + bar = cdns_pcie_host_find_min_bar(rc, size); + if (bar != RP_BAR_UNDEFINED) { + ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, + size, flags); + if (ret) + dev_err(dev, "IB BAR: %d config failed\n", bar); + return ret; + } + + /* + * If the control reaches here, it would mean the remaining + * resource_entry size cannot be fitted in a single BAR. So we + * find a maximum BAR whose size is less than or equal to the + * remaining resource_entry size and split the resource entry + * so that part of resource entry is fitted inside the maximum + * BAR. The remaining size would be fitted during the next + * iteration of the loop. + * If a maximum BAR is not found, there is no way we can fit + * this resource_entry, so we error out. + */ + bar = cdns_pcie_host_find_max_bar(rc, size); + if (bar == RP_BAR_UNDEFINED) { + dev_err(dev, "No free BAR to map cpu_addr %llx\n", + cpu_addr); + return -EINVAL; + } + + winsize = bar_max_size[bar]; + ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, winsize, + flags); + if (ret) { + dev_err(dev, "IB BAR: %d config failed\n", bar); + return ret; + } + + size -= winsize; + cpu_addr += winsize; + } + + return 0; +} + +static int cdns_pcie_host_dma_ranges_cmp(void *priv, struct list_head *a, struct list_head *b) +{ + struct resource_entry *entry1, *entry2; + + entry1 = container_of(a, struct resource_entry, node); + entry2 = container_of(b, struct resource_entry, node); + + return resource_size(entry2->res) - resource_size(entry1->res); +} + +static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc) { struct cdns_pcie *pcie = &rc->pcie; - struct resource *mem_res = pcie->mem_res; - struct resource *bus_range = rc->bus_range; - struct resource *cfg_res = rc->cfg_res; struct device *dev = pcie->dev; struct device_node *np = dev->of_node; - struct of_pci_range_parser parser; - struct of_pci_range range; + struct pci_host_bridge *bridge; + struct resource_entry *entry; + u32 no_bar_nbits = 32; + int err; + + bridge = pci_host_bridge_from_priv(rc); + if (!bridge) + return -ENOMEM; + + if (list_empty(&bridge->dma_ranges)) { + of_property_read_u32(np, "cdns,no-bar-match-nbits", + &no_bar_nbits); + err = cdns_pcie_host_bar_ib_config(rc, RP_NO_BAR, 0x0, + (u64)1 << no_bar_nbits, 0); + if (err) + dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR); + return err; + } + + list_sort(NULL, &bridge->dma_ranges, cdns_pcie_host_dma_ranges_cmp); + + resource_list_for_each_entry(entry, &bridge->dma_ranges) { + err = cdns_pcie_host_bar_config(rc, entry); + if (err) + dev_err(dev, "Fail to configure IB using dma-ranges\n"); + return err; + } + + return 0; +} + +static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc) +{ + struct cdns_pcie *pcie = &rc->pcie; + struct pci_host_bridge *bridge = 
pci_host_bridge_from_priv(rc); + struct resource *cfg_res = rc->cfg_res; + struct resource_entry *entry; + u64 cpu_addr = cfg_res->start; u32 addr0, addr1, desc1; - u64 cpu_addr; - int r, err; + int r, err, busnr = 0; + + entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); + if (entry) + busnr = entry->res->start; /* * Reserve region 0 for PCI configure space accesses: @@ -121,81 +349,74 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc) * cdns_pci_map_bus(), other region registers are set here once for all. */ addr1 = 0; /* Should be programmed to zero. */ - desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus_range->start); + desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1); - cpu_addr = cfg_res->start - mem_res->start; + if (pcie->ops->cpu_addr_fixup) + cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr); + addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) | (lower_32_bits(cpu_addr) & GENMASK(31, 8)); addr1 = upper_32_bits(cpu_addr); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(0), addr0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(0), addr1); - err = of_pci_range_parser_init(&parser, np); - if (err) - return err; - r = 1; - for_each_of_pci_range(&parser, &range) { - bool is_io; - - if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM) - is_io = false; - else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO) - is_io = true; + resource_list_for_each_entry(entry, &bridge->windows) { + struct resource *res = entry->res; + u64 pci_addr = res->start - entry->offset; + + if (resource_type(res) == IORESOURCE_IO) + cdns_pcie_set_outbound_region(pcie, busnr, 0, r, + true, + pci_pio_to_address(res->start), + pci_addr, + resource_size(res)); else - continue; + cdns_pcie_set_outbound_region(pcie, busnr, 0, r, + false, + res->start, + pci_addr, + resource_size(res)); - cdns_pcie_set_outbound_region(pcie, 0, r, is_io, - range.cpu_addr, - range.pci_addr, - range.size); r++; } - /* - * Set Root Port no BAR match Inbound Translation registers: - * needed for MSI and DMA. - * Root Port BAR0 and BAR1 are disabled, hence no need to set their - * inbound translation registers. 
- */ - addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(rc->no_bar_nbits); - addr1 = 0; - cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(RP_NO_BAR), addr0); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(RP_NO_BAR), addr1); + err = cdns_pcie_host_map_dma_ranges(rc); + if (err) + return err; return 0; } static int cdns_pcie_host_init(struct device *dev, - struct list_head *resources, struct cdns_pcie_rc *rc) { - struct resource *bus_range = NULL; int err; - /* Parse our PCI ranges and request their resources */ - err = pci_parse_request_of_pci_ranges(dev, resources, NULL, &bus_range); - if (err) - return err; - - rc->bus_range = bus_range; - rc->pcie.bus = bus_range->start; - err = cdns_pcie_host_init_root_port(rc); if (err) - goto err_out; + return err; - err = cdns_pcie_host_init_address_translation(rc); - if (err) - goto err_out; + return cdns_pcie_host_init_address_translation(rc); +} - return 0; +static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie) +{ + struct device *dev = pcie->dev; + int retries; + + /* Check if the link is up or not */ + for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { + if (cdns_pcie_link_up(pcie)) { + dev_info(dev, "Link up\n"); + return 0; + } + usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); + } - err_out: - pci_free_resource_list(resources); - return err; + return -ETIMEDOUT; } int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) @@ -204,7 +425,7 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) struct platform_device *pdev = to_platform_device(dev); struct device_node *np = dev->of_node; struct pci_host_bridge *bridge; - struct list_head resources; + enum cdns_pcie_rp_bar bar; struct cdns_pcie *pcie; struct resource *res; int ret; @@ -216,17 +437,13 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) pcie = &rc->pcie; pcie->is_rc = true; - rc->no_bar_nbits = 32; - of_property_read_u32(np, "cdns,no-bar-match-nbits", &rc->no_bar_nbits); - rc->vendor_id = 0xffff; of_property_read_u32(np, "vendor-id", &rc->vendor_id); rc->device_id = 0xffff; of_property_read_u32(np, "device-id", &rc->device_id); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg"); - pcie->reg_base = devm_ioremap_resource(dev, res); + pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg"); if (IS_ERR(pcie->reg_base)) { dev_err(dev, "missing \"reg\"\n"); return PTR_ERR(pcie->reg_base); @@ -234,40 +451,36 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); rc->cfg_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(rc->cfg_base)) { - dev_err(dev, "missing \"cfg\"\n"); + if (IS_ERR(rc->cfg_base)) return PTR_ERR(rc->cfg_base); - } rc->cfg_res = res; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); - if (!res) { - dev_err(dev, "missing \"mem\"\n"); - return -EINVAL; + ret = cdns_pcie_start_link(pcie); + if (ret) { + dev_err(dev, "Failed to start link\n"); + return ret; } - pcie->mem_res = res; + ret = cdns_pcie_host_wait_for_link(pcie); + if (ret) + dev_dbg(dev, "PCIe link never came up\n"); + + for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) + rc->avail_ib_bar[bar] = true; - ret = cdns_pcie_host_init(dev, &resources, rc); + ret = cdns_pcie_host_init(dev, rc); if (ret) - goto err_init; + return ret; - list_splice_init(&resources, &bridge->windows); - bridge->dev.parent = dev; - bridge->busnr = pcie->bus; - bridge->ops = &cdns_pcie_host_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; + if 
(!bridge->ops) + bridge->ops = &cdns_pcie_host_ops; ret = pci_host_probe(bridge); if (ret < 0) - goto err_host_probe; + goto err_init; return 0; - err_host_probe: - pci_free_resource_list(&resources); - err_init: pm_runtime_put_sync(dev); diff --git a/drivers/pci/controller/cadence/pcie-cadence-plat.c b/drivers/pci/controller/cadence/pcie-cadence-plat.c index f5c6bf6dfcb8..5fee0f89ab59 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-plat.c +++ b/drivers/pci/controller/cadence/pcie-cadence-plat.c @@ -13,6 +13,8 @@ #include <linux/of_device.h> #include "pcie-cadence.h" +#define CDNS_PLAT_CPU_TO_BUS_ADDR 0x0FFFFFFF + /** * struct cdns_plat_pcie - private data for this PCIe platform driver * @pcie: Cadence PCIe controller @@ -30,6 +32,15 @@ struct cdns_plat_pcie_of_data { static const struct of_device_id cdns_plat_pcie_of_match[]; +static u64 cdns_plat_cpu_addr_fixup(struct cdns_pcie *pcie, u64 cpu_addr) +{ + return cpu_addr & CDNS_PLAT_CPU_TO_BUS_ADDR; +} + +static const struct cdns_pcie_ops cdns_plat_ops = { + .cpu_addr_fixup = cdns_plat_cpu_addr_fixup, +}; + static int cdns_plat_pcie_probe(struct platform_device *pdev) { const struct cdns_plat_pcie_of_data *data; @@ -66,6 +77,7 @@ static int cdns_plat_pcie_probe(struct platform_device *pdev) rc = pci_host_bridge_priv(bridge); rc->pcie.dev = dev; + rc->pcie.ops = &cdns_plat_ops; cdns_plat_pcie->pcie = &rc->pcie; cdns_plat_pcie->is_rc = is_rc; @@ -93,6 +105,7 @@ static int cdns_plat_pcie_probe(struct platform_device *pdev) return -ENOMEM; ep->pcie.dev = dev; + ep->pcie.ops = &cdns_plat_ops; cdns_plat_pcie->pcie = &ep->pcie; cdns_plat_pcie->is_rc = is_rc; @@ -115,9 +128,8 @@ static int cdns_plat_pcie_probe(struct platform_device *pdev) } err_init: - pm_runtime_put_sync(dev); - err_get_sync: + pm_runtime_put_sync(dev); pm_runtime_disable(dev); cdns_pcie_disable_phy(cdns_plat_pcie->pcie); phy_count = cdns_plat_pcie->pcie->phy_count; diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c index cd795f6fc1e2..3c3646502d05 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.c +++ b/drivers/pci/controller/cadence/pcie-cadence.c @@ -7,7 +7,7 @@ #include "pcie-cadence.h" -void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn, +void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn, u32 r, bool is_io, u64 cpu_addr, u64 pci_addr, size_t size) { @@ -60,7 +60,7 @@ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn, /* The device and function numbers are always 0. 
*/ desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID | CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0); - desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus); + desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr); } else { /* * Use captured values for bus and device numbers but still @@ -73,7 +73,9 @@ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn, cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1); /* Set the CPU address */ - cpu_addr -= pcie->mem_res->start; + if (pcie->ops->cpu_addr_fixup) + cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr); + addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) | (lower_32_bits(cpu_addr) & GENMASK(31, 8)); addr1 = upper_32_bits(cpu_addr); @@ -82,7 +84,8 @@ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn, cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1); } -void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn, +void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, + u8 busnr, u8 fn, u32 r, u64 cpu_addr) { u32 addr0, addr1, desc0, desc1; @@ -94,13 +97,15 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn, if (pcie->is_rc) { desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID | CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0); - desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus); + desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr); } else { desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn); } /* Set the CPU address */ - cpu_addr -= pcie->mem_res->start; + if (pcie->ops->cpu_addr_fixup) + cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr); + addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) | (lower_32_bits(cpu_addr) & GENMASK(31, 8)); addr1 = upper_32_bits(cpu_addr); diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h index df14ad002fe9..feed1e3038f4 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.h +++ b/drivers/pci/controller/cadence/pcie-cadence.h @@ -10,6 +10,11 @@ #include <linux/pci.h> #include <linux/phy/phy.h> +/* Parameters for the waiting for link up routine */ +#define LINK_WAIT_MAX_RETRIES 10 +#define LINK_WAIT_USLEEP_MIN 90000 +#define LINK_WAIT_USLEEP_MAX 100000 + /* * Local Management Registers */ @@ -87,6 +92,20 @@ #define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS 0x6 #define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7 +#define LM_RC_BAR_CFG_CTRL_DISABLED(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_CTRL_IO_32BITS(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_APERTURE(bar, aperture) \ + (((aperture) - 2) << ((bar) * 8)) /* * Endpoint Function Registers (PCI configuration space for endpoint functions) @@ -94,6 +113,7 @@ #define CDNS_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12)) #define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90 +#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xb0 /* * Root Port Registers (PCI configuration space for the root port function) @@ -170,11 +190,19 @@ #define CDNS_PCIE_AT_LINKDOWN 
(CDNS_PCIE_AT_BASE + 0x0824) enum cdns_pcie_rp_bar { + RP_BAR_UNDEFINED = -1, RP_BAR0, RP_BAR1, RP_NO_BAR }; +#define CDNS_PCIE_RP_MAX_IB 0x3 + +struct cdns_pcie_rp_ib_bar { + u64 size; + bool free; +}; + /* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */ #define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \ (CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008) @@ -223,23 +251,31 @@ enum cdns_pcie_msg_routing { MSG_ROUTING_GATHER, }; +struct cdns_pcie_ops { + int (*start_link)(struct cdns_pcie *pcie); + void (*stop_link)(struct cdns_pcie *pcie); + bool (*link_up)(struct cdns_pcie *pcie); + u64 (*cpu_addr_fixup)(struct cdns_pcie *pcie, u64 cpu_addr); +}; + /** * struct cdns_pcie - private data for Cadence PCIe controller drivers * @reg_base: IO mapped register base * @mem_res: start/end offsets in the physical system memory to map PCI accesses * @is_rc: tell whether the PCIe controller mode is Root Complex or Endpoint. * @bus: In Root Complex mode, the bus number + * @ops: Platform specific ops to control various inputs from Cadence PCIe + * wrapper */ struct cdns_pcie { void __iomem *reg_base; struct resource *mem_res; struct device *dev; bool is_rc; - u8 bus; int phy_count; struct phy **phy; struct device_link **link; - const struct cdns_pcie_common_ops *ops; + const struct cdns_pcie_ops *ops; }; /** @@ -248,22 +284,28 @@ struct cdns_pcie { * @dev: pointer to PCIe device * @cfg_res: start/end offsets in the physical system memory to map PCI * configuration space accesses - * @bus_range: first/last buses behind the PCIe host controller * @cfg_base: IO mapped window to access the PCI configuration space of a * single function at a time - * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address - * translation (nbits sets into the "no BAR match" register) * @vendor_id: PCI vendor ID * @device_id: PCI device ID + * @avail_ib_bar: Satus of RP_BAR0, RP_BAR1 and RP_NO_BAR if it's free or + * available */ struct cdns_pcie_rc { struct cdns_pcie pcie; struct resource *cfg_res; - struct resource *bus_range; void __iomem *cfg_base; - u32 no_bar_nbits; u32 vendor_id; u32 device_id; + bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB]; +}; + +/** + * struct cdns_pcie_epf - Structure to hold info about endpoint function + * @epf_bar: reference to the pci_epf_bar for the six Base Address Registers + */ +struct cdns_pcie_epf { + struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS]; }; /** @@ -282,6 +324,10 @@ struct cdns_pcie_rc { * @irq_pci_fn: the latest PCI function that has updated the mapping of * the MSI/legacy IRQ dedicated outbound region. * @irq_pending: bitmask of asserted legacy IRQs. 
+ * @lock: spin lock to disable interrupts while modifying PCIe controller + * registers fields (RMW) accessible by both remote RC and EP to + * minimize time between read and write + * @epf: Structure to hold info about endpoint function */ struct cdns_pcie_ep { struct cdns_pcie pcie; @@ -293,54 +339,95 @@ struct cdns_pcie_ep { u64 irq_pci_addr; u8 irq_pci_fn; u8 irq_pending; + /* protect writing to PCI_STATUS while raising legacy interrupts */ + spinlock_t lock; + struct cdns_pcie_epf *epf; }; /* Register access */ -static inline void cdns_pcie_writeb(struct cdns_pcie *pcie, u32 reg, u8 value) +static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value) { - writeb(value, pcie->reg_base + reg); + writel(value, pcie->reg_base + reg); } -static inline void cdns_pcie_writew(struct cdns_pcie *pcie, u32 reg, u16 value) +static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg) { - writew(value, pcie->reg_base + reg); + return readl(pcie->reg_base + reg); } -static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value) +static inline u32 cdns_pcie_read_sz(void __iomem *addr, int size) { - writel(value, pcie->reg_base + reg); + void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4); + unsigned int offset = (unsigned long)addr & 0x3; + u32 val = readl(aligned_addr); + + if (!IS_ALIGNED((uintptr_t)addr, size)) { + pr_warn("Address %p and size %d are not aligned\n", addr, size); + return 0; + } + + if (size > 2) + return val; + + return (val >> (8 * offset)) & ((1 << (size * 8)) - 1); } -static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg) +static inline void cdns_pcie_write_sz(void __iomem *addr, int size, u32 value) { - return readl(pcie->reg_base + reg); + void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4); + unsigned int offset = (unsigned long)addr & 0x3; + u32 mask; + u32 val; + + if (!IS_ALIGNED((uintptr_t)addr, size)) { + pr_warn("Address %p and size %d are not aligned\n", addr, size); + return; + } + + if (size > 2) { + writel(value, addr); + return; + } + + mask = ~(((1 << (size * 8)) - 1) << (offset * 8)); + val = readl(aligned_addr) & mask; + val |= value << (offset * 8); + writel(val, aligned_addr); } /* Root Port register access */ static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie, u32 reg, u8 value) { - writeb(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg); + void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg; + + cdns_pcie_write_sz(addr, 0x1, value); } static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie, u32 reg, u16 value) { - writew(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg); + void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg; + + cdns_pcie_write_sz(addr, 0x2, value); } /* Endpoint Function register access */ static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn, u32 reg, u8 value) { - writeb(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); + void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg; + + cdns_pcie_write_sz(addr, 0x1, value); } static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn, u32 reg, u16 value) { - writew(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); + void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg; + + cdns_pcie_write_sz(addr, 0x2, value); } static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn, @@ -349,14 +436,11 @@ static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn, writel(value, pcie->reg_base + 
CDNS_PCIE_EP_FUNC_BASE(fn) + reg); } -static inline u8 cdns_pcie_ep_fn_readb(struct cdns_pcie *pcie, u8 fn, u32 reg) -{ - return readb(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); -} - static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg) { - return readw(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); + void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg; + + return cdns_pcie_read_sz(addr, 0x2); } static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg) @@ -364,13 +448,43 @@ static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg) return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); } +static inline int cdns_pcie_start_link(struct cdns_pcie *pcie) +{ + if (pcie->ops->start_link) + return pcie->ops->start_link(pcie); + + return 0; +} + +static inline void cdns_pcie_stop_link(struct cdns_pcie *pcie) +{ + if (pcie->ops->stop_link) + pcie->ops->stop_link(pcie); +} + +static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie) +{ + if (pcie->ops->link_up) + return pcie->ops->link_up(pcie); + + return true; +} + #ifdef CONFIG_PCIE_CADENCE_HOST int cdns_pcie_host_setup(struct cdns_pcie_rc *rc); +void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, + int where); #else static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) { return 0; } + +static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, + int where) +{ + return NULL; +} #endif #ifdef CONFIG_PCIE_CADENCE_EP @@ -381,11 +495,12 @@ static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep) return 0; } #endif -void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn, +void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn, u32 r, bool is_io, u64 cpu_addr, u64 pci_addr, size_t size); -void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn, +void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, + u8 busnr, u8 fn, u32 r, u64 cpu_addr); void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r); diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index 6184ebc9392d..dc387724cf08 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -2,7 +2,7 @@ /* * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs * - * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2013-2014 Texas Instruments Incorporated - https://www.ti.com * * Authors: Kishon Vijay Abraham I <kishon@ti.com> */ @@ -593,13 +593,12 @@ static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx, ep = &pci->ep; ep->ops = &pcie_ep_ops; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics"); - pci->dbi_base = devm_ioremap_resource(dev, res); + pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "ep_dbics"); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2"); - pci->dbi_base2 = devm_ioremap_resource(dev, res); + pci->dbi_base2 = + devm_platform_ioremap_resource_byname(pdev, "ep_dbics2"); if (IS_ERR(pci->dbi_base2)) return PTR_ERR(pci->dbi_base2); @@ -626,20 +625,16 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx, struct dw_pcie *pci = dra7xx->pci; struct pcie_port *pp = &pci->pp; struct device *dev = pci->dev; - struct resource *res; pp->irq = platform_get_irq(pdev, 1); - if 
(pp->irq < 0) { - dev_err(dev, "missing IRQ resource\n"); + if (pp->irq < 0) return pp->irq; - } ret = dra7xx_pcie_init_irq_domain(pp); if (ret < 0) return ret; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics"); - pci->dbi_base = devm_ioremap_resource(dev, res); + pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc_dbics"); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); @@ -871,10 +866,8 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) pci->ops = &dw_pcie_ops; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "missing IRQ resource: %d\n", irq); + if (irq < 0) return irq; - } base = devm_platform_ioremap_resource_byname(pdev, "ti_conf"); if (IS_ERR(base)) @@ -998,9 +991,8 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) return 0; err_gpio: - pm_runtime_put(dev); - err_get_sync: + pm_runtime_put(dev); pm_runtime_disable(dev); dra7xx_pcie_disable_phy(dra7xx); diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c index c5043d951e80..8d82c43ae299 100644 --- a/drivers/pci/controller/dwc/pci-exynos.c +++ b/drivers/pci/controller/dwc/pci-exynos.c @@ -3,7 +3,7 @@ * PCIe host controller driver for Samsung Exynos SoCs * * Copyright (C) 2013 Samsung Electronics Co., Ltd. - * http://www.samsung.com + * https://www.samsung.com * * Author: Jingoo Han <jg1.han@samsung.com> */ @@ -84,14 +84,12 @@ static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev, { struct dw_pcie *pci = ep->pci; struct device *dev = pci->dev; - struct resource *res; ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL); if (!ep->mem_res) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ep->mem_res->elbi_base = devm_ioremap_resource(dev, res); + ep->mem_res->elbi_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ep->mem_res->elbi_base)) return PTR_ERR(ep->mem_res->elbi_base); @@ -402,10 +400,9 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep, int ret; pp->irq = platform_get_irq(pdev, 1); - if (pp->irq < 0) { - dev_err(dev, "failed to get irq\n"); + if (pp->irq < 0) return pp->irq; - } + ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler, IRQF_SHARED, "exynos-pcie", ep); if (ret) { @@ -415,10 +412,8 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep, if (IS_ENABLED(CONFIG_PCI_MSI)) { pp->msi_irq = platform_get_irq(pdev, 0); - if (pp->msi_irq < 0) { - dev_err(dev, "failed to get msi irq\n"); + if (pp->msi_irq < 0) return pp->msi_irq; - } } pp->ops = &exynos_pcie_host_ops; diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 8f08ae53f53e..90df28c7cb0c 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -3,7 +3,7 @@ * PCIe host controller driver for Freescale i.MX6 SoCs * * Copyright (C) 2013 Kosagi - * http://www.kosagi.com + * https://www.kosagi.com * * Author: Sean Cross <xobs@kosagi.com> */ @@ -868,10 +868,8 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie, if (IS_ENABLED(CONFIG_PCI_MSI)) { pp->msi_irq = platform_get_irq_byname(pdev, "msi"); - if (pp->msi_irq < 0) { - dev_err(dev, "failed to get MSI irq\n"); + if (pp->msi_irq < 0) return pp->msi_irq; - } } pp->ops = &imx6_pcie_host_ops; @@ -1269,7 +1267,7 @@ static void imx6_pcie_quirk(struct pci_dev *dev) if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver) return; - if (bus->number == pp->root_bus_nr) { + if (pci_is_root_bus(bus)) 
{ struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci); diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 790679fdfa48..c8c9d6a75f17 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -3,7 +3,7 @@ * PCIe host controller driver for Texas Instruments Keystone SoCs * * Copyright (C) 2013-2014 Texas Instruments., Ltd. - * http://www.ti.com + * https://www.ti.com * * Author: Murali Karicheri <m-karicheri2@ti.com> * Implementation based on pci-exynos.c and pcie-designware.c @@ -440,7 +440,7 @@ static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) | CFG_FUNC(PCI_FUNC(devfn)); - if (bus->parent->number != pp->root_bus_nr) + if (!pci_is_root_bus(bus->parent)) reg |= CFG_TYPE1; ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg); @@ -457,7 +457,7 @@ static int ks_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) | CFG_FUNC(PCI_FUNC(devfn)); - if (bus->parent->number != pp->root_bus_nr) + if (!pci_is_root_bus(bus->parent)) reg |= CFG_TYPE1; ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg); @@ -1250,10 +1250,8 @@ static int __init ks_pcie_probe(struct platform_device *pdev) pci->version = version; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "missing IRQ resource: %d\n", irq); + if (irq < 0) return irq; - } ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED, "ks-pcie-error-irq", ks_pcie); @@ -1323,8 +1321,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev) } if (pci->version >= 0x480A) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu"); - atu_base = devm_ioremap_resource(dev, res); + atu_base = devm_platform_ioremap_resource_byname(pdev, "atu"); if (IS_ERR(atu_base)) { ret = PTR_ERR(atu_base); goto err_get_sync; diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c index ca59ba9e0ecd..4f183b96afbb 100644 --- a/drivers/pci/controller/dwc/pci-meson.c +++ b/drivers/pci/controller/dwc/pci-meson.c @@ -488,10 +488,8 @@ static int meson_add_pcie_port(struct meson_pcie *mp, if (IS_ENABLED(CONFIG_PCI_MSI)) { pp->msi_irq = platform_get_irq(pdev, 0); - if (pp->msi_irq < 0) { - dev_err(dev, "failed to get MSI IRQ\n"); + if (pp->msi_irq < 0) return pp->msi_irq; - } } pp->ops = &meson_pcie_host_ops; diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c index 270868f3859a..d57d4ee15848 100644 --- a/drivers/pci/controller/dwc/pcie-al.c +++ b/drivers/pci/controller/dwc/pcie-al.c @@ -67,13 +67,8 @@ static int al_pcie_init(struct pci_config_window *cfg) dev_dbg(dev, "Root port dbi res: %pR\n", res); al_pcie->dbi_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(al_pcie->dbi_base)) { - long err = PTR_ERR(al_pcie->dbi_base); - - dev_err(dev, "couldn't remap dbi base %pR (err:%ld)\n", - res, err); - return err; - } + if (IS_ERR(al_pcie->dbi_base)) + return PTR_ERR(al_pcie->dbi_base); cfg->priv = al_pcie; @@ -408,10 +403,8 @@ static int al_pcie_probe(struct platform_device *pdev) dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res); - if (IS_ERR(pci->dbi_base)) { - dev_err(dev, "couldn't remap dbi base %pR\n", dbi_res); + if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); - } ecam_res = platform_get_resource_byname(pdev, 
IORESOURCE_MEM, "config"); if (!ecam_res) { diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c index 49596547e8c2..13901f359a41 100644 --- a/drivers/pci/controller/dwc/pcie-armada8k.c +++ b/drivers/pci/controller/dwc/pcie-armada8k.c @@ -248,10 +248,8 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie, pp->ops = &armada8k_pcie_host_ops; pp->irq = platform_get_irq(pdev, 0); - if (pp->irq < 0) { - dev_err(dev, "failed to get irq for port\n"); + if (pp->irq < 0) return pp->irq; - } ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler, IRQF_SHARED, "armada8k-pcie", pcie); @@ -317,7 +315,6 @@ static int armada8k_pcie_probe(struct platform_device *pdev) base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl"); pci->dbi_base = devm_pci_remap_cfg_resource(dev, base); if (IS_ERR(pci->dbi_base)) { - dev_err(dev, "couldn't remap regs base %p\n", base); ret = PTR_ERR(pci->dbi_base); goto fail_clkreg; } diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c index 28d5a1095200..97d50bb50f06 100644 --- a/drivers/pci/controller/dwc/pcie-artpec6.c +++ b/drivers/pci/controller/dwc/pcie-artpec6.c @@ -387,10 +387,8 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie, if (IS_ENABLED(CONFIG_PCI_MSI)) { pp->msi_irq = platform_get_irq_byname(pdev, "msi"); - if (pp->msi_irq < 0) { - dev_err(dev, "failed to get MSI irq\n"); + if (pp->msi_irq < 0) return pp->msi_irq; - } } pp->ops = &artpec6_pcie_host_ops; @@ -455,8 +453,7 @@ static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie, ep = &pci->ep; ep->ops = &pcie_ep_ops; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2"); - pci->dbi_base2 = devm_ioremap_resource(dev, res); + pci->dbi_base2 = devm_platform_ioremap_resource_byname(pdev, "dbi2"); if (IS_ERR(pci->dbi_base2)) return PTR_ERR(pci->dbi_base2); @@ -481,8 +478,6 @@ static int artpec6_pcie_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct dw_pcie *pci; struct artpec6_pcie *artpec6_pcie; - struct resource *dbi_base; - struct resource *phy_base; int ret; const struct of_device_id *match; const struct artpec_pcie_of_data *data; @@ -512,13 +507,12 @@ static int artpec6_pcie_probe(struct platform_device *pdev) artpec6_pcie->variant = variant; artpec6_pcie->mode = mode; - dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); - pci->dbi_base = devm_ioremap_resource(dev, dbi_base); + pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "dbi"); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); - phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); - artpec6_pcie->phy_base = devm_ioremap_resource(dev, phy_base); + artpec6_pcie->phy_base = + devm_platform_ioremap_resource_byname(pdev, "phy"); if (IS_ERR(artpec6_pcie->phy_base)) return PTR_ERR(artpec6_pcie->phy_base); diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 5e5b8821bed8..305bfec2424d 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/** +/* * Synopsys DesignWare PCIe Endpoint controller driver * * Copyright (C) 2017 Texas Instruments diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 0a4a5aa6fe46..9dafecba347f 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ 
b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -3,7 +3,7 @@ * Synopsys DesignWare PCIe host controller driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. - * http://www.samsung.com + * https://www.samsung.com * * Author: Jingoo Han <jg1.han@samsung.com> */ @@ -346,11 +346,6 @@ int dw_pcie_host_init(struct pcie_port *pp) if (!bridge) return -ENOMEM; - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (ret) - return ret; - /* Get the I/O and memory ranges from DT */ resource_list_for_each_entry(win, &bridge->windows) { switch (resource_type(win->res)) { @@ -473,14 +468,8 @@ int dw_pcie_host_init(struct pcie_port *pp) goto err_free_msi; } - pp->root_bus_nr = pp->busn->start; - - bridge->dev.parent = dev; bridge->sysdata = pp; - bridge->busnr = pp->root_bus_nr; bridge->ops = &dw_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; ret = pci_scan_root_bus_bridge(bridge); if (ret) @@ -529,7 +518,7 @@ static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus, busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | PCIE_ATU_FUNC(PCI_FUNC(devfn)); - if (bus->parent->number == pp->root_bus_nr) { + if (pci_is_root_bus(bus->parent)) { type = PCIE_ATU_TYPE_CFG0; cpu_addr = pp->cfg0_base; cfg_size = pp->cfg0_size; @@ -585,13 +574,11 @@ static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus, struct dw_pcie *pci = to_dw_pcie_from_pp(pp); /* If there is no link, then there is no device */ - if (bus->number != pp->root_bus_nr) { + if (!pci_is_root_bus(bus)) { if (!dw_pcie_link_up(pci)) return 0; - } - - /* Access only one slot on each root port */ - if (bus->number == pp->root_bus_nr && dev > 0) + } else if (dev > 0) + /* Access only one slot on each root port */ return 0; return 1; @@ -607,7 +594,7 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, return PCIBIOS_DEVICE_NOT_FOUND; } - if (bus->number == pp->root_bus_nr) + if (pci_is_root_bus(bus)) return dw_pcie_rd_own_conf(pp, where, size, val); return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val); @@ -621,7 +608,7 @@ static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn, if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) return PCIBIOS_DEVICE_NOT_FOUND; - if (bus->number == pp->root_bus_nr) + if (pci_is_root_bus(bus)) return dw_pcie_wr_own_conf(pp, where, size, val); return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val); diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c index 73646b677aff..712456f6ce36 100644 --- a/drivers/pci/controller/dwc/pcie-designware-plat.c +++ b/drivers/pci/controller/dwc/pcie-designware-plat.c @@ -153,8 +153,7 @@ static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie, ep = &pci->ep; ep->ops = &pcie_ep_ops; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2"); - pci->dbi_base2 = devm_ioremap_resource(dev, res); + pci->dbi_base2 = devm_platform_ioremap_resource_byname(pdev, "dbi2"); if (IS_ERR(pci->dbi_base2)) return PTR_ERR(pci->dbi_base2); diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index c92496e36fd5..b723e0cc41fb 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -3,7 +3,7 @@ * Synopsys DesignWare PCIe host controller driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. 
- * http://www.samsung.com + * https://www.samsung.com * * Author: Jingoo Han <jg1.han@samsung.com> */ diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 656e00f8fbeb..f911760dcc69 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -3,7 +3,7 @@ * Synopsys DesignWare PCIe host controller driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. - * http://www.samsung.com + * https://www.samsung.com * * Author: Jingoo Han <jg1.han@samsung.com> */ @@ -173,7 +173,6 @@ struct dw_pcie_host_ops { }; struct pcie_port { - u8 root_bus_nr; u64 cfg0_base; void __iomem *va_cfg0_base; u32 cfg0_size; diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c index 0ad4e07dd4c2..5ca86796d43a 100644 --- a/drivers/pci/controller/dwc/pcie-hisi.c +++ b/drivers/pci/controller/dwc/pcie-hisi.c @@ -10,15 +10,10 @@ */ #include <linux/interrupt.h> #include <linux/init.h> -#include <linux/mfd/syscon.h> -#include <linux/of_address.h> -#include <linux/of_pci.h> #include <linux/platform_device.h> -#include <linux/of_device.h> #include <linux/pci.h> #include <linux/pci-acpi.h> #include <linux/pci-ecam.h> -#include <linux/regmap.h> #include "../../pci.h" #if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) @@ -118,220 +113,6 @@ const struct pci_ecam_ops hisi_pcie_ops = { #ifdef CONFIG_PCI_HISI -#include "pcie-designware.h" - -#define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818 -#define PCIE_HIP06_CTRL_OFF 0x1000 -#define PCIE_SYS_STATE4 (PCIE_HIP06_CTRL_OFF + 0x31c) -#define PCIE_LTSSM_LINKUP_STATE 0x11 -#define PCIE_LTSSM_STATE_MASK 0x3F - -#define to_hisi_pcie(x) dev_get_drvdata((x)->dev) - -struct hisi_pcie; - -struct pcie_soc_ops { - int (*hisi_pcie_link_up)(struct hisi_pcie *hisi_pcie); -}; - -struct hisi_pcie { - struct dw_pcie *pci; - struct regmap *subctrl; - u32 port_id; - const struct pcie_soc_ops *soc_ops; -}; - -/* HipXX PCIe host only supports 32-bit config access */ -static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size, - u32 *val) -{ - u32 reg; - u32 reg_val; - void *walker = &reg_val; - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - - walker += (where & 0x3); - reg = where & ~0x3; - reg_val = dw_pcie_readl_dbi(pci, reg); - - if (size == 1) - *val = *(u8 __force *) walker; - else if (size == 2) - *val = *(u16 __force *) walker; - else if (size == 4) - *val = reg_val; - else - return PCIBIOS_BAD_REGISTER_NUMBER; - - return PCIBIOS_SUCCESSFUL; -} - -/* HipXX PCIe host only supports 32-bit config access */ -static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int size, - u32 val) -{ - u32 reg_val; - u32 reg; - void *walker = &reg_val; - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - - walker += (where & 0x3); - reg = where & ~0x3; - if (size == 4) - dw_pcie_writel_dbi(pci, reg, val); - else if (size == 2) { - reg_val = dw_pcie_readl_dbi(pci, reg); - *(u16 __force *) walker = val; - dw_pcie_writel_dbi(pci, reg, reg_val); - } else if (size == 1) { - reg_val = dw_pcie_readl_dbi(pci, reg); - *(u8 __force *) walker = val; - dw_pcie_writel_dbi(pci, reg, reg_val); - } else - return PCIBIOS_BAD_REGISTER_NUMBER; - - return PCIBIOS_SUCCESSFUL; -} - -static int hisi_pcie_link_up_hip05(struct hisi_pcie *hisi_pcie) -{ - u32 val; - - regmap_read(hisi_pcie->subctrl, PCIE_SUBCTRL_SYS_STATE4_REG + - 0x100 * hisi_pcie->port_id, &val); - - return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE); -} - -static int 
hisi_pcie_link_up_hip06(struct hisi_pcie *hisi_pcie) -{ - struct dw_pcie *pci = hisi_pcie->pci; - u32 val; - - val = dw_pcie_readl_dbi(pci, PCIE_SYS_STATE4); - - return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE); -} - -static int hisi_pcie_link_up(struct dw_pcie *pci) -{ - struct hisi_pcie *hisi_pcie = to_hisi_pcie(pci); - - return hisi_pcie->soc_ops->hisi_pcie_link_up(hisi_pcie); -} - -static const struct dw_pcie_host_ops hisi_pcie_host_ops = { - .rd_own_conf = hisi_pcie_cfg_read, - .wr_own_conf = hisi_pcie_cfg_write, -}; - -static int hisi_add_pcie_port(struct hisi_pcie *hisi_pcie, - struct platform_device *pdev) -{ - struct dw_pcie *pci = hisi_pcie->pci; - struct pcie_port *pp = &pci->pp; - struct device *dev = &pdev->dev; - int ret; - u32 port_id; - - if (of_property_read_u32(dev->of_node, "port-id", &port_id)) { - dev_err(dev, "failed to read port-id\n"); - return -EINVAL; - } - if (port_id > 3) { - dev_err(dev, "Invalid port-id: %d\n", port_id); - return -EINVAL; - } - hisi_pcie->port_id = port_id; - - pp->ops = &hisi_pcie_host_ops; - - ret = dw_pcie_host_init(pp); - if (ret) { - dev_err(dev, "failed to initialize host\n"); - return ret; - } - - return 0; -} - -static const struct dw_pcie_ops dw_pcie_ops = { - .link_up = hisi_pcie_link_up, -}; - -static int hisi_pcie_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct dw_pcie *pci; - struct hisi_pcie *hisi_pcie; - struct resource *reg; - int ret; - - hisi_pcie = devm_kzalloc(dev, sizeof(*hisi_pcie), GFP_KERNEL); - if (!hisi_pcie) - return -ENOMEM; - - pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); - if (!pci) - return -ENOMEM; - - pci->dev = dev; - pci->ops = &dw_pcie_ops; - - hisi_pcie->pci = pci; - - hisi_pcie->soc_ops = of_device_get_match_data(dev); - - hisi_pcie->subctrl = - syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl"); - if (IS_ERR(hisi_pcie->subctrl)) { - dev_err(dev, "cannot get subctrl base\n"); - return PTR_ERR(hisi_pcie->subctrl); - } - - reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi"); - pci->dbi_base = devm_pci_remap_cfg_resource(dev, reg); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); - platform_set_drvdata(pdev, hisi_pcie); - - ret = hisi_add_pcie_port(hisi_pcie, pdev); - if (ret) - return ret; - - return 0; -} - -static struct pcie_soc_ops hip05_ops = { - &hisi_pcie_link_up_hip05 -}; - -static struct pcie_soc_ops hip06_ops = { - &hisi_pcie_link_up_hip06 -}; - -static const struct of_device_id hisi_pcie_of_match[] = { - { - .compatible = "hisilicon,hip05-pcie", - .data = (void *) &hip05_ops, - }, - { - .compatible = "hisilicon,hip06-pcie", - .data = (void *) &hip06_ops, - }, - {}, -}; - -static struct platform_driver hisi_pcie_driver = { - .probe = hisi_pcie_probe, - .driver = { - .name = "hisi-pcie", - .of_match_table = hisi_pcie_of_match, - .suppress_bind_attrs = true, - }, -}; -builtin_platform_driver(hisi_pcie_driver); - static int hisi_pcie_platform_init(struct pci_config_window *cfg) { struct device *dev = cfg->parent; diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c index 811b5c6d62ea..2a2835746077 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -304,7 +304,6 @@ static int histb_pcie_probe(struct platform_device *pdev) struct histb_pcie *hipcie; struct dw_pcie *pci; struct pcie_port *pp; - struct resource *res; struct device_node *np = pdev->dev.of_node; struct device *dev = &pdev->dev; enum of_gpio_flags 
of_flags; @@ -324,15 +323,13 @@ static int histb_pcie_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = &dw_pcie_ops; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control"); - hipcie->ctrl = devm_ioremap_resource(dev, res); + hipcie->ctrl = devm_platform_ioremap_resource_byname(pdev, "control"); if (IS_ERR(hipcie->ctrl)) { dev_err(dev, "cannot get control reg base\n"); return PTR_ERR(hipcie->ctrl); } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc-dbi"); - pci->dbi_base = devm_ioremap_resource(dev, res); + pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc-dbi"); if (IS_ERR(pci->dbi_base)) { dev_err(dev, "cannot get rc-dbi base\n"); return PTR_ERR(pci->dbi_base); @@ -402,10 +399,8 @@ static int histb_pcie_probe(struct platform_device *pdev) if (IS_ENABLED(CONFIG_PCI_MSI)) { pp->msi_irq = platform_get_irq_byname(pdev, "msi"); - if (pp->msi_irq < 0) { - dev_err(dev, "Failed to get MSI IRQ\n"); + if (pp->msi_irq < 0) return pp->msi_irq; - } } hipcie->phy = devm_phy_get(dev, "phy"); diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c index 2d8dbb318087..c3b3a1d162b5 100644 --- a/drivers/pci/controller/dwc/pcie-intel-gw.c +++ b/drivers/pci/controller/dwc/pcie-intel-gw.c @@ -253,11 +253,9 @@ static int intel_pcie_get_resources(struct platform_device *pdev) struct intel_pcie_port *lpp = platform_get_drvdata(pdev); struct dw_pcie *pci = &lpp->pci; struct device *dev = pci->dev; - struct resource *res; int ret; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); - pci->dbi_base = devm_ioremap_resource(dev, res); + pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "dbi"); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); @@ -291,8 +289,7 @@ static int intel_pcie_get_resources(struct platform_device *pdev) ret = of_pci_get_max_link_speed(dev->of_node); lpp->link_gen = ret < 0 ? 0 : ret; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app"); - lpp->app_base = devm_ioremap_resource(dev, res); + lpp->app_base = devm_platform_ioremap_resource_byname(pdev, "app"); if (IS_ERR(lpp->app_base)) return PTR_ERR(lpp->app_base); diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index c19617a912bd..e496f51e0152 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -3,7 +3,7 @@ * PCIe host controller driver for Kirin Phone SoCs * * Copyright (C) 2017 HiSilicon Electronics Co., Ltd. 
- * http://www.huawei.com + * https://www.huawei.com * * Author: Xiaowei Song <songxiaowei@huawei.com> */ @@ -147,23 +147,18 @@ static long kirin_pcie_get_clk(struct kirin_pcie *kirin_pcie, static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie, struct platform_device *pdev) { - struct device *dev = &pdev->dev; - struct resource *apb; - struct resource *phy; - struct resource *dbi; - - apb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb"); - kirin_pcie->apb_base = devm_ioremap_resource(dev, apb); + kirin_pcie->apb_base = + devm_platform_ioremap_resource_byname(pdev, "apb"); if (IS_ERR(kirin_pcie->apb_base)) return PTR_ERR(kirin_pcie->apb_base); - phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); - kirin_pcie->phy_base = devm_ioremap_resource(dev, phy); + kirin_pcie->phy_base = + devm_platform_ioremap_resource_byname(pdev, "phy"); if (IS_ERR(kirin_pcie->phy_base)) return PTR_ERR(kirin_pcie->phy_base); - dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); - kirin_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi); + kirin_pcie->pci->dbi_base = + devm_platform_ioremap_resource_byname(pdev, "dbi"); if (IS_ERR(kirin_pcie->pci->dbi_base)) return PTR_ERR(kirin_pcie->pci->dbi_base); @@ -455,11 +450,8 @@ static int kirin_pcie_add_msi(struct dw_pcie *pci, if (IS_ENABLED(CONFIG_PCI_MSI)) { irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, - "failed to get MSI IRQ (%d)\n", irq); + if (irq < 0) return irq; - } pci->pp.msi_irq = irq; } diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 138e1a2d21cc..3aac77a295ba 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -27,6 +27,7 @@ #include <linux/slab.h> #include <linux/types.h> +#include "../../pci.h" #include "pcie-designware.h" #define PCIE20_PARF_SYS_CTRL 0x00 @@ -39,13 +40,14 @@ #define L23_CLK_RMV_DIS BIT(2) #define L1_CLK_RMV_DIS BIT(1) -#define PCIE20_COMMAND_STATUS 0x04 -#define CMD_BME_VAL 0x4 -#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98 -#define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10 - #define PCIE20_PARF_PHY_CTRL 0x40 +#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16) +#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16) + #define PCIE20_PARF_PHY_REFCLK 0x4C +#define PHY_REFCLK_SSP_EN BIT(16) +#define PHY_REFCLK_USE_PAD BIT(12) + #define PCIE20_PARF_DBI_BASE_ADDR 0x168 #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C #define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174 @@ -66,8 +68,8 @@ #define CFG_BRIDGE_SB_INIT BIT(0) #define PCIE20_CAP 0x70 -#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC) -#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT (BIT(10) | BIT(11)) +#define PCIE20_DEVICE_CONTROL2_STATUS2 (PCIE20_CAP + PCI_EXP_DEVCTL2) +#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + PCI_EXP_LNKCAP) #define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14) #define PCIE_CAP_LINK1_VAL 0x2FD7F @@ -77,22 +79,36 @@ #define DBI_RO_WR_EN 1 #define PERST_DELAY_US 1000 +/* PARF registers */ +#define PCIE20_PARF_PCS_DEEMPH 0x34 +#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) ((x) << 16) +#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) ((x) << 8) +#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) ((x) << 0) + +#define PCIE20_PARF_PCS_SWING 0x38 +#define PCS_SWING_TX_SWING_FULL(x) ((x) << 8) +#define PCS_SWING_TX_SWING_LOW(x) ((x) << 0) + +#define PCIE20_PARF_CONFIG_BITS 0x50 +#define PHY_RX0_EQ(x) ((x) << 24) #define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358 #define SLV_ADDR_SPACE_SZ 0x10000000 +#define 
PCIE20_LNK_CONTROL2_LINK_STATUS2 0xa0 + #define DEVICE_TYPE_RC 0x4 #define QCOM_PCIE_2_1_0_MAX_SUPPLY 3 +#define QCOM_PCIE_2_1_0_MAX_CLOCKS 5 struct qcom_pcie_resources_2_1_0 { - struct clk *iface_clk; - struct clk *core_clk; - struct clk *phy_clk; + struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS]; struct reset_control *pci_reset; struct reset_control *axi_reset; struct reset_control *ahb_reset; struct reset_control *por_reset; struct reset_control *phy_reset; + struct reset_control *ext_reset; struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY]; }; @@ -177,6 +193,7 @@ struct qcom_pcie { struct phy *phy; struct gpio_desc *reset; const struct qcom_pcie_ops *ops; + int gen; }; #define to_qcom_pcie(x) dev_get_drvdata((x)->dev) @@ -234,17 +251,21 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie) if (ret) return ret; - res->iface_clk = devm_clk_get(dev, "iface"); - if (IS_ERR(res->iface_clk)) - return PTR_ERR(res->iface_clk); + res->clks[0].id = "iface"; + res->clks[1].id = "core"; + res->clks[2].id = "phy"; + res->clks[3].id = "aux"; + res->clks[4].id = "ref"; - res->core_clk = devm_clk_get(dev, "core"); - if (IS_ERR(res->core_clk)) - return PTR_ERR(res->core_clk); + /* iface, core, phy are required */ + ret = devm_clk_bulk_get(dev, 3, res->clks); + if (ret < 0) + return ret; - res->phy_clk = devm_clk_get(dev, "phy"); - if (IS_ERR(res->phy_clk)) - return PTR_ERR(res->phy_clk); + /* aux, ref are optional */ + ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3); + if (ret < 0) + return ret; res->pci_reset = devm_reset_control_get_exclusive(dev, "pci"); if (IS_ERR(res->pci_reset)) @@ -262,6 +283,10 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie) if (IS_ERR(res->por_reset)) return PTR_ERR(res->por_reset); + res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext"); + if (IS_ERR(res->ext_reset)) + return PTR_ERR(res->ext_reset); + res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); return PTR_ERR_OR_ZERO(res->phy_reset); } @@ -270,14 +295,13 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; + clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); reset_control_assert(res->pci_reset); reset_control_assert(res->axi_reset); reset_control_assert(res->ahb_reset); reset_control_assert(res->por_reset); - reset_control_assert(res->pci_reset); - clk_disable_unprepare(res->iface_clk); - clk_disable_unprepare(res->core_clk); - clk_disable_unprepare(res->phy_clk); + reset_control_assert(res->ext_reset); + reset_control_assert(res->phy_reset); regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); } @@ -286,6 +310,7 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; + struct device_node *node = dev->of_node; u32 val; int ret; @@ -295,73 +320,85 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) return ret; } - ret = reset_control_assert(res->ahb_reset); - if (ret) { - dev_err(dev, "cannot assert ahb reset\n"); - goto err_assert_ahb; - } - - ret = clk_prepare_enable(res->iface_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable iface clock\n"); - goto err_assert_ahb; - } - - ret = clk_prepare_enable(res->phy_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable phy clock\n"); - goto err_clk_phy; - } - - ret = clk_prepare_enable(res->core_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable 
core clock\n"); - goto err_clk_core; - } - ret = reset_control_deassert(res->ahb_reset); if (ret) { dev_err(dev, "cannot deassert ahb reset\n"); goto err_deassert_ahb; } - /* enable PCIe clocks and resets */ - val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); - val &= ~BIT(0); - writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); - - /* enable external reference clock */ - val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK); - val |= BIT(16); - writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK); + ret = reset_control_deassert(res->ext_reset); + if (ret) { + dev_err(dev, "cannot deassert ext reset\n"); + goto err_deassert_ext; + } ret = reset_control_deassert(res->phy_reset); if (ret) { dev_err(dev, "cannot deassert phy reset\n"); - return ret; + goto err_deassert_phy; } ret = reset_control_deassert(res->pci_reset); if (ret) { dev_err(dev, "cannot deassert pci reset\n"); - return ret; + goto err_deassert_pci; } ret = reset_control_deassert(res->por_reset); if (ret) { dev_err(dev, "cannot deassert por reset\n"); - return ret; + goto err_deassert_por; } ret = reset_control_deassert(res->axi_reset); if (ret) { dev_err(dev, "cannot deassert axi reset\n"); - return ret; + goto err_deassert_axi; } + ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); + if (ret) + goto err_clks; + + /* enable PCIe clocks and resets */ + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); + val &= ~BIT(0); + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + + if (of_device_is_compatible(node, "qcom,pcie-ipq8064") || + of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) { + writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) | + PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) | + PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34), + pcie->parf + PCIE20_PARF_PCS_DEEMPH); + writel(PCS_SWING_TX_SWING_FULL(120) | + PCS_SWING_TX_SWING_LOW(120), + pcie->parf + PCIE20_PARF_PCS_SWING); + writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS); + } + + if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) { + /* set TX termination offset */ + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); + val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK; + val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7); + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + } + + /* enable external reference clock */ + val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK); + val &= ~PHY_REFCLK_USE_PAD; + val |= PHY_REFCLK_SSP_EN; + writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK); + /* wait for clock acquisition */ usleep_range(1000, 1500); + if (pcie->gen == 1) { + val = readl(pci->dbi_base + PCIE20_LNK_CONTROL2_LINK_STATUS2); + val |= PCI_EXP_LNKSTA_CLS_2_5GB; + writel(val, pci->dbi_base + PCIE20_LNK_CONTROL2_LINK_STATUS2); + } /* Set the Max TLP size to 2K, instead of using default of 4K */ writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K, @@ -371,13 +408,19 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) return 0; +err_clks: + reset_control_assert(res->axi_reset); +err_deassert_axi: + reset_control_assert(res->por_reset); +err_deassert_por: + reset_control_assert(res->pci_reset); +err_deassert_pci: + reset_control_assert(res->phy_reset); +err_deassert_phy: + reset_control_assert(res->ext_reset); +err_deassert_ext: + reset_control_assert(res->ahb_reset); err_deassert_ahb: - clk_disable_unprepare(res->core_clk); -err_clk_core: - clk_disable_unprepare(res->phy_clk); -err_clk_phy: - clk_disable_unprepare(res->iface_clk); -err_assert_ahb: regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); return ret; @@ -1047,15 +1090,15 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie) pcie->parf + 
PCIE20_PARF_SYS_CTRL); writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH); - writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS); + writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND); writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG); writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1); val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES); - val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT; + val &= ~PCI_EXP_LNKCAP_ASPMS; writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES); - writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base + + writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + PCIE20_DEVICE_CONTROL2_STATUS2); return 0; @@ -1339,10 +1382,8 @@ static int qcom_pcie_probe(struct platform_device *pdev) pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); - if (ret < 0) { - pm_runtime_disable(dev); - return ret; - } + if (ret < 0) + goto err_pm_runtime_put; pci->dev = dev; pci->ops = &dw_pcie_ops; @@ -1358,8 +1399,11 @@ static int qcom_pcie_probe(struct platform_device *pdev) goto err_pm_runtime_put; } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf"); - pcie->parf = devm_ioremap_resource(dev, res); + pcie->gen = of_pci_get_max_link_speed(pdev->dev.of_node); + if (pcie->gen < 0) + pcie->gen = 2; + + pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf"); if (IS_ERR(pcie->parf)) { ret = PTR_ERR(pcie->parf); goto err_pm_runtime_put; @@ -1372,8 +1416,7 @@ static int qcom_pcie_probe(struct platform_device *pdev) goto err_pm_runtime_put; } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi"); - pcie->elbi = devm_ioremap_resource(dev, res); + pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi"); if (IS_ERR(pcie->elbi)) { ret = PTR_ERR(pcie->elbi); goto err_pm_runtime_put; @@ -1426,6 +1469,7 @@ err_pm_runtime_put: static const struct of_device_id qcom_pcie_match[] = { { .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 }, { .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 }, + { .compatible = "qcom,pcie-ipq8064-v2", .data = &ops_2_1_0 }, { .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 }, { .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 }, { .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 }, diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c index 7d0cdfd8138b..62846562da0b 100644 --- a/drivers/pci/controller/dwc/pcie-spear13xx.c +++ b/drivers/pci/controller/dwc/pcie-spear13xx.c @@ -198,10 +198,9 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie, int ret; pp->irq = platform_get_irq(pdev, 0); - if (pp->irq < 0) { - dev_err(dev, "failed to get irq\n"); + if (pp->irq < 0) return pp->irq; - } + ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler, IRQF_SHARED | IRQF_NO_THREAD, "spear1340-pcie", spear13xx_pcie); @@ -273,7 +272,6 @@ static int spear13xx_pcie_probe(struct platform_device *pdev) dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base); if (IS_ERR(pci->dbi_base)) { - dev_err(dev, "couldn't remap dbi base %p\n", dbi_base); ret = PTR_ERR(pci->dbi_base); goto fail_clk; } diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index 92b77f7d8354..70498689d0c0 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -2189,10 +2189,8 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev) } pp->irq = 
platform_get_irq_byname(pdev, "intr"); - if (pp->irq < 0) { - dev_err(dev, "Failed to get \"intr\" interrupt\n"); + if (pp->irq < 0) return pp->irq; - } pcie->bpmp = tegra_bpmp_get(dev); if (IS_ERR(pcie->bpmp)) diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c index a5401a0b1e58..3a7f403b57b8 100644 --- a/drivers/pci/controller/dwc/pcie-uniphier.c +++ b/drivers/pci/controller/dwc/pcie-uniphier.c @@ -416,8 +416,7 @@ static int uniphier_pcie_probe(struct platform_device *pdev) if (IS_ERR(priv->pci.dbi_base)) return PTR_ERR(priv->pci.dbi_base); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link"); - priv->base = devm_ioremap_resource(dev, res); + priv->base = devm_platform_ioremap_resource_byname(pdev, "link"); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); diff --git a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c index a6d2190a6753..ee0156921ebc 100644 --- a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c +++ b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c @@ -170,10 +170,9 @@ static int ls_pcie_g4_interrupt_init(struct mobiveil_pcie *mv_pci) int ret; pcie->irq = platform_get_irq_byname(pdev, "intr"); - if (pcie->irq < 0) { - dev_err(dev, "Can't get 'intr' IRQ, errno = %d\n", pcie->irq); + if (pcie->irq < 0) return pcie->irq; - } + ret = devm_request_irq(dev, pcie->irq, ls_pcie_g4_isr, IRQF_SHARED, pdev->name, pcie); if (ret) { diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c index 5907baa9b1f2..3adec419a45b 100644 --- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c @@ -29,18 +29,15 @@ static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) { - struct mobiveil_pcie *pcie = bus->sysdata; - struct mobiveil_root_port *rp = &pcie->rp; - /* Only one device down on each root port */ - if ((bus->number == rp->root_bus_nr) && (devfn > 0)) + if (pci_is_root_bus(bus) && (devfn > 0)) return false; /* * Do not read more than one device on the bus directly * attached to RC */ - if ((bus->primary == rp->root_bus_nr) && (PCI_SLOT(devfn) > 0)) + if ((bus->primary == to_pci_host_bridge(bus->bridge)->busnr) && (PCI_SLOT(devfn) > 0)) return false; return true; @@ -61,7 +58,7 @@ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus, return NULL; /* RC config access */ - if (bus->number == rp->root_bus_nr) + if (pci_is_root_bus(bus)) return pcie->csr_axi_slave_base + where; /* @@ -522,10 +519,8 @@ static int mobiveil_pcie_integrated_interrupt_init(struct mobiveil_pcie *pcie) mobiveil_pcie_enable_msi(pcie); rp->irq = platform_get_irq(pdev, 0); - if (rp->irq < 0) { - dev_err(dev, "failed to map IRQ: %d\n", rp->irq); + if (rp->irq < 0) return rp->irq; - } /* initialize the IRQ domains */ ret = mobiveil_pcie_init_irq_domain(pcie); @@ -569,8 +564,6 @@ int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie) struct mobiveil_root_port *rp = &pcie->rp; struct pci_host_bridge *bridge = rp->bridge; struct device *dev = &pcie->pdev->dev; - struct pci_bus *bus; - struct pci_bus *child; int ret; ret = mobiveil_pcie_parse_dt(pcie); @@ -582,14 +575,6 @@ int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie) if (!mobiveil_pcie_is_bridge(pcie)) return -ENODEV; - /* parse the host bridge base addresses from the device tree file */ - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - 
&bridge->dma_ranges, NULL); - if (ret) { - dev_err(dev, "Getting bridge resources failed\n"); - return ret; - } - /* * configure all inbound and outbound windows and prepare the RC for * config access @@ -607,12 +592,8 @@ int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie) } /* Initialize bridge */ - bridge->dev.parent = dev; bridge->sysdata = pcie; - bridge->busnr = rp->root_bus_nr; bridge->ops = &mobiveil_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; ret = mobiveil_bringup_link(pcie); if (ret) { @@ -620,17 +601,5 @@ int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie) return ret; } - /* setup the kernel resources for the newly added PCIe root bus */ - ret = pci_scan_root_bus_bridge(bridge); - if (ret) - return ret; - - bus = bridge->bus; - - pci_assign_unassigned_bus_resources(bus); - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - pci_bus_add_devices(bus); - - return 0; + return pci_host_probe(bridge); } diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.h b/drivers/pci/controller/mobiveil/pcie-mobiveil.h index 767e36a8522d..6082b8afbc31 100644 --- a/drivers/pci/controller/mobiveil/pcie-mobiveil.h +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.h @@ -149,7 +149,6 @@ struct mobiveil_rp_ops { }; struct mobiveil_root_port { - char root_bus_nr; void __iomem *config_axi_slave_base; /* endpoint config base */ struct resource *ob_io_res; struct mobiveil_rp_ops *ops; diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index 90ff291c24f0..1559f79e63b6 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -195,7 +195,6 @@ struct advk_pcie { DECLARE_BITMAP(msi_used, MSI_IRQ_NUM); struct mutex msi_used_lock; u16 msi_msg; - int root_bus_nr; int link_gen; struct pci_bridge_emul bridge; struct gpio_desc *reset_gpio; @@ -641,7 +640,14 @@ static void advk_sw_pci_bridge_init(struct advk_pcie *pcie) static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus, int devfn) { - if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) + if (pci_is_root_bus(bus) && PCI_SLOT(devfn) != 0) + return false; + + /* + * If the link goes down after we check for link-up, nothing bad + * happens but the config access times out. 
+ */ + if (!pci_is_root_bus(bus) && !advk_pcie_link_up(pcie)) return false; return true; @@ -659,7 +665,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, return PCIBIOS_DEVICE_NOT_FOUND; } - if (bus->number == pcie->root_bus_nr) + if (pci_is_root_bus(bus)) return pci_bridge_emul_conf_read(&pcie->bridge, where, size, val); @@ -670,7 +676,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, /* Program the control register */ reg = advk_readl(pcie, PIO_CTRL); reg &= ~PIO_CTRL_TYPE_MASK; - if (bus->primary == pcie->root_bus_nr) + if (pci_is_root_bus(bus->parent)) reg |= PCIE_CONFIG_RD_TYPE0; else reg |= PCIE_CONFIG_RD_TYPE1; @@ -688,8 +694,10 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, advk_writel(pcie, 1, PIO_START); ret = advk_pcie_wait_pio(pcie); - if (ret < 0) + if (ret < 0) { + *val = 0xffffffff; return PCIBIOS_SET_FAILED; + } advk_pcie_check_pio_status(pcie); @@ -715,7 +723,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn, if (!advk_pcie_valid_device(pcie, bus, devfn)) return PCIBIOS_DEVICE_NOT_FOUND; - if (bus->number == pcie->root_bus_nr) + if (pci_is_root_bus(bus)) return pci_bridge_emul_conf_write(&pcie->bridge, where, size, val); @@ -729,7 +737,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn, /* Program the control register */ reg = advk_readl(pcie, PIO_CTRL); reg &= ~PIO_CTRL_TYPE_MASK; - if (bus->primary == pcie->root_bus_nr) + if (pci_is_root_bus(bus->parent)) reg |= PCIE_CONFIG_WR_TYPE0; else reg |= PCIE_CONFIG_WR_TYPE1; @@ -1105,7 +1113,6 @@ static int advk_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct advk_pcie *pcie; - struct resource *res, *bus; struct pci_host_bridge *bridge; int ret, irq; @@ -1116,8 +1123,7 @@ static int advk_pcie_probe(struct platform_device *pdev) pcie = pci_host_bridge_priv(bridge); pcie->pdev = pdev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pcie->base = devm_ioremap_resource(dev, res); + pcie->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pcie->base)) return PTR_ERR(pcie->base); @@ -1133,14 +1139,6 @@ static int advk_pcie_probe(struct platform_device *pdev) return ret; } - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, &bus); - if (ret) { - dev_err(dev, "Failed to parse resources\n"); - return ret; - } - pcie->root_bus_nr = bus->start; - pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node, "reset-gpios", 0, GPIOD_OUT_LOW, @@ -1184,12 +1182,8 @@ static int advk_pcie_probe(struct platform_device *pdev) return ret; } - bridge->dev.parent = dev; bridge->sysdata = pcie; - bridge->busnr = 0; bridge->ops = &advk_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; ret = pci_host_probe(bridge); if (ret < 0) { diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c index 1b67564de7af..da3cd216da00 100644 --- a/drivers/pci/controller/pci-ftpci100.c +++ b/drivers/pci/controller/pci-ftpci100.c @@ -422,7 +422,6 @@ static int faraday_pci_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; const struct faraday_pci_variant *variant = of_device_get_match_data(dev); - struct resource *regs; struct resource_entry *win; struct faraday_pci *p; struct resource *io; @@ -437,12 +436,7 @@ static int faraday_pci_probe(struct platform_device *pdev) if (!host) return -ENOMEM; - host->dev.parent = dev; host->ops = &faraday_pci_ops; - host->busnr = 0; - host->msi = NULL; - host->map_irq = 
of_irq_parse_and_map_pci; - host->swizzle_irq = pci_common_swizzle; p = pci_host_bridge_priv(host); host->sysdata = p; p->dev = dev; @@ -465,16 +459,10 @@ static int faraday_pci_probe(struct platform_device *pdev) return ret; } - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - p->base = devm_ioremap_resource(dev, regs); + p->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(p->base)) return PTR_ERR(p->base); - ret = pci_parse_request_of_pci_ranges(dev, &host->windows, - &host->dma_ranges, NULL); - if (ret) - return ret; - win = resource_list_first_type(&host->windows, IORESOURCE_IO); if (win) { io = win->res; diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c index 953de57f6c57..6ce34a1deecb 100644 --- a/drivers/pci/controller/pci-host-common.c +++ b/drivers/pci/controller/pci-host-common.c @@ -21,39 +21,32 @@ static void gen_pci_unmap_cfg(void *ptr) } static struct pci_config_window *gen_pci_init(struct device *dev, - struct list_head *resources, const struct pci_ecam_ops *ops) + struct pci_host_bridge *bridge, const struct pci_ecam_ops *ops) { int err; struct resource cfgres; - struct resource *bus_range = NULL; + struct resource_entry *bus; struct pci_config_window *cfg; - /* Parse our PCI ranges and request their resources */ - err = pci_parse_request_of_pci_ranges(dev, resources, NULL, &bus_range); - if (err) - return ERR_PTR(err); - err = of_address_to_resource(dev->of_node, 0, &cfgres); if (err) { dev_err(dev, "missing \"reg\" property\n"); - goto err_out; + return ERR_PTR(err); } - cfg = pci_ecam_create(dev, &cfgres, bus_range, ops); - if (IS_ERR(cfg)) { - err = PTR_ERR(cfg); - goto err_out; - } + bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); + if (!bus) + return ERR_PTR(-ENODEV); + + cfg = pci_ecam_create(dev, &cfgres, bus->res, ops); + if (IS_ERR(cfg)) + return cfg; err = devm_add_action_or_reset(dev, gen_pci_unmap_cfg, cfg); - if (err) { - goto err_out; - } - return cfg; + if (err) + return ERR_PTR(err); -err_out: - pci_free_resource_list(resources); - return ERR_PTR(err); + return cfg; } int pci_host_common_probe(struct platform_device *pdev) @@ -61,9 +54,7 @@ int pci_host_common_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct pci_host_bridge *bridge; struct pci_config_window *cfg; - struct list_head resources; const struct pci_ecam_ops *ops; - int ret; ops = of_device_get_match_data(&pdev->dev); if (!ops) @@ -76,7 +67,7 @@ int pci_host_common_probe(struct platform_device *pdev) of_pci_check_probe_only(); /* Parse and map our Configuration Space windows */ - cfg = gen_pci_init(dev, &resources, ops); + cfg = gen_pci_init(dev, bridge, ops); if (IS_ERR(cfg)) return PTR_ERR(cfg); @@ -84,32 +75,22 @@ int pci_host_common_probe(struct platform_device *pdev) if (!pci_has_flag(PCI_PROBE_ONLY)) pci_add_flags(PCI_REASSIGN_ALL_BUS); - list_splice_init(&resources, &bridge->windows); - bridge->dev.parent = dev; bridge->sysdata = cfg; - bridge->busnr = cfg->busr.start; bridge->ops = (struct pci_ops *)&ops->pci_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - ret = pci_host_probe(bridge); - if (ret < 0) { - pci_free_resource_list(&resources); - return ret; - } + platform_set_drvdata(pdev, bridge); - platform_set_drvdata(pdev, bridge->bus); - return 0; + return pci_host_probe(bridge); } EXPORT_SYMBOL_GPL(pci_host_common_probe); int pci_host_common_remove(struct platform_device *pdev) { - struct pci_bus *bus = platform_get_drvdata(pdev); 
+ struct pci_host_bridge *bridge = platform_get_drvdata(pdev); pci_lock_rescan_remove(); - pci_stop_root_bus(bus); - pci_remove_root_bus(bus); + pci_stop_root_bus(bridge->bus); + pci_remove_root_bus(bridge->bus); pci_unlock_rescan_remove(); return 0; diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index bf40ff09c99d..fc4c3a15e570 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -938,8 +938,9 @@ out: * * Return: 0 on success, -errno on failure */ -int hv_read_config_block(struct pci_dev *pdev, void *buf, unsigned int len, - unsigned int block_id, unsigned int *bytes_returned) +static int hv_read_config_block(struct pci_dev *pdev, void *buf, + unsigned int len, unsigned int block_id, + unsigned int *bytes_returned) { struct hv_pcibus_device *hbus = container_of(pdev->bus->sysdata, struct hv_pcibus_device, @@ -1018,8 +1019,8 @@ static void hv_pci_write_config_compl(void *context, struct pci_response *resp, * * Return: 0 on success, -errno on failure */ -int hv_write_config_block(struct pci_dev *pdev, void *buf, unsigned int len, - unsigned int block_id) +static int hv_write_config_block(struct pci_dev *pdev, void *buf, + unsigned int len, unsigned int block_id) { struct hv_pcibus_device *hbus = container_of(pdev->bus->sysdata, struct hv_pcibus_device, @@ -1087,9 +1088,9 @@ int hv_write_config_block(struct pci_dev *pdev, void *buf, unsigned int len, * * Return: 0 on success, -errno on failure */ -int hv_register_block_invalidate(struct pci_dev *pdev, void *context, - void (*block_invalidate)(void *context, - u64 block_mask)) +static int hv_register_block_invalidate(struct pci_dev *pdev, void *context, + void (*block_invalidate)(void *context, + u64 block_mask)) { struct hv_pcibus_device *hbus = container_of(pdev->bus->sysdata, struct hv_pcibus_device, @@ -2759,10 +2760,8 @@ static int hv_pci_enter_d0(struct hv_device *hdev) struct pci_bus_d0_entry *d0_entry; struct hv_pci_compl comp_pkt; struct pci_packet *pkt; - bool retry = true; int ret; -enter_d0_retry: /* * Tell the host that the bus is ready to use, and moved into the * powered-on state. This includes telling the host which region @@ -2789,38 +2788,6 @@ enter_d0_retry: if (ret) goto exit; - /* - * In certain case (Kdump) the pci device of interest was - * not cleanly shut down and resource is still held on host - * side, the host could return invalid device status. - * We need to explicitly request host to release the resource - * and try to enter D0 again. - */ - if (comp_pkt.completion_status < 0 && retry) { - retry = false; - - dev_err(&hdev->device, "Retrying D0 Entry\n"); - - /* - * Hv_pci_bus_exit() calls hv_send_resource_released() - * to free up resources of its child devices. - * In the kdump kernel we need to set the - * wslot_res_allocated to 255 so it scans all child - * devices to release resources allocated in the - * normal kernel before panic happened. 
- */ - hbus->wslot_res_allocated = 255; - - ret = hv_pci_bus_exit(hdev, true); - - if (ret == 0) { - kfree(pkt); - goto enter_d0_retry; - } - dev_err(&hdev->device, - "Retrying D0 failed with ret %d\n", ret); - } - if (comp_pkt.completion_status < 0) { dev_err(&hdev->device, "PCI Pass-through VSP failed D0 Entry with status %x\n", @@ -3058,6 +3025,7 @@ static int hv_pci_probe(struct hv_device *hdev, struct hv_pcibus_device *hbus; u16 dom_req, dom; char *name; + bool enter_d0_retry = true; int ret; /* @@ -3178,11 +3146,47 @@ static int hv_pci_probe(struct hv_device *hdev, if (ret) goto free_fwnode; +retry: ret = hv_pci_query_relations(hdev); if (ret) goto free_irq_domain; ret = hv_pci_enter_d0(hdev); + /* + * In certain case (Kdump) the pci device of interest was + * not cleanly shut down and resource is still held on host + * side, the host could return invalid device status. + * We need to explicitly request host to release the resource + * and try to enter D0 again. + * Since the hv_pci_bus_exit() call releases structures + * of all its child devices, we need to start the retry from + * hv_pci_query_relations() call, requesting host to send + * the synchronous child device relations message before this + * information is needed in hv_send_resources_allocated() + * call later. + */ + if (ret == -EPROTO && enter_d0_retry) { + enter_d0_retry = false; + + dev_err(&hdev->device, "Retrying D0 Entry\n"); + + /* + * Hv_pci_bus_exit() calls hv_send_resources_released() + * to free up resources of its child devices. + * In the kdump kernel we need to set the + * wslot_res_allocated to 255 so it scans all child + * devices to release resources allocated in the + * normal kernel before panic happened. + */ + hbus->wslot_res_allocated = 255; + ret = hv_pci_bus_exit(hdev, true); + + if (ret == 0) + goto retry; + + dev_err(&hdev->device, + "Retrying D0 failed with ret %d\n", ret); + } if (ret) goto free_irq_domain; diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index 459009c8a4a0..719c19fe2bfb 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -37,11 +37,11 @@ static void bridge_class_quirk(struct pci_dev *dev) { dev->class = PCI_CLASS_BRIDGE_PCI << 8; } -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LOONGSON, +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_PCIE_PORT_0, bridge_class_quirk); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LOONGSON, +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_PCIE_PORT_1, bridge_class_quirk); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LOONGSON, +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_PCIE_PORT_2, bridge_class_quirk); static void system_bus_quirk(struct pci_dev *pdev) @@ -218,14 +218,6 @@ static int loongson_pci_probe(struct platform_device *pdev) } } - err = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (err) { - dev_err(dev, "failed to get bridge resources\n"); - return err; - } - - bridge->dev.parent = dev; bridge->sysdata = priv; bridge->ops = &loongson_pci_ops; bridge->map_irq = loongson_map_irq; diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index 153a64676bc9..c39978b750ec 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -71,7 +71,6 @@ struct mvebu_pcie { struct platform_device *pdev; struct mvebu_pcie_port *ports; struct msi_controller *msi; - struct list_head resources; struct resource io; struct resource realio; struct resource mem; @@ 
-105,6 +104,7 @@ struct mvebu_pcie_port { struct mvebu_pcie_window memwin; struct mvebu_pcie_window iowin; u32 saved_pcie_stat; + struct resource regs; }; static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg) @@ -149,7 +149,9 @@ static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr) /* * Setup PCIE BARs and Address Decode Wins: - * BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks + * BAR[0] -> internal registers (needed for MSI) + * BAR[1] -> covers all DRAM banks + * BAR[2] -> Disabled * WIN[0-3] -> DRAM bank[0-3] */ static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) @@ -203,6 +205,12 @@ static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1)); mvebu_writel(port, ((size - 1) & 0xffff0000) | 1, PCIE_BAR_CTRL_OFF(1)); + + /* + * Point BAR[0] to the device's internal registers. + */ + mvebu_writel(port, round_down(port->regs.start, SZ_1M), PCIE_BAR_LO_OFF(0)); + mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0)); } static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port) @@ -708,14 +716,13 @@ static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev, struct device_node *np, struct mvebu_pcie_port *port) { - struct resource regs; int ret = 0; - ret = of_address_to_resource(np, 0, &regs); + ret = of_address_to_resource(np, 0, &port->regs); if (ret) return (void __iomem *)ERR_PTR(ret); - return devm_ioremap_resource(&pdev->dev, &regs); + return devm_ioremap_resource(&pdev->dev, &port->regs); } #define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03) @@ -961,17 +968,16 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie) { struct device *dev = &pcie->pdev->dev; struct device_node *np = dev->of_node; + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); int ret; - INIT_LIST_HEAD(&pcie->resources); - /* Get the bus range */ ret = of_pci_parse_bus_range(np, &pcie->busn); if (ret) { dev_err(dev, "failed to parse bus-range property: %d\n", ret); return ret; } - pci_add_resource(&pcie->resources, &pcie->busn); + pci_add_resource(&bridge->windows, &pcie->busn); /* Get the PCIe memory aperture */ mvebu_mbus_get_pcie_mem_aperture(&pcie->mem); @@ -981,7 +987,7 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie) } pcie->mem.name = "PCI MEM"; - pci_add_resource(&pcie->resources, &pcie->mem); + pci_add_resource(&bridge->windows, &pcie->mem); /* Get the PCIe IO aperture */ mvebu_mbus_get_pcie_io_aperture(&pcie->io); @@ -994,10 +1000,10 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie) resource_size(&pcie->io) - 1); pcie->realio.name = "PCI I/O"; - pci_add_resource(&pcie->resources, &pcie->realio); + pci_add_resource(&bridge->windows, &pcie->realio); } - return devm_request_pci_bus_resources(dev, &pcie->resources); + return devm_request_pci_bus_resources(dev, &bridge->windows); } /* @@ -1118,13 +1124,8 @@ static int mvebu_pcie_probe(struct platform_device *pdev) pcie->nports = i; - list_splice_init(&pcie->resources, &bridge->windows); - bridge->dev.parent = dev; bridge->sysdata = pcie; - bridge->busnr = 0; bridge->ops = &mvebu_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; bridge->align_resource = mvebu_pcie_align_resource; bridge->msi = pcie->msi; diff --git a/drivers/pci/controller/pci-rcar-gen2.c b/drivers/pci/controller/pci-rcar-gen2.c index 326171cb1a97..c9530038ca9a 100644 --- a/drivers/pci/controller/pci-rcar-gen2.c +++ 
b/drivers/pci/controller/pci-rcar-gen2.c @@ -98,22 +98,17 @@ struct rcar_pci_priv { void __iomem *reg; struct resource mem_res; struct resource *cfg_res; - unsigned busnr; int irq; - unsigned long window_size; - unsigned long window_addr; - unsigned long window_pci; }; /* PCI configuration space operations */ static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn, int where) { - struct pci_sys_data *sys = bus->sysdata; - struct rcar_pci_priv *priv = sys->private_data; + struct rcar_pci_priv *priv = bus->sysdata; int slot, val; - if (sys->busnr != bus->number || PCI_FUNC(devfn)) + if (!pci_is_root_bus(bus) || PCI_FUNC(devfn)) return NULL; /* Only one EHCI/OHCI device built-in */ @@ -132,20 +127,6 @@ static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn, return priv->reg + (slot >> 1) * 0x100 + where; } -/* PCI interrupt mapping */ -static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) -{ - struct pci_sys_data *sys = dev->bus->sysdata; - struct rcar_pci_priv *priv = sys->private_data; - int irq; - - irq = of_irq_parse_and_map_pci(dev, slot, pin); - if (!irq) - irq = priv->irq; - - return irq; -} - #ifdef CONFIG_PCI_DEBUG /* if debug enabled, then attach an error handler irq to the bridge */ @@ -189,19 +170,33 @@ static inline void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) { } #endif /* PCI host controller setup */ -static int rcar_pci_setup(int nr, struct pci_sys_data *sys) +static void rcar_pci_setup(struct rcar_pci_priv *priv) { - struct rcar_pci_priv *priv = sys->private_data; + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(priv); struct device *dev = priv->dev; void __iomem *reg = priv->reg; + struct resource_entry *entry; + unsigned long window_size; + unsigned long window_addr; + unsigned long window_pci; u32 val; - int ret; + + entry = resource_list_first_type(&bridge->dma_ranges, IORESOURCE_MEM); + if (!entry) { + window_addr = 0x40000000; + window_pci = 0x40000000; + window_size = SZ_1G; + } else { + window_addr = entry->res->start; + window_pci = entry->res->start - entry->offset; + window_size = resource_size(entry->res); + } pm_runtime_enable(dev); pm_runtime_get_sync(dev); val = ioread32(reg + RCAR_PCI_UNIT_REV_REG); - dev_info(dev, "PCI: bus%u revision %x\n", sys->busnr, val); + dev_info(dev, "PCI: revision %x\n", val); /* Disable Direct Power Down State and assert reset */ val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD; @@ -214,7 +209,7 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys) RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST); /* Setup PCIAHB window1 size */ - switch (priv->window_size) { + switch (window_size) { case SZ_2G: val |= RCAR_USBCTR_PCIAHB_WIN1_2G; break; @@ -226,8 +221,8 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys) break; default: pr_warn("unknown window size %ld - defaulting to 256M\n", - priv->window_size); - priv->window_size = SZ_256M; + window_size); + window_size = SZ_256M; /* fall-through */ case SZ_256M: val |= RCAR_USBCTR_PCIAHB_WIN1_256M; @@ -245,7 +240,7 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys) iowrite32(val, reg + RCAR_PCI_ARBITER_CTR_REG); /* PCI-AHB mapping */ - iowrite32(priv->window_addr | RCAR_PCIAHB_PREFETCH16, + iowrite32(window_addr | RCAR_PCIAHB_PREFETCH16, reg + RCAR_PCIAHB_WIN1_CTR_REG); /* AHB-PCI mapping: OHCI/EHCI registers */ @@ -256,7 +251,7 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys) iowrite32(RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG, reg + 
RCAR_AHBPCI_WIN1_CTR_REG); /* Set PCI-AHB Window1 address */ - iowrite32(priv->window_pci | PCI_BASE_ADDRESS_MEM_PREFETCH, + iowrite32(window_pci | PCI_BASE_ADDRESS_MEM_PREFETCH, reg + PCI_BASE_ADDRESS_1); /* Set AHB-PCI bridge PCI communication area address */ val = priv->cfg_res->start + RCAR_AHBPCI_PCICOM_OFFSET; @@ -271,18 +266,7 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys) iowrite32(RCAR_PCI_INT_A | RCAR_PCI_INT_B | RCAR_PCI_INT_PME, reg + RCAR_PCI_INT_ENABLE_REG); - if (priv->irq > 0) - rcar_pci_setup_errirq(priv); - - /* Add PCI resources */ - pci_add_resource(&sys->resources, &priv->mem_res); - ret = devm_request_pci_bus_resources(dev, &sys->resources); - if (ret < 0) - return ret; - - /* Setup bus number based on platform device id / of bus-range */ - sys->busnr = priv->busnr; - return 1; + rcar_pci_setup_errirq(priv); } static struct pci_ops rcar_pci_ops = { @@ -291,55 +275,20 @@ static struct pci_ops rcar_pci_ops = { .write = pci_generic_config_write, }; -static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci, - struct device_node *np) -{ - struct device *dev = pci->dev; - struct of_pci_range range; - struct of_pci_range_parser parser; - int index = 0; - - /* Failure to parse is ok as we fall back to defaults */ - if (of_pci_dma_range_parser_init(&parser, np)) - return 0; - - /* Get the dma-ranges from DT */ - for_each_of_pci_range(&parser, &range) { - /* Hardware only allows one inbound 32-bit range */ - if (index) - return -EINVAL; - - pci->window_addr = (unsigned long)range.cpu_addr; - pci->window_pci = (unsigned long)range.pci_addr; - pci->window_size = (unsigned long)range.size; - - /* Catch HW limitations */ - if (!(range.flags & IORESOURCE_PREFETCH)) { - dev_err(dev, "window must be prefetchable\n"); - return -EINVAL; - } - if (pci->window_addr) { - u32 lowaddr = 1 << (ffs(pci->window_addr) - 1); - - if (lowaddr < pci->window_size) { - dev_err(dev, "invalid window size/addr\n"); - return -EINVAL; - } - } - index++; - } - - return 0; -} - static int rcar_pci_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct resource *cfg_res, *mem_res; struct rcar_pci_priv *priv; + struct pci_host_bridge *bridge; void __iomem *reg; - struct hw_pci hw; - void *hw_private[1]; + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*priv)); + if (!bridge) + return -ENOMEM; + + priv = pci_host_bridge_priv(bridge); + bridge->sysdata = priv; cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); reg = devm_ioremap_resource(dev, cfg_res); @@ -353,10 +302,6 @@ static int rcar_pci_probe(struct platform_device *pdev) if (mem_res->start & 0xFFFF) return -EINVAL; - priv = devm_kzalloc(dev, sizeof(struct rcar_pci_priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - priv->mem_res = *mem_res; priv->cfg_res = cfg_res; @@ -369,44 +314,13 @@ static int rcar_pci_probe(struct platform_device *pdev) return priv->irq; } - /* default window addr and size if not specified in DT */ - priv->window_addr = 0x40000000; - priv->window_pci = 0x40000000; - priv->window_size = SZ_1G; - - if (dev->of_node) { - struct resource busnr; - int ret; - - ret = of_pci_parse_bus_range(dev->of_node, &busnr); - if (ret < 0) { - dev_err(dev, "failed to parse bus-range\n"); - return ret; - } - - priv->busnr = busnr.start; - if (busnr.end != busnr.start) - dev_warn(dev, "only one bus number supported\n"); - - ret = rcar_pci_parse_map_dma_ranges(priv, dev->of_node); - if (ret < 0) { - dev_err(dev, "failed to parse dma-range\n"); - return ret; - } - } else { - priv->busnr = 
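The pci-rcar-gen2 conversion drops its hand-rolled dma-ranges parser because the core now hands the driver a ready-made bridge->dma_ranges list; rcar_pci_setup() above simply picks the first memory entry and falls back to the old defaults. A condensed sketch of that lookup, with the defaults and values taken from the hunk above (resource_entry::offset is the CPU-to-PCI address offset):

#include <linux/pci.h>
#include <linux/sizes.h>

/* Condensed from rcar_pci_setup(): pick the single inbound window. */
static void foo_get_inbound_window(struct pci_host_bridge *bridge,
				   unsigned long *cpu, unsigned long *pci,
				   unsigned long *size)
{
	struct resource_entry *entry;

	entry = resource_list_first_type(&bridge->dma_ranges, IORESOURCE_MEM);
	if (!entry) {
		/* No dma-ranges in DT: historical 1 GiB 1:1 default. */
		*cpu = 0x40000000;
		*pci = 0x40000000;
		*size = SZ_1G;
		return;
	}

	*cpu = entry->res->start;			/* CPU address */
	*pci = entry->res->start - entry->offset;	/* PCI bus address */
	*size = resource_size(entry->res);
}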
pdev->id; - } + bridge->ops = &rcar_pci_ops; + + pci_add_flags(PCI_REASSIGN_ALL_BUS); + + rcar_pci_setup(priv); - hw_private[0] = priv; - memset(&hw, 0, sizeof(hw)); - hw.nr_controllers = ARRAY_SIZE(hw_private); - hw.io_optional = 1; - hw.private_data = hw_private; - hw.map_irq = rcar_pci_map_irq; - hw.ops = &rcar_pci_ops; - hw.setup = rcar_pci_setup; - pci_common_init_dev(dev, &hw); - return 0; + return pci_host_probe(bridge); } static const struct of_device_id rcar_pci_of_match[] = { diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index 235b456698fc..c1d34353c29b 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -181,13 +181,6 @@ #define AFI_PEXBIAS_CTRL_0 0x168 -#define RP_PRIV_XP_DL 0x00000494 -#define RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD (0x1ff << 1) - -#define RP_RX_HDR_LIMIT 0x00000e00 -#define RP_RX_HDR_LIMIT_PW_MASK (0xff << 8) -#define RP_RX_HDR_LIMIT_PW (0x0e << 8) - #define RP_ECTL_2_R1 0x00000e84 #define RP_ECTL_2_R1_RX_CTLE_1C_MASK 0xffff @@ -323,7 +316,6 @@ struct tegra_pcie_soc { bool program_uphy; bool update_clamp_threshold; bool program_deskew_time; - bool raw_violation_fixup; bool update_fc_timer; bool has_cache_bars; struct { @@ -659,23 +651,6 @@ static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port) writel(value, port->base + RP_VEND_CTL0); } - /* Fixup for read after write violation. */ - if (soc->raw_violation_fixup) { - value = readl(port->base + RP_RX_HDR_LIMIT); - value &= ~RP_RX_HDR_LIMIT_PW_MASK; - value |= RP_RX_HDR_LIMIT_PW; - writel(value, port->base + RP_RX_HDR_LIMIT); - - value = readl(port->base + RP_PRIV_XP_DL); - value |= RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD; - writel(value, port->base + RP_PRIV_XP_DL); - - value = readl(port->base + RP_VEND_XP); - value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK; - value |= soc->update_fc_threshold; - writel(value, port->base + RP_VEND_XP); - } - if (soc->update_fc_timer) { value = readl(port->base + RP_VEND_XP); value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK; @@ -1462,7 +1437,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie) { struct device *dev = pcie->dev; struct platform_device *pdev = to_platform_device(dev); - struct resource *pads, *afi, *res; + struct resource *res; const struct tegra_pcie_soc *soc = pcie->soc; int err; @@ -1486,15 +1461,13 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie) } } - pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads"); - pcie->pads = devm_ioremap_resource(dev, pads); + pcie->pads = devm_platform_ioremap_resource_byname(pdev, "pads"); if (IS_ERR(pcie->pads)) { err = PTR_ERR(pcie->pads); goto phys_put; } - afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi"); - pcie->afi = devm_ioremap_resource(dev, afi); + pcie->afi = devm_platform_ioremap_resource_byname(pdev, "afi"); if (IS_ERR(pcie->afi)) { err = PTR_ERR(pcie->afi); goto phys_put; @@ -1520,10 +1493,8 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie) /* request interrupt */ err = platform_get_irq_byname(pdev, "intr"); - if (err < 0) { - dev_err(dev, "failed to get IRQ: %d\n", err); + if (err < 0) goto phys_put; - } pcie->irq = err; @@ -1738,10 +1709,8 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie) } err = platform_get_irq_byname(pdev, "msi"); - if (err < 0) { - dev_err(dev, "failed to get IRQ: %d\n", err); + if (err < 0) goto free_irq_domain; - } msi->irq = err; @@ -2025,7 +1994,7 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask) 
pcie->supplies[i++].supply = "hvdd-pex"; pcie->supplies[i++].supply = "vddio-pexctl-aud"; } else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) { - pcie->num_supplies = 6; + pcie->num_supplies = 3; pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, sizeof(*pcie->supplies), @@ -2033,14 +2002,11 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask) if (!pcie->supplies) return -ENOMEM; - pcie->supplies[i++].supply = "avdd-pll-uerefe"; pcie->supplies[i++].supply = "hvddio-pex"; pcie->supplies[i++].supply = "dvddio-pex"; - pcie->supplies[i++].supply = "dvdd-pex-pll"; - pcie->supplies[i++].supply = "hvdd-pex-pll-e"; pcie->supplies[i++].supply = "vddio-pex-ctl"; } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) { - pcie->num_supplies = 7; + pcie->num_supplies = 4; pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, sizeof(*pcie->supplies), @@ -2050,11 +2016,8 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask) pcie->supplies[i++].supply = "avddio-pex"; pcie->supplies[i++].supply = "dvddio-pex"; - pcie->supplies[i++].supply = "avdd-pex-pll"; pcie->supplies[i++].supply = "hvdd-pex"; - pcie->supplies[i++].supply = "hvdd-pex-pll-e"; pcie->supplies[i++].supply = "vddio-pex-ctl"; - pcie->supplies[i++].supply = "avdd-pll-erefe"; } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) { bool need_pexa = false, need_pexb = false; @@ -2416,7 +2379,6 @@ static const struct tegra_pcie_soc tegra20_pcie = { .program_uphy = true, .update_clamp_threshold = false, .program_deskew_time = false, - .raw_violation_fixup = false, .update_fc_timer = false, .has_cache_bars = true, .ectl.enable = false, @@ -2446,7 +2408,6 @@ static const struct tegra_pcie_soc tegra30_pcie = { .program_uphy = true, .update_clamp_threshold = false, .program_deskew_time = false, - .raw_violation_fixup = false, .update_fc_timer = false, .has_cache_bars = false, .ectl.enable = false, @@ -2459,8 +2420,6 @@ static const struct tegra_pcie_soc tegra124_pcie = { .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, .pads_refclk_cfg0 = 0x44ac44ac, - /* FC threshold is bit[25:18] */ - .update_fc_threshold = 0x03fc0000, .has_pex_clkreq_en = true, .has_pex_bias_ctrl = true, .has_intr_prsnt_sense = true, @@ -2470,7 +2429,6 @@ static const struct tegra_pcie_soc tegra124_pcie = { .program_uphy = true, .update_clamp_threshold = true, .program_deskew_time = false, - .raw_violation_fixup = true, .update_fc_timer = false, .has_cache_bars = false, .ectl.enable = false, @@ -2494,7 +2452,6 @@ static const struct tegra_pcie_soc tegra210_pcie = { .program_uphy = true, .update_clamp_threshold = true, .program_deskew_time = true, - .raw_violation_fixup = false, .update_fc_timer = true, .has_cache_bars = false, .ectl = { @@ -2536,7 +2493,6 @@ static const struct tegra_pcie_soc tegra186_pcie = { .program_uphy = false, .update_clamp_threshold = false, .program_deskew_time = false, - .raw_violation_fixup = false, .update_fc_timer = false, .has_cache_bars = false, .ectl.enable = false, @@ -2670,8 +2626,6 @@ static int tegra_pcie_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct pci_host_bridge *host; struct tegra_pcie *pcie; - struct pci_bus *child; - struct resource *bus; int err; host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); @@ -2686,12 +2640,6 @@ static int tegra_pcie_probe(struct platform_device *pdev) INIT_LIST_HEAD(&pcie->ports); pcie->dev = dev; - err = pci_parse_request_of_pci_ranges(dev, &host->windows, 
NULL, &bus); - if (err) { - dev_err(dev, "Getting bridge resources failed\n"); - return err; - } - err = tegra_pcie_parse_dt(pcie); if (err < 0) return err; @@ -2715,26 +2663,15 @@ static int tegra_pcie_probe(struct platform_device *pdev) goto pm_runtime_put; } - host->busnr = bus->start; - host->dev.parent = &pdev->dev; host->ops = &tegra_pcie_ops; host->map_irq = tegra_pcie_map_irq; - host->swizzle_irq = pci_common_swizzle; - err = pci_scan_root_bus_bridge(host); + err = pci_host_probe(host); if (err < 0) { dev_err(dev, "failed to register host: %d\n", err); goto pm_runtime_put; } - pci_bus_size_bridges(host->bus); - pci_bus_assign_resources(host->bus); - - list_for_each_entry(child, &host->bus->children, node) - pcie_bus_configure_settings(child); - - pci_bus_add_devices(host->bus); - if (IS_ENABLED(CONFIG_DEBUG_FS)) { err = tegra_pcie_debugfs_init(pcie); if (err < 0) diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c index 3681e5af3878..1f54334f09f7 100644 --- a/drivers/pci/controller/pci-v3-semi.c +++ b/drivers/pci/controller/pci-v3-semi.c @@ -239,7 +239,6 @@ struct v3_pci { struct device *dev; void __iomem *base; void __iomem *config_base; - struct pci_bus *bus; u32 config_mem; u32 non_pre_mem; u32 pre_mem; @@ -585,8 +584,6 @@ static int v3_pci_setup_resource(struct v3_pci *v3, } break; case IORESOURCE_BUS: - dev_dbg(dev, "BUS %pR\n", win->res); - host->busnr = win->res->start; break; default: dev_info(dev, "Unknown resource type %lu\n", @@ -724,12 +721,7 @@ static int v3_pci_probe(struct platform_device *pdev) if (!host) return -ENOMEM; - host->dev.parent = dev; host->ops = &v3_pci_ops; - host->busnr = 0; - host->msi = NULL; - host->map_irq = of_irq_parse_and_map_pci; - host->swizzle_irq = pci_common_swizzle; v3 = pci_host_bridge_priv(host); host->sysdata = v3; v3->dev = dev; @@ -770,17 +762,11 @@ static int v3_pci_probe(struct platform_device *pdev) if (IS_ERR(v3->config_base)) return PTR_ERR(v3->config_base); - ret = pci_parse_request_of_pci_ranges(dev, &host->windows, - &host->dma_ranges, NULL); - if (ret) - return ret; - /* Get and request error IRQ resource */ irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "unable to obtain PCIv3 error IRQ\n"); + if (irq < 0) return irq; - } + ret = devm_request_irq(dev, irq, v3_irq, 0, "PCIv3 error", v3); if (ret < 0) { @@ -904,17 +890,7 @@ static int v3_pci_probe(struct platform_device *pdev) val |= V3_SYSTEM_M_LOCK; writew(val, v3->base + V3_SYSTEM); - ret = pci_scan_root_bus_bridge(host); - if (ret) { - dev_err(dev, "failed to register host: %d\n", ret); - return ret; - } - v3->bus = host->bus; - - pci_bus_assign_resources(v3->bus); - pci_bus_add_devices(v3->bus); - - return 0; + return pci_host_probe(host); } static const struct of_device_id v3_pci_of_match[] = { diff --git a/drivers/pci/controller/pci-versatile.c b/drivers/pci/controller/pci-versatile.c index b911359b6d81..653d5d0ecf81 100644 --- a/drivers/pci/controller/pci-versatile.c +++ b/drivers/pci/controller/pci-versatile.c @@ -67,23 +67,20 @@ static int versatile_pci_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct resource *res; struct resource_entry *entry; - int ret, i, myslot = -1, mem = 1; + int i, myslot = -1, mem = 1; u32 val; void __iomem *local_pci_cfg_base; - struct pci_bus *bus, *child; struct pci_host_bridge *bridge; bridge = devm_pci_alloc_host_bridge(dev, 0); if (!bridge) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - versatile_pci_base = 
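The pattern repeated across pci-tegra, pci-v3-semi, pci-versatile and most of the drivers below is the same: devm_pci_alloc_host_bridge() (as of this series) already parses the DT ranges/dma-ranges/bus-range properties into bridge->windows and bridge->dma_ranges, which is why the explicit pci_parse_request_of_pci_ranges() calls disappear, and pci_host_probe() takes over the scan/assign/add sequence the drivers used to open-code. Roughly, the removed boilerplate amounted to the following (a simplified sketch of what pci_host_probe() now does for them, not the exact drivers/pci/probe.c implementation):

#include <linux/pci.h>

static int old_style_host_activate(struct pci_host_bridge *bridge)
{
	struct pci_bus *child;
	int ret;

	ret = pci_scan_root_bus_bridge(bridge);	/* enumerate devices */
	if (ret < 0)
		return ret;

	pci_bus_size_bridges(bridge->bus);	/* size bridge windows */
	pci_bus_assign_resources(bridge->bus);	/* assign BARs and windows */

	/* tune MPS/MRRS per downstream bus */
	list_for_each_entry(child, &bridge->bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bridge->bus);	/* register and bind drivers */
	return 0;
}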
devm_ioremap_resource(dev, res); + versatile_pci_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(versatile_pci_base)) return PTR_ERR(versatile_pci_base); - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - versatile_cfg_base[0] = devm_ioremap_resource(dev, res); + versatile_cfg_base[0] = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(versatile_cfg_base[0])) return PTR_ERR(versatile_cfg_base[0]); @@ -92,11 +89,6 @@ static int versatile_pci_probe(struct platform_device *pdev) if (IS_ERR(versatile_cfg_base[1])) return PTR_ERR(versatile_cfg_base[1]); - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - NULL, NULL); - if (ret) - return ret; - resource_list_for_each_entry(entry, &bridge->windows) { if (resource_type(entry->res) == IORESOURCE_MEM) { writel(entry->res->start >> 28, PCI_IMAP(mem)); @@ -154,28 +146,11 @@ static int versatile_pci_probe(struct platform_device *pdev) */ writel(0, versatile_cfg_base[0] + PCI_INTERRUPT_LINE); - pci_add_flags(PCI_ENABLE_PROC_DOMAINS); pci_add_flags(PCI_REASSIGN_ALL_BUS); - bridge->dev.parent = dev; - bridge->sysdata = NULL; - bridge->busnr = 0; bridge->ops = &pci_versatile_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - - ret = pci_scan_root_bus_bridge(bridge); - if (ret < 0) - return ret; - - bus = bridge->bus; - - pci_assign_unassigned_bus_resources(bus); - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - pci_bus_add_devices(bus); - return 0; + return pci_host_probe(bridge); } static const struct of_device_id versatile_pci_of_match[] = { diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c index f4c02da84e59..02271c6d17a1 100644 --- a/drivers/pci/controller/pci-xgene-msi.c +++ b/drivers/pci/controller/pci-xgene-msi.c @@ -478,8 +478,6 @@ static int xgene_msi_probe(struct platform_device *pdev) for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { virt_msir = platform_get_irq(pdev, irq_index); if (virt_msir < 0) { - dev_err(&pdev->dev, "Cannot translate IRQ index %d\n", - irq_index); rc = virt_msir; goto error; } diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c index d1efa8ffbae1..8e0db84f089d 100644 --- a/drivers/pci/controller/pci-xgene.c +++ b/drivers/pci/controller/pci-xgene.c @@ -355,8 +355,7 @@ static int xgene_pcie_map_reg(struct xgene_pcie_port *port, if (IS_ERR(port->csr_base)) return PTR_ERR(port->csr_base); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); - port->cfg_base = devm_ioremap_resource(dev, res); + port->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg"); if (IS_ERR(port->cfg_base)) return PTR_ERR(port->cfg_base); port->cfg_addr = res->start; @@ -591,7 +590,6 @@ static int xgene_pcie_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *dn = dev->of_node; struct xgene_pcie_port *port; - struct pci_bus *bus, *child; struct pci_host_bridge *bridge; int ret; @@ -616,33 +614,14 @@ static int xgene_pcie_probe(struct platform_device *pdev) if (ret) return ret; - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (ret) - return ret; - ret = xgene_pcie_setup(port); if (ret) return ret; - bridge->dev.parent = dev; bridge->sysdata = port; - bridge->busnr = 0; bridge->ops = &xgene_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - ret = pci_scan_root_bus_bridge(bridge); - if (ret < 0) - 
return ret; - - bus = bridge->bus; - - pci_assign_unassigned_bus_resources(bus); - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - pci_bus_add_devices(bus); - return 0; + return pci_host_probe(bridge); } static const struct of_device_id xgene_pcie_match_table[] = { diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c index 16d938920ca5..e1636f7714ca 100644 --- a/drivers/pci/controller/pcie-altera-msi.c +++ b/drivers/pci/controller/pcie-altera-msi.c @@ -228,8 +228,7 @@ static int altera_msi_probe(struct platform_device *pdev) mutex_init(&msi->lock); msi->pdev = pdev; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr"); - msi->csr_base = devm_ioremap_resource(&pdev->dev, res); + msi->csr_base = devm_platform_ioremap_resource_byname(pdev, "csr"); if (IS_ERR(msi->csr_base)) { dev_err(&pdev->dev, "failed to map csr memory\n"); return PTR_ERR(msi->csr_base); @@ -256,7 +255,6 @@ static int altera_msi_probe(struct platform_device *pdev) msi->irq = platform_get_irq(pdev, 0); if (msi->irq < 0) { - dev_err(&pdev->dev, "failed to map IRQ: %d\n", msi->irq); ret = msi->irq; goto err; } diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c index 24cb1c331058..523bd928b380 100644 --- a/drivers/pci/controller/pcie-altera.c +++ b/drivers/pci/controller/pcie-altera.c @@ -694,29 +694,23 @@ static void altera_pcie_irq_teardown(struct altera_pcie *pcie) static int altera_pcie_parse_dt(struct altera_pcie *pcie) { - struct device *dev = &pcie->pdev->dev; struct platform_device *pdev = pcie->pdev; - struct resource *cra; - struct resource *hip; - cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra"); - pcie->cra_base = devm_ioremap_resource(dev, cra); + pcie->cra_base = devm_platform_ioremap_resource_byname(pdev, "Cra"); if (IS_ERR(pcie->cra_base)) return PTR_ERR(pcie->cra_base); if (pcie->pcie_data->version == ALTERA_PCIE_V2) { - hip = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Hip"); - pcie->hip_base = devm_ioremap_resource(&pdev->dev, hip); + pcie->hip_base = + devm_platform_ioremap_resource_byname(pdev, "Hip"); if (IS_ERR(pcie->hip_base)) return PTR_ERR(pcie->hip_base); } /* setup IRQ */ pcie->irq = platform_get_irq(pdev, 0); - if (pcie->irq < 0) { - dev_err(dev, "failed to get IRQ: %d\n", pcie->irq); + if (pcie->irq < 0) return pcie->irq; - } irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie); return 0; @@ -773,8 +767,6 @@ static int altera_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct altera_pcie *pcie; - struct pci_bus *bus; - struct pci_bus *child; struct pci_host_bridge *bridge; int ret; const struct of_device_id *match; @@ -799,13 +791,6 @@ static int altera_pcie_probe(struct platform_device *pdev) return ret; } - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (ret) { - dev_err(dev, "Failed add resources\n"); - return ret; - } - ret = altera_pcie_init_irq_domain(pcie); if (ret) { dev_err(dev, "Failed creating IRQ Domain\n"); @@ -818,27 +803,11 @@ static int altera_pcie_probe(struct platform_device *pdev) cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE); altera_pcie_host_init(pcie); - bridge->dev.parent = dev; bridge->sysdata = pcie; bridge->busnr = pcie->root_bus_nr; bridge->ops = &altera_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - ret = pci_scan_root_bus_bridge(bridge); - if (ret < 0) - return 
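The ioremap changes in the X-Gene, Altera MSI and later MediaTek/Rockchip hunks are straight substitutions: devm_platform_ioremap_resource_byname() is the platform_get_resource_byname() + devm_ioremap_resource() pair folded into one call, and devm_platform_ioremap_resource() is the index-based variant. Worth noting, with the caveat that the rest of xgene_pcie_map_reg() is not shown here: the X-Gene "cfg" hunk above still reads res->start for port->cfg_addr even though it no longer assigns res for "cfg", so that line depends on res staying valid from code outside the hunk. The dropped dev_err() calls around platform_get_irq()/platform_get_irq_byname() likewise rely on those helpers already printing an error themselves (except for probe deferral) on the kernels this series targets. A minimal sketch of the substitution, using a made-up "csr" resource name:

#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *foo_map_csr(struct platform_device *pdev)
{
	/*
	 * Equivalent to the old two-step form:
	 *   res  = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
	 *   base = devm_ioremap_resource(&pdev->dev, res);
	 * Returns an ERR_PTR() on failure, exactly as before.
	 */
	return devm_platform_ioremap_resource_byname(pdev, "csr");
}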
ret; - - bus = bridge->bus; - - pci_assign_unassigned_bus_resources(bus); - - /* Configure PCI Express setting. */ - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - - pci_bus_add_devices(bus); - return ret; + return pci_host_probe(bridge); } static int altera_pcie_remove(struct platform_device *pdev) diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index 7730ea845ff2..85fa7d54f11f 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -172,7 +172,6 @@ struct brcm_pcie { struct device *dev; void __iomem *base; struct clk *clk; - struct pci_bus *root_bus; struct device_node *np; bool ssc; int gen; @@ -919,9 +918,10 @@ static void __brcm_pcie_remove(struct brcm_pcie *pcie) static int brcm_pcie_remove(struct platform_device *pdev) { struct brcm_pcie *pcie = platform_get_drvdata(pdev); + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); - pci_stop_root_bus(pcie->root_bus); - pci_remove_root_bus(pcie->root_bus); + pci_stop_root_bus(bridge->bus); + pci_remove_root_bus(bridge->bus); __brcm_pcie_remove(pcie); return 0; @@ -933,8 +933,6 @@ static int brcm_pcie_probe(struct platform_device *pdev) struct pci_host_bridge *bridge; struct device_node *fw_np; struct brcm_pcie *pcie; - struct pci_bus *child; - struct resource *res; int ret; /* @@ -959,8 +957,7 @@ static int brcm_pcie_probe(struct platform_device *pdev) pcie->dev = &pdev->dev; pcie->np = np; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pcie->base = devm_ioremap_resource(&pdev->dev, res); + pcie->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pcie->base)) return PTR_ERR(pcie->base); @@ -973,11 +970,6 @@ static int brcm_pcie_probe(struct platform_device *pdev) pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc"); - ret = pci_parse_request_of_pci_ranges(pcie->dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (ret) - return ret; - ret = clk_prepare_enable(pcie->clk); if (ret) { dev_err(&pdev->dev, "could not enable clock\n"); @@ -997,27 +989,12 @@ static int brcm_pcie_probe(struct platform_device *pdev) } } - bridge->dev.parent = &pdev->dev; - bridge->busnr = 0; bridge->ops = &brcm_pcie_ops; bridge->sysdata = pcie; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - ret = pci_scan_root_bus_bridge(bridge); - if (ret < 0) { - dev_err(pcie->dev, "Scanning root bridge failed\n"); - goto fail; - } - - pci_assign_unassigned_bus_resources(bridge->bus); - list_for_each_entry(child, &bridge->bus->children, node) - pcie_bus_configure_settings(child); - pci_bus_add_devices(bridge->bus); platform_set_drvdata(pdev, pcie); - pcie->root_bus = bridge->bus; - return 0; + return pci_host_probe(bridge); fail: __brcm_pcie_remove(pcie); return ret; diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c index ff0a81a632a1..a956b0c18bd1 100644 --- a/drivers/pci/controller/pcie-iproc-platform.c +++ b/drivers/pci/controller/pcie-iproc-platform.c @@ -95,20 +95,14 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) if (IS_ERR(pcie->phy)) return PTR_ERR(pcie->phy); - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (ret) { - dev_err(dev, "unable to get PCI host bridge resources\n"); - return ret; - } - /* PAXC doesn't support legacy IRQs, skip mapping */ switch (pcie->type) { case IPROC_PCIE_PAXC: case IPROC_PCIE_PAXC_V2: + pcie->map_irq = 0; 
break; default: - pcie->map_irq = of_irq_parse_and_map_pci; + break; } ret = iproc_pcie_setup(pcie, &bridge->windows); diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c index 8c7f875acf7f..905e93808243 100644 --- a/drivers/pci/controller/pcie-iproc.c +++ b/drivers/pci/controller/pcie-iproc.c @@ -1470,7 +1470,6 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) { struct device *dev; int ret; - struct pci_bus *child; struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); dev = pcie->dev; @@ -1524,28 +1523,16 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) if (iproc_pcie_msi_enable(pcie)) dev_info(dev, "not using iProc MSI\n"); - host->busnr = 0; - host->dev.parent = dev; host->ops = &iproc_pcie_ops; host->sysdata = pcie; host->map_irq = pcie->map_irq; - host->swizzle_irq = pci_common_swizzle; - ret = pci_scan_root_bus_bridge(host); + ret = pci_host_probe(host); if (ret < 0) { dev_err(dev, "failed to scan host: %d\n", ret); goto err_power_off_phy; } - pci_assign_unassigned_bus_resources(host->bus); - - pcie->root_bus = host->bus; - - list_for_each_entry(child, &host->bus->children, node) - pcie_bus_configure_settings(child); - - pci_bus_add_devices(host->bus); - return 0; err_power_off_phy: @@ -1558,8 +1545,10 @@ EXPORT_SYMBOL(iproc_pcie_setup); int iproc_pcie_remove(struct iproc_pcie *pcie) { - pci_stop_root_bus(pcie->root_bus); - pci_remove_root_bus(pcie->root_bus); + struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); + + pci_stop_root_bus(host->bus); + pci_remove_root_bus(host->bus); iproc_pcie_msi_disable(pcie); diff --git a/drivers/pci/controller/pcie-iproc.h b/drivers/pci/controller/pcie-iproc.h index 4f03ea539805..c2676e442f55 100644 --- a/drivers/pci/controller/pcie-iproc.h +++ b/drivers/pci/controller/pcie-iproc.h @@ -54,7 +54,6 @@ struct iproc_msi; * @reg_offsets: register offsets * @base: PCIe host controller I/O register base * @base_addr: PCIe host controller register base physical address - * @root_bus: pointer to root bus * @phy: optional PHY device that controls the Serdes * @map_irq: function callback to map interrupts * @ep_is_internal: indicates an internal emulated endpoint device is connected @@ -85,7 +84,6 @@ struct iproc_pcie { void __iomem *base; phys_addr_t base_addr; struct resource mem; - struct pci_bus *root_bus; struct phy *phy; int (*map_irq)(const struct pci_dev *, u8, u8); bool ep_is_internal; diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index ebfa7d5a4e2d..cf4c18f0c25a 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -209,7 +209,6 @@ struct mtk_pcie_port { * @mem: non-prefetchable memory resource * @ports: pointer to PCIe port information * @soc: pointer to SoC-dependent operations - * @busnr: root bus number */ struct mtk_pcie { struct device *dev; @@ -218,7 +217,6 @@ struct mtk_pcie { struct list_head ports; const struct mtk_pcie_soc *soc; - unsigned int busnr; }; static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie) @@ -905,7 +903,6 @@ static int mtk_pcie_parse_port(struct mtk_pcie *pcie, int slot) { struct mtk_pcie_port *port; - struct resource *regs; struct device *dev = pcie->dev; struct platform_device *pdev = to_platform_device(dev); char name[10]; @@ -916,8 +913,7 @@ static int mtk_pcie_parse_port(struct mtk_pcie *pcie, return -ENOMEM; snprintf(name, sizeof(name), "port%d", slot); - regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, 
name); - port->base = devm_ioremap_resource(dev, regs); + port->base = devm_platform_ioremap_resource_byname(pdev, name); if (IS_ERR(port->base)) { dev_err(dev, "failed to map port%d base\n", slot); return PTR_ERR(port->base); @@ -1031,18 +1027,8 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie) struct device *dev = pcie->dev; struct device_node *node = dev->of_node, *child; struct mtk_pcie_port *port, *tmp; - struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); - struct list_head *windows = &host->windows; - struct resource *bus; int err; - err = pci_parse_request_of_pci_ranges(dev, windows, - &host->dma_ranges, &bus); - if (err) - return err; - - pcie->busnr = bus->start; - for_each_available_child_of_node(node, child) { int slot; @@ -1096,11 +1082,7 @@ static int mtk_pcie_probe(struct platform_device *pdev) if (err) return err; - host->busnr = pcie->busnr; - host->dev.parent = pcie->dev; host->ops = pcie->soc->ops; - host->map_irq = of_irq_parse_and_map_pci; - host->swizzle_irq = pci_common_swizzle; host->sysdata = pcie; err = pci_host_probe(host); diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index d210a36561be..cdc0963f154e 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -53,8 +53,6 @@ struct rcar_pcie_host { struct device *dev; struct phy *phy; void __iomem *base; - struct list_head resources; - int root_bus_nr; struct clk *bus_clk; struct rcar_msi msi; int (*phy_init_fn)(struct rcar_pcie_host *host); @@ -100,22 +98,14 @@ static int rcar_pcie_config_access(struct rcar_pcie_host *host, if (dev != 0) return PCIBIOS_DEVICE_NOT_FOUND; - if (access_type == RCAR_PCI_ACCESS_READ) { + if (access_type == RCAR_PCI_ACCESS_READ) *data = rcar_pci_read_reg(pcie, PCICONF(index)); - } else { - /* Keep an eye out for changes to the root bus number */ - if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS)) - host->root_bus_nr = *data & 0xff; - + else rcar_pci_write_reg(pcie, *data, PCICONF(index)); - } return PCIBIOS_SUCCESSFUL; } - if (host->root_bus_nr < 0) - return PCIBIOS_DEVICE_NOT_FOUND; - /* Clear errors */ rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR); @@ -124,7 +114,7 @@ static int rcar_pcie_config_access(struct rcar_pcie_host *host, PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR); /* Enable the configuration access */ - if (bus->parent->number == host->root_bus_nr) + if (pci_is_root_bus(bus->parent)) rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR); else rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR); @@ -212,38 +202,6 @@ static struct pci_ops rcar_pcie_ops = { .write = rcar_pcie_write_conf, }; -static int rcar_pcie_setup(struct list_head *resource, - struct rcar_pcie_host *host) -{ - struct resource_entry *win; - int i = 0; - - /* Setup PCI resources */ - resource_list_for_each_entry(win, &host->resources) { - struct resource *res = win->res; - - if (!res->flags) - continue; - - switch (resource_type(res)) { - case IORESOURCE_IO: - case IORESOURCE_MEM: - rcar_pcie_set_outbound(&host->pcie, i, win); - i++; - break; - case IORESOURCE_BUS: - host->root_bus_nr = res->start; - break; - default: - continue; - } - - pci_add_resource(resource, res); - } - - return 1; -} - static void rcar_pcie_force_speedup(struct rcar_pcie *pcie) { struct device *dev = pcie->dev; @@ -301,6 +259,7 @@ done: static void rcar_pcie_hw_enable(struct rcar_pcie_host *host) { struct rcar_pcie *pcie = &host->pcie; + struct pci_host_bridge 
*bridge = pci_host_bridge_from_priv(host); struct resource_entry *win; LIST_HEAD(res); int i = 0; @@ -309,7 +268,7 @@ static void rcar_pcie_hw_enable(struct rcar_pcie_host *host) rcar_pcie_force_speedup(pcie); /* Setup PCI resources */ - resource_list_for_each_entry(win, &host->resources) { + resource_list_for_each_entry(win, &bridge->windows) { struct resource *res = win->res; if (!res->flags) @@ -328,42 +287,17 @@ static void rcar_pcie_hw_enable(struct rcar_pcie_host *host) static int rcar_pcie_enable(struct rcar_pcie_host *host) { struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host); - struct rcar_pcie *pcie = &host->pcie; - struct device *dev = pcie->dev; - struct pci_bus *bus, *child; - int ret; - - /* Try setting 5 GT/s link speed */ - rcar_pcie_force_speedup(pcie); - rcar_pcie_setup(&bridge->windows, host); + rcar_pcie_hw_enable(host); pci_add_flags(PCI_REASSIGN_ALL_BUS); - bridge->dev.parent = dev; bridge->sysdata = host; - bridge->busnr = host->root_bus_nr; bridge->ops = &rcar_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; if (IS_ENABLED(CONFIG_PCI_MSI)) bridge->msi = &host->msi.chip; - ret = pci_scan_root_bus_bridge(bridge); - if (ret < 0) - return ret; - - bus = bridge->bus; - - pci_bus_size_bridges(bus); - pci_bus_assign_resources(bus); - - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - - pci_bus_add_devices(bus); - - return 0; + return pci_host_probe(bridge); } static int phy_wait_for_ack(struct rcar_pcie *pcie) @@ -968,7 +902,7 @@ static int rcar_pcie_probe(struct platform_device *pdev) int err; struct pci_host_bridge *bridge; - bridge = pci_alloc_host_bridge(sizeof(*host)); + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host)); if (!bridge) return -ENOMEM; @@ -977,16 +911,11 @@ static int rcar_pcie_probe(struct platform_device *pdev) pcie->dev = dev; platform_set_drvdata(pdev, host); - err = pci_parse_request_of_pci_ranges(dev, &host->resources, - &bridge->dma_ranges, NULL); - if (err) - goto err_free_bridge; - pm_runtime_enable(pcie->dev); err = pm_runtime_get_sync(pcie->dev); if (err < 0) { dev_err(pcie->dev, "pm_runtime_get_sync failed\n"); - goto err_pm_disable; + goto err_pm_put; } err = rcar_pcie_get_resources(host); @@ -1057,13 +986,7 @@ err_unmap_msi_irqs: err_pm_put: pm_runtime_put(dev); - -err_pm_disable: pm_runtime_disable(dev); - pci_free_resource_list(&host->resources); - -err_free_bridge: - pci_free_host_bridge(bridge); return err; } diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c index 5eaf36629a75..7631dc3961c1 100644 --- a/drivers/pci/controller/pcie-rockchip-ep.c +++ b/drivers/pci/controller/pcie-rockchip-ep.c @@ -22,6 +22,7 @@ /** * struct rockchip_pcie_ep - private data for PCIe endpoint controller driver * @rockchip: Rockchip PCIe controller + * @epc: PCI EPC device * @max_regions: maximum number of regions supported by hardware * @ob_region_map: bitmask of mapped outbound regions * @ob_addr: base addresses in the AXI bus where the outbound regions start diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index 94af6f5828a3..0bb2fb3e8a0b 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -72,14 +72,14 @@ static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip, struct pci_bus *bus, int dev) { /* access only one slot on each root port */ - if (bus->number == 
rockchip->root_bus_nr && dev > 0) + if (pci_is_root_bus(bus) && dev > 0) return 0; /* * do not read more than one device on the bus directly attached * to RC's downstream side. */ - if (bus->primary == rockchip->root_bus_nr && dev > 0) + if (pci_is_root_bus(bus->parent) && dev > 0) return 0; return 1; @@ -170,7 +170,7 @@ static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip, return PCIBIOS_BAD_REGISTER_NUMBER; } - if (bus->parent->number == rockchip->root_bus_nr) + if (pci_is_root_bus(bus->parent)) rockchip_pcie_cfg_configuration_accesses(rockchip, AXI_WRAPPER_TYPE0_CFG); else @@ -201,7 +201,7 @@ static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip, if (!IS_ALIGNED(busdev, size)) return PCIBIOS_BAD_REGISTER_NUMBER; - if (bus->parent->number == rockchip->root_bus_nr) + if (pci_is_root_bus(bus->parent)) rockchip_pcie_cfg_configuration_accesses(rockchip, AXI_WRAPPER_TYPE0_CFG); else @@ -230,7 +230,7 @@ static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, return PCIBIOS_DEVICE_NOT_FOUND; } - if (bus->number == rockchip->root_bus_nr) + if (pci_is_root_bus(bus)) return rockchip_pcie_rd_own_conf(rockchip, where, size, val); return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size, @@ -245,7 +245,7 @@ static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn, if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) return PCIBIOS_DEVICE_NOT_FOUND; - if (bus->number == rockchip->root_bus_nr) + if (pci_is_root_bus(bus)) return rockchip_pcie_wr_own_conf(rockchip, where, size, val); return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size, @@ -549,10 +549,8 @@ static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip) struct platform_device *pdev = to_platform_device(dev); irq = platform_get_irq_byname(pdev, "sys"); - if (irq < 0) { - dev_err(dev, "missing sys IRQ resource\n"); + if (irq < 0) return irq; - } err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler, IRQF_SHARED, "pcie-sys", rockchip); @@ -562,20 +560,16 @@ static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip) } irq = platform_get_irq_byname(pdev, "legacy"); - if (irq < 0) { - dev_err(dev, "missing legacy IRQ resource\n"); + if (irq < 0) return irq; - } irq_set_chained_handler_and_data(irq, rockchip_pcie_legacy_int_handler, rockchip); irq = platform_get_irq_byname(pdev, "client"); - if (irq < 0) { - dev_err(dev, "missing client IRQ resource\n"); + if (irq < 0) return irq; - } err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler, IRQF_SHARED, "pcie-client", rockchip); @@ -949,9 +943,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev) { struct rockchip_pcie *rockchip; struct device *dev = &pdev->dev; - struct pci_bus *bus, *child; struct pci_host_bridge *bridge; - struct resource *bus_res; int err; if (!dev->of_node) @@ -991,13 +983,6 @@ static int rockchip_pcie_probe(struct platform_device *pdev) if (err < 0) goto err_deinit_port; - err = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, &bus_res); - if (err) - goto err_remove_irq_domain; - - rockchip->root_bus_nr = bus_res->start; - err = rockchip_pcie_cfg_atu(rockchip); if (err) goto err_remove_irq_domain; @@ -1008,27 +993,13 @@ static int rockchip_pcie_probe(struct platform_device *pdev) goto err_remove_irq_domain; } - bridge->dev.parent = dev; bridge->sysdata = rockchip; - bridge->busnr = 0; bridge->ops = &rockchip_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = 
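The rockchip hunks here, like the earlier pcie-rcar-host ones, replace comparisons against a driver-cached root bus number with pci_is_root_bus(); that is also what lets the R-Car driver stop snooping PCI_PRIMARY_BUS writes just to keep root_bus_nr up to date. A small illustrative sketch of the usual routing decision, with a hypothetical helper name:

#include <linux/pci.h>

/* Decide how to reach the config space of a device on @bus. */
static int foo_pcie_access_type(struct pci_bus *bus)
{
	if (pci_is_root_bus(bus))
		return 0;	/* the Root Port's own registers */

	if (pci_is_root_bus(bus->parent))
		return 1;	/* directly below the Root Port: Type 0 cycles */

	return 2;		/* behind a further bridge: Type 1 cycles */
}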
pci_common_swizzle; - err = pci_scan_root_bus_bridge(bridge); + err = pci_host_probe(bridge); if (err < 0) goto err_remove_irq_domain; - bus = bridge->bus; - - rockchip->root_bus = bus; - - pci_bus_size_bridges(bus); - pci_bus_assign_resources(bus); - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - - pci_bus_add_devices(bus); return 0; err_remove_irq_domain: @@ -1051,9 +1022,10 @@ static int rockchip_pcie_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rockchip_pcie *rockchip = dev_get_drvdata(dev); + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rockchip); - pci_stop_root_bus(rockchip->root_bus); - pci_remove_root_bus(rockchip->root_bus); + pci_stop_root_bus(bridge->bus); + pci_remove_root_bus(bridge->bus); irq_domain_remove(rockchip->irq_domain); rockchip_pcie_deinit_phys(rockchip); diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c index c53d1322a3d6..904dec0d3a88 100644 --- a/drivers/pci/controller/pcie-rockchip.c +++ b/drivers/pci/controller/pcie-rockchip.c @@ -45,9 +45,8 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) return -EINVAL; } - regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "apb-base"); - rockchip->apb_base = devm_ioremap_resource(dev, regs); + rockchip->apb_base = + devm_platform_ioremap_resource_byname(pdev, "apb-base"); if (IS_ERR(rockchip->apb_base)) return PTR_ERR(rockchip->apb_base); diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h index d90dfb354573..c7d0178fc8c2 100644 --- a/drivers/pci/controller/pcie-rockchip.h +++ b/drivers/pci/controller/pcie-rockchip.h @@ -298,12 +298,10 @@ struct rockchip_pcie { struct gpio_desc *ep_gpio; u32 lanes; u8 lanes_map; - u8 root_bus_nr; int link_gen; struct device *dev; struct irq_domain *irq_domain; int offset; - struct pci_bus *root_bus; void __iomem *msg_region; phys_addr_t msg_bus_addr; bool is_rc; diff --git a/drivers/pci/controller/pcie-tango.c b/drivers/pci/controller/pcie-tango.c index 8f640c70f936..d093a8ce4bb1 100644 --- a/drivers/pci/controller/pcie-tango.c +++ b/drivers/pci/controller/pcie-tango.c @@ -273,10 +273,8 @@ static int tango_pcie_probe(struct platform_device *pdev) writel_relaxed(0, pcie->base + SMP8759_ENABLE + offset); virq = platform_get_irq(pdev, 1); - if (virq < 0) { - dev_err(dev, "Failed to map IRQ\n"); + if (virq < 0) return virq; - } irq_dom = irq_domain_create_linear(fwnode, MSI_MAX, &dom_ops, pcie); if (!irq_dom) { diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c new file mode 100644 index 000000000000..f3082de44e8a --- /dev/null +++ b/drivers/pci/controller/pcie-xilinx-cpm.c @@ -0,0 +1,611 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * PCIe host controller driver for Xilinx Versal CPM DMA Bridge + * + * (C) Copyright 2019 - 2020, Xilinx, Inc. 
+ */ + +#include <linux/bitfield.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_pci.h> +#include <linux/of_platform.h> +#include <linux/of_irq.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/pci-ecam.h> + +#include "../pci.h" + +/* Register definitions */ +#define XILINX_CPM_PCIE_REG_IDR 0x00000E10 +#define XILINX_CPM_PCIE_REG_IMR 0x00000E14 +#define XILINX_CPM_PCIE_REG_PSCR 0x00000E1C +#define XILINX_CPM_PCIE_REG_RPSC 0x00000E20 +#define XILINX_CPM_PCIE_REG_RPEFR 0x00000E2C +#define XILINX_CPM_PCIE_REG_IDRN 0x00000E38 +#define XILINX_CPM_PCIE_REG_IDRN_MASK 0x00000E3C +#define XILINX_CPM_PCIE_MISC_IR_STATUS 0x00000340 +#define XILINX_CPM_PCIE_MISC_IR_ENABLE 0x00000348 +#define XILINX_CPM_PCIE_MISC_IR_LOCAL BIT(1) + +/* Interrupt registers definitions */ +#define XILINX_CPM_PCIE_INTR_LINK_DOWN 0 +#define XILINX_CPM_PCIE_INTR_HOT_RESET 3 +#define XILINX_CPM_PCIE_INTR_CFG_PCIE_TIMEOUT 4 +#define XILINX_CPM_PCIE_INTR_CFG_TIMEOUT 8 +#define XILINX_CPM_PCIE_INTR_CORRECTABLE 9 +#define XILINX_CPM_PCIE_INTR_NONFATAL 10 +#define XILINX_CPM_PCIE_INTR_FATAL 11 +#define XILINX_CPM_PCIE_INTR_CFG_ERR_POISON 12 +#define XILINX_CPM_PCIE_INTR_PME_TO_ACK_RCVD 15 +#define XILINX_CPM_PCIE_INTR_INTX 16 +#define XILINX_CPM_PCIE_INTR_PM_PME_RCVD 17 +#define XILINX_CPM_PCIE_INTR_SLV_UNSUPP 20 +#define XILINX_CPM_PCIE_INTR_SLV_UNEXP 21 +#define XILINX_CPM_PCIE_INTR_SLV_COMPL 22 +#define XILINX_CPM_PCIE_INTR_SLV_ERRP 23 +#define XILINX_CPM_PCIE_INTR_SLV_CMPABT 24 +#define XILINX_CPM_PCIE_INTR_SLV_ILLBUR 25 +#define XILINX_CPM_PCIE_INTR_MST_DECERR 26 +#define XILINX_CPM_PCIE_INTR_MST_SLVERR 27 +#define XILINX_CPM_PCIE_INTR_SLV_PCIE_TIMEOUT 28 + +#define IMR(x) BIT(XILINX_CPM_PCIE_INTR_ ##x) + +#define XILINX_CPM_PCIE_IMR_ALL_MASK \ + ( \ + IMR(LINK_DOWN) | \ + IMR(HOT_RESET) | \ + IMR(CFG_PCIE_TIMEOUT) | \ + IMR(CFG_TIMEOUT) | \ + IMR(CORRECTABLE) | \ + IMR(NONFATAL) | \ + IMR(FATAL) | \ + IMR(CFG_ERR_POISON) | \ + IMR(PME_TO_ACK_RCVD) | \ + IMR(INTX) | \ + IMR(PM_PME_RCVD) | \ + IMR(SLV_UNSUPP) | \ + IMR(SLV_UNEXP) | \ + IMR(SLV_COMPL) | \ + IMR(SLV_ERRP) | \ + IMR(SLV_CMPABT) | \ + IMR(SLV_ILLBUR) | \ + IMR(MST_DECERR) | \ + IMR(MST_SLVERR) | \ + IMR(SLV_PCIE_TIMEOUT) \ + ) + +#define XILINX_CPM_PCIE_IDR_ALL_MASK 0xFFFFFFFF +#define XILINX_CPM_PCIE_IDRN_MASK GENMASK(19, 16) +#define XILINX_CPM_PCIE_IDRN_SHIFT 16 + +/* Root Port Error FIFO Read Register definitions */ +#define XILINX_CPM_PCIE_RPEFR_ERR_VALID BIT(18) +#define XILINX_CPM_PCIE_RPEFR_REQ_ID GENMASK(15, 0) +#define XILINX_CPM_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF + +/* Root Port Status/control Register definitions */ +#define XILINX_CPM_PCIE_REG_RPSC_BEN BIT(0) + +/* Phy Status/Control Register definitions */ +#define XILINX_CPM_PCIE_REG_PSCR_LNKUP BIT(11) + +/** + * struct xilinx_cpm_pcie_port - PCIe port information + * @reg_base: Bridge Register Base + * @cpm_base: CPM System Level Control and Status Register(SLCR) Base + * @dev: Device pointer + * @intx_domain: Legacy IRQ domain pointer + * @cpm_domain: CPM IRQ domain pointer + * @cfg: Holds mappings of config space window + * @intx_irq: legacy interrupt number + * @irq: Error interrupt number + * @lock: lock protecting shared register access + */ +struct xilinx_cpm_pcie_port { + void __iomem *reg_base; + void __iomem *cpm_base; + struct device *dev; + struct 
irq_domain *intx_domain; + struct irq_domain *cpm_domain; + struct pci_config_window *cfg; + int intx_irq; + int irq; + raw_spinlock_t lock; +}; + +static u32 pcie_read(struct xilinx_cpm_pcie_port *port, u32 reg) +{ + return readl_relaxed(port->reg_base + reg); +} + +static void pcie_write(struct xilinx_cpm_pcie_port *port, + u32 val, u32 reg) +{ + writel_relaxed(val, port->reg_base + reg); +} + +static bool cpm_pcie_link_up(struct xilinx_cpm_pcie_port *port) +{ + return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) & + XILINX_CPM_PCIE_REG_PSCR_LNKUP); +} + +static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie_port *port) +{ + unsigned long val = pcie_read(port, XILINX_CPM_PCIE_REG_RPEFR); + + if (val & XILINX_CPM_PCIE_RPEFR_ERR_VALID) { + dev_dbg(port->dev, "Requester ID %lu\n", + val & XILINX_CPM_PCIE_RPEFR_REQ_ID); + pcie_write(port, XILINX_CPM_PCIE_RPEFR_ALL_MASK, + XILINX_CPM_PCIE_REG_RPEFR); + } +} + +static void xilinx_cpm_mask_leg_irq(struct irq_data *data) +{ + struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(data); + unsigned long flags; + u32 mask; + u32 val; + + mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT); + raw_spin_lock_irqsave(&port->lock, flags); + val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK); + pcie_write(port, (val & (~mask)), XILINX_CPM_PCIE_REG_IDRN_MASK); + raw_spin_unlock_irqrestore(&port->lock, flags); +} + +static void xilinx_cpm_unmask_leg_irq(struct irq_data *data) +{ + struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(data); + unsigned long flags; + u32 mask; + u32 val; + + mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT); + raw_spin_lock_irqsave(&port->lock, flags); + val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK); + pcie_write(port, (val | mask), XILINX_CPM_PCIE_REG_IDRN_MASK); + raw_spin_unlock_irqrestore(&port->lock, flags); +} + +static struct irq_chip xilinx_cpm_leg_irq_chip = { + .name = "INTx", + .irq_mask = xilinx_cpm_mask_leg_irq, + .irq_unmask = xilinx_cpm_unmask_leg_irq, +}; + +/** + * xilinx_cpm_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid + * @domain: IRQ domain + * @irq: Virtual IRQ number + * @hwirq: HW interrupt number + * + * Return: Always returns 0. 
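In the new Xilinx CPM driver, the four legacy interrupts live in bits 19:16 of the IDRN registers (XILINX_CPM_PCIE_IDRN_MASK above), so the INTx mask/unmask helpers above toggle bit hwirq + 16 and the demux handler below uses FIELD_GET() to shift those bits back down to 0..3 before looking them up in the INTx domain. A worked illustration of that bit math, with example values rather than anything read from hardware:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define IDRN_MASK	GENMASK(19, 16)	/* INTA..INTD status bits */
#define IDRN_SHIFT	16

/* Masking INTA (hwirq 0) means clearing register bit 16, i.e. 0x00010000: */
static const u32 inta_bit = BIT(0 + IDRN_SHIFT);

static u32 foo_pending_intx(u32 idrn)
{
	/*
	 * e.g. idrn == 0x000a0000 has bits 17 and 19 set, so this returns
	 * 0xa: hwirqs 1 and 3 (INTB and INTD) are pending.
	 */
	return FIELD_GET(IDRN_MASK, idrn);
}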
+ */ +static int xilinx_cpm_pcie_intx_map(struct irq_domain *domain, + unsigned int irq, irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &xilinx_cpm_leg_irq_chip, + handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + irq_set_status_flags(irq, IRQ_LEVEL); + + return 0; +} + +/* INTx IRQ Domain operations */ +static const struct irq_domain_ops intx_domain_ops = { + .map = xilinx_cpm_pcie_intx_map, +}; + +static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc) +{ + struct xilinx_cpm_pcie_port *port = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned long val; + int i; + + chained_irq_enter(chip, desc); + + val = FIELD_GET(XILINX_CPM_PCIE_IDRN_MASK, + pcie_read(port, XILINX_CPM_PCIE_REG_IDRN)); + + for_each_set_bit(i, &val, PCI_NUM_INTX) + generic_handle_irq(irq_find_mapping(port->intx_domain, i)); + + chained_irq_exit(chip, desc); +} + +static void xilinx_cpm_mask_event_irq(struct irq_data *d) +{ + struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(d); + u32 val; + + raw_spin_lock(&port->lock); + val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR); + val &= ~BIT(d->hwirq); + pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR); + raw_spin_unlock(&port->lock); +} + +static void xilinx_cpm_unmask_event_irq(struct irq_data *d) +{ + struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(d); + u32 val; + + raw_spin_lock(&port->lock); + val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR); + val |= BIT(d->hwirq); + pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR); + raw_spin_unlock(&port->lock); +} + +static struct irq_chip xilinx_cpm_event_irq_chip = { + .name = "RC-Event", + .irq_mask = xilinx_cpm_mask_event_irq, + .irq_unmask = xilinx_cpm_unmask_event_irq, +}; + +static int xilinx_cpm_pcie_event_map(struct irq_domain *domain, + unsigned int irq, irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &xilinx_cpm_event_irq_chip, + handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + irq_set_status_flags(irq, IRQ_LEVEL); + return 0; +} + +static const struct irq_domain_ops event_domain_ops = { + .map = xilinx_cpm_pcie_event_map, +}; + +static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc) +{ + struct xilinx_cpm_pcie_port *port = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned long val; + int i; + + chained_irq_enter(chip, desc); + val = pcie_read(port, XILINX_CPM_PCIE_REG_IDR); + val &= pcie_read(port, XILINX_CPM_PCIE_REG_IMR); + for_each_set_bit(i, &val, 32) + generic_handle_irq(irq_find_mapping(port->cpm_domain, i)); + pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR); + + /* + * XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to + * CPM SLCR block. 
+ */ + val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS); + if (val) + writel_relaxed(val, + port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS); + + chained_irq_exit(chip, desc); +} + +#define _IC(x, s) \ + [XILINX_CPM_PCIE_INTR_ ## x] = { __stringify(x), s } + +static const struct { + const char *sym; + const char *str; +} intr_cause[32] = { + _IC(LINK_DOWN, "Link Down"), + _IC(HOT_RESET, "Hot reset"), + _IC(CFG_TIMEOUT, "ECAM access timeout"), + _IC(CORRECTABLE, "Correctable error message"), + _IC(NONFATAL, "Non fatal error message"), + _IC(FATAL, "Fatal error message"), + _IC(SLV_UNSUPP, "Slave unsupported request"), + _IC(SLV_UNEXP, "Slave unexpected completion"), + _IC(SLV_COMPL, "Slave completion timeout"), + _IC(SLV_ERRP, "Slave Error Poison"), + _IC(SLV_CMPABT, "Slave Completer Abort"), + _IC(SLV_ILLBUR, "Slave Illegal Burst"), + _IC(MST_DECERR, "Master decode error"), + _IC(MST_SLVERR, "Master slave error"), + _IC(CFG_PCIE_TIMEOUT, "PCIe ECAM access timeout"), + _IC(CFG_ERR_POISON, "ECAM poisoned completion received"), + _IC(PME_TO_ACK_RCVD, "PME_TO_ACK message received"), + _IC(PM_PME_RCVD, "PM_PME message received"), + _IC(SLV_PCIE_TIMEOUT, "PCIe completion timeout received"), +}; + +static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *dev_id) +{ + struct xilinx_cpm_pcie_port *port = dev_id; + struct device *dev = port->dev; + struct irq_data *d; + + d = irq_domain_get_irq_data(port->cpm_domain, irq); + + switch (d->hwirq) { + case XILINX_CPM_PCIE_INTR_CORRECTABLE: + case XILINX_CPM_PCIE_INTR_NONFATAL: + case XILINX_CPM_PCIE_INTR_FATAL: + cpm_pcie_clear_err_interrupts(port); + fallthrough; + + default: + if (intr_cause[d->hwirq].str) + dev_warn(dev, "%s\n", intr_cause[d->hwirq].str); + else + dev_warn(dev, "Unknown IRQ %ld\n", d->hwirq); + } + + return IRQ_HANDLED; +} + +static void xilinx_cpm_free_irq_domains(struct xilinx_cpm_pcie_port *port) +{ + if (port->intx_domain) { + irq_domain_remove(port->intx_domain); + port->intx_domain = NULL; + } + + if (port->cpm_domain) { + irq_domain_remove(port->cpm_domain); + port->cpm_domain = NULL; + } +} + +/** + * xilinx_cpm_pcie_init_irq_domain - Initialize IRQ domain + * @port: PCIe port information + * + * Return: '0' on success and error value on failure + */ +static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie_port *port) +{ + struct device *dev = port->dev; + struct device_node *node = dev->of_node; + struct device_node *pcie_intc_node; + + /* Setup INTx */ + pcie_intc_node = of_get_next_child(node, NULL); + if (!pcie_intc_node) { + dev_err(dev, "No PCIe Intc node found\n"); + return -EINVAL; + } + + port->cpm_domain = irq_domain_add_linear(pcie_intc_node, 32, + &event_domain_ops, + port); + if (!port->cpm_domain) + goto out; + + irq_domain_update_bus_token(port->cpm_domain, DOMAIN_BUS_NEXUS); + + port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, + &intx_domain_ops, + port); + if (!port->intx_domain) + goto out; + + irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED); + + of_node_put(pcie_intc_node); + raw_spin_lock_init(&port->lock); + + return 0; +out: + xilinx_cpm_free_irq_domains(port); + dev_err(dev, "Failed to allocate IRQ domains\n"); + + return -ENOMEM; +} + +static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie_port *port) +{ + struct device *dev = port->dev; + struct platform_device *pdev = to_platform_device(dev); + int i, irq; + + port->irq = platform_get_irq(pdev, 0); + if (port->irq < 0) + return port->irq; + + for (i = 0; i < 
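The IMR()/_IC() helpers above are token-pasting conveniences: IMR(LINK_DOWN) expands to BIT(XILINX_CPM_PCIE_INTR_LINK_DOWN), and _IC(LINK_DOWN, "Link Down") expands to a designated initializer at that same index, which is why the setup loop below can skip table slots whose .str is NULL (events such as INTX have no cause string and are handled by their own chained flow instead). A hand-expanded example of one entry, assuming the register definitions above are in scope:

/* _IC(LINK_DOWN, "Link Down") pastes and stringifies into the entry below. */
static const struct {
	const char *sym;
	const char *str;
} example_cause[32] = {
	[XILINX_CPM_PCIE_INTR_LINK_DOWN] = { "LINK_DOWN", "Link Down" },
};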
ARRAY_SIZE(intr_cause); i++) { + int err; + + if (!intr_cause[i].str) + continue; + + irq = irq_create_mapping(port->cpm_domain, i); + if (!irq) { + dev_err(dev, "Failed to map interrupt\n"); + return -ENXIO; + } + + err = devm_request_irq(dev, irq, xilinx_cpm_pcie_intr_handler, + 0, intr_cause[i].sym, port); + if (err) { + dev_err(dev, "Failed to request IRQ %d\n", irq); + return err; + } + } + + port->intx_irq = irq_create_mapping(port->cpm_domain, + XILINX_CPM_PCIE_INTR_INTX); + if (!port->intx_irq) { + dev_err(dev, "Failed to map INTx interrupt\n"); + return -ENXIO; + } + + /* Plug the INTx chained handler */ + irq_set_chained_handler_and_data(port->intx_irq, + xilinx_cpm_pcie_intx_flow, port); + + /* Plug the main event chained handler */ + irq_set_chained_handler_and_data(port->irq, + xilinx_cpm_pcie_event_flow, port); + + return 0; +} + +/** + * xilinx_cpm_pcie_init_port - Initialize hardware + * @port: PCIe port information + */ +static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie_port *port) +{ + if (cpm_pcie_link_up(port)) + dev_info(port->dev, "PCIe Link is UP\n"); + else + dev_info(port->dev, "PCIe Link is DOWN\n"); + + /* Disable all interrupts */ + pcie_write(port, ~XILINX_CPM_PCIE_IDR_ALL_MASK, + XILINX_CPM_PCIE_REG_IMR); + + /* Clear pending interrupts */ + pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_IDR) & + XILINX_CPM_PCIE_IMR_ALL_MASK, + XILINX_CPM_PCIE_REG_IDR); + + /* + * XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to + * CPM SLCR block. + */ + writel(XILINX_CPM_PCIE_MISC_IR_LOCAL, + port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE); + /* Enable the Bridge enable bit */ + pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) | + XILINX_CPM_PCIE_REG_RPSC_BEN, + XILINX_CPM_PCIE_REG_RPSC); +} + +/** + * xilinx_cpm_pcie_parse_dt - Parse Device tree + * @port: PCIe port information + * @bus_range: Bus resource + * + * Return: '0' on success and error value on failure + */ +static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie_port *port, + struct resource *bus_range) +{ + struct device *dev = port->dev; + struct platform_device *pdev = to_platform_device(dev); + struct resource *res; + + port->cpm_base = devm_platform_ioremap_resource_byname(pdev, + "cpm_slcr"); + if (IS_ERR(port->cpm_base)) + return PTR_ERR(port->cpm_base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); + if (!res) + return -ENXIO; + + port->cfg = pci_ecam_create(dev, res, bus_range, + &pci_generic_ecam_ops); + if (IS_ERR(port->cfg)) + return PTR_ERR(port->cfg); + + port->reg_base = port->cfg->win; + + return 0; +} + +static void xilinx_cpm_free_interrupts(struct xilinx_cpm_pcie_port *port) +{ + irq_set_chained_handler_and_data(port->intx_irq, NULL, NULL); + irq_set_chained_handler_and_data(port->irq, NULL, NULL); +} + +/** + * xilinx_cpm_pcie_probe - Probe function + * @pdev: Platform device pointer + * + * Return: '0' on success and error value on failure + */ +static int xilinx_cpm_pcie_probe(struct platform_device *pdev) +{ + struct xilinx_cpm_pcie_port *port; + struct device *dev = &pdev->dev; + struct pci_host_bridge *bridge; + struct resource_entry *bus; + int err; + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port)); + if (!bridge) + return -ENODEV; + + port = pci_host_bridge_priv(bridge); + + port->dev = dev; + + err = xilinx_cpm_pcie_init_irq_domain(port); + if (err) + return err; + + bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); + if (!bus) + return -ENODEV; + + err = xilinx_cpm_pcie_parse_dt(port, bus->res); + if 
(err) { + dev_err(dev, "Parsing DT failed\n"); + goto err_parse_dt; + } + + xilinx_cpm_pcie_init_port(port); + + err = xilinx_cpm_setup_irq(port); + if (err) { + dev_err(dev, "Failed to set up interrupts\n"); + goto err_setup_irq; + } + + bridge->dev.parent = dev; + bridge->sysdata = port->cfg; + bridge->busnr = port->cfg->busr.start; + bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; + + err = pci_host_probe(bridge); + if (err < 0) + goto err_host_bridge; + + return 0; + +err_host_bridge: + xilinx_cpm_free_interrupts(port); +err_setup_irq: + pci_ecam_free(port->cfg); +err_parse_dt: + xilinx_cpm_free_irq_domains(port); + return err; +} + +static const struct of_device_id xilinx_cpm_pcie_of_match[] = { + { .compatible = "xlnx,versal-cpm-host-1.00", }, + {} +}; + +static struct platform_driver xilinx_cpm_pcie_driver = { + .driver = { + .name = "xilinx-cpm-pcie", + .of_match_table = xilinx_cpm_pcie_of_match, + .suppress_bind_attrs = true, + }, + .probe = xilinx_cpm_pcie_probe, +}; + +builtin_platform_driver(xilinx_cpm_pcie_driver); diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index 9bd1427f2fd6..f3cf7d61924f 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -166,7 +166,6 @@ struct nwl_pcie { int irq_misc; u32 ecam_value; u8 last_busno; - u8 root_busno; struct nwl_msi msi; struct irq_domain *legacy_irq_domain; raw_spinlock_t leg_mask_lock; @@ -217,13 +216,11 @@ static bool nwl_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) struct nwl_pcie *pcie = bus->sysdata; /* Check link before accessing downstream ports */ - if (bus->number != pcie->root_busno) { + if (!pci_is_root_bus(bus)) { if (!nwl_pcie_link_up(pcie)) return false; - } - - /* Only one device down on each root port */ - if (bus->number == pcie->root_busno && devfn > 0) + } else if (devfn > 0) + /* Only one device down on each root port */ return false; return true; @@ -586,7 +583,6 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie) /* Get msi_1 IRQ number */ msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1"); if (msi->irq_msi1 < 0) { - dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi1); ret = -EINVAL; goto err; } @@ -597,7 +593,6 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie) /* Get msi_0 IRQ number */ msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0"); if (msi->irq_msi0 < 0) { - dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi0); ret = -EINVAL; goto err; } @@ -728,11 +723,8 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie) /* Get misc IRQ number */ pcie->irq_misc = platform_get_irq_byname(pdev, "misc"); - if (pcie->irq_misc < 0) { - dev_err(dev, "failed to get misc IRQ %d\n", - pcie->irq_misc); + if (pcie->irq_misc < 0) return -EINVAL; - } err = devm_request_irq(dev, pcie->irq_misc, nwl_pcie_misc_handler, IRQF_SHARED, @@ -797,10 +789,8 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie, /* Get intx IRQ number */ pcie->irq_intx = platform_get_irq_byname(pdev, "intx"); - if (pcie->irq_intx < 0) { - dev_err(dev, "failed to get intx IRQ %d\n", pcie->irq_intx); + if (pcie->irq_intx < 0) return pcie->irq_intx; - } irq_set_chained_handler_and_data(pcie->irq_intx, nwl_pcie_leg_handler, pcie); @@ -817,8 +807,6 @@ static int nwl_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct nwl_pcie *pcie; - struct pci_bus *bus; - struct pci_bus *child; struct 
pci_host_bridge *bridge; int err; @@ -843,25 +831,14 @@ static int nwl_pcie_probe(struct platform_device *pdev) return err; } - err = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (err) { - dev_err(dev, "Getting bridge resources failed\n"); - return err; - } - err = nwl_pcie_init_irq_domain(pcie); if (err) { dev_err(dev, "Failed creating IRQ Domain\n"); return err; } - bridge->dev.parent = dev; bridge->sysdata = pcie; - bridge->busnr = pcie->root_busno; bridge->ops = &nwl_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; if (IS_ENABLED(CONFIG_PCI_MSI)) { err = nwl_pcie_enable_msi(pcie); @@ -871,17 +848,7 @@ static int nwl_pcie_probe(struct platform_device *pdev) } } - err = pci_scan_root_bus_bridge(bridge); - if (err) - return err; - - bus = bridge->bus; - - pci_assign_unassigned_bus_resources(bus); - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - pci_bus_add_devices(bus); - return 0; + return pci_host_probe(bridge); } static struct platform_driver nwl_pcie_driver = { diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c index 98e55297815b..8523be61bba5 100644 --- a/drivers/pci/controller/pcie-xilinx.c +++ b/drivers/pci/controller/pcie-xilinx.c @@ -98,7 +98,6 @@ * @reg_base: IO Mapped Register Base * @irq: Interrupt number * @msi_pages: MSI pages - * @root_busno: Root Bus number * @dev: Device pointer * @msi_domain: MSI IRQ domain pointer * @leg_domain: Legacy IRQ domain pointer @@ -108,7 +107,6 @@ struct xilinx_pcie_port { void __iomem *reg_base; u32 irq; unsigned long msi_pages; - u8 root_busno; struct device *dev; struct irq_domain *msi_domain; struct irq_domain *leg_domain; @@ -162,14 +160,13 @@ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) struct xilinx_pcie_port *port = bus->sysdata; /* Check if link is up when trying to access downstream ports */ - if (bus->number != port->root_busno) + if (!pci_is_root_bus(bus)) { if (!xilinx_pcie_link_up(port)) return false; - - /* Only one device down on each root port */ - if (bus->number == port->root_busno && devfn > 0) + } else if (devfn > 0) { + /* Only one device down on each root port */ return false; - + } return true; } @@ -616,7 +613,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct xilinx_pcie_port *port; - struct pci_bus *bus, *child; struct pci_host_bridge *bridge; int err; @@ -645,35 +641,14 @@ static int xilinx_pcie_probe(struct platform_device *pdev) return err; } - err = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (err) { - dev_err(dev, "Getting bridge resources failed\n"); - return err; - } - - bridge->dev.parent = dev; bridge->sysdata = port; - bridge->busnr = 0; bridge->ops = &xilinx_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; #ifdef CONFIG_PCI_MSI xilinx_pcie_msi_chip.dev = dev; bridge->msi = &xilinx_pcie_msi_chip; #endif - err = pci_scan_root_bus_bridge(bridge); - if (err < 0) - return err; - - bus = bridge->bus; - - pci_assign_unassigned_bus_resources(bus); - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - pci_bus_add_devices(bus); - return 0; + return pci_host_probe(bridge); } static const struct of_device_id xilinx_pcie_of_match[] = { diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index 
ebec0a6e77ed..f69ef8c89f72 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -40,13 +40,19 @@ enum vmd_features { * membars, in order to allow proper address translation during * resource assignment to enable guest virtualization */ - VMD_FEAT_HAS_MEMBAR_SHADOW = (1 << 0), + VMD_FEAT_HAS_MEMBAR_SHADOW = (1 << 0), /* * Device may provide root port configuration information which limits * bus numbering */ - VMD_FEAT_HAS_BUS_RESTRICTIONS = (1 << 1), + VMD_FEAT_HAS_BUS_RESTRICTIONS = (1 << 1), + + /* + * Device contains physical location shadow registers in + * vendor-specific capability space + */ + VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP = (1 << 2), }; /* @@ -454,6 +460,28 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) } } + if (features & VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP) { + int pos = pci_find_capability(vmd->dev, PCI_CAP_ID_VNDR); + u32 reg, regu; + + pci_read_config_dword(vmd->dev, pos + 4, &reg); + + /* "SHDW" */ + if (pos && reg == 0x53484457) { + pci_read_config_dword(vmd->dev, pos + 8, &reg); + pci_read_config_dword(vmd->dev, pos + 12, &regu); + offset[0] = vmd->dev->resource[VMD_MEMBAR1].start - + (((u64) regu << 32 | reg) & + PCI_BASE_ADDRESS_MEM_MASK); + + pci_read_config_dword(vmd->dev, pos + 16, &reg); + pci_read_config_dword(vmd->dev, pos + 20, &regu); + offset[1] = vmd->dev->resource[VMD_MEMBAR2].start - + (((u64) regu << 32 | reg) & + PCI_BASE_ADDRESS_MEM_MASK); + } + } + /* * Certain VMD devices may have a root port configuration option which * limits the bus range to between 0-127, 128-255, or 224-255 @@ -720,16 +748,20 @@ static int vmd_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume); static const struct pci_device_id vmd_ids[] = { - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D), + .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP,}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0), .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW | VMD_FEAT_HAS_BUS_RESTRICTIONS,}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x467f), - .driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,}, + .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP | + VMD_FEAT_HAS_BUS_RESTRICTIONS,}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c3d), - .driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,}, + .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP | + VMD_FEAT_HAS_BUS_RESTRICTIONS,}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B), - .driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,}, + .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP | + VMD_FEAT_HAS_BUS_RESTRICTIONS,}, {0,} }; MODULE_DEVICE_TABLE(pci, vmd_ids);
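
For reference, the VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP hunk above reads two 64-bit guest-visible base addresses out of the vendor-specific capability (dwords at pos + 8/12 and pos + 16/20) and converts each into an offset against the host-assigned MEMBAR resource. The sketch below restates that calculation as a standalone helper; it is illustrative only, not part of vmd.c, and the helper name vmd_shadow_offset and the VMD_SHDW_MAGIC define are assumptions for this example (the magic value 0x53484457 and all PCI APIs used are taken from the diff or the standard kernel config-access interface).

    #include <linux/pci.h>

    /* ASCII "SHDW" -- the magic checked in the vendor-specific capability above */
    #define VMD_SHDW_MAGIC	0x53484457

    /*
     * Illustrative helper (not part of vmd.c): read one 64-bit shadow base from
     * the vendor-specific capability at config offset @off and return the delta
     * between the host-assigned resource @res and that shadowed guest address.
     */
    static u64 vmd_shadow_offset(struct pci_dev *dev, int off,
    			     const struct resource *res)
    {
    	u32 lo, hi;

    	pci_read_config_dword(dev, off, &lo);
    	pci_read_config_dword(dev, off + 4, &hi);

    	return res->start - (((u64)hi << 32 | lo) & PCI_BASE_ADDRESS_MEM_MASK);
    }

With such a helper, the two assignments in the hunk would read as offset[0] = vmd_shadow_offset(vmd->dev, pos + 8, &vmd->dev->resource[VMD_MEMBAR1]) and offset[1] = vmd_shadow_offset(vmd->dev, pos + 16, &vmd->dev->resource[VMD_MEMBAR2]), still guarded by the same pos && reg == VMD_SHDW_MAGIC check that the driver performs before trusting the shadow registers.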