Diffstat (limited to 'drivers/pci/controller/dwc/pcie-designware-ep.c')
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-ep.c  |  321
1 file changed, 260 insertions(+), 61 deletions(-)
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 8e07d432e74f..1a0bf9341542 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -102,6 +102,45 @@ static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
}
+/**
+ * dw_pcie_ep_hide_ext_capability - Hide a capability from the linked list
+ * @pci: DWC PCI device
+ * @prev_cap: Capability preceding the capability that should be hidden
+ * @cap: Capability that should be hidden
+ *
+ * Return: 0 if success, errno otherwise.
+ */
+int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap)
+{
+ u16 prev_cap_offset, cap_offset;
+ u32 prev_cap_header, cap_header;
+
+ prev_cap_offset = dw_pcie_find_ext_capability(pci, prev_cap);
+ if (!prev_cap_offset)
+ return -EINVAL;
+
+ prev_cap_header = dw_pcie_readl_dbi(pci, prev_cap_offset);
+ cap_offset = PCI_EXT_CAP_NEXT(prev_cap_header);
+ cap_header = dw_pcie_readl_dbi(pci, cap_offset);
+
+ /* cap must immediately follow prev_cap. */
+ if (PCI_EXT_CAP_ID(cap_header) != cap)
+ return -EINVAL;
+
+ /* Clear next ptr. */
+ prev_cap_header &= ~GENMASK(31, 20);
+
+ /* Set next ptr to next ptr of cap. */
+ prev_cap_header |= cap_header & GENMASK(31, 20);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi(pci, prev_cap_offset, prev_cap_header);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_hide_ext_capability);
+
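[Editor's note] A glue or endpoint driver could use this new helper to hide a capability that the hardware advertises but the platform cannot support. A minimal sketch follows; the capability IDs and their adjacency are assumptions for illustration, not taken from this patch:

	/*
	 * Hypothetical call from a vendor glue driver: hide the PTM
	 * capability, assuming it immediately follows L1SS in the
	 * extended capability list of this particular controller.
	 */
	ret = dw_pcie_ep_hide_ext_capability(pci, PCI_EXT_CAP_ID_L1SS,
					     PCI_EXT_CAP_ID_PTM);
	if (ret)
		dev_warn(pci->dev, "failed to hide PTM capability: %d\n", ret);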
static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr)
{
@@ -128,7 +167,7 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
}
static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
- dma_addr_t cpu_addr, enum pci_barno bar,
+ dma_addr_t parent_bus_addr, enum pci_barno bar,
size_t size)
{
int ret;
@@ -146,7 +185,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
}
ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
- cpu_addr, bar, size);
+ parent_bus_addr, bar, size);
if (ret < 0) {
dev_err(pci->dev, "Failed to program IB window\n");
return ret;
@@ -181,7 +220,7 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep,
return ret;
set_bit(free_win, ep->ob_window_map);
- ep->outbound_addr[free_win] = atu->cpu_addr;
+ ep->outbound_addr[free_win] = atu->parent_bus_addr;
return 0;
}
@@ -205,6 +244,125 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
ep->bar_to_atu[bar] = 0;
}
+static unsigned int dw_pcie_ep_get_rebar_offset(struct dw_pcie *pci,
+ enum pci_barno bar)
+{
+ u32 reg, bar_index;
+ unsigned int offset, nbars;
+ int i;
+
+ offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+ if (!offset)
+ return offset;
+
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT;
+
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ bar_index = reg & PCI_REBAR_CTRL_BAR_IDX;
+ if (bar_index == bar)
+ return offset;
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ep_set_bar_resizable(struct dw_pcie_ep *ep, u8 func_no,
+ struct pci_epf_bar *epf_bar)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+ size_t size = epf_bar->size;
+ int flags = epf_bar->flags;
+ u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+ unsigned int rebar_offset;
+ u32 rebar_cap, rebar_ctrl;
+ int ret;
+
+ rebar_offset = dw_pcie_ep_get_rebar_offset(pci, bar);
+ if (!rebar_offset)
+ return -EINVAL;
+
+ ret = pci_epc_bar_size_to_rebar_cap(size, &rebar_cap);
+ if (ret)
+ return ret;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ /*
+ * A BAR mask should not be written for a resizable BAR. The BAR mask
+ * is automatically derived by the controller every time the "selected
+ * size" bits are updated, see "Figure 3-26 Resizable BAR Example for
+ * 32-bit Memory BAR0" in DWC EP databook 5.96a. We simply need to write
+ * BIT(0) to set the BAR enable bit.
+ */
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg, BIT(0));
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);
+
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0);
+ dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
+ }
+
+ /*
+ * Bits 31:0 in PCI_REBAR_CAP define "supported sizes" bits for sizes
+ * 1 MB to 128 TB. Bits 31:16 in PCI_REBAR_CTRL define "supported sizes"
+ * bits for sizes 256 TB to 8 EB. Disallow sizes 256 TB to 8 EB.
+ */
+ rebar_ctrl = dw_pcie_readl_dbi(pci, rebar_offset + PCI_REBAR_CTRL);
+ rebar_ctrl &= ~GENMASK(31, 16);
+ dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CTRL, rebar_ctrl);
+
+ /*
+ * The "selected size" (bits 13:8) in PCI_REBAR_CTRL are automatically
+ * updated when writing PCI_REBAR_CAP, see "Figure 3-26 Resizable BAR
+ * Example for 32-bit Memory BAR0" in DWC EP databook 5.96a.
+ */
+ dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CAP, rebar_cap);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
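[Editor's note] For reference, the "supported sizes" encoding referred to in the comments above maps each power-of-two size to a single bit, starting with 1 MB at bit 4 (so 4 MB is BIT(6), 16 MB is BIT(8), and so on). A simplified sketch of what pci_epc_bar_size_to_rebar_cap() has to compute, assuming a power-of-two size of at least 1 MB (illustrative only, not the in-tree helper):

	static int rebar_size_to_cap(size_t size, u32 *cap)
	{
		/* Needs linux/bits.h, linux/log2.h and linux/sizes.h. */
		if (size < SZ_1M || !is_power_of_2(size))
			return -EINVAL;

		/* 1 MB -> BIT(4), 2 MB -> BIT(5), 4 MB -> BIT(6), ... */
		*cap = BIT(order_base_2(size) - 20 + 4);

		return 0;
	}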
+static int dw_pcie_ep_set_bar_programmable(struct dw_pcie_ep *ep, u8 func_no,
+ struct pci_epf_bar *epf_bar)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+ size_t size = epf_bar->size;
+ int flags = epf_bar->flags;
+ u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1));
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);
+
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1));
+ dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
+ }
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
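[Editor's note] As a concrete illustration of the writes above (values made up, not from the patch), a 1 MB, 64-bit, prefetchable BAR0 would amount to:

	/* Illustrative only: 1 MB, 64-bit, prefetchable BAR0. */
	dw_pcie_ep_writel_dbi2(ep, func_no, PCI_BASE_ADDRESS_0, 0x000fffff);	/* size - 1 */
	dw_pcie_ep_writel_dbi(ep, func_no, PCI_BASE_ADDRESS_0,
			      PCI_BASE_ADDRESS_MEM_TYPE_64 |
			      PCI_BASE_ADDRESS_MEM_PREFETCH);
	dw_pcie_ep_writel_dbi2(ep, func_no, PCI_BASE_ADDRESS_1, 0x0);		/* upper mask bits */
	dw_pcie_ep_writel_dbi(ep, func_no, PCI_BASE_ADDRESS_1, 0x0);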
+static enum pci_epc_bar_type dw_pcie_ep_get_bar_type(struct dw_pcie_ep *ep,
+ enum pci_barno bar)
+{
+ const struct pci_epc_features *epc_features;
+
+ if (!ep->ops->get_features)
+ return BAR_PROGRAMMABLE;
+
+ epc_features = ep->ops->get_features(ep);
+
+ return epc_features->bar[bar].type;
+}
+
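[Editor's note] The BAR type itself comes from the controller driver's pci_epc_features. A hypothetical fragment showing how the different types could be advertised (all names and sizes are illustrative):

	static const struct pci_epc_features my_pcie_epc_features = {
		.bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
		.bar[BAR_2] = { .type = BAR_RESIZABLE, },
		.bar[BAR_4] = { .type = BAR_RESERVED, },
		/* BARs not listed are left as BAR_PROGRAMMABLE. */
	};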
static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
@@ -212,9 +370,9 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar = epf_bar->barno;
size_t size = epf_bar->size;
+ enum pci_epc_bar_type bar_type;
int flags = epf_bar->flags;
int ret, type;
- u32 reg;
/*
* DWC does not allow BAR pairs to overlap, e.g. you cannot combine BARs
@@ -246,19 +404,30 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
goto config_atu;
}
- reg = PCI_BASE_ADDRESS_0 + (4 * bar);
-
- dw_pcie_dbi_ro_wr_en(pci);
-
- dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1));
- dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);
-
- if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
- dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1));
- dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
+ bar_type = dw_pcie_ep_get_bar_type(ep, bar);
+ switch (bar_type) {
+ case BAR_FIXED:
+ /*
+ * There is no need to write a BAR mask for a fixed BAR (except
+ * to write 1 to the LSB of the BAR mask register, to enable the
+ * BAR). Write the BAR mask regardless. (The fixed bits in the
+ * BAR mask register will be read-only anyway.)
+ */
+ fallthrough;
+ case BAR_PROGRAMMABLE:
+ ret = dw_pcie_ep_set_bar_programmable(ep, func_no, epf_bar);
+ break;
+ case BAR_RESIZABLE:
+ ret = dw_pcie_ep_set_bar_resizable(ep, func_no, epf_bar);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(pci->dev, "Invalid BAR type\n");
+ break;
}
- dw_pcie_dbi_ro_wr_dis(pci);
+ if (ret)
+ return ret;
config_atu:
if (!(flags & PCI_BASE_ADDRESS_SPACE))
@@ -282,7 +451,7 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
u32 index;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- for (index = 0; index < pci->num_ob_windows; index++) {
+ for_each_set_bit(index, ep->ob_window_map, pci->num_ob_windows) {
if (ep->outbound_addr[index] != addr)
continue;
*atu_index = index;
@@ -314,7 +483,8 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- ret = dw_pcie_find_index(ep, addr, &atu_index);
+ ret = dw_pcie_find_index(ep, addr - pci->parent_bus_offset,
+ &atu_index);
if (ret < 0)
return;
@@ -333,7 +503,7 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
atu.func_no = func_no;
atu.type = PCIE_ATU_TYPE_MEM;
- atu.cpu_addr = addr;
+ atu.parent_bus_addr = addr - pci->parent_bus_offset;
atu.pci_addr = pci_addr;
atu.size = size;
ret = dw_pcie_ep_outbound_atu(ep, &atu);
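[Editor's note] The "addr - pci->parent_bus_offset" correction above is easiest to see with made-up numbers (purely illustrative): if the CPU sees the outbound window at 0x9_0000_0000 while the controller's parent bus sees the same window at 0x4000_0000, the ATU must be programmed with the parent-bus view:

	/* Illustrative numbers only, not taken from any real platform. */
	u64 cpu_addr = 0x900000000ULL;			/* window as seen by the CPU */
	u64 bus_addr = 0x40000000ULL;			/* same window on the parent bus */
	u64 parent_bus_offset = cpu_addr - bus_addr;	/* 0x8c0000000 */

	atu.parent_bus_addr = addr - parent_bus_offset;	/* address the ATU must be given */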
@@ -666,6 +836,7 @@ void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ dwc_pcie_debugfs_deinit(pci);
dw_pcie_edma_remove(pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup);
@@ -690,31 +861,15 @@ void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_deinit);
-static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
-{
- u32 header;
- int pos = PCI_CFG_SPACE_SIZE;
-
- while (pos) {
- header = dw_pcie_readl_dbi(pci, pos);
- if (PCI_EXT_CAP_ID(header) == cap)
- return pos;
-
- pos = PCI_EXT_CAP_NEXT(header);
- if (!pos)
- break;
- }
-
- return 0;
-}
-
static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
{
+ struct dw_pcie_ep *ep = &pci->ep;
unsigned int offset;
unsigned int nbars;
- u32 reg, i;
+ enum pci_barno bar;
+ u32 reg, i, val;
- offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+ offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
dw_pcie_dbi_ro_wr_en(pci);
@@ -727,9 +882,29 @@ static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
* PCIe r6.0, sec 7.8.6.2 require us to support at least one
* size in the range from 1 MB to 512 GB. Advertise support
* for 1 MB BAR size only.
+ *
+ * For a BAR that has been configured via dw_pcie_ep_set_bar(),
+ * advertise support for only that size instead.
*/
- for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
- dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4));
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
+ /*
+ * While the RESBAR_CAP_REG_* fields are sticky, the
+ * RESBAR_CTRL_REG_BAR_SIZE field is non-sticky (it is
+ * sticky in certain versions of DWC PCIe, but not all).
+ *
+ * RESBAR_CTRL_REG_BAR_SIZE is updated automatically by
+ * the controller when RESBAR_CAP_REG is written, which
+ * is why RESBAR_CAP_REG is written here.
+ */
+ val = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ bar = val & PCI_REBAR_CTRL_BAR_IDX;
+ if (ep->epf_bar[bar])
+ pci_epc_bar_size_to_rebar_cap(ep->epf_bar[bar]->size, &val);
+ else
+ val = BIT(4);
+
+ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, val);
+ }
}
dw_pcie_setup(pci);
@@ -773,6 +948,7 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
if (ret)
return ret;
+ ret = -ENOMEM;
if (!ep->ib_window_map) {
ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
GFP_KERNEL);
@@ -817,7 +993,7 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
if (ep->ops->init)
ep->ops->init(ep);
- ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
+ ptm_cap_base = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
/*
* PTM responder capability can be disabled only after disabling
@@ -837,6 +1013,8 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
dw_pcie_ep_init_non_sticky_registers(pci);
+ dwc_pcie_debugfs_init(pci);
+
return 0;
err_remove_edma:
@@ -883,26 +1061,15 @@ void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown);
-/**
- * dw_pcie_ep_init - Initialize the endpoint device
- * @ep: DWC EP device
- *
- * Initialize the endpoint device. Allocate resources and create the EPC
- * device with the endpoint framework.
- *
- * Return: 0 if success, errno otherwise.
- */
-int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+static int dw_pcie_ep_get_resources(struct dw_pcie_ep *ep)
{
- int ret;
- struct resource *res;
- struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
-
- INIT_LIST_HEAD(&ep->func_list);
+ struct pci_epc *epc = ep->epc;
+ struct resource *res;
+ int ret;
ret = dw_pcie_get_resources(pci);
if (ret)
@@ -915,8 +1082,37 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->phys_base = res->start;
ep->addr_size = resource_size(res);
- if (ep->ops->pre_init)
- ep->ops->pre_init(ep);
+ /*
+ * artpec6_pcie_cpu_addr_fixup() uses ep->phys_base, so call
+ * dw_pcie_parent_bus_offset() after setting ep->phys_base.
+ */
+ pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "addr_space",
+ ep->phys_base);
+
+ ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
+ if (ret < 0)
+ epc->max_functions = 1;
+
+ return 0;
+}
+
+/**
+ * dw_pcie_ep_init - Initialize the endpoint device
+ * @ep: DWC EP device
+ *
+ * Initialize the endpoint device. Allocate resources and create the EPC
+ * device with the endpoint framework.
+ *
+ * Return: 0 if success, errno otherwise.
+ */
+int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ int ret;
+ struct pci_epc *epc;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
+
+ INIT_LIST_HEAD(&ep->func_list);
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
@@ -927,9 +1123,12 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->epc = epc;
epc_set_drvdata(epc, ep);
- ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
- if (ret < 0)
- epc->max_functions = 1;
+ ret = dw_pcie_ep_get_resources(ep);
+ if (ret)
+ return ret;
+
+ if (ep->ops->pre_init)
+ ep->ops->pre_init(ep);
ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
ep->page_size);
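[Editor's note] For context (not part of this patch), a glue driver typically calls dw_pcie_ep_init() from its probe routine after filling in the struct dw_pcie fields; a minimal hypothetical sketch:

	/* Hypothetical glue-driver probe; all "my_" names are illustrative. */
	static int my_pcie_ep_probe(struct platform_device *pdev)
	{
		struct dw_pcie *pci;

		pci = devm_kzalloc(&pdev->dev, sizeof(*pci), GFP_KERNEL);
		if (!pci)
			return -ENOMEM;

		pci->dev = &pdev->dev;
		pci->ep.ops = &my_pcie_ep_ops;	/* struct dw_pcie_ep_ops, defined elsewhere */

		return dw_pcie_ep_init(&pci->ep);
	}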