-rw-r--r-- | drivers/thunderbolt/lc.c      |  24
-rw-r--r-- | drivers/thunderbolt/switch.c  | 160
-rw-r--r-- | drivers/thunderbolt/tb.c      |   7
-rw-r--r-- | drivers/thunderbolt/tb.h      |  16
-rw-r--r-- | drivers/thunderbolt/tb_regs.h | 102
-rw-r--r-- | drivers/thunderbolt/tmu.c     |  53
6 files changed, 324 insertions, 38 deletions
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
index c178f0d7beab..53495a38b4eb 100644
--- a/drivers/thunderbolt/lc.c
+++ b/drivers/thunderbolt/lc.c
@@ -193,6 +193,30 @@ int tb_lc_start_lane_initialization(struct tb_port *port)
 	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
 }
 
+/**
+ * tb_lc_is_clx_supported() - Check whether CLx is supported by the lane adapter
+ * @port: Lane adapter
+ *
+ * TB_LC_LINK_ATTR_CPS bit reflects if the link supports CLx including
+ * active cables (if connected on the link).
+ */
+bool tb_lc_is_clx_supported(struct tb_port *port)
+{
+	struct tb_switch *sw = port->sw;
+	int cap, ret;
+	u32 val;
+
+	cap = find_port_lc_cap(port);
+	if (cap < 0)
+		return false;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_ATTR, 1);
+	if (ret)
+		return false;
+
+	return !!(val & TB_LC_LINK_ATTR_CPS);
+}
+
 static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
 			      unsigned int flags)
 {
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index c79074f83442..e7dc54d3ea99 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -2236,6 +2236,10 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
 	if (ret > 0)
 		sw->cap_lc = ret;
 
+	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
+	if (ret > 0)
+		sw->cap_lp = ret;
+
 	/* Root switch is always authorized */
 	if (!route)
 		sw->authorized = true;
@@ -3042,6 +3046,13 @@ void tb_switch_suspend(struct tb_switch *sw, bool runtime)
 
 	tb_sw_dbg(sw, "suspending switch\n");
 
+	/*
+	 * Actually only needed for Titan Ridge but for simplicity can be
+	 * done for USB4 device too as CLx is re-enabled at resume.
+	 */
+	if (tb_switch_disable_clx(sw, TB_CL0S))
+		tb_sw_warn(sw, "failed to disable CLx on upstream port\n");
+
 	err = tb_plug_events_active(sw, false);
 	if (err)
 		return;
@@ -3309,6 +3320,7 @@ static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
 	return tb_port_pm_secondary_disable(down);
 }
 
+/* Called for USB4 or Titan Ridge routers only */
 static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx)
 {
 	u32 mask, val;
@@ -3322,8 +3334,12 @@ static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx)
 	if (port->xdomain)
 		return false;
 
-	if (!usb4_port_clx_supported(port))
+	if (tb_switch_is_usb4(port->sw)) {
+		if (!usb4_port_clx_supported(port))
+			return false;
+	} else if (!tb_lc_is_clx_supported(port)) {
 		return false;
+	}
 
 	switch (clx) {
 	case TB_CL0S:
@@ -3389,7 +3405,7 @@ static int tb_switch_enable_cl0s(struct tb_switch *sw)
 	struct tb_port *up, *down;
 	int ret;
 
-	if (!tb_switch_is_usb4(sw))
+	if (!tb_switch_is_clx_supported(sw))
 		return 0;
 
 	/*
@@ -3431,6 +3447,13 @@ static int tb_switch_enable_cl0s(struct tb_switch *sw)
 		return ret;
 	}
 
+	ret = tb_switch_mask_clx_objections(sw);
+	if (ret) {
+		tb_port_cl0s_disable(up);
+		tb_port_cl0s_disable(down);
+		return ret;
+	}
+
 	sw->clx = TB_CL0S;
 
 	tb_port_dbg(up, "CL0s enabled\n");
@@ -3477,7 +3500,7 @@ static int tb_switch_disable_cl0s(struct tb_switch *sw)
 	struct tb_port *up, *down;
 	int ret;
 
-	if (!tb_switch_is_usb4(sw))
+	if (!tb_switch_is_clx_supported(sw))
 		return 0;
 
 	/*
@@ -3524,3 +3547,134 @@ int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
 		return -EOPNOTSUPP;
 	}
 }
+
+/**
+ * tb_switch_mask_clx_objections() - Mask CLx objections for a router
+ * @sw: Router to mask objections for
+ *
+ * Mask the objections coming from the second depth routers in order to
+ * stop these objections from interfering with the CLx states of the first
+ * depth link.
+ */
+int tb_switch_mask_clx_objections(struct tb_switch *sw)
+{
+	int up_port = sw->config.upstream_port_number;
+	u32 offset, val[2], mask_obj, unmask_obj;
+	int ret, i;
+
+	/* Only Titan Ridge of pre-USB4 devices support CLx states */
+	if (!tb_switch_is_titan_ridge(sw))
+		return 0;
+
+	if (!tb_route(sw))
+		return 0;
+
+	/*
+	 * In Titan Ridge there are only 2 dual-lane Thunderbolt ports:
+	 * Port A consists of lane adapters 1,2 and
+	 * Port B consists of lane adapters 3,4
+	 * If upstream port is A, (lanes are 1,2), we mask objections from
+	 * port B (lanes 3,4) and unmask objections from Port A and vice-versa.
+	 */
+	if (up_port == 1) {
+		mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
+		unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
+		offset = TB_LOW_PWR_C1_CL1;
+	} else {
+		mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
+		unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
+		offset = TB_LOW_PWR_C3_CL1;
+	}
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+			 sw->cap_lp + offset, ARRAY_SIZE(val));
+	if (ret)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(val); i++) {
+		val[i] |= mask_obj;
+		val[i] &= ~unmask_obj;
+	}
+
+	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+			   sw->cap_lp + offset, ARRAY_SIZE(val));
+}
+
+/*
+ * Can be used for read/write a specified PCIe bridge for any Thunderbolt 3
+ * device. For now used only for Titan Ridge.
+ */
+static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
+				       unsigned int pcie_offset, u32 value)
+{
+	u32 offset, command, val;
+	int ret;
+
+	if (sw->generation != 3)
+		return -EOPNOTSUPP;
+
+	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
+	ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
+	if (ret)
+		return ret;
+
+	command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;
+	command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
+	command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
+	command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
+			<< TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
+	command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;
+
+	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;
+
+	ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
+	if (ret)
+		return ret;
+
+	ret = tb_switch_wait_for_bit(sw, offset,
+				     TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100);
+	if (ret)
+		return ret;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
+	if (ret)
+		return ret;
+
+	if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+/**
+ * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
+ * @sw: Router to enable PCIe L1
+ *
+ * For Titan Ridge switch to enter CLx state, its PCIe bridges shall enable
+ * entry to PCIe L1 state. Shall be called after the upstream PCIe tunnel
+ * was configured. Due to Intel platforms limitation, shall be called only
+ * for first hop switch.
+ */
+int tb_switch_pcie_l1_enable(struct tb_switch *sw)
+{
+	struct tb_switch *parent = tb_switch_parent(sw);
+	int ret;
+
+	if (!tb_route(sw))
+		return 0;
+
+	if (!tb_switch_is_titan_ridge(sw))
+		return 0;
+
+	/* Enable PCIe L1 enable only for first hop router (depth = 1) */
+	if (tb_route(parent))
+		return 0;
+
+	/* Write to downstream PCIe bridge #5 aka Dn4 */
+	ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
+	if (ret)
+		return ret;
+
+	/* Write to Upstream PCIe bridge #0 aka Up0 */
+	return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
+}
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 1454dc5b6b91..cbd0ad85ffb1 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -1092,6 +1092,13 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
 		return -EIO;
 	}
 
+	/*
+	 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
+	 * here.
+	 */
+	if (tb_switch_pcie_l1_enable(sw))
+		tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
+
 	list_add_tail(&tunnel->list, &tcm->tunnel_list);
 	return 0;
 }
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index f416e4dcda76..74d3b14f004e 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -140,6 +140,7 @@ enum tb_clx {
  * @cap_plug_events: Offset to the plug events capability (%0 if not found)
  * @cap_vsec_tmu: Offset to the TMU vendor specific capability (%0 if not found)
  * @cap_lc: Offset to the link controller capability (%0 if not found)
+ * @cap_lp: Offset to the low power (CLx for TBT) capability (%0 if not found)
  * @is_unplugged: The switch is going away
  * @drom: DROM of the switch (%NULL if not found)
  * @nvm: Pointer to the NVM if the switch has one (%NULL otherwise)
@@ -192,6 +193,7 @@ struct tb_switch {
 	int cap_plug_events;
 	int cap_vsec_tmu;
 	int cap_lc;
+	int cap_lp;
 	bool is_unplugged;
 	u8 *drom;
 	struct tb_nvm *nvm;
@@ -972,6 +974,19 @@ static inline bool tb_switch_is_cl0s_enabled(const struct tb_switch *sw)
 	return sw->clx == TB_CL0S;
 }
 
+/**
+ * tb_switch_is_clx_supported() - Is CLx supported on this type of router
+ * @sw: The router to check CLx support for
+ */
+static inline bool tb_switch_is_clx_supported(const struct tb_switch *sw)
+{
+	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
+}
+
+int tb_switch_mask_clx_objections(struct tb_switch *sw);
+
+int tb_switch_pcie_l1_enable(struct tb_switch *sw);
+
 int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
 int tb_port_add_nfc_credits(struct tb_port *port, int credits);
 int tb_port_clear_counter(struct tb_port *port, int counter);
@@ -1065,6 +1080,7 @@ void tb_lc_unconfigure_port(struct tb_port *port);
 int tb_lc_configure_xdomain(struct tb_port *port);
 void tb_lc_unconfigure_xdomain(struct tb_port *port);
 int tb_lc_start_lane_initialization(struct tb_port *port);
+bool tb_lc_is_clx_supported(struct tb_port *port);
 int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags);
 int tb_lc_set_sleep(struct tb_switch *sw);
 bool tb_lc_lane_bonding_possible(struct tb_switch *sw);
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 105118ecc9cd..a74f4878d3e7 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -447,42 +447,78 @@ struct tb_regs_hop {
 } __packed;
 
 /* TMU Thunderbolt 3 registers */
-#define TB_TIME_VSEC_3_CS_26		0x1a
-#define TB_TIME_VSEC_3_CS_26_TD		BIT(22)
+#define TB_TIME_VSEC_3_CS_9			0x9
+#define TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK	GENMASK(17, 16)
+#define TB_TIME_VSEC_3_CS_26			0x1a
+#define TB_TIME_VSEC_3_CS_26_TD			BIT(22)
+
+/*
+ * Used for Titan Ridge only. Bits are part of the same register: TMU_ADP_CS_6
+ * (see above) as in USB4 spec, but these specific bits used for Titan Ridge
+ * only and reserved in USB4 spec.
+ */
+#define TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK	GENMASK(3, 2)
+#define TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL1	BIT(2)
+#define TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL2	BIT(3)
+
+/* Plug Events registers */
+#define TB_PLUG_EVENTS_PCIE_WR_DATA		0x1b
+#define TB_PLUG_EVENTS_PCIE_CMD			0x1c
+#define TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK	GENMASK(9, 0)
+#define TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT	10
+#define TB_PLUG_EVENTS_PCIE_CMD_BR_MASK		GENMASK(17, 10)
+#define TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK	BIT(21)
+#define TB_PLUG_EVENTS_PCIE_CMD_WR		0x1
+#define TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT	22
+#define TB_PLUG_EVENTS_PCIE_CMD_COMMAND_MASK	GENMASK(24, 22)
+#define TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL	0x2
+#define TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK	BIT(30)
+#define TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK	BIT(31)
+#define TB_PLUG_EVENTS_PCIE_CMD_RD_DATA		0x1d
+
+/* CP Low Power registers */
+#define TB_LOW_PWR_C1_CL1			0x1
+#define TB_LOW_PWR_C1_CL1_OBJ_MASK		GENMASK(4, 1)
+#define TB_LOW_PWR_C1_CL2_OBJ_MASK		GENMASK(4, 1)
+#define TB_LOW_PWR_C1_PORT_A_MASK		GENMASK(2, 1)
+#define TB_LOW_PWR_C0_PORT_B_MASK		GENMASK(4, 3)
+#define TB_LOW_PWR_C3_CL1			0x3
 
 /* Common link controller registers */
-#define TB_LC_DESC			0x02
-#define TB_LC_DESC_NLC_MASK		GENMASK(3, 0)
-#define TB_LC_DESC_SIZE_SHIFT		8
-#define TB_LC_DESC_SIZE_MASK		GENMASK(15, 8)
-#define TB_LC_DESC_PORT_SIZE_SHIFT	16
-#define TB_LC_DESC_PORT_SIZE_MASK	GENMASK(27, 16)
-#define TB_LC_FUSE			0x03
-#define TB_LC_SNK_ALLOCATION		0x10
-#define TB_LC_SNK_ALLOCATION_SNK0_MASK	GENMASK(3, 0)
-#define TB_LC_SNK_ALLOCATION_SNK0_CM	0x1
-#define TB_LC_SNK_ALLOCATION_SNK1_SHIFT	4
-#define TB_LC_SNK_ALLOCATION_SNK1_MASK	GENMASK(7, 4)
-#define TB_LC_SNK_ALLOCATION_SNK1_CM	0x1
-#define TB_LC_POWER			0x740
+#define TB_LC_DESC				0x02
+#define TB_LC_DESC_NLC_MASK			GENMASK(3, 0)
+#define TB_LC_DESC_SIZE_SHIFT			8
+#define TB_LC_DESC_SIZE_MASK			GENMASK(15, 8)
+#define TB_LC_DESC_PORT_SIZE_SHIFT		16
+#define TB_LC_DESC_PORT_SIZE_MASK		GENMASK(27, 16)
+#define TB_LC_FUSE				0x03
+#define TB_LC_SNK_ALLOCATION			0x10
+#define TB_LC_SNK_ALLOCATION_SNK0_MASK		GENMASK(3, 0)
+#define TB_LC_SNK_ALLOCATION_SNK0_CM		0x1
+#define TB_LC_SNK_ALLOCATION_SNK1_SHIFT		4
+#define TB_LC_SNK_ALLOCATION_SNK1_MASK		GENMASK(7, 4)
+#define TB_LC_SNK_ALLOCATION_SNK1_CM		0x1
+#define TB_LC_POWER				0x740
 
 /* Link controller registers */
-#define TB_LC_PORT_ATTR			0x8d
-#define TB_LC_PORT_ATTR_BE		BIT(12)
-
-#define TB_LC_SX_CTRL			0x96
-#define TB_LC_SX_CTRL_WOC		BIT(1)
-#define TB_LC_SX_CTRL_WOD		BIT(2)
-#define TB_LC_SX_CTRL_WODPC		BIT(3)
-#define TB_LC_SX_CTRL_WODPD		BIT(4)
-#define TB_LC_SX_CTRL_WOU4		BIT(5)
-#define TB_LC_SX_CTRL_WOP		BIT(6)
-#define TB_LC_SX_CTRL_L1C		BIT(16)
-#define TB_LC_SX_CTRL_L1D		BIT(17)
-#define TB_LC_SX_CTRL_L2C		BIT(20)
-#define TB_LC_SX_CTRL_L2D		BIT(21)
-#define TB_LC_SX_CTRL_SLI		BIT(29)
-#define TB_LC_SX_CTRL_UPSTREAM		BIT(30)
-#define TB_LC_SX_CTRL_SLP		BIT(31)
+#define TB_LC_PORT_ATTR				0x8d
+#define TB_LC_PORT_ATTR_BE			BIT(12)
+
+#define TB_LC_SX_CTRL				0x96
+#define TB_LC_SX_CTRL_WOC			BIT(1)
+#define TB_LC_SX_CTRL_WOD			BIT(2)
+#define TB_LC_SX_CTRL_WODPC			BIT(3)
+#define TB_LC_SX_CTRL_WODPD			BIT(4)
+#define TB_LC_SX_CTRL_WOU4			BIT(5)
+#define TB_LC_SX_CTRL_WOP			BIT(6)
+#define TB_LC_SX_CTRL_L1C			BIT(16)
+#define TB_LC_SX_CTRL_L1D			BIT(17)
+#define TB_LC_SX_CTRL_L2C			BIT(20)
+#define TB_LC_SX_CTRL_L2D			BIT(21)
+#define TB_LC_SX_CTRL_SLI			BIT(29)
+#define TB_LC_SX_CTRL_UPSTREAM			BIT(30)
+#define TB_LC_SX_CTRL_SLP			BIT(31)
+#define TB_LC_LINK_ATTR				0x97
+#define TB_LC_LINK_ATTR_CPS			BIT(18)
 
 #endif
diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c
index 8392d1352c98..e4a07a26f693 100644
--- a/drivers/thunderbolt/tmu.c
+++ b/drivers/thunderbolt/tmu.c
@@ -334,7 +334,12 @@ out:
  */
 int tb_switch_tmu_disable(struct tb_switch *sw)
 {
-	if (!tb_switch_is_usb4(sw))
+	/*
+	 * No need to disable TMU on devices that don't support CLx since
+	 * on these devices e.g. Alpine Ridge and earlier, the TMU mode
+	 * HiFi bi-directional is enabled by default and we don't change it.
+	 */
+	if (!tb_switch_is_clx_supported(sw))
 		return 0;
 
 	/* Already disabled? */
@@ -450,6 +455,31 @@ out:
 	return ret;
 }
 
+static int tb_switch_tmu_objection_mask(struct tb_switch *sw)
+{
+	u32 val;
+	int ret;
+
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+			 sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
+	if (ret)
+		return ret;
+
+	val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;
+
+	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+			   sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
+}
+
+static int tb_switch_tmu_unidirectional_enable(struct tb_switch *sw)
+{
+	struct tb_port *up = tb_upstream_port(sw);
+
+	return tb_port_tmu_write(up, TMU_ADP_CS_6,
+				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
+				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK);
+}
+
 /*
  * This function is called when the previous TMU mode was
  * TB_SWITCH_TMU_RATE_OFF.
@@ -497,12 +527,31 @@ static int tb_switch_tmu_hifi_enable(struct tb_switch *sw)
 	if (unidirectional && !sw->tmu.has_ucap)
 		return -EOPNOTSUPP;
 
-	if (!tb_switch_is_usb4(sw))
+	/*
+	 * No need to enable TMU on devices that don't support CLx since on
+	 * these devices e.g. Alpine Ridge and earlier, the TMU mode HiFi
+	 * bi-directional is enabled by default.
+	 */
+	if (!tb_switch_is_clx_supported(sw))
 		return 0;
 
 	if (tb_switch_tmu_hifi_is_enabled(sw, sw->tmu.unidirectional_request))
 		return 0;
 
+	if (tb_switch_is_titan_ridge(sw) && unidirectional) {
+		/* Titan Ridge supports only CL0s */
+		if (!tb_switch_is_cl0s_enabled(sw))
+			return -EOPNOTSUPP;
+
+		ret = tb_switch_tmu_objection_mask(sw);
+		if (ret)
+			return ret;
+
+		ret = tb_switch_tmu_unidirectional_enable(sw);
+		if (ret)
+			return ret;
+	}
+
 	ret = tb_switch_tmu_set_time_disruption(sw, true);
 	if (ret)
 		return ret;