Diffstat (limited to 'drivers/gpu/drm/i915/display/intel_dmc_wl.c')
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc_wl.c | 369 ++++++++++++++++++++++-----
 1 file changed, 308 insertions(+), 61 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
index 5634ff07269d..7e2ce0c2f6c3 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_wl.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
@@ -5,8 +5,11 @@
#include <linux/kernel.h>
+#include <drm/drm_print.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
#include "intel_de.h"
-#include "intel_dmc.h"
#include "intel_dmc_regs.h"
#include "intel_dmc_wl.h"
@@ -39,16 +42,115 @@
* potential future use.
*/
-#define DMC_WAKELOCK_CTL_TIMEOUT 5
+/*
+ * Define DMC_WAKELOCK_CTL_TIMEOUT_US in microseconds because we use the
+ * atomic variant of waiting on MMIO.
+ */
+#define DMC_WAKELOCK_CTL_TIMEOUT_US 5000
#define DMC_WAKELOCK_HOLD_TIME 50
+/*
+ * Possible non-negative values for the enable_dmc_wl param.
+ */
+enum {
+ ENABLE_DMC_WL_DISABLED,
+ ENABLE_DMC_WL_ENABLED,
+ ENABLE_DMC_WL_ANY_REGISTER,
+ ENABLE_DMC_WL_ALWAYS_LOCKED,
+ ENABLE_DMC_WL_MAX,
+};
+
struct intel_dmc_wl_range {
u32 start;
u32 end;
};
-static struct intel_dmc_wl_range lnl_wl_range[] = {
+static const struct intel_dmc_wl_range powered_off_ranges[] = {
+ { .start = 0x44400, .end = 0x4447f }, /* PIPE interrupt registers */
{ .start = 0x60000, .end = 0x7ffff },
+ {},
+};
+
+static const struct intel_dmc_wl_range xe3lpd_dc5_dc6_dmc_ranges[] = {
+ { .start = 0x45500 }, /* DC_STATE_SEL */
+ { .start = 0x457a0, .end = 0x457b0 }, /* DC*_RESIDENCY_COUNTER */
+ { .start = 0x45504 }, /* DC_STATE_EN */
+ { .start = 0x45400, .end = 0x4540c }, /* PWR_WELL_CTL_* */
+ { .start = 0x454f0 }, /* RETENTION_CTRL */
+
+ /* DBUF_CTL_* */
+ { .start = 0x44300 },
+ { .start = 0x44304 },
+ { .start = 0x44f00 },
+ { .start = 0x44f04 },
+ { .start = 0x44fe8 },
+ { .start = 0x45008 },
+
+ { .start = 0x46070 }, /* CDCLK_PLL_ENABLE */
+ { .start = 0x46000 }, /* CDCLK_CTL */
+ { .start = 0x46008 }, /* CDCLK_SQUASH_CTL */
+
+ /* TRANS_CMTG_CTL_* */
+ { .start = 0x6fa88 },
+ { .start = 0x6fb88 },
+
+ { .start = 0x46430 }, /* CHICKEN_DCPR_1 */
+ { .start = 0x46434 }, /* CHICKEN_DCPR_2 */
+ { .start = 0x454a0 }, /* CHICKEN_DCPR_4 */
+ { .start = 0x42084 }, /* CHICKEN_MISC_2 */
+ { .start = 0x42088 }, /* CHICKEN_MISC_3 */
+ { .start = 0x46160 }, /* CMTG_CLK_SEL */
+ { .start = 0x8f000, .end = 0x8ffff }, /* Main DMC registers */
+ { .start = 0x45230 }, /* INITIATE_PM_DMD_REQ */
+
+ {},
+};
+
+static const struct intel_dmc_wl_range xe3lpd_dc3co_dmc_ranges[] = {
+ { .start = 0x454a0 }, /* CHICKEN_DCPR_4 */
+
+ { .start = 0x45504 }, /* DC_STATE_EN */
+
+ /* DBUF_CTL_* */
+ { .start = 0x44300 },
+ { .start = 0x44304 },
+ { .start = 0x44f00 },
+ { .start = 0x44f04 },
+ { .start = 0x44fe8 },
+ { .start = 0x45008 },
+
+ { .start = 0x46070 }, /* CDCLK_PLL_ENABLE */
+ { .start = 0x46000 }, /* CDCLK_CTL */
+ { .start = 0x46008 }, /* CDCLK_SQUASH_CTL */
+ { .start = 0x8f000, .end = 0x8ffff }, /* Main DMC registers */
+
+ /* Scanline registers */
+ { .start = 0x70000 },
+ { .start = 0x70004 },
+ { .start = 0x70014 },
+ { .start = 0x70018 },
+ { .start = 0x71000 },
+ { .start = 0x71004 },
+ { .start = 0x71014 },
+ { .start = 0x71018 },
+ { .start = 0x72000 },
+ { .start = 0x72004 },
+ { .start = 0x72014 },
+ { .start = 0x72018 },
+ { .start = 0x73000 },
+ { .start = 0x73004 },
+ { .start = 0x73014 },
+ { .start = 0x73018 },
+ { .start = 0x7b000 },
+ { .start = 0x7b004 },
+ { .start = 0x7b014 },
+ { .start = 0x7b018 },
+ { .start = 0x7c000 },
+ { .start = 0x7c004 },
+ { .start = 0x7c014 },
+ { .start = 0x7c018 },
+
+ {},
};
static void __intel_dmc_wl_release(struct intel_display *display)
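A note on the convention in the range tables above: an entry that sets only .start matches that single register, since the lookup helper added below treats a zero .end as equal to .start, and the empty { } entry is the sentinel terminating each table. For example, { .start = 0x457a0, .end = 0x457b0 } covers the five 32-bit DC*_RESIDENCY_COUNTER registers from 0x457a0 through 0x457b0, while { .start = 0x45504 } matches DC_STATE_EN alone.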
@@ -72,15 +174,18 @@ static void intel_dmc_wl_work(struct work_struct *work)
spin_lock_irqsave(&wl->lock, flags);
- /* Bail out if refcount reached zero while waiting for the spinlock */
- if (!refcount_read(&wl->refcount))
+ /*
+ * Bail out if refcount became non-zero while waiting for the spinlock,
+ * meaning that the lock is now taken again.
+ */
+ if (refcount_read(&wl->refcount))
goto out_unlock;
__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
- if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
- DMC_WAKELOCK_CTL_ACK, 0,
- DMC_WAKELOCK_CTL_TIMEOUT)) {
+ if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
+ DMC_WAKELOCK_CTL_ACK, 0,
+ DMC_WAKELOCK_CTL_TIMEOUT_US)) {
WARN_RATELIMIT(1, "DMC wakelock release timed out");
goto out_unlock;
}
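The timeout constant moved from 5 (milliseconds) to 5000 microseconds because the atomic wait variant spins instead of sleeping and takes its timeout in microseconds. A rough sketch of such an atomic MMIO poll in terms of the generic iopoll helpers; this is illustrative only, with a hypothetical wait_for_ack_atomic(), since the internals of the real __intel_de_wait_for_register_atomic_nowl() are not part of this diff:

	#include <linux/iopoll.h>

	/*
	 * Hypothetical helper: spin (never sleep, so it is safe in atomic
	 * context) until (reg & mask) == value, or until timeout_us expires.
	 * Returns 0 on success, -ETIMEDOUT otherwise.
	 */
	static int wait_for_ack_atomic(void __iomem *reg, u32 mask, u32 value,
				       unsigned int timeout_us)
	{
		u32 val;

		return readl_poll_timeout_atomic(reg, val,
						 (val & mask) == value,
						 10, timeout_us);
	}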
@@ -91,46 +196,153 @@ out_unlock:
spin_unlock_irqrestore(&wl->lock, flags);
}
-static bool intel_dmc_wl_check_range(u32 address)
+static void __intel_dmc_wl_take(struct intel_display *display)
{
- int i;
- bool wl_needed = false;
-
- for (i = 0; i < ARRAY_SIZE(lnl_wl_range); i++) {
- if (address >= lnl_wl_range[i].start &&
- address <= lnl_wl_range[i].end) {
- wl_needed = true;
- break;
- }
+ struct intel_dmc_wl *wl = &display->wl;
+
+ /*
+ * Only try to take the wakelock if it's not marked as taken
+ * yet. It may already be taken at this point if we have
+ * already released the last reference, but the work has not
+ * run yet.
+ */
+ if (wl->taken)
+ return;
+
+ __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
+ DMC_WAKELOCK_CTL_REQ);
+
+ /*
+ * We need to use the atomic variant of the waiting routine
+ * because the DMC wakelock is also taken in atomic context.
+ */
+ if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
+ DMC_WAKELOCK_CTL_ACK,
+ DMC_WAKELOCK_CTL_ACK,
+ DMC_WAKELOCK_CTL_TIMEOUT_US)) {
+ WARN_RATELIMIT(1, "DMC wakelock ack timed out");
+ return;
+ }
+
+ wl->taken = true;
+}
+
+static bool intel_dmc_wl_reg_in_range(i915_reg_t reg,
+ const struct intel_dmc_wl_range ranges[])
+{
+ u32 offset = i915_mmio_reg_offset(reg);
+
+ for (int i = 0; ranges[i].start; i++) {
+ u32 end = ranges[i].end ?: ranges[i].start;
+
+ if (ranges[i].start <= offset && offset <= end)
+ return true;
+ }
+
+ return false;
+}
+
+static bool intel_dmc_wl_check_range(struct intel_display *display,
+ i915_reg_t reg,
+ u32 dc_state)
+{
+ const struct intel_dmc_wl_range *ranges;
+
+ if (display->params.enable_dmc_wl == ENABLE_DMC_WL_ANY_REGISTER)
+ return true;
+
+ /*
+ * Check that the offset is in one of the ranges for which
+ * registers are powered off during DC states.
+ */
+ if (intel_dmc_wl_reg_in_range(reg, powered_off_ranges))
+ return true;
+
+ /*
+ * Check that the offset is for a register that is touched by
+ * the DMC and requires a DC exit for proper access.
+ */
+ switch (dc_state) {
+ case DC_STATE_EN_DC3CO:
+ ranges = xe3lpd_dc3co_dmc_ranges;
+ break;
+ case DC_STATE_EN_UPTO_DC5:
+ case DC_STATE_EN_UPTO_DC6:
+ ranges = xe3lpd_dc5_dc6_dmc_ranges;
+ break;
+ default:
+ ranges = NULL;
}
- return wl_needed;
+ if (ranges && intel_dmc_wl_reg_in_range(reg, ranges))
+ return true;
+
+ return false;
}
static bool __intel_dmc_wl_supported(struct intel_display *display)
{
- if (DISPLAY_VER(display) < 20 ||
- !intel_dmc_has_payload(display) ||
- !display->params.enable_dmc_wl)
- return false;
+ return display->params.enable_dmc_wl;
+}
+
+static void intel_dmc_wl_sanitize_param(struct intel_display *display)
+{
+ const char *desc;
+
+ if (!HAS_DMC_WAKELOCK(display)) {
+ display->params.enable_dmc_wl = ENABLE_DMC_WL_DISABLED;
+ } else if (display->params.enable_dmc_wl < 0) {
+ if (DISPLAY_VER(display) >= 30)
+ display->params.enable_dmc_wl = ENABLE_DMC_WL_ENABLED;
+ else
+ display->params.enable_dmc_wl = ENABLE_DMC_WL_DISABLED;
+ } else if (display->params.enable_dmc_wl >= ENABLE_DMC_WL_MAX) {
+ display->params.enable_dmc_wl = ENABLE_DMC_WL_ENABLED;
+ }
+
+ drm_WARN_ON(display->drm,
+ display->params.enable_dmc_wl < 0 ||
+ display->params.enable_dmc_wl >= ENABLE_DMC_WL_MAX);
+
+ switch (display->params.enable_dmc_wl) {
+ case ENABLE_DMC_WL_DISABLED:
+ desc = "disabled";
+ break;
+ case ENABLE_DMC_WL_ENABLED:
+ desc = "enabled";
+ break;
+ case ENABLE_DMC_WL_ANY_REGISTER:
+ desc = "match any register";
+ break;
+ case ENABLE_DMC_WL_ALWAYS_LOCKED:
+ desc = "always locked";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
- return true;
+ drm_dbg_kms(display->drm, "Sanitized enable_dmc_wl value: %d (%s)\n",
+ display->params.enable_dmc_wl, desc);
}
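After sanitization, the integer parameter maps as follows: a negative value picks the per-platform default (ENABLE_DMC_WL_ENABLED on display version 30 and newer, ENABLE_DMC_WL_DISABLED otherwise), 0 disables the feature, 1 enables it with normal range checking, 2 treats any register as requiring the wakelock, and 3 keeps the wakelock held at all times; positive values at or above ENABLE_DMC_WL_MAX fall back to plain enabled. Assuming the parameter is wired up like the other display params, booting with e.g. i915.enable_dmc_wl=2 would select the debug-oriented "match any register" behavior.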
void intel_dmc_wl_init(struct intel_display *display)
{
struct intel_dmc_wl *wl = &display->wl;
- /* don't call __intel_dmc_wl_supported(), DMC is not loaded yet */
- if (DISPLAY_VER(display) < 20 || !display->params.enable_dmc_wl)
+ intel_dmc_wl_sanitize_param(display);
+
+ if (!display->params.enable_dmc_wl)
return;
INIT_DELAYED_WORK(&wl->work, intel_dmc_wl_work);
spin_lock_init(&wl->lock);
- refcount_set(&wl->refcount, 0);
+ refcount_set(&wl->refcount,
+ display->params.enable_dmc_wl == ENABLE_DMC_WL_ALWAYS_LOCKED ? 1 : 0);
}
-void intel_dmc_wl_enable(struct intel_display *display)
+/* Must only be called as part of enabling dynamic DC states. */
+void intel_dmc_wl_enable(struct intel_display *display, u32 dc_state)
{
struct intel_dmc_wl *wl = &display->wl;
unsigned long flags;
@@ -140,7 +352,9 @@ void intel_dmc_wl_enable(struct intel_display *display)
spin_lock_irqsave(&wl->lock, flags);
- if (wl->enabled)
+ wl->dc_state = dc_state;
+
+ if (drm_WARN_ON(display->drm, wl->enabled))
goto out_unlock;
/*
@@ -151,12 +365,29 @@ void intel_dmc_wl_enable(struct intel_display *display)
__intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE);
wl->enabled = true;
- wl->taken = false;
+
+ /*
+ * This would be racy in the following scenario:
+ *
+ * 1. Function A calls intel_dmc_wl_get();
+ * 2. Some function calls intel_dmc_wl_disable();
+ * 3. Some function calls intel_dmc_wl_enable();
+ * 4. Concurrently with (3), function A performs the MMIO in between
+ * setting DMC_WAKELOCK_CFG_ENABLE and asserting the lock with
+ * __intel_dmc_wl_take().
+ *
+ * TODO: Check with the hardware team whether it is safe to assert the
+ * hardware lock before enabling to avoid such a scenario. Otherwise, we
+ * would need to deal with it via software synchronization.
+ */
+ if (refcount_read(&wl->refcount))
+ __intel_dmc_wl_take(display);
out_unlock:
spin_unlock_irqrestore(&wl->lock, flags);
}
+/* Must only be called as part of disabling dynamic DC states. */
void intel_dmc_wl_disable(struct intel_display *display)
{
struct intel_dmc_wl *wl = &display->wl;
@@ -165,39 +396,63 @@ void intel_dmc_wl_disable(struct intel_display *display)
if (!__intel_dmc_wl_supported(display))
return;
- flush_delayed_work(&wl->work);
+ intel_dmc_wl_flush_release_work(display);
spin_lock_irqsave(&wl->lock, flags);
- if (!wl->enabled)
+ if (drm_WARN_ON(display->drm, !wl->enabled))
goto out_unlock;
/* Disable wakelock in DMC */
__intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0);
- refcount_set(&wl->refcount, 0);
wl->enabled = false;
+
+ /*
+ * The spec is not explicit about the expectation of existing
+ * lock users at the moment of disabling, but it does say that we must
+ * clear DMC_WAKELOCK_CTL_REQ, which gives us a clue that it is okay to
+ * disable with existing lock users.
+ *
+ * TODO: Get the correct expectation from the hardware team.
+ */
+ __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
+
wl->taken = false;
out_unlock:
spin_unlock_irqrestore(&wl->lock, flags);
}
-void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
+void intel_dmc_wl_flush_release_work(struct intel_display *display)
{
struct intel_dmc_wl *wl = &display->wl;
- unsigned long flags;
if (!__intel_dmc_wl_supported(display))
return;
- if (!intel_dmc_wl_check_range(reg.reg))
+ flush_delayed_work(&wl->work);
+}
+
+void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
+{
+ struct intel_dmc_wl *wl = &display->wl;
+ unsigned long flags;
+
+ if (!__intel_dmc_wl_supported(display))
return;
spin_lock_irqsave(&wl->lock, flags);
- if (!wl->enabled)
+ if (i915_mmio_reg_valid(reg) &&
+ !intel_dmc_wl_check_range(display, reg, wl->dc_state))
+ goto out_unlock;
+
+ if (!wl->enabled) {
+ if (!refcount_inc_not_zero(&wl->refcount))
+ refcount_set(&wl->refcount, 1);
goto out_unlock;
+ }
cancel_delayed_work(&wl->work);
@@ -206,26 +461,7 @@ void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
refcount_set(&wl->refcount, 1);
- /*
- * Only try to take the wakelock if it's not marked as taken
- * yet. It may be already taken at this point if we have
- * already released the last reference, but the work has not
- * run yet.
- */
- if (!wl->taken) {
- __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
- DMC_WAKELOCK_CTL_REQ);
-
- if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
- DMC_WAKELOCK_CTL_ACK,
- DMC_WAKELOCK_CTL_ACK,
- DMC_WAKELOCK_CTL_TIMEOUT)) {
- WARN_RATELIMIT(1, "DMC wakelock ack timed out");
- goto out_unlock;
- }
-
- wl->taken = true;
- }
+ __intel_dmc_wl_take(display);
out_unlock:
spin_unlock_irqrestore(&wl->lock, flags);
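One subtlety in intel_dmc_wl_get() above: refcount_t deliberately refuses to go from 0 to 1 via refcount_inc(), which WARNs on a zero count because that normally indicates use-after-free. That is why the !wl->enabled path takes a reference with the two-step pattern:

	/* Take a reference even from zero; safe only because wl->lock is held. */
	if (!refcount_inc_not_zero(&wl->refcount))
		refcount_set(&wl->refcount, 1);

Here wl->lock serializes all counter updates, so the set cannot race with another get or put.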
@@ -239,12 +475,10 @@ void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
if (!__intel_dmc_wl_supported(display))
return;
- if (!intel_dmc_wl_check_range(reg.reg))
- return;
-
spin_lock_irqsave(&wl->lock, flags);
- if (!wl->enabled)
+ if (i915_mmio_reg_valid(reg) &&
+ !intel_dmc_wl_check_range(display, reg, wl->dc_state))
goto out_unlock;
if (WARN_RATELIMIT(!refcount_read(&wl->refcount),
@@ -252,6 +486,9 @@ void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
goto out_unlock;
if (refcount_dec_and_test(&wl->refcount)) {
+ if (!wl->enabled)
+ goto out_unlock;
+
__intel_dmc_wl_release(display);
goto out_unlock;
@@ -260,3 +497,13 @@ void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
out_unlock:
spin_unlock_irqrestore(&wl->lock, flags);
}
+
+void intel_dmc_wl_get_noreg(struct intel_display *display)
+{
+ intel_dmc_wl_get(display, INVALID_MMIO_REG);
+}
+
+void intel_dmc_wl_put_noreg(struct intel_display *display)
+{
+ intel_dmc_wl_put(display, INVALID_MMIO_REG);
+}
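The _noreg variants take and release the wakelock without naming a register: INVALID_MMIO_REG fails i915_mmio_reg_valid(), so the range check is skipped and a reference is always taken. For illustration, a sketch of how an MMIO read wrapper would bracket itself with the wakelock; the wrapper and raw-read names here are assumptions, not necessarily the exact upstream callers:

	/* Hypothetical caller: wake the DMC around a single register read. */
	static u32 read_with_dmc_wl(struct intel_display *display, i915_reg_t reg)
	{
		u32 val;

		intel_dmc_wl_get(display, reg);       /* DC exit if reg needs it */
		val = intel_de_read_fw(display, reg); /* raw read, name assumed */
		intel_dmc_wl_put(display, reg);       /* drop ref; delayed release */

		return val;
	}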