author    | Rodrigo Vivi <rodrigo.vivi@intel.com>     | 2023-07-26 17:03:52 -0400
committer | Rodrigo Vivi <rodrigo.vivi@intel.com>     | 2023-12-21 11:39:15 -0500
commit    | 063e09af6e1d9a4f26cdd0eb896c19526cb0afd3 (patch)
tree      | 45f5d92bd4ef69d512ce6d6fef66f5f608f6015f
parent    | f83a30f466ebbd56355b1f65ec9bcd5087840ffc (diff)
drm/xe: Invert mask and val in xe_mmio_wait32.
The 'offset, mask, val' order is more common in other drivers, and
especially in i915, so a developer copying a call sequence from there
could otherwise end up with unexpected behavior.
Done with coccinelle:
@rule1@
expression gt, reg, val, mask, timeout, out, atomic;
@@
- xe_mmio_wait32(gt, reg, val, mask, timeout, out, atomic)
+ xe_mmio_wait32(gt, reg, mask, val, timeout, out, atomic)
spatch -sp_file mmio.cocci *.c *.h compat-i915-headers/intel_uncore.h \
--in-place
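
For illustration, a caller written against the new argument order looks like
the sketch below. MY_STATUS_REG and MY_READY_BIT are hypothetical stand-ins;
only the reordered xe_mmio_wait32() signature and xe_gt_err() come from the
xe code touched by this patch.

/*
 * Minimal sketch of the new calling convention: the mask comes first,
 * then the value expected under that mask, matching the i915-style
 * 'offset, mask, val' order this patch converges on.
 */
static int example_wait_ready(struct xe_gt *gt)
{
	u32 status;
	int err;

	/* wait up to 5000us for MY_READY_BIT to be set in MY_STATUS_REG */
	err = xe_mmio_wait32(gt, MY_STATUS_REG, MY_READY_BIT, MY_READY_BIT,
			     5000, &status, false);
	if (err)
		xe_gt_err(gt, "device not ready, status=%#x\n", status);

	return err;
}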
v2: Rebased after changes to the xe_guc_mcr usage of xe_mmio_wait32.
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
-rw-r--r-- | drivers/gpu/drm/xe/xe_force_wake.c |  2
-rw-r--r-- | drivers/gpu/drm/xe/xe_gt.c         |  3
-rw-r--r-- | drivers/gpu/drm/xe/xe_guc.c        | 25
-rw-r--r-- | drivers/gpu/drm/xe/xe_huc.c        |  3
-rw-r--r-- | drivers/gpu/drm/xe/xe_mmio.h       |  4
-rw-r--r-- | drivers/gpu/drm/xe/xe_pcode.c      |  2
-rw-r--r-- | drivers/gpu/drm/xe/xe_uc_fw.c      |  2
7 files changed, 17 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c
index 7403673d532d..aba0784b608e 100644
--- a/drivers/gpu/drm/xe/xe_force_wake.c
+++ b/drivers/gpu/drm/xe/xe_force_wake.c
@@ -120,7 +120,7 @@ static void domain_sleep(struct xe_gt *gt, struct xe_force_wake_domain *domain)
 static int domain_sleep_wait(struct xe_gt *gt,
 			     struct xe_force_wake_domain *domain)
 {
-	return xe_mmio_wait32(gt, domain->reg_ack, 0, domain->val,
+	return xe_mmio_wait32(gt, domain->reg_ack, domain->val, 0,
 			      XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
 			      NULL, false);
 }
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 3e32d38aeeea..bb7794cf2c1a 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -456,8 +456,7 @@ static int do_gt_reset(struct xe_gt *gt)
 	int err;
 
 	xe_mmio_write32(gt, GDRST, GRDOM_FULL);
-	err = xe_mmio_wait32(gt, GDRST, 0, GRDOM_FULL, 5000,
-			     NULL, false);
+	err = xe_mmio_wait32(gt, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
 	if (err)
 		xe_gt_err(gt, "failed to clear GEN11_GRDOM_FULL (%pe)\n",
 			  ERR_PTR(err));
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 8ae026838702..2530b6243661 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -290,8 +290,7 @@ int xe_guc_reset(struct xe_guc *guc)
 
 	xe_mmio_write32(gt, GDRST, GRDOM_GUC);
 
-	ret = xe_mmio_wait32(gt, GDRST, 0, GRDOM_GUC, 5000,
-			     &gdrst, false);
+	ret = xe_mmio_wait32(gt, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false);
 	if (ret) {
 		drm_err(&xe->drm, "GuC reset timed out, GEN6_GDRST=0x%8x\n",
 			gdrst);
@@ -386,10 +385,9 @@ static int guc_wait_ucode(struct xe_guc *guc)
 	 * 200ms. Even at slowest clock, this should be sufficient. And
 	 * in the working case, a larger timeout makes no difference.
 	 */
-	ret = xe_mmio_wait32(guc_to_gt(guc), GUC_STATUS,
-			     FIELD_PREP(GS_UKERNEL_MASK,
-					XE_GUC_LOAD_STATUS_READY),
-			     GS_UKERNEL_MASK, 200000, &status, false);
+	ret = xe_mmio_wait32(guc_to_gt(guc), GUC_STATUS, GS_UKERNEL_MASK,
+			     FIELD_PREP(GS_UKERNEL_MASK, XE_GUC_LOAD_STATUS_READY),
+			     200000, &status, false);
 
 	if (ret) {
 		struct drm_device *drm = &xe->drm;
@@ -639,10 +637,9 @@ retry:
 
 	xe_guc_notify(guc);
 
-	ret = xe_mmio_wait32(gt, reply_reg,
-			     FIELD_PREP(GUC_HXG_MSG_0_ORIGIN,
-					GUC_HXG_ORIGIN_GUC),
-			     GUC_HXG_MSG_0_ORIGIN, 50000, &reply, false);
+	ret = xe_mmio_wait32(gt, reply_reg, GUC_HXG_MSG_0_ORIGIN,
+			     FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC),
+			     50000, &reply, false);
 	if (ret) {
 timeout:
 		drm_err(&xe->drm, "mmio request %#x: no reply %#x\n",
@@ -654,11 +651,9 @@ timeout:
 
 	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
 	    GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
-		ret = xe_mmio_wait32(gt, reply_reg,
-				     FIELD_PREP(GUC_HXG_MSG_0_TYPE,
-						GUC_HXG_TYPE_RESPONSE_SUCCESS),
-				     GUC_HXG_MSG_0_TYPE, 1000000, &header,
-				     false);
+		ret = xe_mmio_wait32(gt, reply_reg, GUC_HXG_MSG_0_TYPE,
+				     FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS),
+				     1000000, &header, false);
 
 		if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
 			     GUC_HXG_ORIGIN_GUC))
diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c
index 373a65c77946..dc1708b4e94a 100644
--- a/drivers/gpu/drm/xe/xe_huc.c
+++ b/drivers/gpu/drm/xe/xe_huc.c
@@ -85,8 +85,7 @@ int xe_huc_auth(struct xe_huc *huc)
 		goto fail;
 	}
 
-	ret = xe_mmio_wait32(gt, HUC_KERNEL_LOAD_INFO,
-			     HUC_LOAD_SUCCESSFUL,
+	ret = xe_mmio_wait32(gt, HUC_KERNEL_LOAD_INFO, HUC_LOAD_SUCCESSFUL,
 			     HUC_LOAD_SUCCESSFUL, 100000, NULL, false);
 	if (ret) {
 		drm_err(&xe->drm, "HuC: Firmware not verified %d\n", ret);
diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
index 4953a9a3f1fb..d24badca8677 100644
--- a/drivers/gpu/drm/xe/xe_mmio.h
+++ b/drivers/gpu/drm/xe/xe_mmio.h
@@ -107,8 +107,8 @@ static inline int xe_mmio_write32_and_verify(struct xe_gt *gt,
 	return (reg_val & mask) != eval ? -EINVAL : 0;
 }
 
-static inline int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 val,
-				 u32 mask, u32 timeout_us, u32 *out_val,
+static inline int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask,
+				 u32 val, u32 timeout_us, u32 *out_val,
 				 bool atomic)
 {
 	ktime_t cur = ktime_get_raw();
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index e3ab1d3a367f..7f1bf2297f51 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -68,7 +68,7 @@ static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
 	xe_mmio_write32(gt, PCODE_DATA1, data1 ? *data1 : 0);
 	xe_mmio_write32(gt, PCODE_MAILBOX, PCODE_READY | mbox);
 
-	err = xe_mmio_wait32(gt, PCODE_MAILBOX, 0, PCODE_READY,
+	err = xe_mmio_wait32(gt, PCODE_MAILBOX, PCODE_READY, 0,
 			     timeout_ms * 1000, NULL, atomic);
 	if (err)
 		return err;
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index 75f7a4cf6cbe..5801c10f3ccc 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -484,7 +484,7 @@ static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags)
 			 _MASKED_BIT_ENABLE(dma_flags | START_DMA));
 
 	/* Wait for DMA to finish */
-	ret = xe_mmio_wait32(gt, DMA_CTRL, 0, START_DMA, 100000, &dma_ctrl,
+	ret = xe_mmio_wait32(gt, DMA_CTRL, START_DMA, 0, 100000, &dma_ctrl,
 			     false);
 	if (ret)
 		drm_err(&xe->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
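
The xe_mmio.h hunk above only carries the reordered prototype and the first
line of the body. For context, a wait helper with these semantics polls until
(read & mask) == val or the timeout expires; the sketch below illustrates that
shape and is not the actual xe_mmio_wait32() implementation (xe_mmio_read32()
is the existing xe read helper, the ktime/delay calls are standard kernel APIs).

/*
 * Illustrative polling loop only; the real body lives in
 * drivers/gpu/drm/xe/xe_mmio.h.
 */
static inline int example_mmio_wait32(struct xe_gt *gt, struct xe_reg reg,
				      u32 mask, u32 val, u32 timeout_us,
				      u32 *out_val, bool atomic)
{
	ktime_t end = ktime_add_us(ktime_get_raw(), timeout_us);
	u32 read;

	for (;;) {
		read = xe_mmio_read32(gt, reg);
		if ((read & mask) == val)
			break;				/* condition met */

		if (ktime_after(ktime_get_raw(), end)) {
			if (out_val)
				*out_val = read;	/* report last value seen */
			return -ETIMEDOUT;
		}

		if (atomic)
			udelay(10);			/* atomic context: busy-wait */
		else
			usleep_range(10, 20);		/* otherwise allow sleeping */
	}

	if (out_val)
		*out_val = read;
	return 0;
}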