Diffstat (limited to 'drivers/gpu/drm/msm')
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_preempt.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_catalog.c | 29
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 74
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 20
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 15
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h | 17
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_preempt.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 12
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h | 19
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c | 140
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h | 10
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 233
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 474
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h | 12
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h | 7
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c | 16
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 40
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 30
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h | 15
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 21
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 13
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 74
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 298
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 14
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_ctrl.c | 25
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c | 47
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_drm.c | 24
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_drm.h | 6
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 193
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c | 32
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 5
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c | 1
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c | 1
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c | 1
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c | 1
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c | 21
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 120
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h | 31
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_audio.c | 124
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 324
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 15
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_dsc_helper.h | 11
-rw-r--r--  drivers/gpu/drm/msm/msm_fence.c | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 9
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_io_utils.c | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 54
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.c | 19
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.h | 10
-rw-r--r--  drivers/gpu/drm/msm/msm_mmu.h | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c | 17
-rw-r--r--  drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml | 7
-rw-r--r--  drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml | 1
-rw-r--r--  drivers/gpu/drm/msm/registers/display/hdmi.xml | 2
90 files changed, 1791 insertions(+), 1034 deletions(-)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 7ec833b6d829..974bc7c0ea76 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -170,6 +170,8 @@ config DRM_MSM_HDMI
bool "Enable HDMI support in MSM DRM driver"
depends on DRM_MSM
default y
+ select DRM_DISPLAY_HDMI_HELPER
+ select DRM_DISPLAY_HDMI_STATE_HELPER
help
Compile in support for the HDMI output MSM DRM driver. It can
be a primary or a secondary display on device. Note that this is used
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 71dca78cd7a5..650e5bac225f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1253,7 +1253,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
/* Turn off the hangcheck timer to keep it from bothering us */
- del_timer(&gpu->hangcheck_timer);
+ timer_delete(&gpu->hangcheck_timer);
kthread_queue_work(gpu->worker, &gpu->recover_work);
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 0469fea55010..36f72c43eae8 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -182,7 +182,7 @@ void a5xx_preempt_irq(struct msm_gpu *gpu)
return;
/* Delete the preemption watchdog timer */
- del_timer(&a5xx_gpu->preempt_timer);
+ timer_delete(&a5xx_gpu->preempt_timer);
/*
* The hardware should be setting CP_CONTEXT_SWITCH_CNTL to zero before
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index edffb7737a97..53e2ff4406d8 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -880,6 +880,35 @@ static const struct adreno_info a6xx_gpus[] = {
{ 137, 1 },
),
}, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06020300),
+ .family = ADRENO_6XX_GEN3,
+ .fw = {
+ [ADRENO_FW_SQE] = "a650_sqe.fw",
+ [ADRENO_FW_GMU] = "a623_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .init = a6xx_gpu_init,
+ .a6xx = &(const struct a6xx_info) {
+ .hwcg = a690_hwcg,
+ .protect = &a650_protect,
+ .gmu_cgc_mode = 0x00020200,
+ .prim_fifo_threshold = 0x00010000,
+ .bcms = (const struct a6xx_bcm[]) {
+ { .name = "SH0", .buswidth = 16 },
+ { .name = "MC0", .buswidth = 4 },
+ {
+ .name = "ACV",
+ .fixed = true,
+ .perfmode = BIT(3),
+ },
+ { /* sentinel */ },
+ },
+ },
+ .address_space_size = SZ_16G,
+ }, {
.chip_ids = ADRENO_CHIP_IDS(
0x06030001,
0x06030002
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 699b0dd34b18..c8711938a5f4 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -28,7 +28,7 @@ static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
gmu->hung = true;
/* Turn off the hangcheck timer while we are resetting */
- del_timer(&gpu->hangcheck_timer);
+ timer_delete(&gpu->hangcheck_timer);
/* Queue the GPU handler because we need to treat this as a recovery */
kthread_queue_work(gpu->worker, &gpu->recover_work);
@@ -1169,49 +1169,50 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
u32 val;
+ int ret;
/*
- * The GMU may still be in slumber unless the GPU started so check and
- * skip putting it back into slumber if so
+ * GMU firmware's internal power state gets messed up if we send "prepare_slumber" hfi when
+ * oob_gpu handshake wasn't done after the last wake up. So do a dummy handshake here when
+ * required
*/
- val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+ if (adreno_gpu->base.needs_hw_init) {
+ if (a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET))
+ goto force_off;
- if (val != 0xf) {
- int ret = a6xx_gmu_wait_for_idle(gmu);
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ }
- /* If the GMU isn't responding assume it is hung */
- if (ret) {
- a6xx_gmu_force_off(gmu);
- return;
- }
+ ret = a6xx_gmu_wait_for_idle(gmu);
- a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
+ /* If the GMU isn't responding assume it is hung */
+ if (ret)
+ goto force_off;
- /* tell the GMU we want to slumber */
- ret = a6xx_gmu_notify_slumber(gmu);
- if (ret) {
- a6xx_gmu_force_off(gmu);
- return;
- }
+ a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
- ret = gmu_poll_timeout(gmu,
- REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
- !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
- 100, 10000);
+ /* tell the GMU we want to slumber */
+ ret = a6xx_gmu_notify_slumber(gmu);
+ if (ret)
+ goto force_off;
- /*
- * Let the user know we failed to slumber but don't worry too
- * much because we are powering down anyway
- */
+ ret = gmu_poll_timeout(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
+ !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
+ 100, 10000);
- if (ret)
- DRM_DEV_ERROR(gmu->dev,
- "Unable to slumber GMU: status = 0%x/0%x\n",
- gmu_read(gmu,
- REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
- gmu_read(gmu,
- REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
- }
+ /*
+ * Let the user know we failed to slumber but don't worry too
+ * much because we are powering down anyway
+ */
+
+ if (ret)
+ DRM_DEV_ERROR(gmu->dev,
+ "Unable to slumber GMU: status = 0%x/0%x\n",
+ gmu_read(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
+ gmu_read(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
/* Turn off HFI */
a6xx_hfi_stop(gmu);
@@ -1221,6 +1222,11 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
/* Tell RPMh to power off the GPU */
a6xx_rpmh_stop(gmu);
+
+ return;
+
+force_off:
+ a6xx_gmu_force_off(gmu);
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 0ae29a7c8a4d..242d02d48c0c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -242,10 +242,10 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
- OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
- OUT_RING(ring, submit->cmd[i].size);
+ OUT_RING(ring, A5XX_CP_INDIRECT_BUFFER_2_IB_SIZE(submit->cmd[i].size));
ibs++;
break;
}
@@ -377,10 +377,10 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
- OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
- OUT_RING(ring, submit->cmd[i].size);
+ OUT_RING(ring, A5XX_CP_INDIRECT_BUFFER_2_IB_SIZE(submit->cmd[i].size));
ibs++;
break;
}
@@ -616,6 +616,14 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
gpu->ubwc_config.uavflagprd_inv = 2;
}
+ if (adreno_is_a623(gpu)) {
+ gpu->ubwc_config.highest_bank_bit = 16;
+ gpu->ubwc_config.amsbc = 1;
+ gpu->ubwc_config.rgb565_predicator = 1;
+ gpu->ubwc_config.uavflagprd_inv = 2;
+ gpu->ubwc_config.macrotile_mode = 1;
+ }
+
if (adreno_is_a640_family(gpu))
gpu->ubwc_config.amsbc = 1;
@@ -1698,7 +1706,7 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE));
/* Turn off the hangcheck timer to keep it from bothering us */
- del_timer(&gpu->hangcheck_timer);
+ timer_delete(&gpu->hangcheck_timer);
kthread_queue_work(gpu->worker, &gpu->recover_work);
}
@@ -1718,7 +1726,7 @@ static void a7xx_sw_fuse_violation_irq(struct msm_gpu *gpu)
*/
if (status & (A7XX_CX_MISC_SW_FUSE_VALUE_RAYTRACING |
A7XX_CX_MISC_SW_FUSE_VALUE_LPAC)) {
- del_timer(&gpu->hangcheck_timer);
+ timer_delete(&gpu->hangcheck_timer);
kthread_queue_work(gpu->worker, &gpu->recover_work);
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 0fcae53c0b14..341a72a67401 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -1214,12 +1214,12 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
- 3, sizeof(*a6xx_state->gmu_registers));
+ 4, sizeof(*a6xx_state->gmu_registers));
if (!a6xx_state->gmu_registers)
return;
- a6xx_state->nr_gmu_registers = 3;
+ a6xx_state->nr_gmu_registers = 4;
/* Get the CX GMU registers from AHB */
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
@@ -1227,6 +1227,13 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[1],
&a6xx_state->gmu_registers[1], true);
+ if (adreno_is_a621(adreno_gpu) || adreno_is_a623(adreno_gpu))
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a621_gpucc_reg,
+ &a6xx_state->gmu_registers[2], false);
+ else
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gpucc_reg,
+ &a6xx_state->gmu_registers[2], false);
+
if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
return;
@@ -1234,7 +1241,7 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[2],
- &a6xx_state->gmu_registers[2], false);
+ &a6xx_state->gmu_registers[3], false);
}
static struct msm_gpu_state_bo *a6xx_snapshot_gmu_bo(
@@ -1507,6 +1514,8 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
/* Restore the size in the hardware */
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, mempool_size);
+
+ a6xx_state->nr_indexed_regs = count;
}
static void a7xx_get_indexed_registers(struct msm_gpu *gpu,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index dd4c28a8d923..e545106c70be 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -363,6 +363,9 @@ static const u32 a6xx_gmu_cx_registers[] = {
0x51e0, 0x51e2, 0x51f0, 0x51f0, 0x5200, 0x5201,
/* GMU AO */
0x9300, 0x9316, 0x9400, 0x9400,
+};
+
+static const u32 a6xx_gmu_gpucc_registers[] = {
/* GPU CC */
0x9800, 0x9812, 0x9840, 0x9852, 0x9c00, 0x9c04, 0x9c07, 0x9c0b,
0x9c15, 0x9c1c, 0x9c1e, 0x9c2d, 0x9c3c, 0x9c3d, 0x9c3f, 0x9c40,
@@ -373,6 +376,17 @@ static const u32 a6xx_gmu_cx_registers[] = {
0xbc00, 0xbc16, 0xbc20, 0xbc27,
};
+static const u32 a621_gmu_gpucc_registers[] = {
+ /* GPU CC */
+ 0x9800, 0x980e, 0x9c00, 0x9c0e, 0xb000, 0xb004, 0xb400, 0xb404,
+ 0xb800, 0xb804, 0xbc00, 0xbc05, 0xbc14, 0xbc1d, 0xbc2a, 0xbc30,
+ 0xbc32, 0xbc32, 0xbc41, 0xbc55, 0xbc66, 0xbc68, 0xbc78, 0xbc7a,
+ 0xbc89, 0xbc8a, 0xbc9c, 0xbc9e, 0xbca0, 0xbca3, 0xbcb3, 0xbcb5,
+ 0xbcc5, 0xbcc7, 0xbcd6, 0xbcd8, 0xbce8, 0xbce9, 0xbcf9, 0xbcfc,
+ 0xbd0b, 0xbd0c, 0xbd1c, 0xbd1e, 0xbd40, 0xbd70, 0xbe00, 0xbe16,
+ 0xbe20, 0xbe2d,
+};
+
static const u32 a6xx_gmu_cx_rscc_registers[] = {
/* GPU RSCC */
0x008c, 0x008c, 0x0101, 0x0102, 0x0340, 0x0342, 0x0344, 0x0347,
@@ -386,6 +400,9 @@ static const struct a6xx_registers a6xx_gmu_reglist[] = {
REGS(a6xx_gmu_gx_registers, 0, 0),
};
+static const struct a6xx_registers a6xx_gpucc_reg = REGS(a6xx_gmu_gpucc_registers, 0, 0);
+static const struct a6xx_registers a621_gpucc_reg = REGS(a621_gmu_gpucc_registers, 0, 0);
+
static u32 a6xx_get_cp_roq_size(struct msm_gpu *gpu);
static u32 a7xx_get_cp_roq_size(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
index 2fd4e39f618f..9b5e27d2373c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
@@ -146,7 +146,7 @@ void a6xx_preempt_irq(struct msm_gpu *gpu)
return;
/* Delete the preemption watchdog timer */
- del_timer(&a6xx_gpu->preempt_timer);
+ timer_delete(&a6xx_gpu->preempt_timer);
/*
* The hardware should be setting the stop bit of CP_CONTEXT_SWITCH_CNTL
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 1238f3265978..26db1f4b5fb9 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -292,7 +292,7 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
if (do_devcoredump) {
/* Turn off the hangcheck timer to keep it from bothering us */
- del_timer(&gpu->hangcheck_timer);
+ timer_delete(&gpu->hangcheck_timer);
gpu->fault_info.ttbr0 = info->ttbr0;
gpu->fault_info.iova = iova;
@@ -883,6 +883,16 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
drm_printf(p, " - dir=%s\n", info->flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ");
drm_printf(p, " - type=%s\n", info->type);
drm_printf(p, " - source=%s\n", info->block);
+
+ /* Information extracted from what we think are the current
+ * pgtables. Hopefully the TTBR0 matches what we've extracted
+ * from the SMMU registers in smmu_info!
+ */
+ drm_puts(p, "pgtable-fault-info:\n");
+ drm_printf(p, " - ttbr0: %.16llx\n", (u64)info->pgtbl_ttbr0);
+ drm_printf(p, " - asid: %d\n", info->asid);
+ drm_printf(p, " - ptes: %.16llx %.16llx %.16llx %.16llx\n",
+ info->ptes[0], info->ptes[1], info->ptes[2], info->ptes[3]);
}
drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index dcf454629ce0..92caba3584da 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -442,6 +442,11 @@ static inline int adreno_is_a621(const struct adreno_gpu *gpu)
return gpu->info->chip_ids[0] == 0x06020100;
}
+static inline int adreno_is_a623(const struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x06020300;
+}
+
static inline int adreno_is_a630(const struct adreno_gpu *gpu)
{
return adreno_is_revn(gpu, 630);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
index bcb39807fe61..6ac97c378056 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
@@ -343,8 +343,8 @@ static const struct dpu_wb_cfg sm8650_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
.vbif_idx = VBIF_RT,
.maxlinewidth = 4096,
@@ -452,6 +452,7 @@ const struct dpu_mdss_cfg dpu_sm8650_cfg = {
.mdss_ver = &sm8650_mdss_ver,
.caps = &sm8650_dpu_caps,
.mdp = &sm8650_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8650_ctl),
.ctl = sm8650_ctl,
.sspp_count = ARRAY_SIZE(sm8650_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
index ab3dfb0b374e..ad60089f18ea 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
@@ -132,7 +132,6 @@ static const struct dpu_intf_cfg msm8937_intf[] = {
.prog_fetch_lines_worst_case = 14,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x268,
@@ -141,7 +140,6 @@ static const struct dpu_intf_cfg msm8937_intf[] = {
.prog_fetch_lines_worst_case = 14,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
- .intr_tear_rd_ptr = -1,
},
};
@@ -190,6 +188,7 @@ const struct dpu_mdss_cfg dpu_msm8937_cfg = {
.mdss_ver = &msm8937_mdss_ver,
.caps = &msm8937_dpu_caps,
.mdp = msm8937_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(msm8937_ctl),
.ctl = msm8937_ctl,
.sspp_count = ARRAY_SIZE(msm8937_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
index 6bdaecca6761..a1cf89a0a42d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
@@ -118,7 +118,6 @@ static const struct dpu_intf_cfg msm8917_intf[] = {
.prog_fetch_lines_worst_case = 14,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- .intr_tear_rd_ptr = -1,
},
};
@@ -167,6 +166,7 @@ const struct dpu_mdss_cfg dpu_msm8917_cfg = {
.mdss_ver = &msm8917_mdss_ver,
.caps = &msm8917_dpu_caps,
.mdp = msm8917_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(msm8917_ctl),
.ctl = msm8917_ctl,
.sspp_count = ARRAY_SIZE(msm8917_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
index 14f36ea6ad0e..eea9b80e2287 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
@@ -131,7 +131,6 @@ static const struct dpu_intf_cfg msm8953_intf[] = {
.prog_fetch_lines_worst_case = 14,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x268,
@@ -140,7 +139,6 @@ static const struct dpu_intf_cfg msm8953_intf[] = {
.prog_fetch_lines_worst_case = 14,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x268,
@@ -149,7 +147,6 @@ static const struct dpu_intf_cfg msm8953_intf[] = {
.prog_fetch_lines_worst_case = 14,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
- .intr_tear_rd_ptr = -1,
},
};
@@ -198,6 +195,7 @@ const struct dpu_mdss_cfg dpu_msm8953_cfg = {
.mdss_ver = &msm8953_mdss_ver,
.caps = &msm8953_dpu_caps,
.mdp = msm8953_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(msm8953_ctl),
.ctl = msm8953_ctl,
.sspp_count = ARRAY_SIZE(msm8953_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
index 491f6f5827d1..ae18a354e5d2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
@@ -241,7 +241,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = {
.prog_fetch_lines_worst_case = 25,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x268,
@@ -250,7 +249,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = {
.prog_fetch_lines_worst_case = 25,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x268,
@@ -259,7 +257,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = {
.prog_fetch_lines_worst_case = 25,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x6b800, .len = 0x268,
@@ -267,7 +264,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = {
.prog_fetch_lines_worst_case = 25,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
- .intr_tear_rd_ptr = -1,
},
};
@@ -316,6 +312,7 @@ const struct dpu_mdss_cfg dpu_msm8996_cfg = {
.mdss_ver = &msm8996_mdss_ver,
.caps = &msm8996_dpu_caps,
.mdp = msm8996_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(msm8996_ctl),
.ctl = msm8996_ctl,
.sspp_count = ARRAY_SIZE(msm8996_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
index 64c94e919a69..746474679ef5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
@@ -302,6 +302,7 @@ const struct dpu_mdss_cfg dpu_msm8998_cfg = {
.mdss_ver = &msm8998_mdss_ver,
.caps = &msm8998_dpu_caps,
.mdp = &msm8998_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(msm8998_ctl),
.ctl = msm8998_ctl,
.sspp_count = ARRAY_SIZE(msm8998_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
index 424815e7fb7d..bb89da0a481d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
@@ -202,7 +202,6 @@ static const struct dpu_intf_cfg sdm660_intf[] = {
.prog_fetch_lines_worst_case = 21,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x280,
@@ -211,7 +210,6 @@ static const struct dpu_intf_cfg sdm660_intf[] = {
.prog_fetch_lines_worst_case = 21,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x280,
@@ -220,7 +218,6 @@ static const struct dpu_intf_cfg sdm660_intf[] = {
.prog_fetch_lines_worst_case = 21,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
- .intr_tear_rd_ptr = -1,
},
};
@@ -269,6 +266,7 @@ const struct dpu_mdss_cfg dpu_sdm660_cfg = {
.mdss_ver = &sdm660_mdss_ver,
.caps = &sdm660_dpu_caps,
.mdp = &sdm660_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(sdm660_ctl),
.ctl = sdm660_ctl,
.sspp_count = ARRAY_SIZE(sdm660_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
index df01227fc364..7caf876ca3e3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
@@ -147,7 +147,6 @@ static const struct dpu_intf_cfg sdm630_intf[] = {
.prog_fetch_lines_worst_case = 21,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x280,
@@ -156,7 +155,6 @@ static const struct dpu_intf_cfg sdm630_intf[] = {
.prog_fetch_lines_worst_case = 21,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- .intr_tear_rd_ptr = -1,
},
};
@@ -205,6 +203,7 @@ const struct dpu_mdss_cfg dpu_sdm630_cfg = {
.mdss_ver = &sdm630_mdss_ver,
.caps = &sdm630_dpu_caps,
.mdp = &sdm630_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(sdm630_ctl),
.ctl = sdm630_ctl,
.sspp_count = ARRAY_SIZE(sdm630_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
index 72bd4f7e9e50..ab7b4822ca63 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
@@ -319,6 +319,7 @@ const struct dpu_mdss_cfg dpu_sdm845_cfg = {
.mdss_ver = &sdm845_mdss_ver,
.caps = &sdm845_dpu_caps,
.mdp = &sdm845_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(sdm845_ctl),
.ctl = sdm845_ctl,
.sspp_count = ARRAY_SIZE(sdm845_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
index daef07924886..c2fde980fb52 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
@@ -132,6 +132,7 @@ const struct dpu_mdss_cfg dpu_sdm670_cfg = {
.mdss_ver = &sdm670_mdss_ver,
.caps = &sdm845_dpu_caps,
.mdp = &sdm670_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(sdm845_ctl),
.ctl = sdm845_ctl,
.sspp_count = ARRAY_SIZE(sdm670_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
index 36cc9dbc00b5..979527d98fbc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -298,8 +298,8 @@ static const struct dpu_wb_cfg sm8150_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -388,6 +388,7 @@ const struct dpu_mdss_cfg dpu_sm8150_cfg = {
.mdss_ver = &sm8150_mdss_ver,
.caps = &sm8150_dpu_caps,
.mdp = &sm8150_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8150_ctl),
.ctl = sm8150_ctl,
.sspp_count = ARRAY_SIZE(sm8150_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index e8eacdb47967..d76b8992a6c1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -305,8 +305,8 @@ static const struct dpu_wb_cfg sc8180x_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -414,6 +414,7 @@ const struct dpu_mdss_cfg dpu_sc8180x_cfg = {
.mdss_ver = &sc8180x_mdss_ver,
.caps = &sc8180x_dpu_caps,
.mdp = &sc8180x_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sc8180x_ctl),
.ctl = sc8180x_ctl,
.sspp_count = ARRAY_SIZE(sc8180x_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
index 2fe674d1e059..83db11339b29 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
@@ -261,8 +261,8 @@ static const struct dpu_wb_cfg sm7150_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -309,6 +309,7 @@ const struct dpu_mdss_cfg dpu_sm7150_cfg = {
.mdss_ver = &sm7150_mdss_ver,
.caps = &sm7150_dpu_caps,
.mdp = &sm7150_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm7150_ctl),
.ctl = sm7150_ctl,
.sspp_count = ARRAY_SIZE(sm7150_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
index d761ed705bac..da11830d4407 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
@@ -27,6 +27,7 @@ static const struct dpu_mdp_cfg sm6150_mdp = {
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
@@ -162,6 +163,21 @@ static const struct dpu_pingpong_cfg sm6150_pp[] = {
},
};
+static const struct dpu_wb_cfg sm6150_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 2160,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
static const struct dpu_intf_cfg sm6150_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -232,6 +248,7 @@ const struct dpu_mdss_cfg dpu_sm6150_cfg = {
.mdss_ver = &sm6150_mdss_ver,
.caps = &sm6150_dpu_caps,
.mdp = &sm6150_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm6150_ctl),
.ctl = sm6150_ctl,
.sspp_count = ARRAY_SIZE(sm6150_sspp),
@@ -242,6 +259,8 @@ const struct dpu_mdss_cfg dpu_sm6150_cfg = {
.dspp = sm6150_dspp,
.pingpong_count = ARRAY_SIZE(sm6150_pp),
.pingpong = sm6150_pp,
+ .wb_count = ARRAY_SIZE(sm6150_wb),
+ .wb = sm6150_wb,
.intf_count = ARRAY_SIZE(sm6150_intf),
.intf = sm6150_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
index 76f60a2df7a8..d3d3a34d0b45 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
@@ -145,8 +145,8 @@ static const struct dpu_wb_cfg sm6125_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -216,6 +216,7 @@ const struct dpu_mdss_cfg dpu_sm6125_cfg = {
.mdss_ver = &sm6125_mdss_ver,
.caps = &sm6125_dpu_caps,
.mdp = &sm6125_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm6125_ctl),
.ctl = sm6125_ctl,
.sspp_count = ARRAY_SIZE(sm6125_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
index e8916ae826a6..47e01c3c242f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
@@ -386,7 +386,7 @@ const struct dpu_mdss_cfg dpu_sm8250_cfg = {
.mdss_ver = &sm8250_mdss_ver,
.caps = &sm8250_dpu_caps,
.mdp = &sm8250_mdp,
- .cdm = &sc7280_cdm,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8250_ctl),
.ctl = sm8250_ctl,
.sspp_count = ARRAY_SIZE(sm8250_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
index 7382ebb6e5b2..040c94c0bb66 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
@@ -157,8 +157,8 @@ static const struct dpu_wb_cfg sc7180_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -204,6 +204,7 @@ const struct dpu_mdss_cfg dpu_sc7180_cfg = {
.mdss_ver = &sc7180_mdss_ver,
.caps = &sc7180_dpu_caps,
.mdp = &sc7180_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sc7180_ctl),
.ctl = sc7180_ctl,
.sspp_count = ARRAY_SIZE(sc7180_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
index 0502cee2f116..397278ba999b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
@@ -151,8 +151,8 @@ static const struct dpu_wb_cfg sm6350_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -222,6 +222,7 @@ const struct dpu_mdss_cfg dpu_sm6350_cfg = {
.mdss_ver = &sm6350_mdss_ver,
.caps = &sm6350_dpu_caps,
.mdp = &sm6350_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm6350_ctl),
.ctl = sm6350_ctl,
.sspp_count = ARRAY_SIZE(sm6350_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
index f7c08e89c882..0c860e804cab 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
@@ -305,8 +305,8 @@ static const struct dpu_wb_cfg sm8350_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -396,6 +396,7 @@ const struct dpu_mdss_cfg dpu_sm8350_cfg = {
.mdss_ver = &sm8350_mdss_ver,
.caps = &sm8350_dpu_caps,
.mdp = &sm8350_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8350_ctl),
.ctl = sm8350_ctl,
.sspp_count = ARRAY_SIZE(sm8350_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
index 2f153e0b5c6a..e9625c48c567 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
@@ -248,7 +248,7 @@ const struct dpu_mdss_cfg dpu_sc7280_cfg = {
.mdss_ver = &sc7280_mdss_ver,
.caps = &sc7280_dpu_caps,
.mdp = &sc7280_mdp,
- .cdm = &sc7280_cdm,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sc7280_ctl),
.ctl = sc7280_ctl,
.sspp_count = ARRAY_SIZE(sc7280_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
index 0d143e390eca..fcee1c3665f8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
@@ -435,6 +435,7 @@ const struct dpu_mdss_cfg dpu_sc8280xp_cfg = {
.mdss_ver = &sc8280xp_mdss_ver,
.caps = &sc8280xp_dpu_caps,
.mdp = &sc8280xp_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sc8280xp_ctl),
.ctl = sc8280xp_ctl,
.sspp_count = ARRAY_SIZE(sc8280xp_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
index 08742472f9cc..19b2ee8bbd5f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
@@ -321,8 +321,8 @@ static const struct dpu_wb_cfg sm8450_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -412,6 +412,7 @@ const struct dpu_mdss_cfg dpu_sm8450_cfg = {
.mdss_ver = &sm8450_mdss_ver,
.caps = &sm8450_dpu_caps,
.mdp = &sm8450_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8450_ctl),
.ctl = sm8450_ctl,
.sspp_count = ARRAY_SIZE(sm8450_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
index 76ec72a32378..4d96ce71746f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
@@ -458,7 +458,7 @@ const struct dpu_mdss_cfg dpu_sa8775p_cfg = {
.mdss_ver = &sa8775p_mdss_ver,
.caps = &sa8775p_dpu_caps,
.mdp = &sa8775p_mdp,
- .cdm = &sc7280_cdm,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sa8775p_ctl),
.ctl = sa8775p_ctl,
.sspp_count = ARRAY_SIZE(sa8775p_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
index 4d3787fceb72..24f988465bf6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
@@ -317,8 +317,8 @@ static const struct dpu_wb_cfg sm8550_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
.vbif_idx = VBIF_RT,
.maxlinewidth = 4096,
@@ -407,6 +407,7 @@ const struct dpu_mdss_cfg dpu_sm8550_cfg = {
.mdss_ver = &sm8550_mdss_ver,
.caps = &sm8550_dpu_caps,
.mdp = &sm8550_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8550_ctl),
.ctl = sm8550_ctl,
.sspp_count = ARRAY_SIZE(sm8550_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
index 6b112e3d17da..6417baa84f82 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
@@ -317,8 +317,8 @@ static const struct dpu_wb_cfg x1e80100_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
.vbif_idx = VBIF_RT,
.maxlinewidth = 4096,
@@ -453,6 +453,7 @@ const struct dpu_mdss_cfg dpu_x1e80100_cfg = {
.mdss_ver = &x1e80100_mdss_ver,
.caps = &x1e80100_dpu_caps,
.mdp = &x1e80100_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(x1e80100_ctl),
.ctl = x1e80100_ctl,
.sspp_count = ARRAY_SIZE(x1e80100_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 6f0a37f954fe..0fb5789c60d0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -118,26 +118,38 @@ static void _dpu_core_perf_calc_crtc(const struct dpu_core_perf *core_perf,
return;
}
- memset(perf, 0, sizeof(struct dpu_core_perf_params));
-
- if (core_perf->perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
- perf->bw_ctl = 0;
- perf->max_per_pipe_ib = 0;
- perf->core_clk_rate = 0;
- } else if (core_perf->perf_tune.mode == DPU_PERF_MODE_FIXED) {
- perf->bw_ctl = core_perf->fix_core_ab_vote;
- perf->max_per_pipe_ib = core_perf->fix_core_ib_vote;
- perf->core_clk_rate = core_perf->fix_core_clk_rate;
- } else {
- perf->bw_ctl = _dpu_core_perf_calc_bw(perf_cfg, crtc);
- perf->max_per_pipe_ib = perf_cfg->min_dram_ib;
- perf->core_clk_rate = _dpu_core_perf_calc_clk(perf_cfg, crtc, state);
- }
-
+ perf->bw_ctl = _dpu_core_perf_calc_bw(perf_cfg, crtc);
+ perf->max_per_pipe_ib = perf_cfg->min_dram_ib;
+ perf->core_clk_rate = _dpu_core_perf_calc_clk(perf_cfg, crtc, state);
DRM_DEBUG_ATOMIC(
- "crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu\n",
+ "crtc=%d clk_rate=%llu core_ib=%u core_ab=%u\n",
crtc->base.id, perf->core_clk_rate,
- perf->max_per_pipe_ib, perf->bw_ctl);
+ perf->max_per_pipe_ib,
+ (u32)DIV_ROUND_UP_ULL(perf->bw_ctl, 1000));
+}
+
+static void dpu_core_perf_aggregate(struct drm_device *ddev,
+ enum dpu_crtc_client_type curr_client_type,
+ struct dpu_core_perf_params *perf)
+{
+ struct dpu_crtc_state *dpu_cstate;
+ struct drm_crtc *tmp_crtc;
+
+ drm_for_each_crtc(tmp_crtc, ddev) {
+ if (tmp_crtc->enabled &&
+ curr_client_type == dpu_crtc_get_client_type(tmp_crtc)) {
+ dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
+
+ perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
+ dpu_cstate->new_perf.max_per_pipe_ib);
+
+ perf->bw_ctl += dpu_cstate->new_perf.bw_ctl;
+
+ DRM_DEBUG_ATOMIC("crtc=%d bw=%llu\n",
+ tmp_crtc->base.id,
+ dpu_cstate->new_perf.bw_ctl);
+ }
+ }
}
/**
@@ -150,11 +162,9 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
u32 bw, threshold;
- u64 bw_sum_of_intfs = 0;
- enum dpu_crtc_client_type curr_client_type;
struct dpu_crtc_state *dpu_cstate;
- struct drm_crtc *tmp_crtc;
struct dpu_kms *kms;
+ struct dpu_core_perf_params perf = { 0 };
if (!crtc || !state) {
DPU_ERROR("invalid crtc\n");
@@ -172,80 +182,56 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
/* obtain new values */
_dpu_core_perf_calc_crtc(&kms->perf, crtc, state, &dpu_cstate->new_perf);
- bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl;
- curr_client_type = dpu_crtc_get_client_type(crtc);
-
- drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (tmp_crtc->enabled &&
- dpu_crtc_get_client_type(tmp_crtc) == curr_client_type &&
- tmp_crtc != crtc) {
- struct dpu_crtc_state *tmp_cstate =
- to_dpu_crtc_state(tmp_crtc->state);
-
- DRM_DEBUG_ATOMIC("crtc:%d bw:%llu ctrl:%d\n",
- tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl,
- tmp_cstate->bw_control);
+ dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf);
- bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
- }
-
- /* convert bandwidth to kb */
- bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
- DRM_DEBUG_ATOMIC("calculated bandwidth=%uk\n", bw);
+ /* convert bandwidth to kb */
+ bw = DIV_ROUND_UP_ULL(perf.bw_ctl, 1000);
+ DRM_DEBUG_ATOMIC("calculated bandwidth=%uk\n", bw);
- threshold = kms->perf.perf_cfg->max_bw_high;
+ threshold = kms->perf.perf_cfg->max_bw_high;
- DRM_DEBUG_ATOMIC("final threshold bw limit = %d\n", threshold);
+ DRM_DEBUG_ATOMIC("final threshold bw limit = %d\n", threshold);
- if (!threshold) {
- DPU_ERROR("no bandwidth limits specified\n");
- return -E2BIG;
- } else if (bw > threshold) {
- DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
- threshold);
- return -E2BIG;
- }
+ if (!threshold) {
+ DPU_ERROR("no bandwidth limits specified\n");
+ return -E2BIG;
+ } else if (bw > threshold) {
+ DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
+ threshold);
+ return -E2BIG;
}
return 0;
}
static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
- struct drm_crtc *crtc)
+ struct drm_crtc *crtc)
{
struct dpu_core_perf_params perf = { 0 };
- enum dpu_crtc_client_type curr_client_type
- = dpu_crtc_get_client_type(crtc);
- struct drm_crtc *tmp_crtc;
- struct dpu_crtc_state *dpu_cstate;
int i, ret = 0;
- u64 avg_bw;
+ u32 avg_bw;
+ u32 peak_bw;
if (!kms->num_paths)
return 0;
- drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (tmp_crtc->enabled &&
- curr_client_type ==
- dpu_crtc_get_client_type(tmp_crtc)) {
- dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
-
- perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
- dpu_cstate->new_perf.max_per_pipe_ib);
-
- perf.bw_ctl += dpu_cstate->new_perf.bw_ctl;
+ if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
+ avg_bw = 0;
+ peak_bw = 0;
+ } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
+ avg_bw = kms->perf.fix_core_ab_vote;
+ peak_bw = kms->perf.fix_core_ib_vote;
+ } else {
+ dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf);
- DRM_DEBUG_ATOMIC("crtc=%d bw=%llu paths:%d\n",
- tmp_crtc->base.id,
- dpu_cstate->new_perf.bw_ctl, kms->num_paths);
- }
+ avg_bw = div_u64(perf.bw_ctl, 1000); /*Bps_to_icc*/
+ peak_bw = perf.max_per_pipe_ib;
}
- avg_bw = perf.bw_ctl;
- do_div(avg_bw, (kms->num_paths * 1000)); /*Bps_to_icc*/
+ avg_bw /= kms->num_paths;
for (i = 0; i < kms->num_paths; i++)
- icc_set_bw(kms->path[i], avg_bw, perf.max_per_pipe_ib);
+ icc_set_bw(kms->path[i], avg_bw, peak_bw);
return ret;
}
@@ -476,9 +462,9 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
&perf->core_clk_rate);
debugfs_create_u32("enable_bw_release", 0600, entry,
(u32 *)&perf->enable_bw_release);
- debugfs_create_u32("threshold_low", 0400, entry,
+ debugfs_create_u32("low_core_ab", 0400, entry,
(u32 *)&perf->perf_cfg->max_bw_low);
- debugfs_create_u32("threshold_high", 0400, entry,
+ debugfs_create_u32("max_core_ab", 0400, entry,
(u32 *)&perf->perf_cfg->max_bw_high);
debugfs_create_u32("min_core_ib", 0400, entry,
(u32 *)&perf->perf_cfg->min_core_ib);
@@ -490,9 +476,9 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
(u32 *)perf, &dpu_core_perf_mode_fops);
debugfs_create_u64("fix_core_clk_rate", 0600, entry,
&perf->fix_core_clk_rate);
- debugfs_create_u64("fix_core_ib_vote", 0600, entry,
+ debugfs_create_u32("fix_core_ib_vote", 0600, entry,
&perf->fix_core_ib_vote);
- debugfs_create_u64("fix_core_ab_vote", 0600, entry,
+ debugfs_create_u32("fix_core_ab_vote", 0600, entry,
&perf->fix_core_ab_vote);
return 0;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
index 451bf8021114..d2f21d34e501 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -19,7 +19,7 @@
* @core_clk_rate: core clock rate request
*/
struct dpu_core_perf_params {
- u64 max_per_pipe_ib;
+ u32 max_per_pipe_ib;
u64 bw_ctl;
u64 core_clk_rate;
};
@@ -40,8 +40,8 @@ struct dpu_core_perf_tune {
* @perf_tune: debug control for performance tuning
* @enable_bw_release: debug control for bandwidth release
* @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
- * @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
- * @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
+ * @fix_core_ib_vote: fixed core ib vote in KBps used in mode 2
+ * @fix_core_ab_vote: fixed core ab vote in KBps used in mode 2
*/
struct dpu_core_perf {
const struct dpu_perf_cfg *perf_cfg;
@@ -50,8 +50,8 @@ struct dpu_core_perf {
struct dpu_core_perf_tune perf_tune;
u32 enable_bw_release;
u64 fix_core_clk_rate;
- u64 fix_core_ib_vote;
- u64 fix_core_ab_vote;
+ u32 fix_core_ib_vote;
+ u32 fix_core_ab_vote;
};
int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index e5dcd41a361f..0714936d8835 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -953,6 +953,45 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
return rc;
}
+static int dpu_crtc_kickoff_clone_mode(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_encoder *rt_encoder = NULL, *wb_encoder = NULL;
+ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+
+ /* Find encoder for real time display */
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+ wb_encoder = encoder;
+ else
+ rt_encoder = encoder;
+ }
+
+ if (!rt_encoder || !wb_encoder) {
+ DRM_DEBUG_ATOMIC("real time or wb encoder not found\n");
+ return -EINVAL;
+ }
+
+ dpu_encoder_prepare_for_kickoff(wb_encoder);
+ dpu_encoder_prepare_for_kickoff(rt_encoder);
+
+ dpu_vbif_clear_errors(dpu_kms);
+
+ /*
+ * Kickoff real time encoder last as it's the encoder that
+ * will do the flush
+ */
+ dpu_encoder_kickoff(wb_encoder);
+ dpu_encoder_kickoff(rt_encoder);
+
+ /* Don't start frame done timers until the kickoffs have finished */
+ dpu_encoder_start_frame_done_timer(wb_encoder);
+ dpu_encoder_start_frame_done_timer(rt_encoder);
+
+ return 0;
+}
+
/**
* dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
* @crtc: Pointer to drm crtc object
@@ -981,13 +1020,27 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
goto end;
}
}
- /*
- * Encoder will flush/start now, unless it has a tx pending. If so, it
- * may delay and flush at an irq event (e.g. ppdone)
- */
- drm_for_each_encoder_mask(encoder, crtc->dev,
- crtc->state->encoder_mask)
- dpu_encoder_prepare_for_kickoff(encoder);
+
+ if (drm_crtc_in_clone_mode(crtc->state)) {
+ if (dpu_crtc_kickoff_clone_mode(crtc))
+ goto end;
+ } else {
+ /*
+ * Encoder will flush/start now, unless it has a tx pending.
+ * If so, it may delay and flush at an irq event (e.g. ppdone)
+ */
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask)
+ dpu_encoder_prepare_for_kickoff(encoder);
+
+ dpu_vbif_clear_errors(dpu_kms);
+
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask) {
+ dpu_encoder_kickoff(encoder);
+ dpu_encoder_start_frame_done_timer(encoder);
+ }
+ }
if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
/* acquire bandwidth and other resources */
@@ -997,11 +1050,6 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
dpu_crtc->play_count++;
- dpu_vbif_clear_errors(dpu_kms);
-
- drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
- dpu_encoder_kickoff(encoder);
-
reinit_completion(&dpu_crtc->frame_done_comp);
end:
@@ -1230,6 +1278,151 @@ done:
return ret;
}
+#define MAX_CHANNELS_PER_CRTC 2
+#define MAX_HDISPLAY_SPLIT 1080
+
+static struct msm_display_topology dpu_crtc_get_topology(
+ struct drm_crtc *crtc,
+ struct dpu_kms *dpu_kms,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct msm_display_topology topology = {0};
+ struct drm_encoder *drm_enc;
+
+ drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask)
+ dpu_encoder_update_topology(drm_enc, &topology, crtc_state->state,
+ &crtc_state->adjusted_mode);
+
+ topology.cwb_enabled = drm_crtc_in_clone_mode(crtc_state);
+
+ /*
+ * Datapath topology selection
+ *
+ * Dual display
+ * 2 LM, 2 INTF ( Split display using 2 interfaces)
+ *
+ * Single display
+ * 1 LM, 1 INTF
+ * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
+ *
+ * If DSC is enabled, use 2 LMs for 2:2:1 topology
+ *
+ * Add dspps to the reservation requirements if ctm is requested
+ *
+ * Only hardcode num_lm to 2 for cases where num_intf == 2 and CWB is not
+ * enabled. This is because in cases where CWB is enabled, num_intf will
+ * count both the WB and real-time phys encoders.
+ *
+ * For non-DSC CWB usecases, have the num_lm be decided by the
+ * (mode->hdisplay > MAX_HDISPLAY_SPLIT) check.
+ */
+
+ if (topology.num_intf == 2 && !topology.cwb_enabled)
+ topology.num_lm = 2;
+ else if (topology.num_dsc == 2)
+ topology.num_lm = 2;
+ else if (dpu_kms->catalog->caps->has_3d_merge)
+ topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
+ else
+ topology.num_lm = 1;
+
+ if (crtc_state->ctm)
+ topology.num_dspp = topology.num_lm;
+
+ return topology;
+}
+
+static int dpu_crtc_assign_resources(struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_CRTC];
+ struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_CRTC];
+ struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_CRTC];
+ int i, num_lm, num_ctl, num_dspp;
+ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ struct dpu_global_state *global_state;
+ struct dpu_crtc_state *cstate;
+ struct msm_display_topology topology;
+ int ret;
+
+ /*
+ * Release and Allocate resources on every modeset
+ */
+ global_state = dpu_kms_get_global_state(crtc_state->state);
+ if (IS_ERR(global_state))
+ return PTR_ERR(global_state);
+
+ dpu_rm_release(global_state, crtc);
+
+ if (!crtc_state->enable)
+ return 0;
+
+ topology = dpu_crtc_get_topology(crtc, dpu_kms, crtc_state);
+ ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
+ crtc_state->crtc, &topology);
+ if (ret)
+ return ret;
+
+ cstate = to_dpu_crtc_state(crtc_state);
+
+ num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ crtc_state->crtc,
+ DPU_HW_BLK_CTL, hw_ctl,
+ ARRAY_SIZE(hw_ctl));
+ num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ crtc_state->crtc,
+ DPU_HW_BLK_LM, hw_lm,
+ ARRAY_SIZE(hw_lm));
+ num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ crtc_state->crtc,
+ DPU_HW_BLK_DSPP, hw_dspp,
+ ARRAY_SIZE(hw_dspp));
+
+ for (i = 0; i < num_lm; i++) {
+ int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
+
+ cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
+ cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
+ if (i < num_dspp)
+ cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
+ }
+
+ cstate->num_mixers = num_lm;
+
+ return 0;
+}
+
+/**
+ * dpu_crtc_check_mode_changed: check if full modeset is required
+ * @old_crtc_state: Previous CRTC state
+ * @new_crtc_state: Corresponding CRTC state to be checked
+ *
+ * Check if the changes in the object properties demand a full modeset.
+ */
+int dpu_crtc_check_mode_changed(struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state)
+{
+ struct drm_encoder *drm_enc;
+ struct drm_crtc *crtc = new_crtc_state->crtc;
+ bool clone_mode_enabled = drm_crtc_in_clone_mode(old_crtc_state);
+ bool clone_mode_requested = drm_crtc_in_clone_mode(new_crtc_state);
+
+ DRM_DEBUG_ATOMIC("%d\n", crtc->base.id);
+
+ /* there might be cases where encoder needs a modeset too */
+ drm_for_each_encoder_mask(drm_enc, crtc->dev, new_crtc_state->encoder_mask) {
+ if (dpu_encoder_needs_modeset(drm_enc, new_crtc_state->state))
+ new_crtc_state->mode_changed = true;
+ }
+
+ if ((clone_mode_requested && !clone_mode_enabled) ||
+ (!clone_mode_requested && clone_mode_enabled))
+ new_crtc_state->mode_changed = true;
+
+ return 0;
+}
+
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -1245,6 +1438,13 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);
+ /* don't reallocate resources if only ACTIVE has been changed */
+ if (crtc_state->mode_changed || crtc_state->connectors_changed) {
+ rc = dpu_crtc_assign_resources(crtc, crtc_state);
+ if (rc < 0)
+ return rc;
+ }
+
if (dpu_use_virtual_planes &&
(crtc_state->planes_changed || crtc_state->zpos_changed)) {
rc = dpu_crtc_reassign_planes(crtc, crtc_state);
@@ -1262,10 +1462,6 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);
- /* force a full mode set if active state changed */
- if (crtc_state->active_changed)
- crtc_state->mode_changed = true;
-
if (cstate->num_mixers) {
rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state);
if (rc)
@@ -1484,8 +1680,9 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
seq_printf(s, "core_clk_rate: %llu\n",
dpu_crtc->cur_perf.core_clk_rate);
- seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
- seq_printf(s, "max_per_pipe_ib: %llu\n",
+ seq_printf(s, "bw_ctl: %uk\n",
+ (u32)DIV_ROUND_UP_ULL(dpu_crtc->cur_perf.bw_ctl, 1000));
+ seq_printf(s, "max_per_pipe_ib: %u\n",
dpu_crtc->cur_perf.max_per_pipe_ib);
return 0;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index 0b148f3ce0d7..94392b9b9245 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -239,6 +239,9 @@ static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
return crtc ? atomic_read(&to_dpu_crtc(crtc)->frame_pending) : -EINVAL;
}
+int dpu_crtc_check_mode_changed(struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state);
+
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
void dpu_crtc_vblank_callback(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 48e6e8d74c85..862e9e6bf0a5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2013 Red Hat
* Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Author: Rob Clark <robdclark@gmail.com>
*/
@@ -24,6 +24,7 @@
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_ctl.h"
+#include "dpu_hw_cwb.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_dsc.h"
#include "dpu_hw_merge3d.h"
@@ -58,8 +59,6 @@
#define IDLE_SHORT_TIMEOUT 1
-#define MAX_HDISPLAY_SPLIT 1080
-
/* timeout in frames waiting for frame done */
#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
@@ -135,8 +134,12 @@ enum dpu_enc_rc_states {
* @cur_slave: As above but for the slave encoder.
* @hw_pp: Handle to the pingpong blocks used for the display. No.
* pingpong blocks can be different than num_phys_encs.
+ * @hw_cwb: Handle to the CWB muxes used for concurrent writeback
+ * display. Number of CWB muxes can be different than
+ * num_phys_encs.
* @hw_dsc: Handle to the DSC blocks used for the display.
* @dsc_mask: Bitmask of used DSC blocks.
+ * @cwb_mask: Bitmask of used CWB muxes.
* @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
* for partial update right-only cases, such as pingpong
* split where virtual pingpong does not generate IRQs
@@ -179,9 +182,11 @@ struct dpu_encoder_virt {
struct dpu_encoder_phys *cur_master;
struct dpu_encoder_phys *cur_slave;
struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_cwb *hw_cwb[MAX_CHANNELS_PER_ENC];
struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
unsigned int dsc_mask;
+ unsigned int cwb_mask;
bool intfs_swapped;
@@ -622,9 +627,9 @@ bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
if (dpu_enc->phys_encs[i])
intf_count++;
- /* See dpu_encoder_get_topology, we only support 2:2:1 topology */
- if (dpu_enc->dsc)
- num_dsc = 2;
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
+ if (dpu_enc->hw_dsc[i])
+ num_dsc++;
return (num_dsc > 0) && (num_dsc > intf_count);
}
@@ -647,130 +652,51 @@ struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
return NULL;
}
-static struct msm_display_topology dpu_encoder_get_topology(
- struct dpu_encoder_virt *dpu_enc,
- struct dpu_kms *dpu_kms,
- struct drm_display_mode *mode,
- struct drm_crtc_state *crtc_state,
- struct drm_dsc_config *dsc)
+void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
+ struct msm_display_topology *topology,
+ struct drm_atomic_state *state,
+ const struct drm_display_mode *adj_mode)
{
- struct msm_display_topology topology = {0};
- int i, intf_count = 0;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ struct msm_drm_private *priv = dpu_enc->base.dev->dev_private;
+ struct msm_display_info *disp_info = &dpu_enc->disp_info;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_framebuffer *fb;
+ struct drm_dsc_config *dsc;
+
+ int i;
for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
if (dpu_enc->phys_encs[i])
- intf_count++;
+ topology->num_intf++;
- /* Datapath topology selection
- *
- * Dual display
- * 2 LM, 2 INTF ( Split display using 2 interfaces)
- *
- * Single display
- * 1 LM, 1 INTF
- * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
- *
- * Add dspps to the reservation requirements if ctm is requested
- */
- if (intf_count == 2)
- topology.num_lm = 2;
- else if (!dpu_kms->catalog->caps->has_3d_merge)
- topology.num_lm = 1;
- else
- topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
-
- if (crtc_state->ctm)
- topology.num_dspp = topology.num_lm;
-
- topology.num_intf = intf_count;
+ dsc = dpu_encoder_get_dsc_config(drm_enc);
+ /* We only support 2 DSC mode (with 2 LM and 1 INTF) */
if (dsc) {
/*
- * In case of Display Stream Compression (DSC), we would use
- * 2 DSC encoders, 2 layer mixers and 1 interface
- * this is power optimal and can drive up to (including) 4k
- * screens
+ * Use 2 DSC encoders, 2 layer mixers and 1 or 2 interfaces
+ * when Display Stream Compression (DSC) is enabled,
+ * and when enough DSC blocks are available.
+ * This is power-optimal and can drive up to (including) 4k
+ * screens.
*/
- topology.num_dsc = 2;
- topology.num_lm = 2;
- topology.num_intf = 1;
- }
-
- return topology;
-}
-
-static void dpu_encoder_assign_crtc_resources(struct dpu_kms *dpu_kms,
- struct drm_encoder *drm_enc,
- struct dpu_global_state *global_state,
- struct drm_crtc_state *crtc_state)
-{
- struct dpu_crtc_state *cstate;
- struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
- struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
- struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC];
- int num_lm, num_ctl, num_dspp, i;
-
- cstate = to_dpu_crtc_state(crtc_state);
-
- memset(cstate->mixers, 0, sizeof(cstate->mixers));
-
- num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
- num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
- num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
- ARRAY_SIZE(hw_dspp));
-
- for (i = 0; i < num_lm; i++) {
- int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
-
- cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
- cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
- cstate->mixers[i].hw_dspp = i < num_dspp ? to_dpu_hw_dspp(hw_dspp[i]) : NULL;
- }
-
- cstate->num_mixers = num_lm;
-}
-
-static int dpu_encoder_virt_atomic_check(
- struct drm_encoder *drm_enc,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct dpu_encoder_virt *dpu_enc;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
- struct drm_display_mode *adj_mode;
- struct msm_display_topology topology;
- struct msm_display_info *disp_info;
- struct dpu_global_state *global_state;
- struct drm_framebuffer *fb;
- struct drm_dsc_config *dsc;
- int ret = 0;
-
- if (!drm_enc || !crtc_state || !conn_state) {
- DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
- drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
- return -EINVAL;
+ WARN(topology->num_intf > 2,
+ "DSC topology cannot support more than 2 interfaces\n");
+ if (topology->num_intf >= 2 || dpu_kms->catalog->dsc_count >= 2)
+ topology->num_dsc = 2;
+ else
+ topology->num_dsc = 1;
}
- dpu_enc = to_dpu_encoder_virt(drm_enc);
- DPU_DEBUG_ENC(dpu_enc, "\n");
-
- priv = drm_enc->dev->dev_private;
- disp_info = &dpu_enc->disp_info;
- dpu_kms = to_dpu_kms(priv->kms);
- adj_mode = &crtc_state->adjusted_mode;
- global_state = dpu_kms_get_global_state(crtc_state->state);
- if (IS_ERR(global_state))
- return PTR_ERR(global_state);
-
- trace_dpu_enc_atomic_check(DRMID(drm_enc));
-
- dsc = dpu_encoder_get_dsc_config(drm_enc);
-
- topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);
+ connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
+ if (!connector)
+ return;
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (!conn_state)
+ return;
/*
* Use CDM only for writeback or DP at the moment as other interfaces cannot handle it.
@@ -781,34 +707,45 @@ static int dpu_encoder_virt_atomic_check(
fb = conn_state->writeback_job->fb;
if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)))
- topology.needs_cdm = true;
+ topology->num_cdm++;
} else if (disp_info->intf_type == INTF_DP) {
if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode))
- topology.needs_cdm = true;
+ topology->num_cdm++;
}
+}
- if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm)
- crtc_state->mode_changed = true;
- else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm)
- crtc_state->mode_changed = true;
- /*
- * Release and Allocate resources on every modeset
- * Dont allocate when active is false.
- */
- if (drm_atomic_crtc_needs_modeset(crtc_state)) {
- dpu_rm_release(global_state, drm_enc);
+bool dpu_encoder_needs_modeset(struct drm_encoder *drm_enc, struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_framebuffer *fb;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
- if (!crtc_state->active_changed || crtc_state->enable)
- ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
- drm_enc, crtc_state, &topology);
- if (!ret)
- dpu_encoder_assign_crtc_resources(dpu_kms, drm_enc,
- global_state, crtc_state);
- }
+ if (!drm_enc || !state)
+ return false;
- trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
+ connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
+ if (!connector)
+ return false;
- return ret;
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+
+ /*
+ * These checks are duplicated from dpu_encoder_update_topology() since
+ * CRTC and encoder don't hold topology information
+ */
+ if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
+ fb = conn_state->writeback_job->fb;
+ if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb))) {
+ if (!dpu_enc->cur_master->hw_cdm)
+ return true;
+ } else {
+ if (dpu_enc->cur_master->hw_cdm)
+ return true;
+ }
+ }
+
+ return false;
}
static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
@@ -1219,8 +1156,12 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_cwb[MAX_CHANNELS_PER_ENC];
int num_ctl, num_pp, num_dsc;
+ int num_cwb = 0;
+ bool is_cwb_encoder;
unsigned int dsc_mask = 0;
+ unsigned int cwb_mask = 0;
int i;
if (!drm_enc) {
@@ -1233,6 +1174,8 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
+ is_cwb_encoder = drm_crtc_in_clone_mode(crtc_state) &&
+ dpu_enc->disp_info.intf_type == INTF_WB;
global_state = dpu_kms_get_existing_global_state(dpu_kms);
if (IS_ERR_OR_NULL(global_state)) {
@@ -1243,18 +1186,38 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
trace_dpu_enc_mode_set(DRMID(drm_enc));
/* Query resource that have been reserved in atomic check step. */
- num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
- ARRAY_SIZE(hw_pp));
+ if (is_cwb_encoder) {
+ num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->crtc,
+ DPU_HW_BLK_DCWB_PINGPONG,
+ hw_pp, ARRAY_SIZE(hw_pp));
+ num_cwb = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->crtc,
+ DPU_HW_BLK_CWB,
+ hw_cwb, ARRAY_SIZE(hw_cwb));
+ } else {
+ num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->crtc,
+ DPU_HW_BLK_PINGPONG, hw_pp,
+ ARRAY_SIZE(hw_pp));
+ }
+
+ for (i = 0; i < num_cwb; i++) {
+ dpu_enc->hw_cwb[i] = to_dpu_hw_cwb(hw_cwb[i]);
+ cwb_mask |= BIT(dpu_enc->hw_cwb[i]->idx - CWB_0);
+ }
+
+ dpu_enc->cwb_mask = cwb_mask;
+
num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
+ drm_enc->crtc, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
: NULL;
num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_DSC,
+ drm_enc->crtc, DPU_HW_BLK_DSC,
hw_dsc, ARRAY_SIZE(hw_dsc));
for (i = 0; i < num_dsc; i++) {
dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
@@ -1268,7 +1231,7 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
struct dpu_hw_blk *hw_cdm = NULL;
dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_CDM,
+ drm_enc->crtc, DPU_HW_BLK_CDM,
&hw_cdm, 1);
dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL;
}
@@ -1447,7 +1410,7 @@ static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc,
/* after phys waits for frame-done, should be no more frames pending */
if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
- del_timer_sync(&dpu_enc->frame_done_timer);
+ timer_delete_sync(&dpu_enc->frame_done_timer);
}
dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
@@ -1619,7 +1582,7 @@ void dpu_encoder_frame_done_callback(
if (!dpu_enc->frame_busy_mask[0]) {
atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
- del_timer(&dpu_enc->frame_done_timer);
+ timer_delete(&dpu_enc->frame_done_timer);
dpu_encoder_resource_control(drm_enc,
DPU_ENC_RC_EVENT_FRAME_DONE);
@@ -1654,6 +1617,7 @@ static void dpu_encoder_off_work(struct work_struct *work)
static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
struct dpu_hw_ctl *ctl;
int pending_kickoff_cnt;
u32 ret = UINT_MAX;
@@ -1671,6 +1635,15 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+ /* Return early if encoder is writeback and in clone mode */
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL &&
+ dpu_enc->cwb_mask) {
+ DPU_DEBUG("encoder %d skip flush for concurrent writeback encoder\n",
+ DRMID(drm_enc));
+ return;
+ }
+
+
if (extra_flush_bits && ctl->ops.update_pending_flush)
ctl->ops.update_pending_flush(ctl, extra_flush_bits);
@@ -1693,6 +1666,8 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
*/
static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
{
+ struct dpu_encoder_virt *dpu_enc;
+
if (!phys) {
DPU_ERROR("invalid argument(s)\n");
return;
@@ -1703,6 +1678,14 @@ static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
return;
}
+ dpu_enc = to_dpu_encoder_virt(phys->parent);
+
+ if (phys->parent->encoder_type == DRM_MODE_ENCODER_VIRTUAL &&
+ dpu_enc->cwb_mask) {
+ DPU_DEBUG("encoder %d CWB enabled, skipping\n", DRMID(phys->parent));
+ return;
+ }
+
if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
phys->ops.trigger_start(phys);
}
@@ -2020,7 +2003,6 @@ static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_ctl *ctl,
static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
struct drm_dsc_config *dsc)
{
- /* coding only for 2LM, 2enc, 1 dsc config */
struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
@@ -2030,22 +2012,24 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
int dsc_common_mode;
int pic_width;
u32 initial_lines;
+ int num_dsc = 0;
int i;
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
hw_pp[i] = dpu_enc->hw_pp[i];
hw_dsc[i] = dpu_enc->hw_dsc[i];
- if (!hw_pp[i] || !hw_dsc[i]) {
- DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
- return;
- }
+ if (!hw_pp[i] || !hw_dsc[i])
+ break;
+
+ num_dsc++;
}
- dsc_common_mode = 0;
pic_width = dsc->pic_width;
- dsc_common_mode = DSC_MODE_SPLIT_PANEL;
+ dsc_common_mode = 0;
+ if (num_dsc > 1)
+ dsc_common_mode |= DSC_MODE_SPLIT_PANEL;
if (dpu_encoder_use_dsc_merge(enc_master->parent))
dsc_common_mode |= DSC_MODE_MULTIPLEX;
if (enc_master->intf_mode == INTF_MODE_VIDEO)
@@ -2054,14 +2038,10 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
this_frame_slices = pic_width / dsc->slice_width;
intf_ip_w = this_frame_slices * dsc->slice_width;
- /*
- * dsc merge case: when using 2 encoders for the same stream,
- * no. of slices need to be same on both the encoders.
- */
- enc_ip_w = intf_ip_w / 2;
+ enc_ip_w = intf_ip_w / num_dsc;
initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
- for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
+ for (i = 0; i < num_dsc; i++)
dpu_encoder_dsc_pipe_cfg(ctl, hw_dsc[i], hw_pp[i],
dsc, dsc_common_mode, initial_lines);
}
@@ -2135,6 +2115,25 @@ bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
}
/**
+ * dpu_encoder_start_frame_done_timer - Start the encoder frame done timer
+ * @drm_enc: Pointer to drm encoder structure
+ */
+void dpu_encoder_start_frame_done_timer(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ unsigned long timeout_ms;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
+ drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
+
+ atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
+ mod_timer(&dpu_enc->frame_done_timer,
+ jiffies + msecs_to_jiffies(timeout_ms));
+
+}
+
+/**
* dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
* (i.e. ctl flush and start) immediately.
* @drm_enc: encoder pointer
@@ -2143,7 +2142,6 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
- unsigned long timeout_ms;
unsigned int i;
DPU_ATRACE_BEGIN("encoder_kickoff");
@@ -2151,13 +2149,6 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
trace_dpu_enc_kickoff(DRMID(drm_enc));
- timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
- drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
-
- atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
- mod_timer(&dpu_enc->frame_done_timer,
- jiffies + msecs_to_jiffies(timeout_ms));
-
/* All phys encs are ready to go, trigger the kickoff */
_dpu_encoder_kickoff_phys(dpu_enc);
@@ -2183,22 +2174,22 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
memset(&mixer, 0, sizeof(mixer));
/* reset all mixers for this encoder */
- if (phys_enc->hw_ctl->ops.clear_all_blendstages)
- phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
+ if (ctl->ops.clear_all_blendstages)
+ ctl->ops.clear_all_blendstages(ctl);
global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms);
num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state,
- phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
+ phys_enc->parent->crtc, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
for (i = 0; i < num_lm; i++) {
hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
- if (phys_enc->hw_ctl->ops.update_pending_flush_mixer)
- phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
+ if (ctl->ops.update_pending_flush_mixer)
+ ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
/* clear all blendstages */
- if (phys_enc->hw_ctl->ops.setup_blendstage)
- phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
+ if (ctl->ops.setup_blendstage)
+ ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
}
}
@@ -2250,7 +2241,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
- phys_enc->hw_ctl->ops.reset(ctl);
+ ctl->ops.reset(ctl);
dpu_encoder_helper_reset_mixers(phys_enc);
@@ -2265,8 +2256,8 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, PINGPONG_NONE);
/* mark WB flush as pending */
- if (phys_enc->hw_ctl->ops.update_pending_flush_wb)
- phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
+ if (ctl->ops.update_pending_flush_wb)
+ ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
} else {
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk)
@@ -2275,8 +2266,8 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
PINGPONG_NONE);
/* mark INTF flush as pending */
- if (phys_enc->hw_ctl->ops.update_pending_flush_intf)
- phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl,
+ if (ctl->ops.update_pending_flush_intf)
+ ctl->ops.update_pending_flush_intf(ctl,
dpu_enc->phys_encs[i]->hw_intf->idx);
}
}
@@ -2284,12 +2275,15 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
if (phys_enc->hw_pp && phys_enc->hw_pp->ops.setup_dither)
phys_enc->hw_pp->ops.setup_dither(phys_enc->hw_pp, NULL);
+ if (dpu_enc->cwb_mask)
+ dpu_encoder_helper_phys_setup_cwb(phys_enc, false);
+
/* reset the merge 3D HW block */
if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
BLEND_3D_NONE);
- if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
- phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl,
+ if (ctl->ops.update_pending_flush_merge_3d)
+ ctl->ops.update_pending_flush_merge_3d(ctl,
phys_enc->hw_pp->merge_3d->idx);
}
@@ -2297,9 +2291,9 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
if (phys_enc->hw_cdm->ops.bind_pingpong_blk && phys_enc->hw_pp)
phys_enc->hw_cdm->ops.bind_pingpong_blk(phys_enc->hw_cdm,
PINGPONG_NONE);
- if (phys_enc->hw_ctl->ops.update_pending_flush_cdm)
- phys_enc->hw_ctl->ops.update_pending_flush_cdm(phys_enc->hw_ctl,
- phys_enc->hw_cdm->idx);
+ if (ctl->ops.update_pending_flush_cdm)
+ ctl->ops.update_pending_flush_cdm(ctl,
+ phys_enc->hw_cdm->idx);
}
if (dpu_enc->dsc) {
@@ -2310,6 +2304,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
+ intf_cfg.cwb = dpu_enc->cwb_mask;
if (phys_enc->hw_intf)
intf_cfg.intf = phys_enc->hw_intf->idx;
@@ -2327,6 +2322,68 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
ctl->ops.clear_pending_flush(ctl);
}
+void dpu_encoder_helper_phys_setup_cwb(struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+ struct dpu_hw_cwb *hw_cwb;
+ struct dpu_hw_ctl *hw_ctl;
+ struct dpu_hw_cwb_setup_cfg cwb_cfg;
+
+ struct dpu_kms *dpu_kms;
+ struct dpu_global_state *global_state;
+ struct dpu_hw_blk *rt_pp_list[MAX_CHANNELS_PER_ENC];
+ int num_pp;
+
+ if (!phys_enc->hw_wb)
+ return;
+
+ hw_ctl = phys_enc->hw_ctl;
+
+ if (!phys_enc->hw_ctl) {
+ DPU_DEBUG("[wb:%d] no ctl assigned\n",
+ phys_enc->hw_wb->idx - WB_0);
+ return;
+ }
+
+ dpu_kms = phys_enc->dpu_kms;
+ global_state = dpu_kms_get_existing_global_state(dpu_kms);
+ num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ phys_enc->parent->crtc,
+ DPU_HW_BLK_PINGPONG, rt_pp_list,
+ ARRAY_SIZE(rt_pp_list));
+
+ if (num_pp == 0 || num_pp > MAX_CHANNELS_PER_ENC) {
+ DPU_DEBUG_ENC(dpu_enc, "invalid num_pp %d\n", num_pp);
+ return;
+ }
+
+ /*
+ * The CWB mux supports using LM or DSPP as tap points. For now,
+ * always use LM tap point
+ */
+ cwb_cfg.input = INPUT_MODE_LM_OUT;
+
+ for (int i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ hw_cwb = dpu_enc->hw_cwb[i];
+ if (!hw_cwb)
+ continue;
+
+ if (enable) {
+ struct dpu_hw_pingpong *hw_pp =
+ to_dpu_hw_pingpong(rt_pp_list[i]);
+ cwb_cfg.pp_idx = hw_pp->idx;
+ } else {
+ cwb_cfg.pp_idx = PINGPONG_NONE;
+ }
+
+ hw_cwb->ops.config_cwb(hw_cwb, &cwb_cfg);
+
+ if (hw_ctl->ops.update_pending_flush_cwb)
+ hw_ctl->ops.update_pending_flush_cwb(hw_ctl, hw_cwb->idx);
+ }
+}
+
/**
* dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block
* @phys_enc: Pointer to physical encoder
@@ -2513,6 +2570,38 @@ static int dpu_encoder_virt_add_phys_encs(
return 0;
}
+/**
+ * dpu_encoder_get_clones - Calculate the possible_clones for DPU encoder
+ * @drm_enc: DRM encoder pointer
+ * Returns: possible_clones mask
+ */
+uint32_t dpu_encoder_get_clones(struct drm_encoder *drm_enc)
+{
+ struct drm_encoder *curr;
+ int type = drm_enc->encoder_type;
+ uint32_t clone_mask = drm_encoder_mask(drm_enc);
+
+ /*
+ * Set writeback as possible clones of real-time DSI encoders and vice
+ * versa
+ *
+ * Writeback encoders can't be clones of each other and DSI
+ * encoders can't be clones of each other.
+ *
+ * TODO: Add DP encoders as valid possible clones for writeback encoders
+ * (and vice versa) once concurrent writeback has been validated for DP
+ */
+ drm_for_each_encoder(curr, drm_enc->dev) {
+ if ((type == DRM_MODE_ENCODER_VIRTUAL &&
+ curr->encoder_type == DRM_MODE_ENCODER_DSI) ||
+ (type == DRM_MODE_ENCODER_DSI &&
+ curr->encoder_type == DRM_MODE_ENCODER_VIRTUAL))
+ clone_mask |= drm_encoder_mask(curr);
+ }
+
+ return clone_mask;
+}
+
static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
struct dpu_kms *dpu_kms,
struct msm_display_info *disp_info)
@@ -2630,7 +2719,6 @@ static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
.atomic_mode_set = dpu_encoder_virt_atomic_mode_set,
.atomic_disable = dpu_encoder_virt_atomic_disable,
.atomic_enable = dpu_encoder_virt_atomic_enable,
- .atomic_check = dpu_encoder_virt_atomic_check,
};
static const struct drm_encoder_funcs dpu_encoder_funcs = {
@@ -2789,6 +2877,18 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
}
/**
+ * dpu_encoder_helper_get_cwb_mask - get CWB blocks mask for the DPU encoder
+ * @phys_enc: Pointer to physical encoder structure
+ */
+unsigned int dpu_encoder_helper_get_cwb_mask(struct dpu_encoder_phys *phys_enc)
+{
+ struct drm_encoder *encoder = phys_enc->parent;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
+
+ return dpu_enc->cwb_mask;
+}
+
+/**
* dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder
* This helper function is used by physical encoder to get DSC blocks mask
* used for this encoder.
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index 92b5ee390788..ca1ca2e51d7e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
@@ -60,6 +60,8 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
void dpu_encoder_virt_runtime_resume(struct drm_encoder *encoder);
+uint32_t dpu_encoder_get_clones(struct drm_encoder *drm_enc);
+
struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
int drm_enc_mode,
struct msm_display_info *disp_info);
@@ -80,6 +82,13 @@ int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos);
bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc);
+void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
+ struct msm_display_topology *topology,
+ struct drm_atomic_state *state,
+ const struct drm_display_mode *adj_mode);
+
+bool dpu_encoder_needs_modeset(struct drm_encoder *drm_enc, struct drm_atomic_state *state);
+
void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
struct drm_writeback_job *job);
@@ -88,4 +97,5 @@ void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc);
+void dpu_encoder_start_frame_done_timer(struct drm_encoder *drm_enc);
#endif /* __DPU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index 63f09857025c..61b22d949454 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
*/
@@ -309,6 +309,8 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
return BLEND_3D_NONE;
}
+unsigned int dpu_encoder_helper_get_cwb_mask(struct dpu_encoder_phys *phys_enc);
+
unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc);
struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc);
@@ -331,6 +333,9 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc);
+void dpu_encoder_helper_phys_setup_cwb(struct dpu_encoder_phys *phys_enc,
+ bool enable);
+
void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc,
const struct msm_format *dpu_fmt,
u32 output_type);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index e9bbccc44dad..da9994a79ca2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -5,6 +5,7 @@
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/delay.h>
+#include <linux/string_choices.h>
#include "dpu_encoder_phys.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_pingpong.h"
@@ -261,7 +262,7 @@ static int dpu_encoder_phys_cmd_control_vblank_irq(
DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
- enable ? "true" : "false", refcount);
+ str_true_false(enable), refcount);
if (enable) {
if (phys_enc->vblank_refcount == 0)
@@ -285,7 +286,7 @@ end:
DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0, ret,
- enable ? "true" : "false", refcount);
+ str_true_false(enable), refcount);
}
return ret;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 4c006ec74575..849fea580a4c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -68,7 +68,7 @@ static void dpu_encoder_phys_wb_set_ot_limit(
ot_params.num = hw_wb->idx - WB_0;
ot_params.width = phys_enc->cached_mode.hdisplay;
ot_params.height = phys_enc->cached_mode.vdisplay;
- ot_params.is_wfd = true;
+ ot_params.is_wfd = !dpu_encoder_helper_get_cwb_mask(phys_enc);
ot_params.frame_rate = drm_mode_vrefresh(&phys_enc->cached_mode);
ot_params.vbif_idx = hw_wb->caps->vbif_idx;
ot_params.rd = false;
@@ -111,7 +111,7 @@ static void dpu_encoder_phys_wb_set_qos_remap(
qos_params.vbif_idx = hw_wb->caps->vbif_idx;
qos_params.xin_id = hw_wb->caps->xin_id;
qos_params.num = hw_wb->idx - WB_0;
- qos_params.is_rt = false;
+ qos_params.is_rt = dpu_encoder_helper_get_cwb_mask(phys_enc);
DPU_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d is_rt:%d\n",
qos_params.num,
@@ -174,6 +174,7 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
struct dpu_hw_wb *hw_wb;
struct dpu_hw_wb_cfg *wb_cfg;
+ u32 cdp_usage;
if (!phys_enc || !phys_enc->dpu_kms || !phys_enc->dpu_kms->catalog) {
DPU_ERROR("invalid encoder\n");
@@ -182,6 +183,10 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
hw_wb = phys_enc->hw_wb;
wb_cfg = &wb_enc->wb_cfg;
+ if (dpu_encoder_helper_get_cwb_mask(phys_enc))
+ cdp_usage = DPU_PERF_CDP_USAGE_RT;
+ else
+ cdp_usage = DPU_PERF_CDP_USAGE_NRT;
wb_cfg->intf_mode = phys_enc->intf_mode;
wb_cfg->roi.x1 = 0;
@@ -199,7 +204,7 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
const struct dpu_perf_cfg *perf = phys_enc->dpu_kms->catalog->perf;
hw_wb->ops.setup_cdp(hw_wb, format,
- perf->cdp_cfg[DPU_PERF_CDP_USAGE_NRT].wr_enable);
+ perf->cdp_cfg[cdp_usage].wr_enable);
}
if (hw_wb->ops.setup_outaddress)
@@ -236,6 +241,7 @@ static void dpu_encoder_phys_wb_setup_ctl(struct dpu_encoder_phys *phys_enc)
intf_cfg.intf = DPU_NONE;
intf_cfg.wb = hw_wb->idx;
+ intf_cfg.cwb = dpu_encoder_helper_get_cwb_mask(phys_enc);
if (mode_3d && hw_pp && hw_pp->merge_3d)
intf_cfg.merge_3d = hw_pp->merge_3d->idx;
@@ -340,6 +346,8 @@ static void dpu_encoder_phys_wb_setup(
dpu_encoder_helper_phys_setup_cdm(phys_enc, format, CDM_CDWN_OUTPUT_WB);
+ dpu_encoder_helper_phys_setup_cwb(phys_enc, true);
+
dpu_encoder_phys_wb_setup_ctl(phys_enc);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 0b342c043875..64265ca4656a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -232,37 +232,6 @@ static const u32 rotation_v2_formats[] = {
/* TODO add formats after validation */
};
-static const u32 wb2_formats_rgb[] = {
- DRM_FORMAT_RGB565,
- DRM_FORMAT_BGR565,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGBA8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGBX8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_RGBA5551,
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_RGBX5551,
- DRM_FORMAT_ARGB4444,
- DRM_FORMAT_RGBA4444,
- DRM_FORMAT_RGBX4444,
- DRM_FORMAT_XRGB4444,
- DRM_FORMAT_BGR888,
- DRM_FORMAT_BGRA8888,
- DRM_FORMAT_BGRX8888,
- DRM_FORMAT_ABGR1555,
- DRM_FORMAT_BGRA5551,
- DRM_FORMAT_XBGR1555,
- DRM_FORMAT_BGRX5551,
- DRM_FORMAT_ABGR4444,
- DRM_FORMAT_BGRA4444,
- DRM_FORMAT_BGRX4444,
- DRM_FORMAT_XBGR4444,
-};
-
static const u32 wb2_formats_rgb_yuv[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR565,
@@ -507,7 +476,14 @@ static const struct dpu_dsc_sub_blks dsc_sblk_1 = {
/*************************************************************
* CDM block config
*************************************************************/
-static const struct dpu_cdm_cfg sc7280_cdm = {
+static const struct dpu_cdm_cfg dpu_cdm_1_x_4_x = {
+ .name = "cdm_0",
+ .id = CDM_0,
+ .len = 0x224,
+ .base = 0x79200,
+};
+
+static const struct dpu_cdm_cfg dpu_cdm_5_x = {
.name = "cdm_0",
.id = CDM_0,
.len = 0x228,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
index ae1534c49ae0..3f88c3641d4a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
@@ -214,7 +214,9 @@ static void dpu_hw_cdm_bind_pingpong_blk(struct dpu_hw_cdm *ctx, const enum dpu_
mux_cfg = DPU_REG_READ(c, CDM_MUX);
mux_cfg &= ~0xf;
- if (pp)
+ if (pp >= PINGPONG_CWB_0)
+ mux_cfg |= 0xd;
+ else if (pp)
mux_cfg |= (pp - PINGPONG_0) & 0x7;
else
mux_cfg |= 0xf;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 4893f10d6a58..411a7cf088eb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
@@ -31,12 +31,14 @@
#define CTL_MERGE_3D_ACTIVE 0x0E4
#define CTL_DSC_ACTIVE 0x0E8
#define CTL_WB_ACTIVE 0x0EC
+#define CTL_CWB_ACTIVE 0x0F0
#define CTL_INTF_ACTIVE 0x0F4
#define CTL_CDM_ACTIVE 0x0F8
#define CTL_FETCH_PIPE_ACTIVE 0x0FC
#define CTL_MERGE_3D_FLUSH 0x100
#define CTL_DSC_FLUSH 0x104
#define CTL_WB_FLUSH 0x108
+#define CTL_CWB_FLUSH 0x10C
#define CTL_INTF_FLUSH 0x110
#define CTL_CDM_FLUSH 0x114
#define CTL_PERIPH_FLUSH 0x128
@@ -53,6 +55,7 @@
#define PERIPH_IDX 30
#define INTF_IDX 31
#define WB_IDX 16
+#define CWB_IDX 28
#define DSPP_IDX 29 /* From DPU hw rev 7.x.x */
#define CTL_INVALID_BIT 0xffff
#define CTL_DEFAULT_GROUP_ID 0xf
@@ -110,6 +113,7 @@ static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
ctx->pending_flush_mask = 0x0;
ctx->pending_intf_flush_mask = 0;
ctx->pending_wb_flush_mask = 0;
+ ctx->pending_cwb_flush_mask = 0;
ctx->pending_merge_3d_flush_mask = 0;
ctx->pending_dsc_flush_mask = 0;
ctx->pending_cdm_flush_mask = 0;
@@ -144,6 +148,9 @@ static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
if (ctx->pending_flush_mask & BIT(WB_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
ctx->pending_wb_flush_mask);
+ if (ctx->pending_flush_mask & BIT(CWB_IDX))
+ DPU_REG_WRITE(&ctx->hw, CTL_CWB_FLUSH,
+ ctx->pending_cwb_flush_mask);
if (ctx->pending_flush_mask & BIT(DSPP_IDX))
for (dspp = DSPP_0; dspp < DSPP_MAX; dspp++) {
@@ -310,6 +317,13 @@ static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
ctx->pending_flush_mask |= BIT(WB_IDX);
}
+static void dpu_hw_ctl_update_pending_flush_cwb_v1(struct dpu_hw_ctl *ctx,
+ enum dpu_cwb cwb)
+{
+ ctx->pending_cwb_flush_mask |= BIT(cwb - CWB_0);
+ ctx->pending_flush_mask |= BIT(CWB_IDX);
+}
+
static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
enum dpu_intf intf)
{
@@ -547,6 +561,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
u32 intf_active = 0;
u32 dsc_active = 0;
u32 wb_active = 0;
+ u32 cwb_active = 0;
u32 mode_sel = 0;
/* CTL_TOP[31:28] carries group_id to collate CTL paths
@@ -561,6 +576,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
+ cwb_active = DPU_REG_READ(c, CTL_CWB_ACTIVE);
dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
if (cfg->intf)
@@ -569,12 +585,16 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
if (cfg->wb)
wb_active |= BIT(cfg->wb - WB_0);
+ if (cfg->cwb)
+ cwb_active |= cfg->cwb;
+
if (cfg->dsc)
dsc_active |= cfg->dsc;
DPU_REG_WRITE(c, CTL_TOP, mode_sel);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
+ DPU_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active);
DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
if (cfg->merge_3d)
@@ -624,6 +644,7 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_active = 0;
u32 wb_active = 0;
+ u32 cwb_active = 0;
u32 merge3d_active = 0;
u32 dsc_active;
u32 cdm_active;
@@ -651,6 +672,12 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
}
+ if (cfg->cwb) {
+ cwb_active = DPU_REG_READ(c, CTL_CWB_ACTIVE);
+ cwb_active &= ~cfg->cwb;
+ DPU_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active);
+ }
+
if (cfg->wb) {
wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
wb_active &= ~BIT(cfg->wb - WB_0);
@@ -703,6 +730,7 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->update_pending_flush_merge_3d =
dpu_hw_ctl_update_pending_flush_merge_3d_v1;
ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
+ ops->update_pending_flush_cwb = dpu_hw_ctl_update_pending_flush_cwb_v1;
ops->update_pending_flush_dsc =
dpu_hw_ctl_update_pending_flush_dsc_v1;
ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 85c6c835cc87..080a9550a0cc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _DPU_HW_CTL_H
@@ -42,6 +42,7 @@ struct dpu_hw_stage_cfg {
* @cdm: CDM block used
* @stream_sel: Stream selection for multi-stream interfaces
* @dsc: DSC BIT masks used
+ * @cwb: CWB BIT masks used
*/
struct dpu_hw_intf_cfg {
enum dpu_intf intf;
@@ -51,6 +52,7 @@ struct dpu_hw_intf_cfg {
enum dpu_ctl_mode_sel intf_mode_sel;
enum dpu_cdm cdm;
int stream_sel;
+ unsigned int cwb;
unsigned int dsc;
};
@@ -115,6 +117,15 @@ struct dpu_hw_ctl_ops {
enum dpu_wb blk);
/**
+ * OR in the given flushbits to the cached pending_(cwb_)flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : concurrent writeback block index
+ */
+ void (*update_pending_flush_cwb)(struct dpu_hw_ctl *ctx,
+ enum dpu_cwb blk);
+
+ /**
* OR in the given flushbits to the cached pending_(intf_)flush_mask
* No effect on hardware
* @ctx : ctl path ctx pointer
@@ -258,6 +269,7 @@ struct dpu_hw_ctl_ops {
* @pending_flush_mask: storage for pending ctl_flush managed via ops
* @pending_intf_flush_mask: pending INTF flush
* @pending_wb_flush_mask: pending WB flush
+ * @pending_cwb_flush_mask: pending CWB flush
* @pending_dsc_flush_mask: pending DSC flush
* @pending_cdm_flush_mask: pending CDM flush
* @ops: operation list
@@ -274,6 +286,7 @@ struct dpu_hw_ctl {
u32 pending_flush_mask;
u32 pending_intf_flush_mask;
u32 pending_wb_flush_mask;
+ u32 pending_cwb_flush_mask;
u32 pending_periph_flush_mask;
u32 pending_merge_3d_flush_mask;
u32 pending_dspp_flush_mask[DSPP_MAX - DSPP_0];
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index ba7bb05efe9b..8d820cd1b554 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -77,12 +77,14 @@ enum dpu_hw_blk_type {
DPU_HW_BLK_LM,
DPU_HW_BLK_CTL,
DPU_HW_BLK_PINGPONG,
+ DPU_HW_BLK_DCWB_PINGPONG,
DPU_HW_BLK_INTF,
DPU_HW_BLK_WB,
DPU_HW_BLK_DSPP,
DPU_HW_BLK_MERGE_3D,
DPU_HW_BLK_DSC,
DPU_HW_BLK_CDM,
+ DPU_HW_BLK_CWB,
DPU_HW_BLK_MAX,
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 97e9cb8c2b09..3305ad0623ca 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2013 Red Hat
* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Author: Rob Clark <robdclark@gmail.com>
*/
@@ -446,6 +446,19 @@ static void dpu_kms_disable_commit(struct msm_kms *kms)
pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
+static int dpu_kms_check_mode_changed(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc_state *old_crtc_state;
+ struct drm_crtc *crtc;
+ int i;
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
+ dpu_crtc_check_mode_changed(old_crtc_state, new_crtc_state);
+
+ return 0;
+}
+
static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
@@ -811,8 +824,11 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
return ret;
num_encoders = 0;
- drm_for_each_encoder(encoder, dev)
+ drm_for_each_encoder(encoder, dev) {
num_encoders++;
+ if (catalog->cwb_count > 0)
+ encoder->possible_clones = dpu_encoder_get_clones(encoder);
+ }
max_crtc_count = min(catalog->mixer_count, num_encoders);
@@ -1062,6 +1078,7 @@ static const struct msm_kms_funcs kms_funcs = {
.irq = dpu_core_irq,
.enable_commit = dpu_kms_enable_commit,
.disable_commit = dpu_kms_disable_commit,
+ .check_mode_changed = dpu_kms_check_mode_changed,
.flush_commit = dpu_kms_flush_commit,
.wait_flush = dpu_kms_wait_flush,
.complete_commit = dpu_kms_complete_commit,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 547cdb2c0c78..a57ec2ec1060 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -124,14 +124,15 @@ struct dpu_global_state {
struct dpu_rm *rm;
- uint32_t pingpong_to_enc_id[PINGPONG_MAX - PINGPONG_0];
- uint32_t mixer_to_enc_id[LM_MAX - LM_0];
- uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
- uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0];
- uint32_t dsc_to_enc_id[DSC_MAX - DSC_0];
- uint32_t cdm_to_enc_id;
+ uint32_t pingpong_to_crtc_id[PINGPONG_MAX - PINGPONG_0];
+ uint32_t mixer_to_crtc_id[LM_MAX - LM_0];
+ uint32_t ctl_to_crtc_id[CTL_MAX - CTL_0];
+ uint32_t dspp_to_crtc_id[DSPP_MAX - DSPP_0];
+ uint32_t dsc_to_crtc_id[DSC_MAX - DSC_0];
+ uint32_t cdm_to_crtc_id;
uint32_t sspp_to_crtc_id[SSPP_MAX - SSPP_NONE];
+ uint32_t cwb_to_crtc_id[CWB_MAX - CWB_0];
};
struct dpu_global_state
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index af3e541f60c3..e03d6091f736 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -729,12 +729,40 @@ static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
struct dpu_sw_pipe *pipe,
struct dpu_sw_pipe_cfg *pipe_cfg,
- const struct msm_format *fmt,
- const struct drm_display_mode *mode)
+ const struct drm_display_mode *mode,
+ struct drm_plane_state *new_plane_state)
{
uint32_t min_src_size;
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
int ret;
+ const struct msm_format *fmt;
+ uint32_t supported_rotations;
+ const struct dpu_sspp_cfg *pipe_hw_caps;
+ const struct dpu_sspp_sub_blks *sblk;
+
+ pipe_hw_caps = pipe->sspp->cap;
+ sblk = pipe->sspp->cap->sblk;
+
+ /*
+ * We already have verified scaling against platform limitations.
+ * Now check if the SSPP supports scaling at all.
+ */
+ if (!sblk->scaler_blk.len &&
+ ((drm_rect_width(&new_plane_state->src) >> 16 !=
+ drm_rect_width(&new_plane_state->dst)) ||
+ (drm_rect_height(&new_plane_state->src) >> 16 !=
+ drm_rect_height(&new_plane_state->dst))))
+ return -ERANGE;
+
+ fmt = msm_framebuffer_format(new_plane_state->fb);
+
+ supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0;
+
+ if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION))
+ supported_rotations |= DRM_MODE_ROTATE_90;
+
+ pipe_cfg->rotation = drm_rotation_simplify(new_plane_state->rotation,
+ supported_rotations);
min_src_size = MSM_FORMAT_IS_YUV(fmt) ? 2 : 1;
@@ -923,47 +951,20 @@ static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
struct dpu_sw_pipe *pipe = &pstate->pipe;
struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
- const struct msm_format *fmt;
struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
- uint32_t supported_rotations;
- const struct dpu_sspp_cfg *pipe_hw_caps;
- const struct dpu_sspp_sub_blks *sblk;
int ret = 0;
- pipe_hw_caps = pipe->sspp->cap;
- sblk = pipe->sspp->cap->sblk;
-
- /*
- * We already have verified scaling against platform limitations.
- * Now check if the SSPP supports scaling at all.
- */
- if (!sblk->scaler_blk.len &&
- ((drm_rect_width(&new_plane_state->src) >> 16 !=
- drm_rect_width(&new_plane_state->dst)) ||
- (drm_rect_height(&new_plane_state->src) >> 16 !=
- drm_rect_height(&new_plane_state->dst))))
- return -ERANGE;
-
- fmt = msm_framebuffer_format(new_plane_state->fb);
-
- supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0;
-
- if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION))
- supported_rotations |= DRM_MODE_ROTATE_90;
-
- pipe_cfg->rotation = drm_rotation_simplify(new_plane_state->rotation,
- supported_rotations);
- r_pipe_cfg->rotation = pipe_cfg->rotation;
-
- ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt,
- &crtc_state->adjusted_mode);
+ ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg,
+ &crtc_state->adjusted_mode,
+ new_plane_state);
if (ret)
return ret;
if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) {
- ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt,
- &crtc_state->adjusted_mode);
+ ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg,
+ &crtc_state->adjusted_mode,
+ new_plane_state);
if (ret)
return ret;
}
@@ -1059,6 +1060,9 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
struct drm_crtc_state *crtc_state;
int ret;
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
if (plane_state->crtc)
crtc_state = drm_atomic_get_new_crtc_state(state,
plane_state->crtc);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 5baf9df702b8..3efbba425ca6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -22,9 +22,9 @@
static inline bool reserved_by_other(uint32_t *res_map, int idx,
- uint32_t enc_id)
+ uint32_t crtc_id)
{
- return res_map[idx] && res_map[idx] != enc_id;
+ return res_map[idx] && res_map[idx] != crtc_id;
}
/**
@@ -233,13 +233,66 @@ static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
return -EINVAL;
}
+static int _dpu_rm_reserve_cwb_mux_and_pingpongs(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ uint32_t crtc_id,
+ struct msm_display_topology *topology)
+{
+ int num_cwb_mux = topology->num_lm, cwb_mux_count = 0;
+ int cwb_pp_start_idx = PINGPONG_CWB_0 - PINGPONG_0;
+ int cwb_pp_idx[MAX_BLOCKS];
+ int cwb_mux_idx[MAX_BLOCKS];
+
+ /*
+ * Reserve additional dedicated CWB PINGPONG blocks and muxes for each
+ * mixer
+ *
+ * TODO: add support for reserving resources on platforms with no
+ * PINGPONG_CWB
+ */
+ for (int i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
+ cwb_mux_count < num_cwb_mux; i++) {
+ for (int j = 0; j < ARRAY_SIZE(rm->cwb_blks); j++) {
+ /*
+ * Odd LMs must be assigned to odd CWB muxes and even
+ * LMs to even CWB muxes.
+ *
+ * Since the RM HW block array index is based on the HW
+ * block ids, we can also use the array index to enforce
+ * the odd/even rule. See dpu_rm_init() for more
+ * information
+ */
+ if (reserved_by_other(global_state->cwb_to_crtc_id, j, crtc_id) ||
+ i % 2 != j % 2)
+ continue;
+
+ cwb_mux_idx[cwb_mux_count] = j;
+ cwb_pp_idx[cwb_mux_count] = j + cwb_pp_start_idx;
+ cwb_mux_count++;
+ break;
+ }
+ }
+
+ if (cwb_mux_count != num_cwb_mux) {
+ DPU_ERROR("Unable to reserve all CWB PINGPONGs\n");
+ return -ENAVAIL;
+ }
+
+ for (int i = 0; i < cwb_mux_count; i++) {
+ global_state->pingpong_to_crtc_id[cwb_pp_idx[i]] = crtc_id;
+ global_state->cwb_to_crtc_id[cwb_mux_idx[i]] = crtc_id;
+ }
+
+ return 0;
+}
+
/**
* _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
* proposed use case requirements, incl. hardwired dependent blocks like
* pingpong
* @rm: dpu resource manager handle
* @global_state: resources shared across multiple kms objects
- * @enc_id: encoder id requesting for allocation
+ * @crtc_id: crtc id requesting for allocation
* @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
* if lm, and all other hardwired blocks connected to the lm (pp) is
* available and appropriate
@@ -252,14 +305,14 @@ static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
*/
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
+ uint32_t crtc_id, int lm_idx, int *pp_idx, int *dspp_idx,
struct msm_display_topology *topology)
{
const struct dpu_lm_cfg *lm_cfg;
int idx;
/* Already reserved? */
- if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
+ if (reserved_by_other(global_state->mixer_to_crtc_id, lm_idx, crtc_id)) {
DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
return false;
}
@@ -271,7 +324,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
return false;
}
- if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
+ if (reserved_by_other(global_state->pingpong_to_crtc_id, idx, crtc_id)) {
DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
lm_cfg->pingpong);
return false;
@@ -287,7 +340,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
return false;
}
- if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) {
+ if (reserved_by_other(global_state->dspp_to_crtc_id, idx, crtc_id)) {
DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
lm_cfg->dspp);
return false;
@@ -299,7 +352,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id,
+ uint32_t crtc_id,
struct msm_display_topology *topology)
{
@@ -323,7 +376,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
lm_idx[lm_count] = i;
if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
- enc_id, i, &pp_idx[lm_count],
+ crtc_id, i, &pp_idx[lm_count],
&dspp_idx[lm_count], topology)) {
continue;
}
@@ -342,7 +395,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
continue;
if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
- global_state, enc_id, j,
+ global_state, crtc_id, j,
&pp_idx[lm_count], &dspp_idx[lm_count],
topology)) {
continue;
@@ -359,12 +412,12 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
}
for (i = 0; i < lm_count; i++) {
- global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
- global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
- global_state->dspp_to_enc_id[dspp_idx[i]] =
- topology->num_dspp ? enc_id : 0;
+ global_state->mixer_to_crtc_id[lm_idx[i]] = crtc_id;
+ global_state->pingpong_to_crtc_id[pp_idx[i]] = crtc_id;
+ global_state->dspp_to_crtc_id[dspp_idx[i]] =
+ topology->num_dspp ? crtc_id : 0;
- trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
+ trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, crtc_id,
pp_idx[i] + PINGPONG_0);
}
@@ -374,15 +427,25 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
static int _dpu_rm_reserve_ctls(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id,
+ uint32_t crtc_id,
const struct msm_display_topology *top)
{
int ctl_idx[MAX_BLOCKS];
int i = 0, j, num_ctls;
bool needs_split_display;
- /* each hw_intf needs its own hw_ctrl to program its control path */
- num_ctls = top->num_intf;
+ /*
+ * For non-CWB mode, each hw_intf needs its own hw_ctl to program its
+ * control path.
+ *
+ * Hardcode num_ctls to 1 if CWB is enabled because in CWB, both the
+ * writeback and real-time encoders must be driven by the same control
+ * path
+ */
+ if (top->cwb_enabled)
+ num_ctls = 1;
+ else
+ num_ctls = top->num_intf;
needs_split_display = _dpu_rm_needs_split_display(top);
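
The comment above encodes a simple rule; written as a standalone helper (illustrative only, the real code stays inlined in _dpu_rm_reserve_ctls()) it reads:

/* Illustrative sketch, not part of the patch: CTL count selection. */
static unsigned int example_ctl_count(const struct msm_display_topology *top)
{
	/* CWB shares one control path between writeback and real-time encoders */
	if (top->cwb_enabled)
		return 1;

	/* otherwise every hw_intf programs its own hw_ctl */
	return top->num_intf;
}
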
@@ -393,7 +456,7 @@ static int _dpu_rm_reserve_ctls(
if (!rm->ctl_blks[j])
continue;
- if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
+ if (reserved_by_other(global_state->ctl_to_crtc_id, j, crtc_id))
continue;
ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
@@ -417,8 +480,8 @@ static int _dpu_rm_reserve_ctls(
return -ENAVAIL;
for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
- global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
- trace_dpu_rm_reserve_ctls(i + CTL_0, enc_id);
+ global_state->ctl_to_crtc_id[ctl_idx[i]] = crtc_id;
+ trace_dpu_rm_reserve_ctls(i + CTL_0, crtc_id);
}
return 0;
@@ -426,12 +489,12 @@ static int _dpu_rm_reserve_ctls(
static int _dpu_rm_pingpong_next_index(struct dpu_global_state *global_state,
int start,
- uint32_t enc_id)
+ uint32_t crtc_id)
{
int i;
for (i = start; i < (PINGPONG_MAX - PINGPONG_0); i++) {
- if (global_state->pingpong_to_enc_id[i] == enc_id)
+ if (global_state->pingpong_to_crtc_id[i] == crtc_id)
return i;
}
@@ -452,7 +515,7 @@ static int _dpu_rm_pingpong_dsc_check(int dsc_idx, int pp_idx)
static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id,
+ uint32_t crtc_id,
const struct msm_display_topology *top)
{
int num_dsc = 0;
@@ -465,10 +528,10 @@ static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
if (!rm->dsc_blks[dsc_idx])
continue;
- if (reserved_by_other(global_state->dsc_to_enc_id, dsc_idx, enc_id))
+ if (reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx, crtc_id))
continue;
- pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, enc_id);
+ pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, crtc_id);
if (pp_idx < 0)
return -ENAVAIL;
@@ -476,7 +539,7 @@ static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
if (ret)
return -ENAVAIL;
- global_state->dsc_to_enc_id[dsc_idx] = enc_id;
+ global_state->dsc_to_crtc_id[dsc_idx] = crtc_id;
num_dsc++;
pp_idx++;
}
@@ -492,7 +555,7 @@ static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id,
+ uint32_t crtc_id,
const struct msm_display_topology *top)
{
int num_dsc = 0;
@@ -507,11 +570,11 @@ static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
continue;
/* consecutive dsc index to be paired */
- if (reserved_by_other(global_state->dsc_to_enc_id, dsc_idx, enc_id) ||
- reserved_by_other(global_state->dsc_to_enc_id, dsc_idx + 1, enc_id))
+ if (reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx, crtc_id) ||
+ reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx + 1, crtc_id))
continue;
- pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, enc_id);
+ pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, crtc_id);
if (pp_idx < 0)
return -ENAVAIL;
@@ -521,7 +584,7 @@ static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
continue;
}
- pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx + 1, enc_id);
+ pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx + 1, crtc_id);
if (pp_idx < 0)
return -ENAVAIL;
@@ -531,8 +594,8 @@ static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
continue;
}
- global_state->dsc_to_enc_id[dsc_idx] = enc_id;
- global_state->dsc_to_enc_id[dsc_idx + 1] = enc_id;
+ global_state->dsc_to_crtc_id[dsc_idx] = crtc_id;
+ global_state->dsc_to_crtc_id[dsc_idx + 1] = crtc_id;
num_dsc += 2;
pp_idx++; /* start for next pair */
}
@@ -548,11 +611,9 @@ static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- struct drm_encoder *enc,
+ uint32_t crtc_id,
const struct msm_display_topology *top)
{
- uint32_t enc_id = enc->base.id;
-
if (!top->num_dsc || !top->num_intf)
return 0;
@@ -568,16 +629,17 @@ static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
/* num_dsc should be either 1, 2 or 4 */
if (top->num_dsc > top->num_intf) /* merge mode */
- return _dpu_rm_dsc_alloc_pair(rm, global_state, enc_id, top);
+ return _dpu_rm_dsc_alloc_pair(rm, global_state, crtc_id, top);
else
- return _dpu_rm_dsc_alloc(rm, global_state, enc_id, top);
+ return _dpu_rm_dsc_alloc(rm, global_state, crtc_id, top);
return 0;
}
static int _dpu_rm_reserve_cdm(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- struct drm_encoder *enc)
+ uint32_t crtc_id,
+ int num_cdm)
{
/* try allocating only one CDM block */
if (!rm->cdm_blk) {
@@ -585,12 +647,17 @@ static int _dpu_rm_reserve_cdm(struct dpu_rm *rm,
return -EIO;
}
- if (global_state->cdm_to_enc_id) {
+ if (num_cdm > 1) {
+ DPU_ERROR("More than 1 INTF requesting CDM\n");
+ return -EINVAL;
+ }
+
+ if (global_state->cdm_to_crtc_id) {
DPU_ERROR("CDM_0 is already allocated\n");
return -EIO;
}
- global_state->cdm_to_enc_id = enc->base.id;
+ global_state->cdm_to_crtc_id = crtc_id;
return 0;
}
@@ -598,30 +665,37 @@ static int _dpu_rm_reserve_cdm(struct dpu_rm *rm,
static int _dpu_rm_make_reservation(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
- struct drm_encoder *enc,
+ uint32_t crtc_id,
struct msm_display_topology *topology)
{
int ret;
- ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, topology);
+ ret = _dpu_rm_reserve_lms(rm, global_state, crtc_id, topology);
if (ret) {
DPU_ERROR("unable to find appropriate mixers\n");
return ret;
}
- ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
+ if (topology->cwb_enabled) {
+ ret = _dpu_rm_reserve_cwb_mux_and_pingpongs(rm, global_state,
+ crtc_id, topology);
+ if (ret)
+ return ret;
+ }
+
+ ret = _dpu_rm_reserve_ctls(rm, global_state, crtc_id,
topology);
if (ret) {
DPU_ERROR("unable to find appropriate CTL\n");
return ret;
}
- ret = _dpu_rm_reserve_dsc(rm, global_state, enc, topology);
+ ret = _dpu_rm_reserve_dsc(rm, global_state, crtc_id, topology);
if (ret)
return ret;
- if (topology->needs_cdm) {
- ret = _dpu_rm_reserve_cdm(rm, global_state, enc);
+ if (topology->num_cdm > 0) {
+ ret = _dpu_rm_reserve_cdm(rm, global_state, crtc_id, topology->num_cdm);
if (ret) {
DPU_ERROR("unable to find CDM blk\n");
return ret;
@@ -632,12 +706,12 @@ static int _dpu_rm_make_reservation(
}
static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
- uint32_t enc_id)
+ uint32_t crtc_id)
{
int i;
for (i = 0; i < cnt; i++) {
- if (res_mapping[i] == enc_id)
+ if (res_mapping[i] == crtc_id)
res_mapping[i] = 0;
}
}
@@ -646,23 +720,27 @@ static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
* dpu_rm_release - Given the encoder for the display chain, release any
* HW blocks previously reserved for that use case.
* @global_state: resources shared across multiple kms objects
- * @enc: DRM Encoder handle
+ * @crtc: DRM CRTC handle
* @return: 0 on Success otherwise -ERROR
*/
void dpu_rm_release(struct dpu_global_state *global_state,
- struct drm_encoder *enc)
+ struct drm_crtc *crtc)
{
- _dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
- ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
- _dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
- ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
- _dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
- ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
- _dpu_rm_clear_mapping(global_state->dsc_to_enc_id,
- ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
- _dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
- ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
- _dpu_rm_clear_mapping(&global_state->cdm_to_enc_id, 1, enc->base.id);
+ uint32_t crtc_id = crtc->base.id;
+
+ _dpu_rm_clear_mapping(global_state->pingpong_to_crtc_id,
+ ARRAY_SIZE(global_state->pingpong_to_crtc_id), crtc_id);
+ _dpu_rm_clear_mapping(global_state->mixer_to_crtc_id,
+ ARRAY_SIZE(global_state->mixer_to_crtc_id), crtc_id);
+ _dpu_rm_clear_mapping(global_state->ctl_to_crtc_id,
+ ARRAY_SIZE(global_state->ctl_to_crtc_id), crtc_id);
+ _dpu_rm_clear_mapping(global_state->dsc_to_crtc_id,
+ ARRAY_SIZE(global_state->dsc_to_crtc_id), crtc_id);
+ _dpu_rm_clear_mapping(global_state->dspp_to_crtc_id,
+ ARRAY_SIZE(global_state->dspp_to_crtc_id), crtc_id);
+ _dpu_rm_clear_mapping(&global_state->cdm_to_crtc_id, 1, crtc_id);
+ _dpu_rm_clear_mapping(global_state->cwb_to_crtc_id,
+ ARRAY_SIZE(global_state->cwb_to_crtc_id), crtc_id);
}
/**
@@ -674,42 +752,33 @@ void dpu_rm_release(struct dpu_global_state *global_state,
* HW Reservations should be released via dpu_rm_release_hw.
* @rm: DPU Resource Manager handle
* @global_state: resources shared across multiple kms objects
- * @enc: DRM Encoder handle
- * @crtc_state: Proposed Atomic DRM CRTC State handle
+ * @crtc: DRM CRTC handle
* @topology: Pointer to topology info for the display
* @return: 0 on Success otherwise -ERROR
*/
int dpu_rm_reserve(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
- struct drm_encoder *enc,
- struct drm_crtc_state *crtc_state,
+ struct drm_crtc *crtc,
struct msm_display_topology *topology)
{
int ret;
- /* Check if this is just a page-flip */
- if (!drm_atomic_crtc_needs_modeset(crtc_state))
- return 0;
-
if (IS_ERR(global_state)) {
DPU_ERROR("failed to global state\n");
return PTR_ERR(global_state);
}
- DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
- enc->base.id, crtc_state->crtc->base.id);
+ DRM_DEBUG_KMS("reserving hw for crtc %d\n", crtc->base.id);
DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n",
topology->num_lm, topology->num_dsc,
topology->num_intf);
- ret = _dpu_rm_make_reservation(rm, global_state, enc, topology);
+ ret = _dpu_rm_make_reservation(rm, global_state, crtc->base.id, topology);
if (ret)
DPU_ERROR("failed to reserve hw resources: %d\n", ret);
-
-
return ret;
}
@@ -800,50 +869,57 @@ void dpu_rm_release_all_sspp(struct dpu_global_state *global_state,
* assigned to this encoder
* @rm: DPU Resource Manager handle
* @global_state: resources shared across multiple kms objects
- * @enc_id: encoder id requesting for allocation
+ * @crtc: DRM CRTC handle
* @type: resource type to return data for
* @blks: pointer to the array to be filled by HW resources
* @blks_size: size of the @blks array
*/
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
- struct dpu_global_state *global_state, uint32_t enc_id,
+ struct dpu_global_state *global_state, struct drm_crtc *crtc,
enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
{
+ uint32_t crtc_id = crtc->base.id;
struct dpu_hw_blk **hw_blks;
- uint32_t *hw_to_enc_id;
+ uint32_t *hw_to_crtc_id;
int i, num_blks, max_blks;
switch (type) {
case DPU_HW_BLK_PINGPONG:
+ case DPU_HW_BLK_DCWB_PINGPONG:
hw_blks = rm->pingpong_blks;
- hw_to_enc_id = global_state->pingpong_to_enc_id;
+ hw_to_crtc_id = global_state->pingpong_to_crtc_id;
max_blks = ARRAY_SIZE(rm->pingpong_blks);
break;
case DPU_HW_BLK_LM:
hw_blks = rm->mixer_blks;
- hw_to_enc_id = global_state->mixer_to_enc_id;
+ hw_to_crtc_id = global_state->mixer_to_crtc_id;
max_blks = ARRAY_SIZE(rm->mixer_blks);
break;
case DPU_HW_BLK_CTL:
hw_blks = rm->ctl_blks;
- hw_to_enc_id = global_state->ctl_to_enc_id;
+ hw_to_crtc_id = global_state->ctl_to_crtc_id;
max_blks = ARRAY_SIZE(rm->ctl_blks);
break;
case DPU_HW_BLK_DSPP:
hw_blks = rm->dspp_blks;
- hw_to_enc_id = global_state->dspp_to_enc_id;
+ hw_to_crtc_id = global_state->dspp_to_crtc_id;
max_blks = ARRAY_SIZE(rm->dspp_blks);
break;
case DPU_HW_BLK_DSC:
hw_blks = rm->dsc_blks;
- hw_to_enc_id = global_state->dsc_to_enc_id;
+ hw_to_crtc_id = global_state->dsc_to_crtc_id;
max_blks = ARRAY_SIZE(rm->dsc_blks);
break;
case DPU_HW_BLK_CDM:
hw_blks = &rm->cdm_blk;
- hw_to_enc_id = &global_state->cdm_to_enc_id;
+ hw_to_crtc_id = &global_state->cdm_to_crtc_id;
max_blks = 1;
break;
+ case DPU_HW_BLK_CWB:
+ hw_blks = rm->cwb_blks;
+ hw_to_crtc_id = global_state->cwb_to_crtc_id;
+ max_blks = ARRAY_SIZE(rm->cwb_blks);
+ break;
default:
DPU_ERROR("blk type %d not managed by rm\n", type);
return 0;
@@ -851,17 +927,31 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
num_blks = 0;
for (i = 0; i < max_blks; i++) {
- if (hw_to_enc_id[i] != enc_id)
+ if (hw_to_crtc_id[i] != crtc_id)
continue;
+ if (type == DPU_HW_BLK_PINGPONG) {
+ struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(hw_blks[i]);
+
+ if (pp->idx >= PINGPONG_CWB_0)
+ continue;
+ }
+
+ if (type == DPU_HW_BLK_DCWB_PINGPONG) {
+ struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(hw_blks[i]);
+
+ if (pp->idx < PINGPONG_CWB_0)
+ continue;
+ }
+
if (num_blks == blks_size) {
- DPU_ERROR("More than %d resources assigned to enc %d\n",
- blks_size, enc_id);
+ DPU_ERROR("More than %d resources assigned to crtc %d\n",
+ blks_size, crtc_id);
break;
}
if (!hw_blks[i]) {
- DPU_ERROR("Allocated resource %d unavailable to assign to enc %d\n",
- type, enc_id);
+ DPU_ERROR("Allocated resource %d unavailable to assign to crtc %d\n",
+ type, crtc_id);
break;
}
blks[num_blks++] = hw_blks[i];
@@ -896,38 +986,38 @@ void dpu_rm_print_state(struct drm_printer *p,
drm_puts(p, "resource mapping:\n");
drm_puts(p, "\tpingpong=");
- for (i = 0; i < ARRAY_SIZE(global_state->pingpong_to_enc_id); i++)
+ for (i = 0; i < ARRAY_SIZE(global_state->pingpong_to_crtc_id); i++)
dpu_rm_print_state_helper(p, rm->pingpong_blks[i],
- global_state->pingpong_to_enc_id[i]);
+ global_state->pingpong_to_crtc_id[i]);
drm_puts(p, "\n");
drm_puts(p, "\tmixer=");
- for (i = 0; i < ARRAY_SIZE(global_state->mixer_to_enc_id); i++)
+ for (i = 0; i < ARRAY_SIZE(global_state->mixer_to_crtc_id); i++)
dpu_rm_print_state_helper(p, rm->mixer_blks[i],
- global_state->mixer_to_enc_id[i]);
+ global_state->mixer_to_crtc_id[i]);
drm_puts(p, "\n");
drm_puts(p, "\tctl=");
- for (i = 0; i < ARRAY_SIZE(global_state->ctl_to_enc_id); i++)
+ for (i = 0; i < ARRAY_SIZE(global_state->ctl_to_crtc_id); i++)
dpu_rm_print_state_helper(p, rm->ctl_blks[i],
- global_state->ctl_to_enc_id[i]);
+ global_state->ctl_to_crtc_id[i]);
drm_puts(p, "\n");
drm_puts(p, "\tdspp=");
- for (i = 0; i < ARRAY_SIZE(global_state->dspp_to_enc_id); i++)
+ for (i = 0; i < ARRAY_SIZE(global_state->dspp_to_crtc_id); i++)
dpu_rm_print_state_helper(p, rm->dspp_blks[i],
- global_state->dspp_to_enc_id[i]);
+ global_state->dspp_to_crtc_id[i]);
drm_puts(p, "\n");
drm_puts(p, "\tdsc=");
- for (i = 0; i < ARRAY_SIZE(global_state->dsc_to_enc_id); i++)
+ for (i = 0; i < ARRAY_SIZE(global_state->dsc_to_crtc_id); i++)
dpu_rm_print_state_helper(p, rm->dsc_blks[i],
- global_state->dsc_to_enc_id[i]);
+ global_state->dsc_to_crtc_id[i]);
drm_puts(p, "\n");
drm_puts(p, "\tcdm=");
dpu_rm_print_state_helper(p, rm->cdm_blk,
- global_state->cdm_to_enc_id);
+ global_state->cdm_to_crtc_id);
drm_puts(p, "\n");
drm_puts(p, "\tsspp=");
@@ -936,4 +1026,10 @@ void dpu_rm_print_state(struct drm_printer *p,
dpu_rm_print_state_helper(p, rm->hw_sspp[i] ? &rm->hw_sspp[i]->base : NULL,
global_state->sspp_to_crtc_id[i]);
drm_puts(p, "\n");
+
+ drm_puts(p, "\tcwb=");
+ for (i = 0; i < ARRAY_SIZE(global_state->cwb_to_crtc_id); i++)
+ dpu_rm_print_state_helper(p, rm->cwb_blks[i],
+ global_state->cwb_to_crtc_id[i]);
+ drm_puts(p, "\n");
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index 99bd594ee0d1..a19dbdb1b6f4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -51,14 +51,17 @@ struct dpu_rm_sspp_requirements {
* @num_intf: number of interfaces the panel is mounted on
* @num_dspp: number of dspp blocks used
* @num_dsc: number of Display Stream Compression (DSC) blocks used
- * @needs_cdm: indicates whether cdm block is needed for this display topology
+ * @num_cdm: indicates how many outputs are requesting cdm block for
+ * this display topology
+ * @cwb_enabled: indicates whether CWB is enabled for this display topology
*/
struct msm_display_topology {
u32 num_lm;
u32 num_intf;
u32 num_dspp;
u32 num_dsc;
- bool needs_cdm;
+ int num_cdm;
+ bool cwb_enabled;
};
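
With the reworked struct above, a caller describes the whole display chain in one place. The initializer below is a hedged example of what a dual-mixer, CWB-enabled topology might look like; the values are illustrative and not taken from any specific target.

/* Illustrative sketch, not part of the patch: example topology values. */
static const struct msm_display_topology example_top = {
	.num_lm      = 2,	/* two layer mixers ganged for the mode width */
	.num_intf    = 1,	/* single physical interface */
	.num_dspp    = 0,	/* no color processing blocks requested */
	.num_dsc     = 0,	/* no stream compression */
	.num_cdm     = 0,	/* no YUV output, so no CDM requested */
	.cwb_enabled = true,	/* reserve CWB muxes and dedicated pingpongs */
};
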
int dpu_rm_init(struct drm_device *dev,
@@ -69,12 +72,11 @@ int dpu_rm_init(struct drm_device *dev,
int dpu_rm_reserve(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- struct drm_encoder *drm_enc,
- struct drm_crtc_state *crtc_state,
+ struct drm_crtc *crtc,
struct msm_display_topology *topology);
void dpu_rm_release(struct dpu_global_state *global_state,
- struct drm_encoder *enc);
+ struct drm_crtc *crtc);
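
The reservation API is now keyed purely by the CRTC. The sketch below shows the expected call pattern using only the prototypes declared above; the surrounding atomic plumbing, the origin of global_state and the error handling are assumptions for illustration.

/* Illustrative sketch, not part of the patch: CRTC-keyed reserve/release. */
static int example_reserve(struct dpu_rm *rm,
			   struct dpu_global_state *global_state,
			   struct drm_crtc *crtc,
			   struct msm_display_topology *topology)
{
	int ret;

	/* reserve against the CRTC; entries in the global tables now hold its id */
	ret = dpu_rm_reserve(rm, global_state, crtc, topology);
	if (ret)
		return ret;

	/*
	 * ... pull the reserved blocks with
	 * dpu_rm_get_assigned_resources(rm, global_state, crtc, ...);
	 * on disable or failure, hand everything back:
	 */
	dpu_rm_release(global_state, crtc);
	return 0;
}
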
struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,
struct dpu_global_state *global_state,
@@ -85,7 +87,7 @@ void dpu_rm_release_all_sspp(struct dpu_global_state *global_state,
struct drm_crtc *crtc);
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
- struct dpu_global_state *global_state, uint32_t enc_id,
+ struct dpu_global_state *global_state, struct drm_crtc *crtc,
enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size);
void dpu_rm_print_state(struct drm_printer *p,
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
index 7444b75c4215..52e728181b52 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
@@ -58,7 +58,7 @@ static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
static enum drm_mode_status
mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
index 666de99a46a5..fc183fe37f56 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
@@ -3,6 +3,7 @@
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*/
+#include <linux/string_choices.h>
#include "mdp5_kms.h"
#include "mdp5_ctl.h"
@@ -233,7 +234,7 @@ int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
return -EINVAL;
ctl->encoder_enabled = enabled;
- DBG("intf_%d: %s", intf->num, enabled ? "on" : "off");
+ DBG("intf_%d: %s", intf->num, str_on_off(enabled));
if (start_signal_needed(ctl, pipeline)) {
send_start_signal(ctl);
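
str_on_off() here, and str_true_false() used later in dp_drm.c, are small helpers from <linux/string_choices.h> that replace open-coded ternaries in log messages. A minimal illustration (the device and message are hypothetical):

/* Illustrative sketch, not part of the patch: string_choices helpers. */
#include <linux/device.h>
#include <linux/string_choices.h>

static void example_log_state(struct device *dev, bool enabled, bool ready)
{
	dev_dbg(dev, "encoder: %s, link: %s\n",
		str_on_off(enabled),		/* "on" / "off" */
		str_true_false(ready));		/* "true" / "false" */
}
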
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 62de248ed1b0..bb1601921938 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -368,7 +368,7 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
}
static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *state, bool flip)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 9c463ae2f8fa..d8633a596f8d 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -11,6 +11,7 @@
#include <linux/phy/phy.h>
#include <linux/phy/phy-dp.h>
#include <linux/pm_opp.h>
+#include <linux/string_choices.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_fixed.h>
@@ -1366,9 +1367,9 @@ int msm_dp_ctrl_core_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "enable core clocks \n");
drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n",
- ctrl->stream_clks_on ? "on" : "off",
- ctrl->link_clks_on ? "on" : "off",
- ctrl->core_clks_on ? "on" : "off");
+ str_on_off(ctrl->stream_clks_on),
+ str_on_off(ctrl->link_clks_on),
+ str_on_off(ctrl->core_clks_on));
return 0;
}
@@ -1385,9 +1386,9 @@ void msm_dp_ctrl_core_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "disable core clocks \n");
drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n",
- ctrl->stream_clks_on ? "on" : "off",
- ctrl->link_clks_on ? "on" : "off",
- ctrl->core_clks_on ? "on" : "off");
+ str_on_off(ctrl->stream_clks_on),
+ str_on_off(ctrl->link_clks_on),
+ str_on_off(ctrl->core_clks_on));
}
static int msm_dp_ctrl_link_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl)
@@ -1416,9 +1417,9 @@ static int msm_dp_ctrl_link_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "enable link clocks\n");
drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n",
- ctrl->stream_clks_on ? "on" : "off",
- ctrl->link_clks_on ? "on" : "off",
- ctrl->core_clks_on ? "on" : "off");
+ str_on_off(ctrl->stream_clks_on),
+ str_on_off(ctrl->link_clks_on),
+ str_on_off(ctrl->core_clks_on));
return 0;
}
@@ -1435,9 +1436,9 @@ static void msm_dp_ctrl_link_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "disabled link clocks\n");
drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n",
- ctrl->stream_clks_on ? "on" : "off",
- ctrl->link_clks_on ? "on" : "off",
- ctrl->core_clks_on ? "on" : "off");
+ str_on_off(ctrl->stream_clks_on),
+ str_on_off(ctrl->link_clks_on),
+ str_on_off(ctrl->core_clks_on));
}
static int msm_dp_ctrl_enable_mainlink_clocks(struct msm_dp_ctrl_private *ctrl)
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 3898850739ab..bbc47d86ae9e 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -11,6 +11,7 @@
#include <linux/of_irq.h>
#include <linux/phy/phy.h>
#include <linux/delay.h>
+#include <linux/string_choices.h>
#include <drm/display/drm_dp_aux_bus.h>
#include <drm/drm_edid.h>
@@ -343,8 +344,7 @@ static int msm_dp_display_send_hpd_notification(struct msm_dp_display_private *d
{
if ((hpd && dp->msm_dp_display.link_ready) ||
(!hpd && !dp->msm_dp_display.link_ready)) {
- drm_dbg_dp(dp->drm_dev, "HPD already %s\n",
- (hpd ? "on" : "off"));
+ drm_dbg_dp(dp->drm_dev, "HPD already %s\n", str_on_off(hpd));
return 0;
}
@@ -367,6 +367,19 @@ static int msm_dp_display_send_hpd_notification(struct msm_dp_display_private *d
return 0;
}
+static void msm_dp_display_lttpr_init(struct msm_dp_display_private *dp)
+{
+ u8 lttpr_caps[DP_LTTPR_COMMON_CAP_SIZE];
+ int rc;
+
+ if (drm_dp_read_lttpr_common_caps(dp->aux, dp->panel->dpcd, lttpr_caps))
+ return;
+
+ rc = drm_dp_lttpr_init(dp->aux, drm_dp_lttpr_count(lttpr_caps));
+ if (rc)
+ DRM_ERROR("failed to set LTTPRs transparency mode, rc=%d\n", rc);
+}
+
static int msm_dp_display_process_hpd_high(struct msm_dp_display_private *dp)
{
struct drm_connector *connector = dp->msm_dp_display.connector;
@@ -377,6 +390,8 @@ static int msm_dp_display_process_hpd_high(struct msm_dp_display_private *dp)
if (rc)
goto end;
+ msm_dp_display_lttpr_init(dp);
+
msm_dp_link_process_request(dp->link);
if (!dp->msm_dp_display.is_edp)
@@ -1492,13 +1507,13 @@ int msm_dp_modeset_init(struct msm_dp *msm_dp_display, struct drm_device *dev,
}
void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
struct msm_dp *dp = msm_dp_bridge->msm_dp_display;
int rc = 0;
struct msm_dp_display_private *msm_dp_display;
- u32 state;
+ u32 hpd_state;
bool force_link_train = false;
msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
@@ -1517,8 +1532,8 @@ void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
return;
}
- state = msm_dp_display->hpd_state;
- if (state != ST_DISPLAY_OFF && state != ST_MAINLINK_READY) {
+ hpd_state = msm_dp_display->hpd_state;
+ if (hpd_state != ST_DISPLAY_OFF && hpd_state != ST_MAINLINK_READY) {
mutex_unlock(&msm_dp_display->event_mutex);
return;
}
@@ -1530,9 +1545,9 @@ void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
return;
}
- state = msm_dp_display->hpd_state;
+ hpd_state = msm_dp_display->hpd_state;
- if (state == ST_DISPLAY_OFF) {
+ if (hpd_state == ST_DISPLAY_OFF) {
msm_dp_display_host_phy_init(msm_dp_display);
force_link_train = true;
}
@@ -1553,7 +1568,7 @@ void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
}
void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
struct msm_dp *dp = msm_dp_bridge->msm_dp_display;
@@ -1565,11 +1580,11 @@ void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
}
void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
struct msm_dp *dp = msm_dp_bridge->msm_dp_display;
- u32 state;
+ u32 hpd_state;
struct msm_dp_display_private *msm_dp_display;
msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
@@ -1579,15 +1594,15 @@ void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
mutex_lock(&msm_dp_display->event_mutex);
- state = msm_dp_display->hpd_state;
- if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED)
+ hpd_state = msm_dp_display->hpd_state;
+ if (hpd_state != ST_DISCONNECT_PENDING && hpd_state != ST_CONNECTED)
drm_dbg_dp(dp->drm_dev, "type=%d wrong hpd_state=%d\n",
- dp->connector_type, state);
+ dp->connector_type, hpd_state);
msm_dp_display_disable(msm_dp_display);
- state = msm_dp_display->hpd_state;
- if (state == ST_DISCONNECT_PENDING) {
+ hpd_state = msm_dp_display->hpd_state;
+ if (hpd_state == ST_DISCONNECT_PENDING) {
/* completed disconnection */
msm_dp_display->hpd_state = ST_DISCONNECTED;
} else {
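
The new msm_dp_display_lttpr_init() above follows the common DRM helper flow for LT-tunable PHY repeaters: read the LTTPR common caps from DPCD, then let the core helper pick a transparency mode from the repeater count. A hedged standalone sketch of that flow (the aux channel and previously read sink DPCD are assumed to be valid):

/* Illustrative sketch, not part of the patch: LTTPR bring-up pattern. */
#include <drm/display/drm_dp_helper.h>

static void example_lttpr_setup(struct drm_dp_aux *aux,
				const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	u8 caps[DP_LTTPR_COMMON_CAP_SIZE];
	int ret;

	/* no repeaters reported, or the read failed: nothing to configure */
	if (drm_dp_read_lttpr_common_caps(aux, dpcd, caps))
		return;

	ret = drm_dp_lttpr_init(aux, drm_dp_lttpr_count(caps));
	if (ret)
		pr_warn("LTTPR transparency mode setup failed: %d\n", ret);
}
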
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 16b7913d1eef..cca57e56c906 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -3,6 +3,7 @@
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
+#include <linux/string_choices.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>
@@ -25,7 +26,7 @@ static enum drm_connector_status msm_dp_bridge_detect(struct drm_bridge *bridge)
dp = to_dp_bridge(bridge)->msm_dp_display;
drm_dbg_dp(dp->drm_dev, "link_ready = %s\n",
- (dp->link_ready) ? "true" : "false");
+ str_true_false(dp->link_ready));
return (dp->link_ready) ? connector_status_connected :
connector_status_disconnected;
@@ -41,7 +42,7 @@ static int msm_dp_bridge_atomic_check(struct drm_bridge *bridge,
dp = to_dp_bridge(bridge)->msm_dp_display;
drm_dbg_dp(dp->drm_dev, "link_ready = %s\n",
- (dp->link_ready) ? "true" : "false");
+ str_true_false(dp->link_ready));
/*
* There is no protection in the DRM framework to check if the display
@@ -137,9 +138,8 @@ static int msm_edp_bridge_atomic_check(struct drm_bridge *drm_bridge,
}
static void msm_edp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *state)
{
- struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
@@ -151,25 +151,24 @@ static void msm_edp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
* If the panel is in psr, just exit psr state and skip the full
* bridge enable sequence.
*/
- crtc = drm_atomic_get_new_crtc_for_encoder(atomic_state,
+ crtc = drm_atomic_get_new_crtc_for_encoder(state,
drm_bridge->encoder);
if (!crtc)
return;
- old_crtc_state = drm_atomic_get_old_crtc_state(atomic_state, crtc);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
if (old_crtc_state && old_crtc_state->self_refresh_active) {
msm_dp_display_set_psr(dp, false);
return;
}
- msm_dp_bridge_atomic_enable(drm_bridge, old_bridge_state);
+ msm_dp_bridge_atomic_enable(drm_bridge, state);
}
static void msm_edp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *atomic_state)
{
- struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state = NULL, *old_crtc_state = NULL;
struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
@@ -208,13 +207,12 @@ static void msm_edp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
}
out:
- msm_dp_bridge_atomic_disable(drm_bridge, old_bridge_state);
+ msm_dp_bridge_atomic_disable(drm_bridge, atomic_state);
}
static void msm_edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state)
+ struct drm_atomic_state *atomic_state)
{
- struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state = NULL;
@@ -233,7 +231,7 @@ static void msm_edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
if (new_crtc_state->self_refresh_active)
return;
- msm_dp_bridge_atomic_post_disable(drm_bridge, old_bridge_state);
+ msm_dp_bridge_atomic_post_disable(drm_bridge, atomic_state);
}
/**
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
index 8eae2f74839f..d8c9b905f8bf 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.h
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -26,11 +26,11 @@ int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev,
bool yuv_supported);
void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state);
+ struct drm_atomic_state *state);
void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state);
+ struct drm_atomic_state *state);
void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
- struct drm_bridge_state *old_bridge_state);
+ struct drm_atomic_state *state);
enum drm_mode_status msm_dp_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 007311c21fda..4d75529c0e85 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -179,18 +179,18 @@ struct msm_dsi_host {
int irq;
};
-
static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
{
return readl(msm_host->ctrl_base + reg);
}
+
static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
{
writel(data, msm_host->ctrl_base + reg);
}
-static const struct msm_dsi_cfg_handler *dsi_get_config(
- struct msm_dsi_host *msm_host)
+static const struct msm_dsi_cfg_handler *
+dsi_get_config(struct msm_dsi_host *msm_host)
{
const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
struct device *dev = &msm_host->pdev->dev;
@@ -200,7 +200,8 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
ahb_clk = msm_clk_get(msm_host->pdev, "iface");
if (IS_ERR(ahb_clk)) {
- pr_err("%s: cannot get interface clock\n", __func__);
+ dev_err_probe(dev, PTR_ERR(ahb_clk), "%s: cannot get interface clock\n",
+ __func__);
goto exit;
}
@@ -208,13 +209,13 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
ret = clk_prepare_enable(ahb_clk);
if (ret) {
- pr_err("%s: unable to enable ahb_clk\n", __func__);
+ dev_err_probe(dev, ret, "%s: unable to enable ahb_clk\n", __func__);
goto runtime_put;
}
ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
if (ret) {
- pr_err("%s: Invalid version\n", __func__);
+ dev_err_probe(dev, ret, "%s: Invalid version\n", __func__);
goto disable_clks;
}
@@ -281,42 +282,31 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
msm_host->num_bus_clks = cfg->num_bus_clks;
ret = devm_clk_bulk_get(&pdev->dev, msm_host->num_bus_clks, msm_host->bus_clks);
- if (ret < 0) {
- dev_err(&pdev->dev, "Unable to get clocks, ret = %d\n", ret);
- goto exit;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Unable to get clocks\n");
/* get link and source clocks */
msm_host->byte_clk = msm_clk_get(pdev, "byte");
- if (IS_ERR(msm_host->byte_clk)) {
- ret = PTR_ERR(msm_host->byte_clk);
- pr_err("%s: can't find dsi_byte clock. ret=%d\n",
- __func__, ret);
- msm_host->byte_clk = NULL;
- goto exit;
- }
+ if (IS_ERR(msm_host->byte_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->byte_clk),
+ "%s: can't find dsi_byte clock\n",
+ __func__);
msm_host->pixel_clk = msm_clk_get(pdev, "pixel");
- if (IS_ERR(msm_host->pixel_clk)) {
- ret = PTR_ERR(msm_host->pixel_clk);
- pr_err("%s: can't find dsi_pixel clock. ret=%d\n",
- __func__, ret);
- msm_host->pixel_clk = NULL;
- goto exit;
- }
+ if (IS_ERR(msm_host->pixel_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->pixel_clk),
+ "%s: can't find dsi_pixel clock\n",
+ __func__);
msm_host->esc_clk = msm_clk_get(pdev, "core");
- if (IS_ERR(msm_host->esc_clk)) {
- ret = PTR_ERR(msm_host->esc_clk);
- pr_err("%s: can't find dsi_esc clock. ret=%d\n",
- __func__, ret);
- msm_host->esc_clk = NULL;
- goto exit;
- }
+ if (IS_ERR(msm_host->esc_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->esc_clk),
+ "%s: can't find dsi_esc clock\n",
+ __func__);
if (cfg_hnd->ops->clk_init_ver)
ret = cfg_hnd->ops->clk_init_ver(msm_host);
-exit:
+
return ret;
}
@@ -380,7 +370,6 @@ int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
return 0;
}
-
int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
int ret;
@@ -598,7 +587,6 @@ static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
DBG("pclk=%lu, bclk=%lu", msm_host->pixel_clk_rate,
msm_host->byte_clk_rate);
-
}
int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
@@ -687,8 +675,8 @@ static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
return NON_BURST_SYNCH_EVENT;
}
-static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
- const enum mipi_dsi_pixel_format mipi_fmt)
+static inline enum dsi_vid_dst_format
+dsi_get_vid_fmt(const enum mipi_dsi_pixel_format mipi_fmt)
{
switch (mipi_fmt) {
case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888;
@@ -699,8 +687,8 @@ static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
}
}
-static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
- const enum mipi_dsi_pixel_format mipi_fmt)
+static inline enum dsi_cmd_dst_format
+dsi_get_cmd_fmt(const enum mipi_dsi_pixel_format mipi_fmt)
{
switch (mipi_fmt) {
case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
@@ -846,7 +834,7 @@ static void dsi_ctrl_enable(struct msm_dsi_host *msm_host,
dsi_write(msm_host, REG_DSI_CPHY_MODE_CTRL, BIT(0));
}
-static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode, u32 hdisplay)
+static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode)
{
struct drm_dsc_config *dsc = msm_host->dsc;
u32 reg, reg_ctrl, reg_ctrl2;
@@ -858,7 +846,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
/* first calculate dsc parameters and then program
* compress mode registers
*/
- slice_per_intf = msm_dsc_get_slices_per_intf(dsc, hdisplay);
+ slice_per_intf = dsc->slice_count;
total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf;
bytes_per_pkt = dsc->slice_chunk_size; /* * slice_per_pkt; */
@@ -991,7 +979,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
if (msm_host->dsc)
- dsi_update_dsc_timing(msm_host, false, mode->hdisplay);
+ dsi_update_dsc_timing(msm_host, false);
dsi_write(msm_host, REG_DSI_ACTIVE_H,
DSI_ACTIVE_H_START(ha_start) |
@@ -1012,7 +1000,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
} else { /* command mode */
if (msm_host->dsc)
- dsi_update_dsc_timing(msm_host, true, mode->hdisplay);
+ dsi_update_dsc_timing(msm_host, true);
/* image data and 1 byte write_memory_start cmd */
if (!msm_host->dsc)
@@ -1292,14 +1280,15 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
{
u8 *data = msg->rx_buf;
+
if (data && (msg->rx_len >= 1)) {
*data = buf[1]; /* strip out dcs type */
return 1;
- } else {
- pr_err("%s: read data does not match with rx_buf len %zu\n",
- __func__, msg->rx_len);
- return -EINVAL;
}
+
+ pr_err("%s: read data does not match with rx_buf len %zu\n",
+ __func__, msg->rx_len);
+ return -EINVAL;
}
/*
@@ -1308,15 +1297,16 @@ static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
{
u8 *data = msg->rx_buf;
+
if (data && (msg->rx_len >= 2)) {
data[0] = buf[1]; /* strip out dcs type */
data[1] = buf[2];
return 2;
- } else {
- pr_err("%s: read data does not match with rx_buf len %zu\n",
- __func__, msg->rx_len);
- return -EINVAL;
}
+
+ pr_err("%s: read data does not match with rx_buf len %zu\n",
+ __func__, msg->rx_len);
+ return -EINVAL;
}
static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
@@ -1376,8 +1366,9 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
ret = -ETIMEDOUT;
else
ret = len;
- } else
+ } else {
ret = len;
+ }
return ret;
}
@@ -1445,11 +1436,12 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
return len;
}
- /* for video mode, do not send cmds more than
- * one pixel line, since it only transmit it
- * during BLLP.
- */
- /* TODO: if the command is sent in LP mode, the bit rate is only
+ /*
+	 * For video mode, do not send cmds longer than
+	 * one pixel line, since they are only transmitted
+	 * during BLLP.
+ *
+ * TODO: if the command is sent in LP mode, the bit rate is only
* half of esc clk rate. In this case, if the video is already
* actively streaming, we need to check more carefully if the
* command can be fit into one BLLP.
@@ -1767,8 +1759,20 @@ static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc
return -EINVAL;
}
- if (dsc->bits_per_component != 8) {
- DRM_DEV_ERROR(&msm_host->pdev->dev, "DSI does not support bits_per_component != 8 yet\n");
+ switch (dsc->bits_per_component) {
+ case 8:
+ case 10:
+ case 12:
+ /*
+ * Only 8, 10, and 12 bpc are supported for DSC 1.1 block.
+ * If additional bpc values need to be supported, update
+	 * this guard with the appropriate DSC version verification.
+ */
+ break;
+ default:
+ DRM_DEV_ERROR(&msm_host->pdev->dev,
+ "Unsupported bits_per_component value: %d\n",
+ dsc->bits_per_component);
return -EOPNOTSUPP;
}
@@ -1779,7 +1783,7 @@ static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc
drm_dsc_set_const_params(dsc);
drm_dsc_set_rc_buf_thresh(dsc);
- /* handle only bpp = bpc = 8, pre-SCR panels */
+ /* DPU supports only pre-SCR panels */
ret = drm_dsc_setup_rc_params(dsc, DRM_DSC_1_1_PRE_SCR);
if (ret) {
DRM_DEV_ERROR(&msm_host->pdev->dev, "could not find DSC RC parameters\n");
@@ -1827,8 +1831,15 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
__func__, ret);
goto err;
}
- if (!ret)
+ if (!ret) {
msm_dsi->te_source = devm_kstrdup(dev, te_source, GFP_KERNEL);
+ if (!msm_dsi->te_source) {
+ DRM_DEV_ERROR(dev, "%s: failed to allocate te_source\n",
+ __func__);
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
ret = 0;
if (of_property_present(np, "syscon-sfpb")) {
@@ -1874,39 +1885,35 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
int ret;
msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
- if (!msm_host) {
+ if (!msm_host)
return -ENOMEM;
- }
msm_host->pdev = pdev;
msm_dsi->host = &msm_host->base;
ret = dsi_host_parse_dt(msm_host);
- if (ret) {
- pr_err("%s: failed to parse dt\n", __func__);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "%s: failed to parse dt\n",
+ __func__);
msm_host->ctrl_base = msm_ioremap_size(pdev, "dsi_ctrl", &msm_host->ctrl_size);
- if (IS_ERR(msm_host->ctrl_base)) {
- pr_err("%s: unable to map Dsi ctrl base\n", __func__);
- return PTR_ERR(msm_host->ctrl_base);
- }
+ if (IS_ERR(msm_host->ctrl_base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->ctrl_base),
+ "%s: unable to map Dsi ctrl base\n", __func__);
pm_runtime_enable(&pdev->dev);
msm_host->cfg_hnd = dsi_get_config(msm_host);
- if (!msm_host->cfg_hnd) {
- pr_err("%s: get config failed\n", __func__);
- return -EINVAL;
- }
+ if (!msm_host->cfg_hnd)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "%s: get config failed\n", __func__);
cfg = msm_host->cfg_hnd->cfg;
msm_host->id = dsi_host_get_id(msm_host);
- if (msm_host->id < 0) {
- pr_err("%s: unable to identify DSI host index\n", __func__);
- return msm_host->id;
- }
+ if (msm_host->id < 0)
+ return dev_err_probe(&pdev->dev, msm_host->id,
+ "%s: unable to identify DSI host index\n",
+ __func__);
/* fixup base address by io offset */
msm_host->ctrl_base += cfg->io_offset;
@@ -1918,42 +1925,32 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
return ret;
ret = dsi_clk_init(msm_host);
- if (ret) {
- pr_err("%s: unable to initialize dsi clks\n", __func__);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "%s: unable to initialize dsi clks\n", __func__);
msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
- if (!msm_host->rx_buf) {
- pr_err("%s: alloc rx temp buf failed\n", __func__);
+ if (!msm_host->rx_buf)
return -ENOMEM;
- }
ret = devm_pm_opp_set_clkname(&pdev->dev, "byte");
if (ret)
return ret;
/* OPP table is optional */
ret = devm_pm_opp_of_add_table(&pdev->dev);
- if (ret && ret != -ENODEV) {
- dev_err(&pdev->dev, "invalid OPP table in device tree\n");
- return ret;
- }
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(&pdev->dev, ret, "invalid OPP table in device tree\n");
msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (!msm_host->irq) {
- dev_err(&pdev->dev, "failed to get irq\n");
- return -EINVAL;
- }
+ if (!msm_host->irq)
+ return dev_err_probe(&pdev->dev, -EINVAL, "failed to get irq\n");
/* do not autoenable, will be enabled later */
ret = devm_request_irq(&pdev->dev, msm_host->irq, dsi_host_irq,
IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN,
"dsi_isr", msm_host);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
- msm_host->irq, ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "failed to request IRQ%u\n",
+ msm_host->irq);
init_completion(&msm_host->dma_comp);
init_completion(&msm_host->video_comp);
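
Most of the error paths in dsi_clk_init() and msm_dsi_host_init() above collapse into the dev_err_probe() idiom: it logs the failure (quietly for -EPROBE_DEFER) and returns the error code in a single statement. A minimal sketch of the pattern; the "core" clock name and helper are hypothetical.

/* Illustrative sketch, not part of the patch: dev_err_probe() on a probe path. */
#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_get_clk(struct platform_device *pdev, struct clk **clk)
{
	*clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(*clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(*clk),
				     "can't find core clock\n");

	return 0;
}
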
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index a210b7c9e5ca..4fabb01345aa 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -74,17 +74,35 @@ static int dsi_mgr_setup_components(int id)
int ret;
if (!IS_BONDED_DSI()) {
+ /*
+ * Set the usecase before calling msm_dsi_host_register(), which would
+ * already program the PLL source mux based on a default usecase.
+ */
+ msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
+ msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
+
ret = msm_dsi_host_register(msm_dsi->host);
if (ret)
return ret;
-
- msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
- msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
} else if (other_dsi) {
struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ?
msm_dsi : other_dsi;
struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ?
other_dsi : msm_dsi;
+
+ /*
+ * PLL0 is to drive both DSI link clocks in bonded DSI mode.
+ *
+ * Set the usecase before calling msm_dsi_host_register(), which would
+ * already program the PLL source mux based on a default usecase.
+ */
+ msm_dsi_phy_set_usecase(clk_master_dsi->phy,
+ MSM_DSI_PHY_MASTER);
+ msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
+ MSM_DSI_PHY_SLAVE);
+ msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
+ msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy);
+
/* Register slave host first, so that slave DSI device
* has a chance to probe, and do not block the master
* DSI device's probe.
@@ -98,14 +116,6 @@ static int dsi_mgr_setup_components(int id)
ret = msm_dsi_host_register(master_link_dsi->host);
if (ret)
return ret;
-
- /* PLL0 is to drive both 2 DSI link clocks in bonded DSI mode. */
- msm_dsi_phy_set_usecase(clk_master_dsi->phy,
- MSM_DSI_PHY_MASTER);
- msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
- MSM_DSI_PHY_SLAVE);
- msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
- msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy);
}
return 0;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 8985818bb2e0..1925418d9999 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -6,6 +6,7 @@
#ifndef __DSI_PHY_H__
#define __DSI_PHY_H__
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
@@ -84,9 +85,7 @@ struct msm_dsi_dphy_timing {
u8 hs_halfbyte_en_ckln;
};
-#define DSI_BYTE_PLL_CLK 0
-#define DSI_PIXEL_PLL_CLK 1
-#define NUM_PROVIDED_CLKS 2
+#define NUM_PROVIDED_CLKS (DSI_PIXEL_PLL_CLK + 1)
#define DSI_LANE_MAX 5
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index 677c62571811..9812b4d69197 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -3,6 +3,7 @@
* Copyright (c) 2018, The Linux Foundation
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/iopoll.h>
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 2c3cbe0f2870..3a1c8ece6657 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -3,6 +3,7 @@
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 1383e3a4e050..90348a2af3e9 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -3,6 +3,7 @@
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index 5311ab7f3c70..f3643320ff2f 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -3,6 +3,7 @@
* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 798168180c1a..a92decbee5b5 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -3,6 +3,8 @@
* Copyright (c) 2018, The Linux Foundation
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/iopoll.h>
@@ -305,7 +307,7 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *confi
writel(pll->phy->cphy_mode ? 0x00 : 0x10,
base + REG_DSI_7nm_PHY_PLL_CMODE_1);
writel(config->pll_clock_inverters,
- base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS);
+ base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_1);
}
static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -572,11 +574,11 @@ static void dsi_7nm_pll_save_state(struct msm_dsi_phy *phy)
cached->pll_out_div &= 0x3;
cmn_clk_cfg0 = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
- cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
- cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
+ cached->bit_clk_div = FIELD_GET(DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK, cmn_clk_cfg0);
+ cached->pix_clk_div = FIELD_GET(DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK, cmn_clk_cfg0);
cmn_clk_cfg1 = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
- cached->pll_mux = cmn_clk_cfg1 & 0x3;
+ cached->pll_mux = FIELD_GET(DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL__MASK, cmn_clk_cfg1);
DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
pll_7nm->phy->id, cached->pll_out_div, cached->bit_clk_div,
@@ -598,7 +600,8 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
dsi_pll_cmn_clk_cfg0_write(pll_7nm,
DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(cached->bit_clk_div) |
DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(cached->pix_clk_div));
- dsi_pll_cmn_clk_cfg1_update(pll_7nm, 0x3, cached->pll_mux);
+ dsi_pll_cmn_clk_cfg1_update(pll_7nm, DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL__MASK,
+ cached->pll_mux);
ret = dsi_pll_7nm_vco_set_rate(phy->vco_hw,
pll_7nm->vco_current_rate,
@@ -736,11 +739,9 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
* don't register a pclk_mux clock and just use post_out_div instead
*/
if (pll_7nm->phy->cphy_mode) {
- u32 data;
-
- data = readl(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
- writel(data | 3, pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
-
+ dsi_pll_cmn_clk_cfg1_update(pll_7nm,
+ DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL__MASK,
+ DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL(3));
phy_pll_out_dsi_parent = pll_post_out_div;
} else {
snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_7nm->phy->id);
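
The save/restore hunks above swap open-coded shift-and-mask arithmetic for FIELD_GET() against the generated register masks. The snippet below shows the same idiom in isolation; the mask names are hypothetical stand-ins for the DIV_CTRL_3_0/DIV_CTRL_7_4 fields.

/* Illustrative sketch, not part of the patch: FIELD_GET() over packed fields. */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_DIV_CTRL_3_0	GENMASK(3, 0)
#define EXAMPLE_DIV_CTRL_7_4	GENMASK(7, 4)

static void example_split_cfg0(u32 cfg0, u8 *bit_div, u8 *pix_div)
{
	/* FIELD_GET() masks and shifts in one step: no manual "& 0xf" / ">> 4" */
	*bit_div = FIELD_GET(EXAMPLE_DIV_CTRL_3_0, cfg0);
	*pix_div = FIELD_GET(EXAMPLE_DIV_CTRL_7_4, cfg0);
}
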
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 37b3809c6bdd..248541ff4492 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -12,8 +12,8 @@
#include <drm/drm_bridge_connector.h>
#include <drm/drm_of.h>
+#include <drm/display/drm_hdmi_state_helper.h>
-#include <sound/hdmi-codec.h>
#include "hdmi.h"
void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -24,7 +24,7 @@ void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
spin_lock_irqsave(&hdmi->reg_lock, flags);
if (power_on) {
ctrl |= HDMI_CTRL_ENABLE;
- if (!hdmi->hdmi_mode) {
+ if (!hdmi->connector->display_info.is_hdmi) {
ctrl |= HDMI_CTRL_HDMI;
hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
ctrl &= ~HDMI_CTRL_HDMI;
@@ -165,8 +165,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
hdmi->dev = dev;
hdmi->encoder = encoder;
- hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
-
ret = msm_hdmi_bridge_init(hdmi);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to create HDMI bridge: %d\n", ret);
@@ -246,111 +244,6 @@ static const struct hdmi_platform_config hdmi_tx_8974_config = {
.hpd_freq = hpd_clk_freq_8x74,
};
-/*
- * HDMI audio codec callbacks
- */
-static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
-{
- struct hdmi *hdmi = dev_get_drvdata(dev);
- unsigned int chan;
- unsigned int channel_allocation = 0;
- unsigned int rate;
- unsigned int level_shift = 0; /* 0dB */
- bool down_mix = false;
-
- DRM_DEV_DEBUG(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
- params->sample_width, params->cea.channels);
-
- switch (params->cea.channels) {
- case 2:
- /* FR and FL speakers */
- channel_allocation = 0;
- chan = MSM_HDMI_AUDIO_CHANNEL_2;
- break;
- case 4:
- /* FC, LFE, FR and FL speakers */
- channel_allocation = 0x3;
- chan = MSM_HDMI_AUDIO_CHANNEL_4;
- break;
- case 6:
- /* RR, RL, FC, LFE, FR and FL speakers */
- channel_allocation = 0x0B;
- chan = MSM_HDMI_AUDIO_CHANNEL_6;
- break;
- case 8:
- /* FRC, FLC, RR, RL, FC, LFE, FR and FL speakers */
- channel_allocation = 0x1F;
- chan = MSM_HDMI_AUDIO_CHANNEL_8;
- break;
- default:
- return -EINVAL;
- }
-
- switch (params->sample_rate) {
- case 32000:
- rate = HDMI_SAMPLE_RATE_32KHZ;
- break;
- case 44100:
- rate = HDMI_SAMPLE_RATE_44_1KHZ;
- break;
- case 48000:
- rate = HDMI_SAMPLE_RATE_48KHZ;
- break;
- case 88200:
- rate = HDMI_SAMPLE_RATE_88_2KHZ;
- break;
- case 96000:
- rate = HDMI_SAMPLE_RATE_96KHZ;
- break;
- case 176400:
- rate = HDMI_SAMPLE_RATE_176_4KHZ;
- break;
- case 192000:
- rate = HDMI_SAMPLE_RATE_192KHZ;
- break;
- default:
- DRM_DEV_ERROR(dev, "rate[%d] not supported!\n",
- params->sample_rate);
- return -EINVAL;
- }
-
- msm_hdmi_audio_set_sample_rate(hdmi, rate);
- msm_hdmi_audio_info_setup(hdmi, 1, chan, channel_allocation,
- level_shift, down_mix);
-
- return 0;
-}
-
-static void msm_hdmi_audio_shutdown(struct device *dev, void *data)
-{
- struct hdmi *hdmi = dev_get_drvdata(dev);
-
- msm_hdmi_audio_info_setup(hdmi, 0, 0, 0, 0, 0);
-}
-
-static const struct hdmi_codec_ops msm_hdmi_audio_codec_ops = {
- .hw_params = msm_hdmi_audio_hw_params,
- .audio_shutdown = msm_hdmi_audio_shutdown,
-};
-
-static struct hdmi_codec_pdata codec_data = {
- .ops = &msm_hdmi_audio_codec_ops,
- .max_i2s_channels = 8,
- .i2s = 1,
-};
-
-static int msm_hdmi_register_audio_driver(struct hdmi *hdmi, struct device *dev)
-{
- hdmi->audio_pdev = platform_device_register_data(dev,
- HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &codec_data,
- sizeof(codec_data));
- return PTR_ERR_OR_ZERO(hdmi->audio_pdev);
-}
-
static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct msm_drm_private *priv = dev_get_drvdata(master);
@@ -362,12 +255,6 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
return err;
priv->hdmi = hdmi;
- err = msm_hdmi_register_audio_driver(hdmi, dev);
- if (err) {
- DRM_ERROR("Failed to attach an audio codec %d\n", err);
- hdmi->audio_pdev = NULL;
- }
-
return 0;
}
@@ -377,9 +264,6 @@ static void msm_hdmi_unbind(struct device *dev, struct device *master,
struct msm_drm_private *priv = dev_get_drvdata(master);
if (priv->hdmi) {
- if (priv->hdmi->audio_pdev)
- platform_device_unregister(priv->hdmi->audio_pdev);
-
if (priv->hdmi->bridge)
msm_hdmi_hpd_disable(priv->hdmi);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index a62d2aedfbb7..a5f481c39277 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -24,8 +24,8 @@ struct hdmi_platform_config;
struct hdmi_audio {
bool enabled;
- struct hdmi_audio_infoframe infoframe;
int rate;
+ int channels;
};
struct hdmi_hdcp_ctrl;
@@ -33,7 +33,6 @@ struct hdmi_hdcp_ctrl;
struct hdmi {
struct drm_device *dev;
struct platform_device *pdev;
- struct platform_device *audio_pdev;
const struct hdmi_platform_config *config;
@@ -67,8 +66,6 @@ struct hdmi {
/* the encoder we are hooked to (outside of hdmi block) */
struct drm_encoder *encoder;
- bool hdmi_mode; /* are we in hdmi mode? */
-
int irq;
struct workqueue_struct *workq;
@@ -207,26 +204,16 @@ static inline int msm_hdmi_pll_8998_init(struct platform_device *pdev)
/*
* audio:
*/
-/* Supported HDMI Audio channels and rates */
-#define MSM_HDMI_AUDIO_CHANNEL_2 0
-#define MSM_HDMI_AUDIO_CHANNEL_4 1
-#define MSM_HDMI_AUDIO_CHANNEL_6 2
-#define MSM_HDMI_AUDIO_CHANNEL_8 3
-
-#define HDMI_SAMPLE_RATE_32KHZ 0
-#define HDMI_SAMPLE_RATE_44_1KHZ 1
-#define HDMI_SAMPLE_RATE_48KHZ 2
-#define HDMI_SAMPLE_RATE_88_2KHZ 3
-#define HDMI_SAMPLE_RATE_96KHZ 4
-#define HDMI_SAMPLE_RATE_176_4KHZ 5
-#define HDMI_SAMPLE_RATE_192KHZ 6
+struct hdmi_codec_daifmt;
+struct hdmi_codec_params;
int msm_hdmi_audio_update(struct hdmi *hdmi);
-int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
- uint32_t num_of_channels, uint32_t channel_allocation,
- uint32_t level_shift, bool down_mix);
-void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate);
-
+int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params);
+void msm_hdmi_bridge_audio_shutdown(struct drm_connector *connector,
+ struct drm_bridge *bridge);
/*
* hdmi bridge:
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index 4c2058c4adc1..8bb975e82c17 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -4,11 +4,13 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <drm/display/drm_hdmi_state_helper.h>
+
#include <linux/hdmi.h>
-#include "hdmi.h"
-/* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */
-static int nchannels[] = { 2, 4, 6, 8 };
+#include <sound/hdmi-codec.h>
+
+#include "hdmi.h"
/* Supported HDMI Audio sample rates */
#define MSM_HDMI_SAMPLE_RATE_32KHZ 0
@@ -74,16 +76,17 @@ static const struct hdmi_msm_audio_arcs *get_arcs(unsigned long int pixclock)
int msm_hdmi_audio_update(struct hdmi *hdmi)
{
struct hdmi_audio *audio = &hdmi->audio;
- struct hdmi_audio_infoframe *info = &audio->infoframe;
const struct hdmi_msm_audio_arcs *arcs = NULL;
bool enabled = audio->enabled;
uint32_t acr_pkt_ctrl, vbi_pkt_ctrl, aud_pkt_ctrl;
- uint32_t infofrm_ctrl, audio_config;
+ uint32_t audio_config;
+
+ if (!hdmi->connector->display_info.is_hdmi)
+ return -EINVAL;
+
+ DBG("audio: enabled=%d, channels=%d, rate=%d",
+ audio->enabled, audio->channels, audio->rate);
- DBG("audio: enabled=%d, channels=%d, channel_allocation=0x%x, "
- "level_shift_value=%d, downmix_inhibit=%d, rate=%d",
- audio->enabled, info->channels, info->channel_allocation,
- info->level_shift_value, info->downmix_inhibit, audio->rate);
DBG("video: power_on=%d, pixclock=%lu", hdmi->power_on, hdmi->pixclock);
if (enabled && !(hdmi->power_on && hdmi->pixclock)) {
@@ -104,7 +107,6 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
acr_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_ACR_PKT_CTRL);
vbi_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
aud_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_AUDIO_PKT_CTRL1);
- infofrm_ctrl = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
audio_config = hdmi_read(hdmi, REG_HDMI_AUDIO_CFG);
/* Clear N/CTS selection bits */
@@ -113,7 +115,6 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
if (enabled) {
uint32_t n, cts, multiplier;
enum hdmi_acr_cts select;
- uint8_t buf[14];
n = arcs->lut[audio->rate].n;
cts = arcs->lut[audio->rate].cts;
@@ -155,20 +156,12 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
HDMI_ACR_1_N(n));
hdmi_write(hdmi, REG_HDMI_AUDIO_PKT_CTRL2,
- COND(info->channels != 2, HDMI_AUDIO_PKT_CTRL2_LAYOUT) |
+ COND(audio->channels != 2, HDMI_AUDIO_PKT_CTRL2_LAYOUT) |
HDMI_AUDIO_PKT_CTRL2_OVERRIDE);
acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_CONT;
acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SEND;
- /* configure infoframe: */
- hdmi_audio_infoframe_pack(info, buf, sizeof(buf));
- hdmi_write(hdmi, REG_HDMI_AUDIO_INFO0,
- (buf[3] << 0) | (buf[4] << 8) |
- (buf[5] << 16) | (buf[6] << 24));
- hdmi_write(hdmi, REG_HDMI_AUDIO_INFO1,
- (buf[7] << 0) | (buf[8] << 8));
-
hdmi_write(hdmi, REG_HDMI_GC, 0);
vbi_pkt_ctrl |= HDMI_VBI_PKT_CTRL_GC_ENABLE;
@@ -176,11 +169,6 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
aud_pkt_ctrl |= HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND;
- infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND;
- infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT;
- infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE;
- infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
-
audio_config &= ~HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK;
audio_config |= HDMI_AUDIO_CFG_FIFO_WATERMARK(4);
audio_config |= HDMI_AUDIO_CFG_ENGINE_ENABLE;
@@ -190,17 +178,12 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_ENABLE;
vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME;
aud_pkt_ctrl &= ~HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND;
- infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND;
- infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT;
- infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE;
- infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
audio_config &= ~HDMI_AUDIO_CFG_ENGINE_ENABLE;
}
hdmi_write(hdmi, REG_HDMI_ACR_PKT_CTRL, acr_pkt_ctrl);
hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_ctrl);
hdmi_write(hdmi, REG_HDMI_AUDIO_PKT_CTRL1, aud_pkt_ctrl);
- hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, infofrm_ctrl);
hdmi_write(hdmi, REG_HDMI_AUD_INT,
COND(enabled, HDMI_AUD_INT_AUD_FIFO_URUN_INT) |
@@ -214,41 +197,72 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
return 0;
}
-int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
- uint32_t num_of_channels, uint32_t channel_allocation,
- uint32_t level_shift, bool down_mix)
+int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
{
- struct hdmi_audio *audio;
-
- if (!hdmi)
- return -ENXIO;
-
- audio = &hdmi->audio;
-
- if (num_of_channels >= ARRAY_SIZE(nchannels))
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ unsigned int rate;
+ int ret;
+
+ drm_dbg_driver(bridge->dev, "%u Hz, %d bit, %d channels\n",
+ params->sample_rate,
+ params->sample_width,
+ params->cea.channels);
+
+ switch (params->sample_rate) {
+ case 32000:
+ rate = MSM_HDMI_SAMPLE_RATE_32KHZ;
+ break;
+ case 44100:
+ rate = MSM_HDMI_SAMPLE_RATE_44_1KHZ;
+ break;
+ case 48000:
+ rate = MSM_HDMI_SAMPLE_RATE_48KHZ;
+ break;
+ case 88200:
+ rate = MSM_HDMI_SAMPLE_RATE_88_2KHZ;
+ break;
+ case 96000:
+ rate = MSM_HDMI_SAMPLE_RATE_96KHZ;
+ break;
+ case 176400:
+ rate = MSM_HDMI_SAMPLE_RATE_176_4KHZ;
+ break;
+ case 192000:
+ rate = MSM_HDMI_SAMPLE_RATE_192KHZ;
+ break;
+ default:
+ drm_err(bridge->dev, "rate[%d] not supported!\n",
+ params->sample_rate);
return -EINVAL;
+ }
+
+ ret = drm_atomic_helper_connector_hdmi_update_audio_infoframe(connector,
+ &params->cea);
+ if (ret)
+ return ret;
- audio->enabled = enabled;
- audio->infoframe.channels = nchannels[num_of_channels];
- audio->infoframe.channel_allocation = channel_allocation;
- audio->infoframe.level_shift_value = level_shift;
- audio->infoframe.downmix_inhibit = down_mix;
+ hdmi->audio.rate = rate;
+ hdmi->audio.channels = params->cea.channels;
+ hdmi->audio.enabled = true;
return msm_hdmi_audio_update(hdmi);
}
-void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate)
+void msm_hdmi_bridge_audio_shutdown(struct drm_connector *connector,
+ struct drm_bridge *bridge)
{
- struct hdmi_audio *audio;
-
- if (!hdmi)
- return;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
- audio = &hdmi->audio;
+ drm_atomic_helper_connector_hdmi_clear_audio_infoframe(connector);
- if ((rate < 0) || (rate >= MSM_HDMI_SAMPLE_RATE_MAX))
- return;
+ hdmi->audio.rate = 0;
+ hdmi->audio.channels = 2;
+ hdmi->audio.enabled = false;
- audio->rate = rate;
msm_hdmi_audio_update(hdmi);
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 4a5b5112227f..1456354c8af4 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -7,6 +7,8 @@
#include <linux/delay.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_edid.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_hdmi_state_helper.h>
#include "msm_kms.h"
#include "hdmi.h"
@@ -67,24 +69,20 @@ static void power_off(struct drm_bridge *bridge)
}
#define AVI_IFRAME_LINE_NUMBER 1
+#define SPD_IFRAME_LINE_NUMBER 1
+#define VENSPEC_IFRAME_LINE_NUMBER 3
-static void msm_hdmi_config_avi_infoframe(struct hdmi *hdmi)
+static int msm_hdmi_config_avi_infoframe(struct hdmi *hdmi,
+ const u8 *buffer, size_t len)
{
- struct drm_crtc *crtc = hdmi->encoder->crtc;
- const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
- union hdmi_infoframe frame;
- u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
+ u32 buf[4] = {};
u32 val;
- int len;
+ int i;
- drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- hdmi->connector, mode);
-
- len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer));
- if (len < 0) {
+ if (len != HDMI_INFOFRAME_SIZE(AVI) || len - 3 > sizeof(buf)) {
DRM_DEV_ERROR(&hdmi->pdev->dev,
"failed to configure avi infoframe\n");
- return;
+ return -EINVAL;
}
/*
@@ -93,57 +91,245 @@ static void msm_hdmi_config_avi_infoframe(struct hdmi *hdmi)
* written to the LSB byte of AVI_INFO0 and the version is written to
* the third byte from the LSB of AVI_INFO3
*/
- hdmi_write(hdmi, REG_HDMI_AVI_INFO(0),
+ memcpy(buf, &buffer[3], len - 3);
+
+ buf[3] |= buffer[1] << 24;
+
+ for (i = 0; i < ARRAY_SIZE(buf); i++)
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(i), buf[i]);
+
+	val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
+ val |= HDMI_INFOFRAME_CTRL0_AVI_SEND |
+ HDMI_INFOFRAME_CTRL0_AVI_CONT;
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, val);
+
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+ val &= ~HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK;
+ val |= HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE(AVI_IFRAME_LINE_NUMBER);
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val);
+
+ return 0;
+}
+
+static int msm_hdmi_config_audio_infoframe(struct hdmi *hdmi,
+ const u8 *buffer, size_t len)
+{
+ u32 val;
+
+ if (len != HDMI_INFOFRAME_SIZE(AUDIO)) {
+ DRM_DEV_ERROR(&hdmi->pdev->dev,
+ "failed to configure audio infoframe\n");
+ return -EINVAL;
+ }
+
+ hdmi_write(hdmi, REG_HDMI_AUDIO_INFO0,
buffer[3] |
buffer[4] << 8 |
buffer[5] << 16 |
buffer[6] << 24);
- hdmi_write(hdmi, REG_HDMI_AVI_INFO(1),
+ hdmi_write(hdmi, REG_HDMI_AUDIO_INFO1,
buffer[7] |
buffer[8] << 8 |
buffer[9] << 16 |
buffer[10] << 24);
- hdmi_write(hdmi, REG_HDMI_AVI_INFO(2),
- buffer[11] |
- buffer[12] << 8 |
- buffer[13] << 16 |
- buffer[14] << 24);
+	val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
+ val |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, val);
- hdmi_write(hdmi, REG_HDMI_AVI_INFO(3),
- buffer[15] |
- buffer[16] << 8 |
- buffer[1] << 24);
+ return 0;
+}
- hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0,
- HDMI_INFOFRAME_CTRL0_AVI_SEND |
- HDMI_INFOFRAME_CTRL0_AVI_CONT);
+static int msm_hdmi_config_spd_infoframe(struct hdmi *hdmi,
+ const u8 *buffer, size_t len)
+{
+ u32 buf[7] = {};
+ u32 val;
+ int i;
- val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
- val &= ~HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK;
- val |= HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE(AVI_IFRAME_LINE_NUMBER);
- hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val);
+ if (len != HDMI_INFOFRAME_SIZE(SPD) || len - 3 > sizeof(buf)) {
+ DRM_DEV_ERROR(&hdmi->pdev->dev,
+ "failed to configure SPD infoframe\n");
+ return -EINVAL;
+ }
+
+ /* checksum gets written together with the body of the frame */
+ hdmi_write(hdmi, REG_HDMI_GENERIC1_HDR,
+ buffer[0] |
+ buffer[1] << 8 |
+ buffer[2] << 16);
+
+ memcpy(buf, &buffer[3], len - 3);
+
+ for (i = 0; i < ARRAY_SIZE(buf); i++)
+ hdmi_write(hdmi, REG_HDMI_GENERIC1(i), buf[i]);
+
+ val = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL);
+ val |= HDMI_GEN_PKT_CTRL_GENERIC1_SEND |
+ HDMI_GEN_PKT_CTRL_GENERIC1_CONT |
+ HDMI_GEN_PKT_CTRL_GENERIC1_LINE(SPD_IFRAME_LINE_NUMBER);
+ hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, val);
+
+ return 0;
}
-static void msm_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
+static int msm_hdmi_config_hdmi_infoframe(struct hdmi *hdmi,
+ const u8 *buffer, size_t len)
+{
+ u32 buf[7] = {};
+ u32 val;
+ int i;
+
+ if (len < HDMI_INFOFRAME_HEADER_SIZE + HDMI_VENDOR_INFOFRAME_SIZE ||
+ len - 3 > sizeof(buf)) {
+ DRM_DEV_ERROR(&hdmi->pdev->dev,
+ "failed to configure HDMI infoframe\n");
+ return -EINVAL;
+ }
+
+ /* checksum gets written together with the body of the frame */
+ hdmi_write(hdmi, REG_HDMI_GENERIC0_HDR,
+ buffer[0] |
+ buffer[1] << 8 |
+ buffer[2] << 16);
+
+ memcpy(buf, &buffer[3], len - 3);
+
+ for (i = 0; i < ARRAY_SIZE(buf); i++)
+ hdmi_write(hdmi, REG_HDMI_GENERIC0(i), buf[i]);
+
+ val = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL);
+ val |= HDMI_GEN_PKT_CTRL_GENERIC0_SEND |
+ HDMI_GEN_PKT_CTRL_GENERIC0_CONT |
+ HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE |
+ HDMI_GEN_PKT_CTRL_GENERIC0_LINE(VENSPEC_IFRAME_LINE_NUMBER);
+ hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, val);
+
+ return 0;
+}
+
+static int msm_hdmi_bridge_clear_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ u32 val;
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
+ val &= ~(HDMI_INFOFRAME_CTRL0_AVI_SEND |
+ HDMI_INFOFRAME_CTRL0_AVI_CONT);
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, val);
+
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+ val &= ~HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK;
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val);
+
+ break;
+
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
+ val &= ~(HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE);
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, val);
+
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+ val &= ~HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE__MASK;
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val);
+
+ break;
+
+ case HDMI_INFOFRAME_TYPE_SPD:
+ val = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL);
+ val &= ~(HDMI_GEN_PKT_CTRL_GENERIC1_SEND |
+ HDMI_GEN_PKT_CTRL_GENERIC1_CONT |
+ HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK);
+ hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, val);
+
+ break;
+
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ val = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL);
+ val &= ~(HDMI_GEN_PKT_CTRL_GENERIC0_SEND |
+ HDMI_GEN_PKT_CTRL_GENERIC0_CONT |
+ HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE |
+ HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK);
+ hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, val);
+
+ break;
+
+ default:
+ drm_dbg_driver(hdmi_bridge->base.dev, "Unsupported infoframe type %x\n", type);
+ }
+
+ return 0;
+}
+
+static int msm_hdmi_bridge_write_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+
+ msm_hdmi_bridge_clear_infoframe(bridge, type);
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ return msm_hdmi_config_avi_infoframe(hdmi, buffer, len);
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ return msm_hdmi_config_audio_infoframe(hdmi, buffer, len);
+ case HDMI_INFOFRAME_TYPE_SPD:
+ return msm_hdmi_config_spd_infoframe(hdmi, buffer, len);
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return msm_hdmi_config_hdmi_infoframe(hdmi, buffer, len);
+ default:
+ drm_dbg_driver(hdmi_bridge->base.dev, "Unsupported infoframe type %x\n", type);
+ return 0;
+ }
+}
+
+static void msm_hdmi_set_timings(struct hdmi *hdmi,
+ const struct drm_display_mode *mode);
+
+static void msm_hdmi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
struct hdmi_phy *phy = hdmi->phy;
+ struct drm_encoder *encoder = bridge->encoder;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
DBG("power up");
+ connector = drm_atomic_get_new_connector_for_encoder(state, encoder);
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+
+ hdmi->pixclock = conn_state->hdmi.tmds_char_rate;
+
+ msm_hdmi_set_timings(hdmi, &crtc_state->adjusted_mode);
+
if (!hdmi->power_on) {
msm_hdmi_phy_resource_enable(phy);
msm_hdmi_power_on(bridge);
hdmi->power_on = true;
- if (hdmi->hdmi_mode) {
- msm_hdmi_config_avi_infoframe(hdmi);
+ if (connector->display_info.is_hdmi)
msm_hdmi_audio_update(hdmi);
- }
}
+ drm_atomic_helper_connector_hdmi_update_infoframes(connector, state);
+
msm_hdmi_phy_powerup(phy, hdmi->pixclock);
msm_hdmi_set_mode(hdmi, true);
@@ -152,7 +338,8 @@ static void msm_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
msm_hdmi_hdcp_on(hdmi->hdcp_ctrl);
}
-static void msm_hdmi_bridge_post_disable(struct drm_bridge *bridge)
+static void msm_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
@@ -169,25 +356,18 @@ static void msm_hdmi_bridge_post_disable(struct drm_bridge *bridge)
if (hdmi->power_on) {
power_off(bridge);
hdmi->power_on = false;
- if (hdmi->hdmi_mode)
+ if (hdmi->connector->display_info.is_hdmi)
msm_hdmi_audio_update(hdmi);
msm_hdmi_phy_resource_disable(phy);
}
}
-static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode)
+static void msm_hdmi_set_timings(struct hdmi *hdmi,
+ const struct drm_display_mode *mode)
{
- struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
- struct hdmi *hdmi = hdmi_bridge->hdmi;
int hstart, hend, vstart, vend;
uint32_t frame_ctrl;
- mode = adjusted_mode;
-
- hdmi->pixclock = mode->clock * 1000;
-
hstart = mode->htotal - mode->hsync_start;
hend = mode->htotal - mode->hsync_start + mode->hdisplay;
@@ -232,7 +412,7 @@ static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge,
DBG("frame_ctrl=%08x", frame_ctrl);
hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
- if (hdmi->hdmi_mode)
+ if (hdmi->connector->display_info.is_hdmi)
msm_hdmi_audio_update(hdmi);
}
@@ -251,32 +431,19 @@ static const struct drm_edid *msm_hdmi_bridge_edid_read(struct drm_bridge *bridg
hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
- if (drm_edid) {
- /*
- * FIXME: This should use connector->display_info.is_hdmi from a
- * path that has read the EDID and called
- * drm_edid_connector_update().
- */
- const struct edid *edid = drm_edid_raw(drm_edid);
-
- hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
- }
-
return drm_edid;
}
-static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
- const struct drm_display_info *info,
- const struct drm_display_mode *mode)
+static enum drm_mode_status msm_hdmi_bridge_tmds_char_rate_valid(const struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct msm_drm_private *priv = bridge->dev->dev_private;
struct msm_kms *kms = priv->kms;
- long actual, requested;
-
- requested = 1000 * mode->clock;
+ long actual;
/* for mdp5/apq8074, we manage our own pixel clk (as opposed to
* mdp4/dtv stuff where pixel clk is assigned to mdp/encoder
@@ -284,27 +451,34 @@ static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge
*/
if (kms->funcs->round_pixclk)
actual = kms->funcs->round_pixclk(kms,
- requested, hdmi_bridge->hdmi->encoder);
+ tmds_rate,
+ hdmi_bridge->hdmi->encoder);
else if (config->pwr_clk_cnt > 0)
- actual = clk_round_rate(hdmi->pwr_clks[0], requested);
+ actual = clk_round_rate(hdmi->pwr_clks[0], tmds_rate);
else
- actual = requested;
+ actual = tmds_rate;
- DBG("requested=%ld, actual=%ld", requested, actual);
+ DBG("requested=%lld, actual=%ld", tmds_rate, actual);
- if (actual != requested)
+ if (actual != tmds_rate)
return MODE_CLOCK_RANGE;
return 0;
}
static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = {
- .pre_enable = msm_hdmi_bridge_pre_enable,
- .post_disable = msm_hdmi_bridge_post_disable,
- .mode_set = msm_hdmi_bridge_mode_set,
- .mode_valid = msm_hdmi_bridge_mode_valid,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_pre_enable = msm_hdmi_bridge_atomic_pre_enable,
+ .atomic_post_disable = msm_hdmi_bridge_atomic_post_disable,
.edid_read = msm_hdmi_bridge_edid_read,
.detect = msm_hdmi_bridge_detect,
+ .hdmi_tmds_char_rate_valid = msm_hdmi_bridge_tmds_char_rate_valid,
+ .hdmi_clear_infoframe = msm_hdmi_bridge_clear_infoframe,
+ .hdmi_write_infoframe = msm_hdmi_bridge_write_infoframe,
+ .hdmi_audio_prepare = msm_hdmi_bridge_audio_prepare,
+ .hdmi_audio_shutdown = msm_hdmi_bridge_audio_shutdown,
};
static void
@@ -336,9 +510,15 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi)
bridge->funcs = &msm_hdmi_bridge_funcs;
bridge->ddc = hdmi->i2c;
bridge->type = DRM_MODE_CONNECTOR_HDMIA;
+ bridge->vendor = "Qualcomm";
+ bridge->product = "Snapdragon";
bridge->ops = DRM_BRIDGE_OP_HPD |
DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_HDMI |
DRM_BRIDGE_OP_EDID;
+ bridge->hdmi_audio_max_i2s_playback_channels = 8;
+ bridge->hdmi_audio_dev = &hdmi->pdev->dev;
+ bridge->hdmi_audio_dai_port = -1;
ret = devm_drm_bridge_add(hdmi->dev->dev, bridge);
if (ret)
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index a7a2384044ff..87a91148a731 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -183,10 +183,16 @@ static unsigned get_crtc_mask(struct drm_atomic_state *state)
int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc *crtc;
- int i;
+ int i, ret = 0;
+ /*
+ * FIXME: stop setting allow_modeset and move this check to the DPU
+ * driver.
+ */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if ((old_crtc_state->ctm && !new_crtc_state->ctm) ||
@@ -196,6 +202,11 @@ int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
}
}
+ if (kms && kms->funcs && kms->funcs->check_mode_changed)
+ ret = kms->funcs->check_mode_changed(kms, state);
+ if (ret)
+ return ret;
+
return drm_atomic_helper_check(dev, state);
}
@@ -221,6 +232,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
kms->funcs->wait_flush(kms, crtc_mask);
trace_msm_atomic_wait_flush_finish(crtc_mask);
+ atomic_set(&kms->fault_snapshot_capture, 0);
+
/*
* Now that there is no in-progress flush, prepare the
* current update:
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index ff7a7a9f7b0d..c3588dc9e537 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -894,6 +894,7 @@ static const struct drm_driver msm_driver = {
DRIVER_RENDER |
DRIVER_ATOMIC |
DRIVER_MODESET |
+ DRIVER_SYNCOBJ_TIMELINE |
DRIVER_SYNCOBJ,
.open = msm_open,
.postclose = msm_postclose,
diff --git a/drivers/gpu/drm/msm/msm_dsc_helper.h b/drivers/gpu/drm/msm/msm_dsc_helper.h
index b9049fe1e279..63f95523b2cb 100644
--- a/drivers/gpu/drm/msm/msm_dsc_helper.h
+++ b/drivers/gpu/drm/msm/msm_dsc_helper.h
@@ -13,17 +13,6 @@
#include <drm/display/drm_dsc_helper.h>
/**
- * msm_dsc_get_slices_per_intf() - calculate number of slices per interface
- * @dsc: Pointer to drm dsc config struct
- * @intf_width: interface width in pixels
- * Returns: Integer representing the number of slices for the given interface
- */
-static inline u32 msm_dsc_get_slices_per_intf(const struct drm_dsc_config *dsc, u32 intf_width)
-{
- return DIV_ROUND_UP(intf_width, dsc->slice_width);
-}
-
-/**
* msm_dsc_get_bytes_per_line() - calculate bytes per line
* @dsc: Pointer to drm dsc config struct
* Returns: Integer value representing bytes per line. DSI and DP need
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index 1a5d4f1c8b42..d41e5a6bbee0 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -65,8 +65,7 @@ msm_fence_context_alloc(struct drm_device *dev, volatile uint32_t *fenceptr,
fctx->completed_fence = fctx->last_fence;
*fctx->fenceptr = fctx->last_fence;
- hrtimer_init(&fctx->deadline_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- fctx->deadline_timer.function = deadline_timer;
+ hrtimer_setup(&fctx->deadline_timer, deadline_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
kthread_init_work(&fctx->deadline_work, deadline_work);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index dee470403036..3e9aa2cc38ef 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -509,7 +509,7 @@ static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
}
if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
- ret = -SUBMIT_ERROR(EINVAL, submit, "invalid syncobj flags: %x", syncobj_desc.flags);
+ ret = SUBMIT_ERROR(EINVAL, submit, "invalid syncobj flags: %x", syncobj_desc.flags);
break;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 8557998e0c92..c380d9d9f5af 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -281,6 +281,15 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
if (submit) {
int i;
+ if (state->fault_info.ttbr0) {
+ struct msm_gpu_fault_info *info = &state->fault_info;
+ struct msm_mmu *mmu = submit->aspace->mmu;
+
+ msm_iommu_pagetable_params(mmu, &info->pgtbl_ttbr0,
+ &info->asid);
+ msm_iommu_pagetable_walk(mmu, info->iova, info->ptes);
+ }
+
state->bos = kcalloc(submit->nr_bos,
sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 7cabc8480d7c..e25009150579 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -101,6 +101,14 @@ struct msm_gpu_fault_info {
int flags;
const char *type;
const char *block;
+
+	/* Information about what we think/expect the current SMMU state to be,
+	 * for example pgtbl_ttbr0 should match the ttbr0 that was read back
+	 * from the SMMU registers.
+	 */
+ phys_addr_t pgtbl_ttbr0;
+ u64 ptes[4];
+ int asid;
};
/**
diff --git a/drivers/gpu/drm/msm/msm_io_utils.c b/drivers/gpu/drm/msm/msm_io_utils.c
index afedd61c3e28..a6efe1eac271 100644
--- a/drivers/gpu/drm/msm/msm_io_utils.c
+++ b/drivers/gpu/drm/msm/msm_io_utils.c
@@ -135,8 +135,7 @@ void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
clockid_t clock_id,
enum hrtimer_mode mode)
{
- hrtimer_init(&work->timer, clock_id, mode);
- work->timer.function = msm_hrtimer_worktimer;
+ hrtimer_setup(&work->timer, msm_hrtimer_worktimer, clock_id, mode);
work->worker = worker;
kthread_init_work(&work->work, fn);
}
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 2a94e82316f9..fd73dcd3f30e 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -195,6 +195,28 @@ struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu)
return &iommu->domain->geometry;
}
+int
+msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[4])
+{
+ struct msm_iommu_pagetable *pagetable;
+ struct arm_lpae_io_pgtable_walk_data wd = {};
+
+ if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
+ return -EINVAL;
+
+ pagetable = to_pagetable(mmu);
+
+ if (!pagetable->pgtbl_ops->pgtable_walk)
+ return -EINVAL;
+
+ pagetable->pgtbl_ops->pgtable_walk(pagetable->pgtbl_ops, iova, &wd);
+
+ for (int i = 0; i < ARRAY_SIZE(wd.ptes); i++)
+ ptes[i] = wd.ptes[i];
+
+ return 0;
+}
+
static const struct msm_mmu_funcs pagetable_funcs = {
.map = msm_iommu_pagetable_map,
.unmap = msm_iommu_pagetable_unmap,
@@ -243,7 +265,7 @@ static const struct iommu_flush_ops tlb_ops = {
.tlb_add_page = msm_iommu_tlb_add_page,
};
-static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
+static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *arg);
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
@@ -319,7 +341,7 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
return &pagetable->base;
}
-static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
+static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *arg)
{
struct msm_iommu *iommu = arg;
@@ -343,6 +365,17 @@ static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
return 0;
}
+static int msm_disp_fault_handler(struct iommu_domain *domain, struct device *dev,
+ unsigned long iova, int flags, void *arg)
+{
+ struct msm_iommu *iommu = arg;
+
+ if (iommu->base.handler)
+ return iommu->base.handler(iommu->base.arg, iova, flags, NULL);
+
+ return -ENOSYS;
+}
+
static void msm_iommu_resume_translation(struct msm_mmu *mmu)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev);
@@ -437,6 +470,21 @@ struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
return &iommu->base;
}
+struct msm_mmu *msm_iommu_disp_new(struct device *dev, unsigned long quirks)
+{
+ struct msm_iommu *iommu;
+ struct msm_mmu *mmu;
+
+ mmu = msm_iommu_new(dev, quirks);
+ if (IS_ERR_OR_NULL(mmu))
+ return mmu;
+
+ iommu = to_msm_iommu(mmu);
+ iommu_set_fault_handler(iommu->domain, msm_disp_fault_handler, iommu);
+
+ return mmu;
+}
+
struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
@@ -448,7 +496,7 @@ struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsig
return mmu;
iommu = to_msm_iommu(mmu);
- iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu);
+ iommu_set_fault_handler(iommu->domain, msm_gpu_fault_handler, iommu);
/* Enable stall on iommu fault: */
if (adreno_smmu->set_stall)
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index 38965e12a6bf..35d5397e73b4 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -164,12 +164,26 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc)
vblank_ctrl_queue_work(priv, crtc, false);
}
+static int msm_kms_fault_handler(void *arg, unsigned long iova, int flags, void *data)
+{
+ struct msm_kms *kms = arg;
+
+ if (atomic_read(&kms->fault_snapshot_capture) == 0) {
+ msm_disp_snapshot_state(kms->dev);
+ atomic_inc(&kms->fault_snapshot_capture);
+ }
+
+ return -ENOSYS;
+}
+
struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
{
struct msm_gem_address_space *aspace;
struct msm_mmu *mmu;
struct device *mdp_dev = dev->dev;
struct device *mdss_dev = mdp_dev->parent;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
struct device *iommu_dev;
/*
@@ -181,7 +195,7 @@ struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
else
iommu_dev = mdss_dev;
- mmu = msm_iommu_new(iommu_dev, 0);
+ mmu = msm_iommu_disp_new(iommu_dev, 0);
if (IS_ERR(mmu))
return ERR_CAST(mmu);
@@ -195,8 +209,11 @@ struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
if (IS_ERR(aspace)) {
dev_err(mdp_dev, "aspace create, error %pe\n", aspace);
mmu->funcs->destroy(mmu);
+ return aspace;
}
+ msm_mmu_set_fault_handler(aspace->mmu, kms, msm_kms_fault_handler);
+
return aspace;
}
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index e60162744c66..43b58d052ee6 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -60,6 +60,13 @@ struct msm_kms_funcs {
void (*disable_commit)(struct msm_kms *kms);
/**
+ * @check_mode_changed:
+ *
+	 * Verify whether the commit requires a full modeset on any of the CRTCs.
+ */
+ int (*check_mode_changed)(struct msm_kms *kms, struct drm_atomic_state *state);
+
+ /**
* Prepare for atomic commit. This is called after any previous
* (async or otherwise) commit has completed.
*/
@@ -128,6 +135,9 @@ struct msm_kms {
int irq;
bool irq_requested;
+ /* rate limit the snapshot capture to once per attach */
+ atomic_t fault_snapshot_capture;
+
/* mapper-id used to request GEM buffer mapped for scanout: */
struct msm_gem_address_space *aspace;
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 88af4f490881..daf91529e02b 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -42,6 +42,7 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks);
struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks);
+struct msm_mmu *msm_iommu_disp_new(struct device *dev, unsigned long quirks);
static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
int (*handler)(void *arg, unsigned long iova, int flags, void *data))
@@ -53,7 +54,8 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent);
int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
- int *asid);
+ int *asid);
+int msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[4]);
struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu);
#endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index c803556a8f64..c5651c39ac2a 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -59,8 +59,14 @@ static const struct drm_sched_backend_ops msm_sched_ops = {
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
void *memptrs, uint64_t memptrs_iova)
{
+ struct drm_sched_init_args args = {
+ .ops = &msm_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = num_hw_submissions,
+ .timeout = MAX_SCHEDULE_TIMEOUT,
+ .dev = gpu->dev->dev,
+ };
struct msm_ringbuffer *ring;
- long sched_timeout;
char name[32];
int ret;
@@ -87,6 +93,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
}
msm_gem_object_set_name(ring->bo, "ring%d", id);
+	args.name = to_msm_bo(ring->bo)->name;
ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
ring->next = ring->start;
@@ -95,13 +102,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
ring->memptrs = memptrs;
ring->memptrs_iova = memptrs_iova;
- /* currently managing hangcheck ourselves: */
- sched_timeout = MAX_SCHEDULE_TIMEOUT;
-
- ret = drm_sched_init(&ring->sched, &msm_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- num_hw_submissions, 0, sched_timeout,
- NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev);
+ ret = drm_sched_init(&ring->sched, &args);
if (ret) {
goto fail;
}
diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
index 55a35182858c..5a6ae9fc3194 100644
--- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
@@ -2259,5 +2259,12 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
</domain>
+<domain name="CP_INDIRECT_BUFFER" width="32" varset="chip" prefix="chip" variants="A5XX-">
+ <reg64 offset="0" name="IB_BASE" type="address"/>
+ <reg32 offset="2" name="2">
+ <bitfield name="IB_SIZE" low="0" high="19"/>
+ </reg32>
+</domain>
+
</database>
diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
index 35f7f40e405b..d2c8c46bb041 100644
--- a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
@@ -17,6 +17,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<bitfield name="CLK_EN" pos="5" type="boolean"/>
<bitfield name="CLK_EN_SEL" pos="4" type="boolean"/>
<bitfield name="BITCLK_SEL" low="2" high="3" type="uint"/>
+ <bitfield name="DSICLK_SEL" low="0" high="1" type="uint"/>
</reg32>
<reg32 offset="0x00018" name="GLBL_CTRL"/>
<reg32 offset="0x0001c" name="RBUF_CTRL"/>
diff --git a/drivers/gpu/drm/msm/registers/display/hdmi.xml b/drivers/gpu/drm/msm/registers/display/hdmi.xml
index 1cf1b14fbd91..0ebb96297dae 100644
--- a/drivers/gpu/drm/msm/registers/display/hdmi.xml
+++ b/drivers/gpu/drm/msm/registers/display/hdmi.xml
@@ -131,7 +131,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
-->
<bitfield name="GENERIC0_SEND" pos="0" type="boolean"/>
<bitfield name="GENERIC0_CONT" pos="1" type="boolean"/>
- <bitfield name="GENERIC0_UPDATE" low="2" high="3" type="uint"/> <!-- ??? -->
+ <bitfield name="GENERIC0_UPDATE" pos="2" type="boolean"/>
<bitfield name="GENERIC1_SEND" pos="4" type="boolean"/>
<bitfield name="GENERIC1_CONT" pos="5" type="boolean"/>
<bitfield name="GENERIC0_LINE" low="16" high="21" type="uint"/>