summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--drivers/gpu/drm/i915/Makefile7
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c1
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c1
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.c10
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.c1
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.c10
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.c150
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c31
-rw-r--r--drivers/gpu/drm/i915/display/intel_bo.c78
-rw-r--r--drivers/gpu/drm/i915/display/intel_bo.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_casf.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c63
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c741
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_de.h17
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c151
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h27
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_limits.h26
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c29
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_map.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c74
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_regs.h268
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_snapshot.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.c83
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c869
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h20
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c53
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c209
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_tunnel.c96
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c316
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt_common.c35
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt_common.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_dram.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c107
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb_buffer.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c41
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_bo.c101
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_bo.h25
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.c38
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_flipq.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c37
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.h21
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c19
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_lt_phy.c245
-rw-r--r--drivers/gpu/drm/i915/display/intel_lt_phy.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_lt_phy_regs.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_oprom_regs.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c584
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.h48
-rw-r--r--drivers/gpu/drm/i915/display/intel_parent.c156
-rw-r--r--drivers/gpu/drm/i915/display/intel_parent.h51
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c117
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_rom.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c96
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc_regs.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.c315
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c55
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr_regs.h1
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.c2
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c7
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c26
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c24
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c69
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h24
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c2
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_ppgtt.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c21
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c24
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c21
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c26
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c10
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c50
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h4
-rw-r--r--drivers/gpu/drm/i915/i915_active.c2
-rw-r--r--drivers/gpu/drm/i915/i915_bo.c156
-rw-r--r--drivers/gpu/drm/i915/i915_bo.h9
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c1
-rw-r--r--drivers/gpu/drm/i915/i915_dpt.c277
-rw-r--r--drivers/gpu/drm/i915/i915_dpt.h20
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c32
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h5
-rw-r--r--drivers/gpu/drm/i915/i915_dsb_buffer.c (renamed from drivers/gpu/drm/i915/display/intel_dsb_buffer.c)28
-rw-r--r--drivers/gpu/drm/i915/i915_dsb_buffer.h9
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h1
-rw-r--r--drivers/gpu/drm/i915/i915_hwmon.c2
-rw-r--r--drivers/gpu/drm/i915/i915_initial_plane.c3
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1
-rw-r--r--drivers/gpu/drm/i915/i915_overlay.c517
-rw-r--r--drivers/gpu/drm/i915/i915_overlay.h11
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c34
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h465
-rw-r--r--drivers/gpu/drm/i915/i915_reg_defs.h179
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence_work.c2
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c10
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h7
-rw-r--r--drivers/gpu/drm/i915/i915_vma_resource.c2
-rw-r--r--drivers/gpu/drm/i915/intel_clock_gating.c42
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c10
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.c17
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.h9
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c4
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.c6
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_irq.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_sw_fence.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c2
-rw-r--r--drivers/gpu/drm/i915/vlv_suspend.c1
-rw-r--r--drivers/gpu/drm/xe/Makefile9
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_reg.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h36
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h11
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h9
-rw-r--r--drivers/gpu/drm/xe/display/intel_bo.c109
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.c27
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c8
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_bo.c (renamed from drivers/gpu/drm/xe/display/intel_fb_bo.c)63
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_bo.h9
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_pcode.c38
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_pcode.h9
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_vma.h18
-rw-r--r--drivers/gpu/drm/xe/display/xe_dsb_buffer.c28
-rw-r--r--drivers/gpu/drm/xe/display/xe_dsb_buffer.h9
-rw-r--r--drivers/gpu/drm/xe/display/xe_fb_pin.c26
-rw-r--r--drivers/gpu/drm/xe/display/xe_frontbuffer.c71
-rw-r--r--drivers/gpu/drm/xe/display/xe_frontbuffer.h9
-rw-r--r--drivers/gpu/drm/xe/display/xe_initial_plane.c27
-rw-r--r--drivers/gpu/drm/xe/regs/xe_reg_defs.h5
-rw-r--r--drivers/gpu/drm/xe/xe_eu_stall.c20
-rw-r--r--drivers/gpu/drm/xe/xe_execlist.c6
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c8
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c14
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c9
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.h1
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c42
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.c30
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.h8
-rw-r--r--drivers/gpu/drm/xe/xe_pxp.c4
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.c4
-rw-r--r--include/drm/intel/display_parent_interface.h108
-rw-r--r--include/drm/intel/i915_drm.h82
-rw-r--r--include/drm/intel/intel_gmd_interrupt_regs.h92
-rw-r--r--include/drm/intel/intel_gmd_misc_regs.h21
-rw-r--r--include/drm/intel/intel_pcode_regs.h108
-rw-r--r--include/drm/intel/pick.h51
-rw-r--r--include/drm/intel/reg_bits.h139
-rw-r--r--include/linux/iopoll.h8
-rw-r--r--include/video/vga.h1
218 files changed, 5610 insertions, 3870 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 715433187212..be976a90c5a6 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -76,9 +76,13 @@ i915-$(CONFIG_PERF_EVENTS) += \
# core display adaptation
i915-y += \
+ i915_bo.o \
i915_display_pc8.o \
+ i915_dpt.o \
+ i915_dsb_buffer.o \
i915_hdcp_gsc.o \
i915_initial_plane.o \
+ i915_overlay.o \
i915_panic.o
# "Graphics Technology" (aka we talk to the gpu)
@@ -270,13 +274,10 @@ i915-y += \
display/intel_dpll.o \
display/intel_dpll_mgr.o \
display/intel_dpt.o \
- display/intel_dpt_common.o \
display/intel_dram.o \
display/intel_drrs.o \
display/intel_dsb.o \
- display/intel_dsb_buffer.o \
display/intel_fb.o \
- display/intel_fb_bo.o \
display/intel_fb_pin.o \
display/intel_fbc.o \
display/intel_fdi.o \
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 4cb753177fd8..d7de329abf19 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -10,7 +10,6 @@
#include <drm/drm_print.h>
#include "g4x_dp.h"
-#include "i915_reg.h"
#include "intel_audio.h"
#include "intel_backlight.h"
#include "intel_connector.h"
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 8b22447e8e23..5fe5067c4237 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -8,7 +8,6 @@
#include <drm/drm_print.h>
#include "g4x_hdmi.h"
-#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 008d339d5c21..cbaef3f13f00 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -6,15 +6,15 @@
#include <linux/debugfs.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_pcode_regs.h>
#include "hsw_ips.h"
-#include "i915_reg.h"
#include "intel_color_regs.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
-#include "intel_pcode.h"
+#include "intel_parent.h"
static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
{
@@ -39,8 +39,8 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
if (display->platform.broadwell) {
drm_WARN_ON(display->drm,
- intel_pcode_write(display->drm, DISPLAY_IPS_CONTROL,
- val | IPS_PCODE_CONTROL));
+ intel_parent_pcode_write(display, DISPLAY_IPS_CONTROL,
+ val | IPS_PCODE_CONTROL));
/*
* Quoting Art Runyan: "its not safe to expect any particular
* value in IPS_CTL bit 31 after enabling IPS through the
@@ -72,7 +72,7 @@ bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
if (display->platform.broadwell) {
drm_WARN_ON(display->drm,
- intel_pcode_write(display->drm, DISPLAY_IPS_CONTROL, 0));
+ intel_parent_pcode_write(display, DISPLAY_IPS_CONTROL, 0));
/*
* Wait for PCODE to finish disabling IPS. The BSpec specified
* 42ms timeout value leads to occasional timeouts so use 100ms
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index b1fecf178906..9c16753a1f3b 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -10,7 +10,6 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "intel_atomic.h"
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index 39dfceb438ae..9e170e79dcf6 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -6,8 +6,8 @@
#include <linux/iopoll.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_gmd_misc_regs.h>
-#include "i915_reg.h"
#include "i9xx_wm.h"
#include "i9xx_wm_regs.h"
#include "intel_atomic.h"
@@ -182,8 +182,8 @@ static bool _intel_set_memory_cxsr(struct intel_display *display, bool enable)
intel_de_posting_read(display, DSPFW3(display));
} else if (display->platform.i945g || display->platform.i945gm) {
was_enabled = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
- val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
- _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
+ val = enable ? REG_MASKED_FIELD_ENABLE(FW_BLC_SELF_EN) :
+ REG_MASKED_FIELD_DISABLE(FW_BLC_SELF_EN);
intel_de_write(display, FW_BLC_SELF, val);
intel_de_posting_read(display, FW_BLC_SELF);
} else if (display->platform.i915gm) {
@@ -193,8 +193,8 @@ static bool _intel_set_memory_cxsr(struct intel_display *display, bool enable)
* FW_BLC_SELF. What's going on?
*/
was_enabled = intel_de_read(display, INSTPM) & INSTPM_SELF_EN;
- val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
- _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
+ val = enable ? REG_MASKED_FIELD_ENABLE(INSTPM_SELF_EN) :
+ REG_MASKED_FIELD_DISABLE(INSTPM_SELF_EN);
intel_de_write(display, INSTPM, val);
intel_de_posting_read(display, INSTPM);
} else {
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index fc265f71d72b..c04327979678 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -34,7 +34,6 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_reg.h"
#include "icl_dsi.h"
#include "icl_dsi_regs.h"
#include "intel_atomic.h"
@@ -1624,12 +1623,6 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
if (crtc_state->pipe_bpp < 8 * 3)
return -EINVAL;
- /* FIXME: split only when necessary */
- if (crtc_state->dsc.slice_count > 1)
- crtc_state->dsc.num_streams = 2;
- else
- crtc_state->dsc.num_streams = 1;
-
/* FIXME: initialize from VBT */
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c
index 07ffee38974b..a7350ce8e716 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.c
+++ b/drivers/gpu/drm/i915/display/intel_alpm.c
@@ -15,6 +15,7 @@
#include "intel_dp_aux.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
+#include "intel_vrr.h"
#define SILENCE_PERIOD_MIN_TIME 80
#define SILENCE_PERIOD_MAX_TIME 180
@@ -43,12 +44,6 @@ bool intel_alpm_is_alpm_aux_less(struct intel_dp *intel_dp,
void intel_alpm_init(struct intel_dp *intel_dp)
{
- u8 dpcd;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, &dpcd) < 0)
- return;
-
- intel_dp->alpm_dpcd = dpcd;
mutex_init(&intel_dp->alpm.lock);
}
@@ -248,14 +243,87 @@ bool intel_alpm_compute_params(struct intel_dp *intel_dp,
return true;
}
+int intel_alpm_lobf_min_guardband(struct intel_crtc_state *crtc_state)
+{
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ int first_sdp_position = adjusted_mode->crtc_vtotal -
+ adjusted_mode->crtc_vsync_start;
+ int waketime_in_lines;
+
+ /*
+ * #FIXME: Need to check if io_wake_lines or aux_less_wake_lines
+ * is applicable. Currently this information is not readily
+ * available in crtc_state, so max will suffice for now.
+ */
+ waketime_in_lines = max(crtc_state->alpm_state.io_wake_lines,
+ crtc_state->alpm_state.aux_less_wake_lines);
+
+ if (!crtc_state->has_lobf)
+ return 0;
+
+ return first_sdp_position + waketime_in_lines + crtc_state->set_context_latency;
+}
+
+static bool intel_alpm_lobf_is_window1_sufficient(struct intel_crtc_state *crtc_state)
+{
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ int vblank = adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay;
+ int window1;
+
+ /*
+ * LOBF must be disabled if the number of lines within Window 1 is not
+ * greater than ALPM_CTL[ALPM Entry Check]
+ */
+ window1 = vblank - min(vblank,
+ crtc_state->vrr.guardband +
+ crtc_state->set_context_latency);
+
+ return window1 > crtc_state->alpm_state.check_entry_lines;
+}
+
+void intel_alpm_lobf_compute_config_late(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ int waketime_in_lines, first_sdp_position;
+
+ if (!crtc_state->has_lobf)
+ return;
+
+ if (!intel_alpm_lobf_is_window1_sufficient(crtc_state)) {
+ crtc_state->has_lobf = false;
+ return;
+ }
+
+ /*
+ * LOBF can only be enabled if the time from the start of the SCL+Guardband
+ * window to the position of the first SDP is greater than the time it takes
+ * to wake the main link.
+ *
+ * Position of first sdp : vsync_start
+ * start of scl + guardband : vtotal - (scl + guardband)
+ * time in lines to wake main link : waketime_in_lines
+ *
+ * Position of first sdp - start of (scl + guardband) > time in lines to wake main link
+ * vsync_start - (vtotal - (scl + guardband)) > waketime_in_lines
+ * vsync_start - vtotal + scl + guardband > waketime_in_lines
+ * scl + guardband > waketime_in_lines + (vtotal - vsync_start)
+ */
+ first_sdp_position = adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vsync_start;
+ if (intel_alpm_aux_less_wake_supported(intel_dp))
+ waketime_in_lines = crtc_state->alpm_state.io_wake_lines;
+ else
+ waketime_in_lines = crtc_state->alpm_state.aux_less_wake_lines;
+
+ crtc_state->has_lobf = (crtc_state->set_context_latency + crtc_state->vrr.guardband) >
+ (first_sdp_position + waketime_in_lines);
+}
+
void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- int waketime_in_lines, first_sdp_position;
- int context_latency, guardband;
if (intel_dp->alpm.lobf_disable_debug) {
drm_dbg_kms(display->drm, "LOBF is disabled by debug flag\n");
@@ -277,8 +345,8 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
if (crtc_state->has_psr)
return;
- if (crtc_state->vrr.vmin != crtc_state->vrr.vmax ||
- crtc_state->vrr.vmin != crtc_state->vrr.flipline)
+ if (!intel_vrr_always_use_vrr_tg(display) ||
+ !intel_vrr_is_fixed_rr(crtc_state))
return;
if (!(intel_alpm_aux_wake_supported(intel_dp) ||
@@ -288,17 +356,7 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
if (!intel_alpm_compute_params(intel_dp, crtc_state))
return;
- context_latency = adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
- guardband = adjusted_mode->crtc_vtotal -
- adjusted_mode->crtc_vdisplay - context_latency;
- first_sdp_position = adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vsync_start;
- if (intel_alpm_aux_less_wake_supported(intel_dp))
- waketime_in_lines = crtc_state->alpm_state.io_wake_lines;
- else
- waketime_in_lines = crtc_state->alpm_state.aux_less_wake_lines;
-
- crtc_state->has_lobf = (context_latency + guardband) >
- (first_sdp_position + waketime_in_lines);
+ crtc_state->has_lobf = true;
}
static void lnl_alpm_configure(struct intel_dp *intel_dp,
@@ -388,25 +446,14 @@ void intel_alpm_port_configure(struct intel_dp *intel_dp,
intel_de_write(display, PORT_ALPM_LFPS_CTL(port), lfps_ctl_val);
}
-void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+void intel_alpm_lobf_disable(const struct intel_crtc_state *new_crtc_state)
{
- struct intel_display *display = to_intel_display(state);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ struct intel_display *display = to_intel_display(new_crtc_state);
+ enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
struct intel_encoder *encoder;
- if (DISPLAY_VER(display) < 20)
- return;
-
- if (crtc_state->has_lobf || crtc_state->has_lobf == old_crtc_state->has_lobf)
- return;
-
for_each_intel_encoder_mask(display->drm, encoder,
- crtc_state->uapi.encoder_mask) {
+ new_crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp;
if (!intel_encoder_is_dp(encoder))
@@ -417,12 +464,10 @@ void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
if (!intel_dp_is_edp(intel_dp))
continue;
- if (old_crtc_state->has_lobf) {
- mutex_lock(&intel_dp->alpm.lock);
- intel_de_write(display, ALPM_CTL(display, cpu_transcoder), 0);
- drm_dbg_kms(display->drm, "Link off between frames (LOBF) disabled\n");
- mutex_unlock(&intel_dp->alpm.lock);
- }
+ mutex_lock(&intel_dp->alpm.lock);
+ intel_de_write(display, ALPM_CTL(display, cpu_transcoder), 0);
+ drm_dbg_kms(display->drm, "Link off between frames (LOBF) disabled\n");
+ mutex_unlock(&intel_dp->alpm.lock);
}
}
@@ -443,22 +488,13 @@ void intel_alpm_enable_sink(struct intel_dp *intel_dp,
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, val);
}
-void intel_alpm_post_plane_update(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+void intel_alpm_lobf_enable(const struct intel_crtc_state *new_crtc_state)
{
- struct intel_display *display = to_intel_display(state);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_encoder *encoder;
- if (crtc_state->has_psr || !crtc_state->has_lobf ||
- crtc_state->has_lobf == old_crtc_state->has_lobf)
- return;
-
for_each_intel_encoder_mask(display->drm, encoder,
- crtc_state->uapi.encoder_mask) {
+ new_crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp;
if (!intel_encoder_is_dp(encoder))
@@ -467,8 +503,8 @@ void intel_alpm_post_plane_update(struct intel_atomic_state *state,
intel_dp = enc_to_intel_dp(encoder);
if (intel_dp_is_edp(intel_dp)) {
- intel_alpm_enable_sink(intel_dp, crtc_state);
- intel_alpm_configure(intel_dp, crtc_state);
+ intel_alpm_enable_sink(intel_dp, new_crtc_state);
+ intel_alpm_configure(intel_dp, new_crtc_state);
}
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.h b/drivers/gpu/drm/i915/display/intel_alpm.h
index c6a4ec5b9561..1cf70668ab1b 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.h
+++ b/drivers/gpu/drm/i915/display/intel_alpm.h
@@ -25,12 +25,10 @@ void intel_alpm_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_alpm_enable_sink(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
-void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
+void intel_alpm_lobf_disable(const struct intel_crtc_state *new_crtc_state);
void intel_alpm_port_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
-void intel_alpm_post_plane_update(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
+void intel_alpm_lobf_enable(const struct intel_crtc_state *new_crtc_state);
void intel_alpm_lobf_debugfs_add(struct intel_connector *connector);
bool intel_alpm_aux_wake_supported(struct intel_dp *intel_dp);
bool intel_alpm_aux_less_wake_supported(struct intel_dp *intel_dp);
@@ -38,4 +36,7 @@ bool intel_alpm_is_alpm_aux_less(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_alpm_disable(struct intel_dp *intel_dp);
bool intel_alpm_get_error(struct intel_dp *intel_dp);
+void intel_alpm_lobf_compute_config_late(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state);
+int intel_alpm_lobf_min_guardband(struct intel_crtc_state *crtc_state);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 5f3c175afdd2..081627e0d917 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -37,6 +37,7 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_wa.h"
#include "intel_lpe_audio.h"
/**
@@ -184,17 +185,6 @@ static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = {
{ 192000, TMDS_445_5M, 20480, 371250 },
};
-/*
- * WA_14020863754: Implement Audio Workaround
- * Corner case with Min Hblank Fix can cause audio hang
- */
-static bool needs_wa_14020863754(struct intel_display *display)
-{
- return DISPLAY_VERx100(display) == 3000 ||
- DISPLAY_VERx100(display) == 2000 ||
- DISPLAY_VERx100(display) == 1401;
-}
-
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
{
@@ -440,7 +430,11 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
intel_de_rmw(display, HSW_AUD_PIN_ELD_CP_VLD,
AUDIO_OUTPUT_ENABLE(cpu_transcoder), 0);
- if (needs_wa_14020863754(display))
+ /*
+ * WA_14020863754: Implement Audio Workaround
+ * Corner case with Min Hblank Fix can cause audio hang
+ */
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14020863754))
intel_de_rmw(display, AUD_CHICKENBIT_REG3, DACBE_DISABLE_MIN_HBLANK_FIX, 0);
intel_audio_sdp_split_update(old_crtc_state, false);
@@ -572,7 +566,11 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
intel_audio_sdp_split_update(crtc_state, true);
- if (needs_wa_14020863754(display))
+ /*
+ * WA_14020863754: Implement Audio Workaround
+ * Corner case with Min Hblank Fix can cause audio hang
+ */
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14020863754))
intel_de_rmw(display, AUD_CHICKENBIT_REG3, 0, DACBE_DISABLE_MIN_HBLANK_FIX);
/* Enable audio presence detect */
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index a68fdbd2acb9..34e95f05936e 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -12,7 +12,6 @@
#include <drm/drm_file.h>
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_backlight.h"
#include "intel_backlight_regs.h"
#include "intel_connector.h"
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index a1fa3571eca0..b6fe87c29aa7 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -41,6 +41,7 @@
#include "intel_display_utils.h"
#include "intel_gmbus.h"
#include "intel_rom.h"
+#include "intel_vdsc.h"
#define _INTEL_BIOS_PRIVATE
#include "intel_vbt_defs.h"
@@ -1545,6 +1546,10 @@ parse_edp(struct intel_display *display,
if (display->vbt.version >= 251)
panel->vbt.edp.dsc_disable =
panel_bool(edp->edp_dsc_disable, panel_type);
+
+ if (display->vbt.version >= 261)
+ panel->vbt.edp.pipe_joiner_enable =
+ panel_bool(edp->pipe_joiner_enable, panel_type);
}
static void
@@ -3543,12 +3548,13 @@ bool intel_bios_is_dsi_present(struct intel_display *display,
return false;
}
-static void fill_dsc(struct intel_crtc_state *crtc_state,
+static bool fill_dsc(struct intel_crtc_state *crtc_state,
struct dsc_compression_parameters_entry *dsc,
int dsc_max_bpc)
{
struct intel_display *display = to_intel_display(crtc_state);
struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ int slices_per_line;
int bpc = 8;
vdsc_cfg->dsc_version_major = dsc->version_major;
@@ -3574,26 +3580,33 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
* throughput etc. into account.
*
* Also, per spec DSI supports 1, 2, 3 or 4 horizontal slices.
+ *
+ * FIXME: split only when necessary
*/
if (dsc->slices_per_line & BIT(2)) {
- crtc_state->dsc.slice_count = 4;
+ slices_per_line = 4;
} else if (dsc->slices_per_line & BIT(1)) {
- crtc_state->dsc.slice_count = 2;
+ slices_per_line = 2;
} else {
/* FIXME */
if (!(dsc->slices_per_line & BIT(0)))
drm_dbg_kms(display->drm,
"VBT: Unsupported DSC slice count for DSI\n");
- crtc_state->dsc.slice_count = 1;
+ slices_per_line = 1;
}
+ if (drm_WARN_ON(display->drm,
+ !intel_dsc_get_slice_config(display, 1, slices_per_line,
+ &crtc_state->dsc.slice_config)))
+ return false;
+
if (crtc_state->hw.adjusted_mode.crtc_hdisplay %
- crtc_state->dsc.slice_count != 0)
+ intel_dsc_line_slice_count(&crtc_state->dsc.slice_config) != 0)
drm_dbg_kms(display->drm,
"VBT: DSC hdisplay %d not divisible by slice count %d\n",
crtc_state->hw.adjusted_mode.crtc_hdisplay,
- crtc_state->dsc.slice_count);
+ intel_dsc_line_slice_count(&crtc_state->dsc.slice_config));
/*
* The VBT rc_buffer_block_size and rc_buffer_size definitions
@@ -3608,6 +3621,8 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
vdsc_cfg->block_pred_enable = dsc->block_prediction_enable;
vdsc_cfg->slice_height = dsc->slice_height;
+
+ return true;
}
/* FIXME: initially DSI specific */
@@ -3628,9 +3643,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
if (!devdata->dsc)
return false;
- fill_dsc(crtc_state, devdata->dsc, dsc_max_bpc);
-
- return true;
+ return fill_dsc(crtc_state, devdata->dsc, dsc_max_bpc);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_bo.c b/drivers/gpu/drm/i915/display/intel_bo.c
index 8f372b33d48b..3b82d38a0504 100644
--- a/drivers/gpu/drm/i915/display/intel_bo.c
+++ b/drivers/gpu/drm/i915/display/intel_bo.c
@@ -1,87 +1,87 @@
// SPDX-License-Identifier: MIT
-/* Copyright © 2024 Intel Corporation */
+/* Copyright © 2026 Intel Corporation */
-#include <drm/drm_panic.h>
+#include <drm/drm_gem.h>
+#include <drm/intel/display_parent_interface.h>
-#include "gem/i915_gem_mman.h"
-#include "gem/i915_gem_object.h"
-#include "gem/i915_gem_object_frontbuffer.h"
-#include "pxp/intel_pxp.h"
-#include "i915_debugfs.h"
#include "intel_bo.h"
+#include "intel_display_core.h"
+#include "intel_display_types.h"
bool intel_bo_is_tiled(struct drm_gem_object *obj)
{
- return i915_gem_object_is_tiled(to_intel_bo(obj));
+ struct intel_display *display = to_intel_display(obj->dev);
+
+ return display->parent->bo->is_tiled && display->parent->bo->is_tiled(obj);
}
bool intel_bo_is_userptr(struct drm_gem_object *obj)
{
- return i915_gem_object_is_userptr(to_intel_bo(obj));
+ struct intel_display *display = to_intel_display(obj->dev);
+
+ return display->parent->bo->is_userptr && display->parent->bo->is_userptr(obj);
}
bool intel_bo_is_shmem(struct drm_gem_object *obj)
{
- return i915_gem_object_is_shmem(to_intel_bo(obj));
+ struct intel_display *display = to_intel_display(obj->dev);
+
+ return display->parent->bo->is_shmem && display->parent->bo->is_shmem(obj);
}
bool intel_bo_is_protected(struct drm_gem_object *obj)
{
- return i915_gem_object_is_protected(to_intel_bo(obj));
+ struct intel_display *display = to_intel_display(obj->dev);
+
+ return display->parent->bo->is_protected(obj);
}
int intel_bo_key_check(struct drm_gem_object *obj)
{
- return intel_pxp_key_check(obj, false);
+ struct intel_display *display = to_intel_display(obj->dev);
+
+ return display->parent->bo->key_check(obj);
}
int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
- return i915_gem_fb_mmap(to_intel_bo(obj), vma);
-}
+ struct intel_display *display = to_intel_display(obj->dev);
-int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
-{
- return i915_gem_object_read_from_page(to_intel_bo(obj), offset, dst, size);
+ return display->parent->bo->fb_mmap(obj, vma);
}
-struct intel_frontbuffer *intel_bo_frontbuffer_get(struct drm_gem_object *_obj)
+int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
{
- struct drm_i915_gem_object *obj = to_intel_bo(_obj);
- struct i915_frontbuffer *front;
+ struct intel_display *display = to_intel_display(obj->dev);
- front = i915_gem_object_frontbuffer_get(obj);
- if (!front)
- return NULL;
-
- return &front->base;
+ return display->parent->bo->read_from_page(obj, offset, dst, size);
}
-void intel_bo_frontbuffer_ref(struct intel_frontbuffer *_front)
+void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
{
- struct i915_frontbuffer *front =
- container_of(_front, typeof(*front), base);
+ struct intel_display *display = to_intel_display(obj->dev);
- i915_gem_object_frontbuffer_ref(front);
+ if (display->parent->bo->describe)
+ display->parent->bo->describe(m, obj);
}
-void intel_bo_frontbuffer_put(struct intel_frontbuffer *_front)
+int intel_bo_framebuffer_init(struct drm_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct i915_frontbuffer *front =
- container_of(_front, typeof(*front), base);
+ struct intel_display *display = to_intel_display(obj->dev);
- return i915_gem_object_frontbuffer_put(front);
+ return display->parent->bo->framebuffer_init(obj, mode_cmd);
}
-void intel_bo_frontbuffer_flush_for_display(struct intel_frontbuffer *_front)
+void intel_bo_framebuffer_fini(struct drm_gem_object *obj)
{
- struct i915_frontbuffer *front =
- container_of(_front, typeof(*front), base);
+ struct intel_display *display = to_intel_display(obj->dev);
- i915_gem_object_flush_if_display(front->obj);
+ display->parent->bo->framebuffer_fini(obj);
}
-void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
+struct drm_gem_object *intel_bo_framebuffer_lookup(struct intel_display *display,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
- i915_debugfs_describe_obj(m, to_intel_bo(obj));
+ return display->parent->bo->framebuffer_lookup(display->drm, filp, user_mode_cmd);
}
diff --git a/drivers/gpu/drm/i915/display/intel_bo.h b/drivers/gpu/drm/i915/display/intel_bo.h
index 516a3836a6bc..aec188c706c2 100644
--- a/drivers/gpu/drm/i915/display/intel_bo.h
+++ b/drivers/gpu/drm/i915/display/intel_bo.h
@@ -6,8 +6,11 @@
#include <linux/types.h>
+struct drm_file;
struct drm_gem_object;
+struct drm_mode_fb_cmd2;
struct drm_scanout_buffer;
+struct intel_display;
struct intel_framebuffer;
struct seq_file;
struct vm_area_struct;
@@ -20,11 +23,12 @@ int intel_bo_key_check(struct drm_gem_object *obj);
int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size);
-struct intel_frontbuffer *intel_bo_frontbuffer_get(struct drm_gem_object *obj);
-void intel_bo_frontbuffer_ref(struct intel_frontbuffer *front);
-void intel_bo_frontbuffer_put(struct intel_frontbuffer *front);
-void intel_bo_frontbuffer_flush_for_display(struct intel_frontbuffer *front);
-
void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj);
+void intel_bo_framebuffer_fini(struct drm_gem_object *obj);
+int intel_bo_framebuffer_init(struct drm_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd);
+struct drm_gem_object *intel_bo_framebuffer_lookup(struct intel_display *display,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *user_mode_cmd);
+
#endif /* __INTEL_BO__ */
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index fe48949b5880..07b4531a4376 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -5,8 +5,8 @@
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_pcode_regs.h>
-#include "i915_reg.h"
#include "intel_bw.h"
#include "intel_crtc.h"
#include "intel_display_core.h"
@@ -15,7 +15,7 @@
#include "intel_display_utils.h"
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
-#include "intel_pcode.h"
+#include "intel_parent.h"
#include "intel_uncore.h"
#include "skl_watermark.h"
@@ -114,9 +114,9 @@ static int icl_pcode_read_qgv_point_info(struct intel_display *display,
u16 dclk;
int ret;
- ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
- ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
- &val, &val2);
+ ret = intel_parent_pcode_read(display, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
+ &val, &val2);
if (ret)
return ret;
@@ -141,8 +141,8 @@ static int adls_pcode_read_psf_gv_point_info(struct intel_display *display,
int ret;
int i;
- ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
- ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
+ ret = intel_parent_pcode_read(display, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
if (ret)
return ret;
@@ -189,11 +189,11 @@ static int icl_pcode_restrict_qgv_points(struct intel_display *display,
return 0;
/* bspec says to keep retrying for at least 1 ms */
- ret = intel_pcode_request(display->drm, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
- points_mask,
- ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
- ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
- 1);
+ ret = intel_parent_pcode_request(display, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
+ points_mask,
+ ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
+ ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
+ 1);
if (ret < 0) {
drm_err(display->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_casf.c b/drivers/gpu/drm/i915/display/intel_casf.c
index 95339b496f24..b167af31de5b 100644
--- a/drivers/gpu/drm/i915/display/intel_casf.c
+++ b/drivers/gpu/drm/i915/display/intel_casf.c
@@ -3,7 +3,6 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_casf.h"
#include "intel_casf_regs.h"
#include "intel_de.h"
@@ -116,6 +115,12 @@ int intel_casf_compute_config(struct intel_crtc_state *crtc_state)
return 0;
}
+ /* CASF with joiner not supported in hardware */
+ if (crtc_state->joiner_pipes) {
+ drm_dbg_kms(display->drm, "CASF not supported with joiner\n");
+ return -EINVAL;
+ }
+
crtc_state->hw.casf_params.casf_enable = true;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index f5946e677c93..121a12c5b8ac 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -27,9 +27,9 @@
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_pcode_regs.h>
#include "hsw_ips.h"
-#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_cdclk.h"
@@ -42,8 +42,8 @@
#include "intel_display_wa.h"
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
+#include "intel_parent.h"
#include "intel_pci_config.h"
-#include "intel_pcode.h"
#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_step.h"
@@ -888,7 +888,7 @@ static void bdw_set_cdclk(struct intel_display *display,
"trying to change cdclk frequency with cdclk not enabled\n"))
return;
- ret = intel_pcode_write(display->drm, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
+ ret = intel_parent_pcode_write(display, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
if (ret) {
drm_err(display->drm,
"failed to inform pcode about cdclk change\n");
@@ -918,8 +918,8 @@ static void bdw_set_cdclk(struct intel_display *display,
if (ret)
drm_err(display->drm, "Switching back to LCPLL failed\n");
- intel_pcode_write(display->drm, HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_config->voltage_level);
+ intel_parent_pcode_write(display, HSW_PCODE_DE_WRITE_FREQ_REQ,
+ cdclk_config->voltage_level);
intel_de_write(display, CDCLK_FREQ,
DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
@@ -1175,10 +1175,10 @@ static void skl_set_cdclk(struct intel_display *display,
drm_WARN_ON_ONCE(display->drm,
display->platform.skylake && vco == 8640000);
- ret = intel_pcode_request(display->drm, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
+ ret = intel_parent_pcode_request(display, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
if (ret) {
drm_err(display->drm,
"Failed to inform PCU about cdclk change (%d)\n", ret);
@@ -1221,8 +1221,8 @@ static void skl_set_cdclk(struct intel_display *display,
intel_de_posting_read(display, CDCLK_CTL);
/* inform PCU of the change */
- intel_pcode_write(display->drm, SKL_PCODE_CDCLK_CONTROL,
- cdclk_config->voltage_level);
+ intel_parent_pcode_write(display, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_config->voltage_level);
intel_update_cdclk(display);
}
@@ -1870,7 +1870,7 @@ static void icl_cdclk_pll_disable(struct intel_display *display)
* after the PLL is enabled (which is already done as part of the
* normal flow of _bxt_set_cdclk()).
*/
- if (intel_display_wa(display, 13012396614))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_13012396614))
intel_de_rmw(display, CDCLK_CTL, MDCLK_SOURCE_SEL_MASK, MDCLK_SOURCE_SEL_CD2XCLK);
intel_de_rmw(display, BXT_DE_PLL_ENABLE,
@@ -2186,7 +2186,8 @@ static u32 bxt_cdclk_ctl(struct intel_display *display,
* icl_cdclk_pll_disable(). Here we are just making sure
* we keep the expected value.
*/
- if (intel_display_wa(display, 13012396614) && vco == 0)
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_13012396614) &&
+ vco == 0)
val |= MDCLK_SOURCE_SEL_CD2XCLK;
else
val |= xe2lpd_mdclk_source_sel(display);
@@ -2247,18 +2248,18 @@ static void bxt_set_cdclk(struct intel_display *display,
if (DISPLAY_VER(display) >= 14 || display->platform.dg2)
; /* NOOP */
else if (DISPLAY_VER(display) >= 11)
- ret = intel_pcode_request(display->drm, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
+ ret = intel_parent_pcode_request(display, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
else
/*
* BSpec requires us to wait up to 150usec, but that leads to
* timeouts; the 2ms used here is based on experiment.
*/
- ret = intel_pcode_write_timeout(display->drm,
- HSW_PCODE_DE_WRITE_FREQ_REQ,
- 0x80000000, 2);
+ ret = intel_parent_pcode_write_timeout(display,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ 0x80000000, 2);
if (ret) {
drm_err(display->drm,
@@ -2287,8 +2288,8 @@ static void bxt_set_cdclk(struct intel_display *display,
* Display versions 14 and beyond
*/;
else if (DISPLAY_VER(display) >= 11 && !display->platform.dg2)
- ret = intel_pcode_write(display->drm, SKL_PCODE_CDCLK_CONTROL,
- cdclk_config->voltage_level);
+ ret = intel_parent_pcode_write(display, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_config->voltage_level);
if (DISPLAY_VER(display) < 11) {
/*
* The timeout isn't specified, the 2ms used here is based on
@@ -2296,9 +2297,9 @@ static void bxt_set_cdclk(struct intel_display *display,
* FIXME: Waiting for the request completion could be delayed
* until the next PCODE request based on BSpec.
*/
- ret = intel_pcode_write_timeout(display->drm,
- HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_config->voltage_level, 2);
+ ret = intel_parent_pcode_write_timeout(display,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ cdclk_config->voltage_level, 2);
}
if (ret) {
drm_err(display->drm,
@@ -2598,11 +2599,11 @@ static void intel_pcode_notify(struct intel_display *display,
if (pipe_count_update_valid)
update_mask |= DISPLAY_TO_PCODE_PIPE_COUNT_VALID;
- ret = intel_pcode_request(display->drm, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE |
- update_mask,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
+ ret = intel_parent_pcode_request(display, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE |
+ update_mask,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
if (ret)
drm_err(display->drm,
"Failed to inform PCU about display config (err %d)\n",
@@ -4006,7 +4007,7 @@ void intel_init_cdclk_hooks(struct intel_display *display)
display->cdclk.table = dg2_cdclk_table;
} else if (display->platform.alderlake_p) {
/* Wa_22011320316:adl-p[a0] */
- if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_22011320316)) {
display->cdclk.table = adlp_a_step_cdclk_table;
display->funcs.cdclk = &tgl_cdclk_funcs;
} else if (display->platform.alderlake_p_raptorlake_u) {
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 59d637c6a187..6aa6a1dd6e1b 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -33,6 +33,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <video/vga.h>
#include "intel_connector.h"
#include "intel_crt.h"
@@ -55,6 +56,7 @@
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pfit.h"
+#include "intel_vga.h"
/* Here's the desired hotplug mode */
#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_ENABLE | \
@@ -691,6 +693,11 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
return ret;
}
+static bool intel_crt_sense_above_threshold(struct intel_display *display)
+{
+ return intel_vga_read(display, VGA_IS0_R, true) & (1 << 4);
+}
+
static enum drm_connector_status
intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
{
@@ -702,7 +709,6 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
u32 vsample;
u32 vblank, vblank_start, vblank_end;
u32 dsl;
- u8 st00;
enum drm_connector_status status;
drm_dbg_kms(display->drm, "starting load-detect on CRT\n");
@@ -736,8 +742,8 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
* border color for Color info.
*/
intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(display, pipe));
- st00 = intel_de_read8(display, _VGA_MSR_WRITE);
- status = ((st00 & (1 << 4)) != 0) ?
+
+ status = intel_crt_sense_above_threshold(display) ?
connector_status_connected :
connector_status_disconnected;
@@ -777,15 +783,13 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
while ((dsl = intel_de_read(display, PIPEDSL(display, pipe))) <= vsample)
;
/*
- * Watch ST00 for an entire scanline
+ * Watch sense for an entire scanline
*/
detect = 0;
count = 0;
do {
count++;
- /* Read the ST00 VGA status register */
- st00 = intel_de_read8(display, _VGA_MSR_WRITE);
- if (st00 & (1 << 4))
+ if (intel_crt_sense_above_threshold(display))
detect++;
} while ((intel_de_read(display, PIPEDSL(display, pipe)) == dsl));
diff --git a/drivers/gpu/drm/i915/display/intel_crt_regs.h b/drivers/gpu/drm/i915/display/intel_crt_regs.h
index 571a67ae9afa..9a93020b9a7e 100644
--- a/drivers/gpu/drm/i915/display/intel_crt_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_crt_regs.h
@@ -45,6 +45,4 @@
#define ADPA_VSYNC_ACTIVE_HIGH REG_BIT(4)
#define ADPA_HSYNC_ACTIVE_HIGH REG_BIT(3)
-#define _VGA_MSR_WRITE _MMIO(0x3c2)
-
#endif /* __INTEL_CRT_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 53378d2dcbec..b8189cd5d864 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -747,7 +747,9 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
* which would cause the next frame to terminate already at vmin
* vblank start instead of vmax vblank start.
*/
- if (!state->base.legacy_cursor_update)
+ if (!state->base.legacy_cursor_update ||
+ (intel_psr_use_trans_push(new_crtc_state) &&
+ !new_crtc_state->vrr.enable))
intel_vrr_send_push(NULL, new_crtc_state);
local_irq_enable();
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 2c5d917fbd7e..18d1014de361 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -19,6 +19,7 @@
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
+#include "intel_display_wa.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
@@ -424,7 +425,7 @@ static u32 i9xx_cursor_ctl(const struct intel_plane_state *plane_state)
cntl |= MCURSOR_ROTATE_180;
/* Wa_22012358565:adl-p */
- if (DISPLAY_VER(display) == 13)
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_22012358565))
cntl |= MCURSOR_ARB_SLOTS(1);
return cntl;
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 7288065d2461..6a471c021c0e 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -18,6 +18,7 @@
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_dp.h"
+#include "intel_dpll.h"
#include "intel_hdmi.h"
#include "intel_lt_phy.h"
#include "intel_panel.h"
@@ -127,8 +128,8 @@ static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, struct
intel_display_power_put(display, POWER_DOMAIN_DC_OFF, wakeref);
}
-void intel_clear_response_ready_flag(struct intel_encoder *encoder,
- int lane)
+void intel_cx0_clear_response_ready_flag(struct intel_encoder *encoder,
+ int lane)
{
struct intel_display *display = to_intel_display(encoder);
@@ -155,7 +156,7 @@ void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
return;
}
- intel_clear_response_ready_flag(encoder, lane);
+ intel_cx0_clear_response_ready_flag(encoder, lane);
}
int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
@@ -222,6 +223,8 @@ static int __intel_cx0_read_once(struct intel_encoder *encoder,
return -ETIMEDOUT;
}
+ intel_cx0_clear_response_ready_flag(encoder, lane);
+
intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING |
XELPDP_PORT_M2P_COMMAND_READ |
@@ -231,7 +234,7 @@ static int __intel_cx0_read_once(struct intel_encoder *encoder,
if (ack < 0)
return ack;
- intel_clear_response_ready_flag(encoder, lane);
+ intel_cx0_clear_response_ready_flag(encoder, lane);
/*
* FIXME: Workaround to let HW to settle
@@ -293,6 +296,8 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder,
return -ETIMEDOUT;
}
+ intel_cx0_clear_response_ready_flag(encoder, lane);
+
intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING |
(committed ? XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED :
@@ -321,7 +326,7 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder,
return -EINVAL;
}
- intel_clear_response_ready_flag(encoder, lane);
+ intel_cx0_clear_response_ready_flag(encoder, lane);
/*
* FIXME: Workaround to let HW to settle
@@ -547,7 +552,6 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
*/
static const struct intel_c10pll_state mtl_c10_dp_rbr = {
- .clock = 162000,
.tx = 0x10,
.cmn = 0x21,
.pll[0] = 0xB4,
@@ -573,7 +577,6 @@ static const struct intel_c10pll_state mtl_c10_dp_rbr = {
};
static const struct intel_c10pll_state mtl_c10_edp_r216 = {
- .clock = 216000,
.tx = 0x10,
.cmn = 0x21,
.pll[0] = 0x4,
@@ -599,7 +602,6 @@ static const struct intel_c10pll_state mtl_c10_edp_r216 = {
};
static const struct intel_c10pll_state mtl_c10_edp_r243 = {
- .clock = 243000,
.tx = 0x10,
.cmn = 0x21,
.pll[0] = 0x34,
@@ -625,7 +627,6 @@ static const struct intel_c10pll_state mtl_c10_edp_r243 = {
};
static const struct intel_c10pll_state mtl_c10_dp_hbr1 = {
- .clock = 270000,
.tx = 0x10,
.cmn = 0x21,
.pll[0] = 0xF4,
@@ -651,7 +652,6 @@ static const struct intel_c10pll_state mtl_c10_dp_hbr1 = {
};
static const struct intel_c10pll_state mtl_c10_edp_r324 = {
- .clock = 324000,
.tx = 0x10,
.cmn = 0x21,
.pll[0] = 0xB4,
@@ -677,7 +677,6 @@ static const struct intel_c10pll_state mtl_c10_edp_r324 = {
};
static const struct intel_c10pll_state mtl_c10_edp_r432 = {
- .clock = 432000,
.tx = 0x10,
.cmn = 0x21,
.pll[0] = 0x4,
@@ -703,7 +702,6 @@ static const struct intel_c10pll_state mtl_c10_edp_r432 = {
};
static const struct intel_c10pll_state mtl_c10_dp_hbr2 = {
- .clock = 540000,
.tx = 0x10,
.cmn = 0x21,
.pll[0] = 0xF4,
@@ -729,7 +727,6 @@ static const struct intel_c10pll_state mtl_c10_dp_hbr2 = {
};
static const struct intel_c10pll_state mtl_c10_edp_r675 = {
- .clock = 675000,
.tx = 0x10,
.cmn = 0x21,
.pll[0] = 0xB4,
@@ -755,7 +752,6 @@ static const struct intel_c10pll_state mtl_c10_edp_r675 = {
};
static const struct intel_c10pll_state mtl_c10_dp_hbr3 = {
- .clock = 810000,
.tx = 0x10,
.cmn = 0x21,
.pll[0] = 0x34,
@@ -780,30 +776,62 @@ static const struct intel_c10pll_state mtl_c10_dp_hbr3 = {
.pll[19] = 0x23,
};
-static const struct intel_c10pll_state * const mtl_c10_dp_tables[] = {
- &mtl_c10_dp_rbr,
- &mtl_c10_dp_hbr1,
- &mtl_c10_dp_hbr2,
- &mtl_c10_dp_hbr3,
- NULL,
+struct intel_cx0pll_params {
+ const char *name;
+ bool is_c10;
+ bool is_hdmi;
+ int clock_rate;
+ union {
+ const struct intel_c10pll_state *c10;
+ const struct intel_c20pll_state *c20;
+ };
};
-static const struct intel_c10pll_state * const mtl_c10_edp_tables[] = {
- &mtl_c10_dp_rbr,
- &mtl_c10_edp_r216,
- &mtl_c10_edp_r243,
- &mtl_c10_dp_hbr1,
- &mtl_c10_edp_r324,
- &mtl_c10_edp_r432,
- &mtl_c10_dp_hbr2,
- &mtl_c10_edp_r675,
- &mtl_c10_dp_hbr3,
- NULL,
+#define __C10PLL_PARAMS(__is_hdmi, __clock_rate, __state) { \
+ .name = __stringify(__state), \
+ .is_c10 = true, \
+ .is_hdmi = __is_hdmi, \
+ .clock_rate = __clock_rate, \
+ .c10 = &__state, \
+}
+
+#define __C20PLL_PARAMS(__is_hdmi, __clock_rate, __state) { \
+ .name = __stringify(__state), \
+ .is_c10 = false, \
+ .is_hdmi = __is_hdmi, \
+ .clock_rate = __clock_rate, \
+ .c20 = &__state, \
+}
+
+#define C10PLL_HDMI_PARAMS(__clock_rate, __state) __C10PLL_PARAMS(true, __clock_rate, __state)
+#define C10PLL_DP_PARAMS(__clock_rate, __state) __C10PLL_PARAMS(false, __clock_rate, __state)
+
+#define C20PLL_HDMI_PARAMS(__clock_rate, __state) __C20PLL_PARAMS(true, __clock_rate, __state)
+#define C20PLL_DP_PARAMS(__clock_rate, __state) __C20PLL_PARAMS(false, __clock_rate, __state)
+
+static const struct intel_cx0pll_params mtl_c10_dp_tables[] = {
+ C10PLL_DP_PARAMS(162000, mtl_c10_dp_rbr),
+ C10PLL_DP_PARAMS(270000, mtl_c10_dp_hbr1),
+ C10PLL_DP_PARAMS(540000, mtl_c10_dp_hbr2),
+ C10PLL_DP_PARAMS(810000, mtl_c10_dp_hbr3),
+ {}
+};
+
+static const struct intel_cx0pll_params mtl_c10_edp_tables[] = {
+ C10PLL_DP_PARAMS(162000, mtl_c10_dp_rbr),
+ C10PLL_DP_PARAMS(216000, mtl_c10_edp_r216),
+ C10PLL_DP_PARAMS(243000, mtl_c10_edp_r243),
+ C10PLL_DP_PARAMS(270000, mtl_c10_dp_hbr1),
+ C10PLL_DP_PARAMS(324000, mtl_c10_edp_r324),
+ C10PLL_DP_PARAMS(432000, mtl_c10_edp_r432),
+ C10PLL_DP_PARAMS(540000, mtl_c10_dp_hbr2),
+ C10PLL_DP_PARAMS(675000, mtl_c10_edp_r675),
+ C10PLL_DP_PARAMS(810000, mtl_c10_dp_hbr3),
+ {}
};
/* C20 basic DP 1.4 tables */
static const struct intel_c20pll_state mtl_c20_dp_rbr = {
- .clock = 162000,
.tx = { 0xbe88, /* tx cfg0 */
0x5800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -828,7 +856,6 @@ static const struct intel_c20pll_state mtl_c20_dp_rbr = {
};
static const struct intel_c20pll_state mtl_c20_dp_hbr1 = {
- .clock = 270000,
.tx = { 0xbe88, /* tx cfg0 */
0x4800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -853,7 +880,6 @@ static const struct intel_c20pll_state mtl_c20_dp_hbr1 = {
};
static const struct intel_c20pll_state mtl_c20_dp_hbr2 = {
- .clock = 540000,
.tx = { 0xbe88, /* tx cfg0 */
0x4800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -878,7 +904,6 @@ static const struct intel_c20pll_state mtl_c20_dp_hbr2 = {
};
static const struct intel_c20pll_state mtl_c20_dp_hbr3 = {
- .clock = 810000,
.tx = { 0xbe88, /* tx cfg0 */
0x4800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -904,7 +929,6 @@ static const struct intel_c20pll_state mtl_c20_dp_hbr3 = {
/* C20 basic DP 2.0 tables */
static const struct intel_c20pll_state mtl_c20_dp_uhbr10 = {
- .clock = 1000000, /* 10 Gbps */
.tx = { 0xbe21, /* tx cfg0 */
0xe800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -928,7 +952,6 @@ static const struct intel_c20pll_state mtl_c20_dp_uhbr10 = {
};
static const struct intel_c20pll_state mtl_c20_dp_uhbr13_5 = {
- .clock = 1350000, /* 13.5 Gbps */
.tx = { 0xbea0, /* tx cfg0 */
0x4800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -953,7 +976,6 @@ static const struct intel_c20pll_state mtl_c20_dp_uhbr13_5 = {
};
static const struct intel_c20pll_state mtl_c20_dp_uhbr20 = {
- .clock = 2000000, /* 20 Gbps */
.tx = { 0xbe20, /* tx cfg0 */
0x4800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -976,15 +998,15 @@ static const struct intel_c20pll_state mtl_c20_dp_uhbr20 = {
},
};
-static const struct intel_c20pll_state * const mtl_c20_dp_tables[] = {
- &mtl_c20_dp_rbr,
- &mtl_c20_dp_hbr1,
- &mtl_c20_dp_hbr2,
- &mtl_c20_dp_hbr3,
- &mtl_c20_dp_uhbr10,
- &mtl_c20_dp_uhbr13_5,
- &mtl_c20_dp_uhbr20,
- NULL,
+static const struct intel_cx0pll_params mtl_c20_dp_tables[] = {
+ C20PLL_DP_PARAMS(162000, mtl_c20_dp_rbr),
+ C20PLL_DP_PARAMS(270000, mtl_c20_dp_hbr1),
+ C20PLL_DP_PARAMS(540000, mtl_c20_dp_hbr2),
+ C20PLL_DP_PARAMS(810000, mtl_c20_dp_hbr3),
+ C20PLL_DP_PARAMS(1000000, mtl_c20_dp_uhbr10),
+ C20PLL_DP_PARAMS(1350000, mtl_c20_dp_uhbr13_5),
+ C20PLL_DP_PARAMS(2000000, mtl_c20_dp_uhbr20),
+ {}
};
/*
@@ -992,7 +1014,6 @@ static const struct intel_c20pll_state * const mtl_c20_dp_tables[] = {
*/
static const struct intel_c20pll_state xe2hpd_c20_edp_r216 = {
- .clock = 216000,
.tx = { 0xbe88,
0x4800,
0x0000,
@@ -1017,7 +1038,6 @@ static const struct intel_c20pll_state xe2hpd_c20_edp_r216 = {
};
static const struct intel_c20pll_state xe2hpd_c20_edp_r243 = {
- .clock = 243000,
.tx = { 0xbe88,
0x4800,
0x0000,
@@ -1042,7 +1062,6 @@ static const struct intel_c20pll_state xe2hpd_c20_edp_r243 = {
};
static const struct intel_c20pll_state xe2hpd_c20_edp_r324 = {
- .clock = 324000,
.tx = { 0xbe88,
0x4800,
0x0000,
@@ -1067,7 +1086,6 @@ static const struct intel_c20pll_state xe2hpd_c20_edp_r324 = {
};
static const struct intel_c20pll_state xe2hpd_c20_edp_r432 = {
- .clock = 432000,
.tx = { 0xbe88,
0x4800,
0x0000,
@@ -1092,7 +1110,6 @@ static const struct intel_c20pll_state xe2hpd_c20_edp_r432 = {
};
static const struct intel_c20pll_state xe2hpd_c20_edp_r675 = {
- .clock = 675000,
.tx = { 0xbe88,
0x4800,
0x0000,
@@ -1116,21 +1133,20 @@ static const struct intel_c20pll_state xe2hpd_c20_edp_r675 = {
},
};
-static const struct intel_c20pll_state * const xe2hpd_c20_edp_tables[] = {
- &mtl_c20_dp_rbr,
- &xe2hpd_c20_edp_r216,
- &xe2hpd_c20_edp_r243,
- &mtl_c20_dp_hbr1,
- &xe2hpd_c20_edp_r324,
- &xe2hpd_c20_edp_r432,
- &mtl_c20_dp_hbr2,
- &xe2hpd_c20_edp_r675,
- &mtl_c20_dp_hbr3,
- NULL,
+static const struct intel_cx0pll_params xe2hpd_c20_edp_tables[] = {
+ C20PLL_DP_PARAMS(162000, mtl_c20_dp_rbr),
+ C20PLL_DP_PARAMS(216000, xe2hpd_c20_edp_r216),
+ C20PLL_DP_PARAMS(243000, xe2hpd_c20_edp_r243),
+ C20PLL_DP_PARAMS(270000, mtl_c20_dp_hbr1),
+ C20PLL_DP_PARAMS(324000, xe2hpd_c20_edp_r324),
+ C20PLL_DP_PARAMS(432000, xe2hpd_c20_edp_r432),
+ C20PLL_DP_PARAMS(540000, mtl_c20_dp_hbr2),
+ C20PLL_DP_PARAMS(675000, xe2hpd_c20_edp_r675),
+ C20PLL_DP_PARAMS(810000, mtl_c20_dp_hbr3),
+ {}
};
static const struct intel_c20pll_state xe2hpd_c20_dp_uhbr13_5 = {
- .clock = 1350000, /* 13.5 Gbps */
.tx = { 0xbea0, /* tx cfg0 */
0x4800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1154,30 +1170,30 @@ static const struct intel_c20pll_state xe2hpd_c20_dp_uhbr13_5 = {
},
};
-static const struct intel_c20pll_state * const xe2hpd_c20_dp_tables[] = {
- &mtl_c20_dp_rbr,
- &mtl_c20_dp_hbr1,
- &mtl_c20_dp_hbr2,
- &mtl_c20_dp_hbr3,
- &mtl_c20_dp_uhbr10,
- &xe2hpd_c20_dp_uhbr13_5,
- NULL,
-};
-
-static const struct intel_c20pll_state * const xe3lpd_c20_dp_edp_tables[] = {
- &mtl_c20_dp_rbr,
- &xe2hpd_c20_edp_r216,
- &xe2hpd_c20_edp_r243,
- &mtl_c20_dp_hbr1,
- &xe2hpd_c20_edp_r324,
- &xe2hpd_c20_edp_r432,
- &mtl_c20_dp_hbr2,
- &xe2hpd_c20_edp_r675,
- &mtl_c20_dp_hbr3,
- &mtl_c20_dp_uhbr10,
- &xe2hpd_c20_dp_uhbr13_5,
- &mtl_c20_dp_uhbr20,
- NULL,
+static const struct intel_cx0pll_params xe2hpd_c20_dp_tables[] = {
+ C20PLL_DP_PARAMS(162000, mtl_c20_dp_rbr),
+ C20PLL_DP_PARAMS(270000, mtl_c20_dp_hbr1),
+ C20PLL_DP_PARAMS(540000, mtl_c20_dp_hbr2),
+ C20PLL_DP_PARAMS(810000, mtl_c20_dp_hbr3),
+ C20PLL_DP_PARAMS(1000000, mtl_c20_dp_uhbr10),
+ C20PLL_DP_PARAMS(1350000, xe2hpd_c20_dp_uhbr13_5),
+ {}
+};
+
+static const struct intel_cx0pll_params xe3lpd_c20_dp_edp_tables[] = {
+ C20PLL_DP_PARAMS(162000, mtl_c20_dp_rbr),
+ C20PLL_DP_PARAMS(216000, xe2hpd_c20_edp_r216),
+ C20PLL_DP_PARAMS(243000, xe2hpd_c20_edp_r243),
+ C20PLL_DP_PARAMS(270000, mtl_c20_dp_hbr1),
+ C20PLL_DP_PARAMS(324000, xe2hpd_c20_edp_r324),
+ C20PLL_DP_PARAMS(432000, xe2hpd_c20_edp_r432),
+ C20PLL_DP_PARAMS(540000, mtl_c20_dp_hbr2),
+ C20PLL_DP_PARAMS(675000, xe2hpd_c20_edp_r675),
+ C20PLL_DP_PARAMS(810000, mtl_c20_dp_hbr3),
+ C20PLL_DP_PARAMS(1000000, mtl_c20_dp_uhbr10),
+ C20PLL_DP_PARAMS(1350000, xe2hpd_c20_dp_uhbr13_5),
+ C20PLL_DP_PARAMS(2000000, mtl_c20_dp_uhbr20),
+ {}
};
/*
@@ -1185,7 +1201,6 @@ static const struct intel_c20pll_state * const xe3lpd_c20_dp_edp_tables[] = {
*/
static const struct intel_c10pll_state mtl_c10_hdmi_25_2 = {
- .clock = 25200,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x4,
@@ -1211,7 +1226,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_25_2 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_27_0 = {
- .clock = 27000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x34,
@@ -1237,7 +1251,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_27_0 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_74_25 = {
- .clock = 74250,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4,
@@ -1263,7 +1276,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_74_25 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_148_5 = {
- .clock = 148500,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4,
@@ -1289,7 +1301,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_148_5 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_594 = {
- .clock = 594000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4,
@@ -1316,7 +1327,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_594 = {
/* Precomputed C10 HDMI PLL tables */
static const struct intel_c10pll_state mtl_c10_hdmi_27027 = {
- .clock = 27027,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xC0, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1326,7 +1336,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_27027 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_28320 = {
- .clock = 28320,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x04, .pll[1] = 0x00, .pll[2] = 0xCC, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1336,7 +1345,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_28320 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_30240 = {
- .clock = 30240,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x04, .pll[1] = 0x00, .pll[2] = 0xDC, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1346,7 +1354,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_30240 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_31500 = {
- .clock = 31500,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x62, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1356,7 +1363,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_31500 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_36000 = {
- .clock = 36000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xC4, .pll[1] = 0x00, .pll[2] = 0x76, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1366,7 +1372,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_36000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_40000 = {
- .clock = 40000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x86, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1376,7 +1381,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_40000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_49500 = {
- .clock = 49500,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xAE, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1386,7 +1390,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_49500 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_50000 = {
- .clock = 50000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xB0, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1396,7 +1399,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_50000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_57284 = {
- .clock = 57284,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xCE, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1406,7 +1408,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_57284 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_58000 = {
- .clock = 58000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xD0, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1416,7 +1417,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_58000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_65000 = {
- .clock = 65000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x66, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1426,7 +1426,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_65000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_71000 = {
- .clock = 71000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x72, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1436,7 +1435,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_71000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_74176 = {
- .clock = 74176,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1446,7 +1444,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_74176 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_75000 = {
- .clock = 75000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7C, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1456,7 +1453,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_75000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_78750 = {
- .clock = 78750,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x84, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1466,7 +1462,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_78750 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_85500 = {
- .clock = 85500,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x92, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1476,7 +1471,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_85500 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_88750 = {
- .clock = 88750,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0x98, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1486,7 +1480,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_88750 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_106500 = {
- .clock = 106500,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xBC, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1496,7 +1489,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_106500 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_108000 = {
- .clock = 108000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xC0, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1506,7 +1498,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_108000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_115500 = {
- .clock = 115500,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xD0, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1516,7 +1507,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_115500 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_119000 = {
- .clock = 119000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xD6, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1526,7 +1516,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_119000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_135000 = {
- .clock = 135000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x6C, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1536,7 +1525,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_135000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_138500 = {
- .clock = 138500,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x70, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1546,7 +1534,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_138500 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_147160 = {
- .clock = 147160,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x78, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1556,7 +1543,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_147160 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_148352 = {
- .clock = 148352,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1566,7 +1552,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_148352 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_154000 = {
- .clock = 154000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x80, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1576,7 +1561,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_154000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_162000 = {
- .clock = 162000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x88, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1586,7 +1570,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_162000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_167000 = {
- .clock = 167000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x8C, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1596,7 +1579,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_167000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_197802 = {
- .clock = 197802,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xAE, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1606,7 +1588,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_197802 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_198000 = {
- .clock = 198000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xAE, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1616,7 +1597,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_198000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_209800 = {
- .clock = 209800,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xBA, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1626,7 +1606,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_209800 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_241500 = {
- .clock = 241500,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xDA, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1636,7 +1615,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_241500 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_262750 = {
- .clock = 262750,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x68, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1646,7 +1624,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_262750 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_268500 = {
- .clock = 268500,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x6A, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1656,7 +1633,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_268500 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_296703 = {
- .clock = 296703,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1666,7 +1642,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_296703 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_297000 = {
- .clock = 297000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1676,7 +1651,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_297000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_319750 = {
- .clock = 319750,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x86, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1686,7 +1660,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_319750 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_497750 = {
- .clock = 497750,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xE2, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1696,7 +1669,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_497750 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_592000 = {
- .clock = 592000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1706,7 +1678,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_592000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_593407 = {
- .clock = 593407,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1715,82 +1686,56 @@ static const struct intel_c10pll_state mtl_c10_hdmi_593407 = {
.pll[15] = 0x08, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
};
-static const struct intel_c10pll_state * const mtl_c10_hdmi_tables[] = {
- &mtl_c10_hdmi_25_2, /* Consolidated Table */
- &mtl_c10_hdmi_27_0, /* Consolidated Table */
- &mtl_c10_hdmi_27027,
- &mtl_c10_hdmi_28320,
- &mtl_c10_hdmi_30240,
- &mtl_c10_hdmi_31500,
- &mtl_c10_hdmi_36000,
- &mtl_c10_hdmi_40000,
- &mtl_c10_hdmi_49500,
- &mtl_c10_hdmi_50000,
- &mtl_c10_hdmi_57284,
- &mtl_c10_hdmi_58000,
- &mtl_c10_hdmi_65000,
- &mtl_c10_hdmi_71000,
- &mtl_c10_hdmi_74176,
- &mtl_c10_hdmi_74_25, /* Consolidated Table */
- &mtl_c10_hdmi_75000,
- &mtl_c10_hdmi_78750,
- &mtl_c10_hdmi_85500,
- &mtl_c10_hdmi_88750,
- &mtl_c10_hdmi_106500,
- &mtl_c10_hdmi_108000,
- &mtl_c10_hdmi_115500,
- &mtl_c10_hdmi_119000,
- &mtl_c10_hdmi_135000,
- &mtl_c10_hdmi_138500,
- &mtl_c10_hdmi_147160,
- &mtl_c10_hdmi_148352,
- &mtl_c10_hdmi_148_5, /* Consolidated Table */
- &mtl_c10_hdmi_154000,
- &mtl_c10_hdmi_162000,
- &mtl_c10_hdmi_167000,
- &mtl_c10_hdmi_197802,
- &mtl_c10_hdmi_198000,
- &mtl_c10_hdmi_209800,
- &mtl_c10_hdmi_241500,
- &mtl_c10_hdmi_262750,
- &mtl_c10_hdmi_268500,
- &mtl_c10_hdmi_296703,
- &mtl_c10_hdmi_297000,
- &mtl_c10_hdmi_319750,
- &mtl_c10_hdmi_497750,
- &mtl_c10_hdmi_592000,
- &mtl_c10_hdmi_593407,
- &mtl_c10_hdmi_594, /* Consolidated Table */
- NULL,
-};
-
-static const struct intel_c20pll_state mtl_c20_hdmi_25_175 = {
- .clock = 25175,
- .tx = { 0xbe88, /* tx cfg0 */
- 0x9800, /* tx cfg1 */
- 0x0000, /* tx cfg2 */
- },
- .cmn = { 0x0500, /* cmn cfg0*/
- 0x0005, /* cmn cfg1 */
- 0x0000, /* cmn cfg2 */
- 0x0000, /* cmn cfg3 */
- },
- .mpllb = { 0xa0d2, /* mpllb cfg0 */
- 0x7d80, /* mpllb cfg1 */
- 0x0906, /* mpllb cfg2 */
- 0xbe40, /* mpllb cfg3 */
- 0x0000, /* mpllb cfg4 */
- 0x0000, /* mpllb cfg5 */
- 0x0200, /* mpllb cfg6 */
- 0x0001, /* mpllb cfg7 */
- 0x0000, /* mpllb cfg8 */
- 0x0000, /* mpllb cfg9 */
- 0x0001, /* mpllb cfg10 */
- },
+static const struct intel_cx0pll_params mtl_c10_hdmi_tables[] = {
+ C10PLL_HDMI_PARAMS(25200, mtl_c10_hdmi_25_2), /* Consolidated Table */
+ C10PLL_HDMI_PARAMS(27000, mtl_c10_hdmi_27_0), /* Consolidated Table */
+ C10PLL_HDMI_PARAMS(27027, mtl_c10_hdmi_27027),
+ C10PLL_HDMI_PARAMS(28320, mtl_c10_hdmi_28320),
+ C10PLL_HDMI_PARAMS(30240, mtl_c10_hdmi_30240),
+ C10PLL_HDMI_PARAMS(31500, mtl_c10_hdmi_31500),
+ C10PLL_HDMI_PARAMS(36000, mtl_c10_hdmi_36000),
+ C10PLL_HDMI_PARAMS(40000, mtl_c10_hdmi_40000),
+ C10PLL_HDMI_PARAMS(49500, mtl_c10_hdmi_49500),
+ C10PLL_HDMI_PARAMS(50000, mtl_c10_hdmi_50000),
+ C10PLL_HDMI_PARAMS(57284, mtl_c10_hdmi_57284),
+ C10PLL_HDMI_PARAMS(58000, mtl_c10_hdmi_58000),
+ C10PLL_HDMI_PARAMS(65000, mtl_c10_hdmi_65000),
+ C10PLL_HDMI_PARAMS(71000, mtl_c10_hdmi_71000),
+ C10PLL_HDMI_PARAMS(74176, mtl_c10_hdmi_74176),
+ C10PLL_HDMI_PARAMS(74250, mtl_c10_hdmi_74_25), /* Consolidated Table */
+ C10PLL_HDMI_PARAMS(75000, mtl_c10_hdmi_75000),
+ C10PLL_HDMI_PARAMS(78750, mtl_c10_hdmi_78750),
+ C10PLL_HDMI_PARAMS(85500, mtl_c10_hdmi_85500),
+ C10PLL_HDMI_PARAMS(88750, mtl_c10_hdmi_88750),
+ C10PLL_HDMI_PARAMS(106500, mtl_c10_hdmi_106500),
+ C10PLL_HDMI_PARAMS(108000, mtl_c10_hdmi_108000),
+ C10PLL_HDMI_PARAMS(115500, mtl_c10_hdmi_115500),
+ C10PLL_HDMI_PARAMS(119000, mtl_c10_hdmi_119000),
+ C10PLL_HDMI_PARAMS(135000, mtl_c10_hdmi_135000),
+ C10PLL_HDMI_PARAMS(138500, mtl_c10_hdmi_138500),
+ C10PLL_HDMI_PARAMS(147160, mtl_c10_hdmi_147160),
+ C10PLL_HDMI_PARAMS(148352, mtl_c10_hdmi_148352),
+ C10PLL_HDMI_PARAMS(148500, mtl_c10_hdmi_148_5), /* Consolidated Table */
+ C10PLL_HDMI_PARAMS(154000, mtl_c10_hdmi_154000),
+ C10PLL_HDMI_PARAMS(162000, mtl_c10_hdmi_162000),
+ C10PLL_HDMI_PARAMS(167000, mtl_c10_hdmi_167000),
+ C10PLL_HDMI_PARAMS(197802, mtl_c10_hdmi_197802),
+ C10PLL_HDMI_PARAMS(198000, mtl_c10_hdmi_198000),
+ C10PLL_HDMI_PARAMS(209800, mtl_c10_hdmi_209800),
+ C10PLL_HDMI_PARAMS(241500, mtl_c10_hdmi_241500),
+ C10PLL_HDMI_PARAMS(262750, mtl_c10_hdmi_262750),
+ C10PLL_HDMI_PARAMS(268500, mtl_c10_hdmi_268500),
+ C10PLL_HDMI_PARAMS(296703, mtl_c10_hdmi_296703),
+ C10PLL_HDMI_PARAMS(297000, mtl_c10_hdmi_297000),
+ C10PLL_HDMI_PARAMS(319750, mtl_c10_hdmi_319750),
+ C10PLL_HDMI_PARAMS(497750, mtl_c10_hdmi_497750),
+ C10PLL_HDMI_PARAMS(592000, mtl_c10_hdmi_592000),
+ C10PLL_HDMI_PARAMS(593407, mtl_c10_hdmi_593407),
+ C10PLL_HDMI_PARAMS(594000, mtl_c10_hdmi_594), /* Consolidated Table */
+ {}
};
static const struct intel_c20pll_state mtl_c20_hdmi_27_0 = {
- .clock = 27000,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1815,7 +1760,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_27_0 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_74_25 = {
- .clock = 74250,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1840,7 +1784,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_74_25 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_148_5 = {
- .clock = 148500,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1865,7 +1808,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_148_5 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_594 = {
- .clock = 594000,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1890,7 +1832,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_594 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_300 = {
- .clock = 3000000,
.tx = { 0xbe98, /* tx cfg0 */
0x8800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1915,7 +1856,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_300 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_600 = {
- .clock = 6000000,
.tx = { 0xbe98, /* tx cfg0 */
0x8800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1940,7 +1880,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_600 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_800 = {
- .clock = 8000000,
.tx = { 0xbe98, /* tx cfg0 */
0x8800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1965,7 +1904,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_800 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_1000 = {
- .clock = 10000000,
.tx = { 0xbe98, /* tx cfg0 */
0x8800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1990,7 +1928,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_1000 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_1200 = {
- .clock = 12000000,
.tx = { 0xbe98, /* tx cfg0 */
0x8800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -2014,21 +1951,20 @@ static const struct intel_c20pll_state mtl_c20_hdmi_1200 = {
},
};
-static const struct intel_c20pll_state * const mtl_c20_hdmi_tables[] = {
- &mtl_c20_hdmi_25_175,
- &mtl_c20_hdmi_27_0,
- &mtl_c20_hdmi_74_25,
- &mtl_c20_hdmi_148_5,
- &mtl_c20_hdmi_594,
- &mtl_c20_hdmi_300,
- &mtl_c20_hdmi_600,
- &mtl_c20_hdmi_800,
- &mtl_c20_hdmi_1000,
- &mtl_c20_hdmi_1200,
- NULL,
+static const struct intel_cx0pll_params mtl_c20_hdmi_tables[] = {
+ C20PLL_HDMI_PARAMS(27000, mtl_c20_hdmi_27_0),
+ C20PLL_HDMI_PARAMS(74250, mtl_c20_hdmi_74_25),
+ C20PLL_HDMI_PARAMS(148500, mtl_c20_hdmi_148_5),
+ C20PLL_HDMI_PARAMS(594000, mtl_c20_hdmi_594),
+ C20PLL_HDMI_PARAMS(300000, mtl_c20_hdmi_300),
+ C20PLL_HDMI_PARAMS(600000, mtl_c20_hdmi_600),
+ C20PLL_HDMI_PARAMS(800000, mtl_c20_hdmi_800),
+ C20PLL_HDMI_PARAMS(1000000, mtl_c20_hdmi_1000),
+ C20PLL_HDMI_PARAMS(1200000, mtl_c20_hdmi_1200),
+ {}
};
-static const struct intel_c10pll_state * const *
+static const struct intel_cx0pll_params *
intel_c10pll_tables_get(const struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
@@ -2103,21 +2039,99 @@ static bool cx0pll_state_is_dp(const struct intel_cx0pll_state *pll_state)
return c20pll_state_is_dp(&pll_state->c20);
}
+static int intel_c10pll_calc_port_clock(const struct intel_c10pll_state *pll_state)
+{
+ unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1;
+ unsigned int multiplier, tx_clk_div, hdmi_div, refclk = 38400;
+ int tmpclk = 0;
+
+ if (pll_state->pll[0] & C10_PLL0_FRACEN) {
+ frac_quot = pll_state->pll[12] << 8 | pll_state->pll[11];
+ frac_rem = pll_state->pll[14] << 8 | pll_state->pll[13];
+ frac_den = pll_state->pll[10] << 8 | pll_state->pll[9];
+ }
+
+ multiplier = (REG_FIELD_GET8(C10_PLL3_MULTIPLIERH_MASK, pll_state->pll[3]) << 8 |
+ pll_state->pll[2]) / 2 + 16;
+
+ tx_clk_div = REG_FIELD_GET8(C10_PLL15_TXCLKDIV_MASK, pll_state->pll[15]);
+ hdmi_div = REG_FIELD_GET8(C10_PLL15_HDMIDIV_MASK, pll_state->pll[15]);
+
+ tmpclk = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, (multiplier << 16) + frac_quot) +
+ DIV_ROUND_CLOSEST(refclk * frac_rem, frac_den),
+ 10 << (tx_clk_div + 16));
+ tmpclk *= (hdmi_div ? 2 : 1);
+
+ return tmpclk;
+}
+
+static bool intel_c20phy_use_mpllb(const struct intel_c20pll_state *state)
+{
+ return state->tx[0] & C20_PHY_USE_MPLLB;
+}
+
+static int intel_c20pll_calc_port_clock(const struct intel_c20pll_state *pll_state)
+{
+ unsigned int frac, frac_en, frac_quot, frac_rem, frac_den;
+ unsigned int multiplier, refclk = 38400;
+ unsigned int tx_clk_div;
+ unsigned int ref_clk_mpllb_div;
+ unsigned int fb_clk_div4_en;
+ unsigned int ref, vco;
+ unsigned int tx_rate_mult;
+ unsigned int tx_rate = REG_FIELD_GET(C20_PHY_TX_RATE, pll_state->tx[0]);
+
+ if (intel_c20phy_use_mpllb(pll_state)) {
+ tx_rate_mult = 1;
+ frac_en = REG_FIELD_GET(C20_MPLLB_FRACEN, pll_state->mpllb[6]);
+ frac_quot = pll_state->mpllb[8];
+ frac_rem = pll_state->mpllb[9];
+ frac_den = pll_state->mpllb[7];
+ multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mpllb[0]);
+ tx_clk_div = REG_FIELD_GET(C20_MPLLB_TX_CLK_DIV_MASK, pll_state->mpllb[0]);
+ ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mpllb[6]);
+ fb_clk_div4_en = 0;
+ } else {
+ tx_rate_mult = 2;
+ frac_en = REG_FIELD_GET(C20_MPLLA_FRACEN, pll_state->mplla[6]);
+ frac_quot = pll_state->mplla[8];
+ frac_rem = pll_state->mplla[9];
+ frac_den = pll_state->mplla[7];
+ multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mplla[0]);
+ tx_clk_div = REG_FIELD_GET(C20_MPLLA_TX_CLK_DIV_MASK, pll_state->mplla[1]);
+ ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mplla[6]);
+ fb_clk_div4_en = REG_FIELD_GET(C20_FB_CLK_DIV4_EN, pll_state->mplla[0]);
+ }
+
+ if (frac_en)
+ frac = frac_quot + DIV_ROUND_CLOSEST(frac_rem, frac_den);
+ else
+ frac = 0;
+
+ ref = DIV_ROUND_CLOSEST(refclk * (1 << (1 + fb_clk_div4_en)), 1 << ref_clk_mpllb_div);
+ vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(ref, (multiplier << (17 - 2)) + frac) >> 17, 10);
+
+ return vco << tx_rate_mult >> tx_clk_div >> tx_rate;
+}
+
/*
* TODO: Convert the following to align with intel_c20pll_find_table() and
* intel_c20pll_calc_state_from_table().
*/
static int intel_c10pll_calc_state_from_table(struct intel_encoder *encoder,
- const struct intel_c10pll_state * const *tables,
+ const struct intel_cx0pll_params *tables,
bool is_dp, int port_clock, int lane_count,
struct intel_cx0pll_state *pll_state)
{
struct intel_display *display = to_intel_display(encoder);
int i;
- for (i = 0; tables[i]; i++) {
- if (port_clock == tables[i]->clock) {
- pll_state->c10 = *tables[i];
+ for (i = 0; tables[i].name; i++) {
+ int clock = intel_c10pll_calc_port_clock(tables[i].c10);
+
+ drm_WARN_ON(display->drm, !intel_dpll_clock_matches(clock, tables[i].clock_rate));
+ if (intel_dpll_clock_matches(port_clock, clock)) {
+ pll_state->c10 = *tables[i].c10;
intel_cx0pll_update_ssc(encoder, pll_state, is_dp);
intel_c10pll_update_pll(encoder, pll_state);
@@ -2139,7 +2153,7 @@ static int intel_c10pll_calc_state(const struct intel_crtc_state *crtc_state,
{
struct intel_display *display = to_intel_display(encoder);
bool is_dp = intel_crtc_has_dp_encoder(crtc_state);
- const struct intel_c10pll_state * const *tables;
+ const struct intel_cx0pll_params *tables;
int err;
tables = intel_c10pll_tables_get(crtc_state, encoder);
@@ -2166,33 +2180,6 @@ static int intel_c10pll_calc_state(const struct intel_crtc_state *crtc_state,
return 0;
}
-static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
- const struct intel_c10pll_state *pll_state)
-{
- unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1;
- unsigned int multiplier, tx_clk_div, hdmi_div, refclk = 38400;
- int tmpclk = 0;
-
- if (pll_state->pll[0] & C10_PLL0_FRACEN) {
- frac_quot = pll_state->pll[12] << 8 | pll_state->pll[11];
- frac_rem = pll_state->pll[14] << 8 | pll_state->pll[13];
- frac_den = pll_state->pll[10] << 8 | pll_state->pll[9];
- }
-
- multiplier = (REG_FIELD_GET8(C10_PLL3_MULTIPLIERH_MASK, pll_state->pll[3]) << 8 |
- pll_state->pll[2]) / 2 + 16;
-
- tx_clk_div = REG_FIELD_GET8(C10_PLL15_TXCLKDIV_MASK, pll_state->pll[15]);
- hdmi_div = REG_FIELD_GET8(C10_PLL15_HDMIDIV_MASK, pll_state->pll[15]);
-
- tmpclk = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, (multiplier << 16) + frac_quot) +
- DIV_ROUND_CLOSEST(refclk * frac_rem, frac_den),
- 10 << (tx_clk_div + 16));
- tmpclk *= (hdmi_div ? 2 : 1);
-
- return tmpclk;
-}
-
static int readout_enabled_lane_count(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
@@ -2275,8 +2262,6 @@ static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
intel_cx0_phy_transaction_end(encoder, wakeref);
- pll_state->clock = intel_c10pll_calc_port_clock(encoder, pll_state);
-
cx0pll_state->ssc_enabled = readout_ssc_state(encoder, true);
if (cx0pll_state->ssc_enabled != intel_c10pll_ssc_enabled(pll_state))
@@ -2321,8 +2306,7 @@ static void intel_c10pll_dump_hw_state(struct drm_printer *p,
unsigned int multiplier, tx_clk_div;
fracen = hw_state->pll[0] & C10_PLL0_FRACEN;
- drm_printf(p, "c10pll_hw_state: clock: %d, fracen: %s, ",
- hw_state->clock, str_yes_no(fracen));
+ drm_printf(p, "c10pll_hw_state: fracen: %s, ", str_yes_no(fracen));
if (fracen) {
frac_quot = hw_state->pll[12] << 8 | hw_state->pll[11];
@@ -2364,9 +2348,8 @@ static bool is_arrowlake_s_by_host_bridge(void)
return pdev && IS_ARROWLAKE_S_BY_HOST_BRIDGE_ID(host_bridge_pci_dev_id);
}
-static u16 intel_c20_hdmi_tmds_tx_cgf_1(const struct intel_crtc_state *crtc_state)
+static u16 intel_c20_hdmi_tmds_tx_cgf_1(struct intel_display *display)
{
- struct intel_display *display = to_intel_display(crtc_state);
u16 tx_misc;
u16 tx_dcc_cal_dac_ctrl_range = 8;
u16 tx_term_ctrl = 2;
@@ -2388,7 +2371,8 @@ static u16 intel_c20_hdmi_tmds_tx_cgf_1(const struct intel_crtc_state *crtc_stat
C20_PHY_TX_DCC_BYPASS | C20_PHY_TX_TERM_CTL(tx_term_ctrl));
}
-static int intel_c20_compute_hdmi_tmds_pll(const struct intel_crtc_state *crtc_state,
+static int intel_c20_compute_hdmi_tmds_pll(struct intel_display *display,
+ int port_clock,
struct intel_c20pll_state *pll_state)
{
u64 datarate;
@@ -2402,10 +2386,10 @@ static int intel_c20_compute_hdmi_tmds_pll(const struct intel_crtc_state *crtc_s
u8 mpllb_ana_freq_vco;
u8 mpll_div_multiplier;
- if (crtc_state->port_clock < 25175 || crtc_state->port_clock > 600000)
+ if (port_clock < 25175 || port_clock > 600000)
return -EINVAL;
- datarate = ((u64)crtc_state->port_clock * 1000) * 10;
+ datarate = ((u64)port_clock * 1000) * 10;
mpll_tx_clk_div = ilog2(div64_u64((u64)CLOCK_9999MHZ, (u64)datarate));
vco_freq_shift = ilog2(div64_u64((u64)CLOCK_4999MHZ * (u64)256, (u64)datarate));
vco_freq = (datarate << vco_freq_shift) >> 8;
@@ -2427,9 +2411,8 @@ static int intel_c20_compute_hdmi_tmds_pll(const struct intel_crtc_state *crtc_s
else
mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_0;
- pll_state->clock = crtc_state->port_clock;
pll_state->tx[0] = 0xbe88;
- pll_state->tx[1] = intel_c20_hdmi_tmds_tx_cgf_1(crtc_state);
+ pll_state->tx[1] = intel_c20_hdmi_tmds_tx_cgf_1(display);
pll_state->tx[2] = 0x0000;
pll_state->cmn[0] = 0x0500;
pll_state->cmn[1] = 0x0005;
@@ -2457,7 +2440,7 @@ static int intel_c20_compute_hdmi_tmds_pll(const struct intel_crtc_state *crtc_s
return 0;
}
-static const struct intel_c20pll_state * const *
+static const struct intel_cx0pll_params *
intel_c20_pll_tables_get(const struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
@@ -2625,20 +2608,25 @@ static void intel_c20_program_vdr_params(struct intel_encoder *encoder,
MB_WRITE_COMMITTED);
}
-static const struct intel_c20pll_state *
+static const struct intel_cx0pll_params *
intel_c20_pll_find_table(const struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- const struct intel_c20pll_state * const *tables;
+ struct intel_display *display = to_intel_display(crtc_state);
+ const struct intel_cx0pll_params *tables;
int i;
tables = intel_c20_pll_tables_get(crtc_state, encoder);
if (!tables)
return NULL;
- for (i = 0; tables[i]; i++)
- if (crtc_state->port_clock == tables[i]->clock)
- return tables[i];
+ for (i = 0; tables[i].name; i++) {
+ int clock = intel_c20pll_calc_port_clock(tables[i].c20);
+
+ drm_WARN_ON(display->drm, !intel_dpll_clock_matches(clock, tables[i].clock_rate));
+ if (intel_dpll_clock_matches(crtc_state->port_clock, clock))
+ return &tables[i];
+ }
return NULL;
}
@@ -2647,13 +2635,13 @@ static int intel_c20pll_calc_state_from_table(const struct intel_crtc_state *crt
struct intel_encoder *encoder,
struct intel_cx0pll_state *pll_state)
{
- const struct intel_c20pll_state *table;
+ const struct intel_cx0pll_params *table;
table = intel_c20_pll_find_table(crtc_state, encoder);
if (!table)
return -EINVAL;
- pll_state->c20 = *table;
+ pll_state->c20 = *table->c20;
intel_cx0pll_update_ssc(encoder, pll_state, intel_crtc_has_dp_encoder(crtc_state));
@@ -2681,7 +2669,8 @@ static int intel_c20pll_calc_state(const struct intel_crtc_state *crtc_state,
/* TODO: Update SSC state for HDMI as well */
if (!is_dp && err)
- err = intel_c20_compute_hdmi_tmds_pll(crtc_state, &hw_state->cx0pll.c20);
+ err = intel_c20_compute_hdmi_tmds_pll(display, crtc_state->port_clock,
+ &hw_state->cx0pll.c20);
if (err)
return err;
@@ -2705,56 +2694,6 @@ int intel_cx0pll_calc_state(const struct intel_crtc_state *crtc_state,
return intel_c20pll_calc_state(crtc_state, encoder, hw_state);
}
-static bool intel_c20phy_use_mpllb(const struct intel_c20pll_state *state)
-{
- return state->tx[0] & C20_PHY_USE_MPLLB;
-}
-
-static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
- const struct intel_c20pll_state *pll_state)
-{
- unsigned int frac, frac_en, frac_quot, frac_rem, frac_den;
- unsigned int multiplier, refclk = 38400;
- unsigned int tx_clk_div;
- unsigned int ref_clk_mpllb_div;
- unsigned int fb_clk_div4_en;
- unsigned int ref, vco;
- unsigned int tx_rate_mult;
- unsigned int tx_rate = REG_FIELD_GET(C20_PHY_TX_RATE, pll_state->tx[0]);
-
- if (intel_c20phy_use_mpllb(pll_state)) {
- tx_rate_mult = 1;
- frac_en = REG_FIELD_GET(C20_MPLLB_FRACEN, pll_state->mpllb[6]);
- frac_quot = pll_state->mpllb[8];
- frac_rem = pll_state->mpllb[9];
- frac_den = pll_state->mpllb[7];
- multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mpllb[0]);
- tx_clk_div = REG_FIELD_GET(C20_MPLLB_TX_CLK_DIV_MASK, pll_state->mpllb[0]);
- ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mpllb[6]);
- fb_clk_div4_en = 0;
- } else {
- tx_rate_mult = 2;
- frac_en = REG_FIELD_GET(C20_MPLLA_FRACEN, pll_state->mplla[6]);
- frac_quot = pll_state->mplla[8];
- frac_rem = pll_state->mplla[9];
- frac_den = pll_state->mplla[7];
- multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mplla[0]);
- tx_clk_div = REG_FIELD_GET(C20_MPLLA_TX_CLK_DIV_MASK, pll_state->mplla[1]);
- ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mplla[6]);
- fb_clk_div4_en = REG_FIELD_GET(C20_FB_CLK_DIV4_EN, pll_state->mplla[0]);
- }
-
- if (frac_en)
- frac = frac_quot + DIV_ROUND_CLOSEST(frac_rem, frac_den);
- else
- frac = 0;
-
- ref = DIV_ROUND_CLOSEST(refclk * (1 << (1 + fb_clk_div4_en)), 1 << ref_clk_mpllb_div);
- vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(ref, (multiplier << (17 - 2)) + frac) >> 17, 10);
-
- return vco << tx_rate_mult >> tx_clk_div >> tx_rate;
-}
-
static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
struct intel_cx0pll_state *cx0pll_state)
{
@@ -2823,8 +2762,6 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
}
}
- pll_state->clock = intel_c20pll_calc_port_clock(encoder, pll_state);
-
intel_cx0_phy_transaction_end(encoder, wakeref);
cx0pll_state->ssc_enabled = readout_ssc_state(encoder, intel_c20phy_use_mpllb(pll_state));
@@ -2835,7 +2772,7 @@ static void intel_c20pll_dump_hw_state(struct drm_printer *p,
{
int i;
- drm_printf(p, "c20pll_hw_state: clock: %d\n", hw_state->clock);
+ drm_printf(p, "c20pll_hw_state:\n");
drm_printf(p,
"tx[0] = 0x%.4x, tx[1] = 0x%.4x, tx[2] = 0x%.4x\n",
hw_state->tx[0], hw_state->tx[1], hw_state->tx[2]);
@@ -2971,6 +2908,12 @@ static void intel_c20_pll_program(struct intel_display *display,
MB_WRITE_COMMITTED);
}
+static bool is_mplla_clock_rate(int clock)
+{
+ return intel_dpll_clock_matches(clock, 1000000) ||
+ intel_dpll_clock_matches(clock, 2000000);
+}
+
static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
const struct intel_cx0pll_state *pll_state,
int port_clock,
@@ -2996,7 +2939,7 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
/* TODO: HDMI FRL */
/* DP2.0 10G and 20G rates enable MPLLA*/
- if (port_clock == 1000000 || port_clock == 2000000)
+ if (is_mplla_clock_rate(port_clock))
val |= pll_state->ssc_enabled ? XELPDP_SSC_ENABLE_PLLA : 0;
else
val |= pll_state->ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
@@ -3223,7 +3166,6 @@ static u32 intel_cx0_get_pclk_pll_ack(u8 lane_mask)
static void intel_cx0pll_enable(struct intel_encoder *encoder,
const struct intel_cx0pll_state *pll_state)
{
- int port_clock = pll_state->use_c10 ? pll_state->c10.clock : pll_state->c20.clock;
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
@@ -3231,6 +3173,12 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
u8 maxpclk_lane = lane_reversal ? INTEL_CX0_LANE1 :
INTEL_CX0_LANE0;
struct ref_tracker *wakeref = intel_cx0_phy_transaction_begin(encoder);
+ int port_clock;
+
+ if (pll_state->use_c10)
+ port_clock = intel_c10pll_calc_port_clock(&pll_state->c10);
+ else
+ port_clock = intel_c20pll_calc_port_clock(&pll_state->c20);
/*
* Lane reversal is never used in DP-alt mode, in that case the
@@ -3730,9 +3678,9 @@ int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder,
const struct intel_cx0pll_state *pll_state)
{
if (intel_encoder_is_c10phy(encoder))
- return intel_c10pll_calc_port_clock(encoder, &pll_state->c10);
+ return intel_c10pll_calc_port_clock(&pll_state->c10);
- return intel_c20pll_calc_port_clock(encoder, &pll_state->c20);
+ return intel_c20pll_calc_port_clock(&pll_state->c20);
}
/*
@@ -3786,3 +3734,124 @@ void intel_cx0_pll_power_save_wa(struct intel_display *display)
intel_cx0pll_disable(encoder);
}
}
+
+static void intel_c10pll_verify_clock(struct intel_display *display,
+ int precomputed_clock,
+ const char *pll_state_name,
+ const struct intel_c10pll_state *pll_state,
+ bool is_precomputed_state)
+{
+ struct drm_printer p;
+ int clock;
+
+ clock = intel_c10pll_calc_port_clock(pll_state);
+
+ if (intel_dpll_clock_matches(clock, precomputed_clock))
+ return;
+
+ drm_warn(display->drm,
+ "PLL state %s (%s): clock difference too high: computed %d, pre-computed %d\n",
+ pll_state_name,
+ is_precomputed_state ? "precomputed" : "computed",
+ clock, precomputed_clock);
+
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ p = drm_dbg_printer(display->drm, DRM_UT_KMS, NULL);
+
+ drm_printf(&p, "PLL state %s (%s):\n",
+ pll_state_name,
+ is_precomputed_state ? "precomputed" : "computed");
+ intel_c10pll_dump_hw_state(&p, pll_state);
+}
+
+static void intel_c10pll_verify_params(struct intel_display *display,
+ const struct intel_cx0pll_params *pll_params)
+{
+ struct intel_c10pll_state pll_state;
+
+ intel_c10pll_verify_clock(display, pll_params->clock_rate, pll_params->name, pll_params->c10, true);
+
+ if (!pll_params->is_hdmi)
+ return;
+
+ intel_snps_hdmi_pll_compute_c10pll(&pll_state, pll_params->clock_rate);
+
+ intel_c10pll_verify_clock(display, pll_params->clock_rate, pll_params->name, &pll_state, false);
+}
+
+static void intel_c20pll_verify_clock(struct intel_display *display,
+ int precomputed_clock,
+ const char *pll_state_name,
+ const struct intel_c20pll_state *pll_state,
+ bool is_precomputed_state)
+{
+ struct drm_printer p;
+ int clock;
+
+ clock = intel_c20pll_calc_port_clock(pll_state);
+
+ if (intel_dpll_clock_matches(clock, precomputed_clock))
+ return;
+
+ drm_warn(display->drm,
+ "PLL state %s (%s): clock difference too high: computed %d, pre-computed %d\n",
+ pll_state_name,
+ is_precomputed_state ? "precomputed" : "computed",
+ clock, precomputed_clock);
+
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ p = drm_dbg_printer(display->drm, DRM_UT_KMS, NULL);
+
+ drm_printf(&p, "PLL state %s (%s):\n",
+ pll_state_name,
+ is_precomputed_state ? "precomputed" : "computed");
+ intel_c20pll_dump_hw_state(&p, pll_state);
+}
+
+static void intel_c20pll_verify_params(struct intel_display *display,
+ const struct intel_cx0pll_params *pll_params)
+{
+ struct intel_c20pll_state pll_state;
+
+ intel_c20pll_verify_clock(display, pll_params->clock_rate, pll_params->name, pll_params->c20, true);
+
+ if (!pll_params->is_hdmi)
+ return;
+
+ if (intel_c20_compute_hdmi_tmds_pll(display, pll_params->clock_rate, &pll_state) != 0)
+ return;
+
+ intel_c20pll_verify_clock(display, pll_params->clock_rate, pll_params->name, &pll_state, false);
+}
+
+static void intel_cx0pll_verify_tables(struct intel_display *display,
+ const struct intel_cx0pll_params *tables)
+{
+ int i;
+
+ for (i = 0; tables[i].name; i++) {
+ if (tables[i].is_c10)
+ intel_c10pll_verify_params(display, &tables[i]);
+ else
+ intel_c20pll_verify_params(display, &tables[i]);
+ }
+}
+
+void intel_cx0pll_verify_plls(struct intel_display *display)
+{
+ /* C10 */
+ intel_cx0pll_verify_tables(display, mtl_c10_edp_tables);
+ intel_cx0pll_verify_tables(display, mtl_c10_dp_tables);
+ intel_cx0pll_verify_tables(display, mtl_c10_hdmi_tables);
+
+ /* C20 */
+ intel_cx0pll_verify_tables(display, xe2hpd_c20_edp_tables);
+ intel_cx0pll_verify_tables(display, mtl_c20_dp_tables);
+ intel_cx0pll_verify_tables(display, xe2hpd_c20_dp_tables);
+ intel_cx0pll_verify_tables(display, xe3lpd_c20_dp_edp_tables);
+ intel_cx0pll_verify_tables(display, mtl_c20_hdmi_tables);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index ae98ac23ea22..1d4480b8bf39 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -25,8 +25,8 @@ struct intel_dpll_hw_state;
struct intel_encoder;
struct intel_hdmi;
-void intel_clear_response_ready_flag(struct intel_encoder *encoder,
- int lane);
+void intel_cx0_clear_response_ready_flag(struct intel_encoder *encoder,
+ int lane);
bool intel_encoder_is_c10phy(struct intel_encoder *encoder);
void intel_mtl_pll_enable(struct intel_encoder *encoder,
struct intel_dpll *pll,
@@ -77,6 +77,7 @@ bool intel_mtl_tbt_pll_readout_hw_state(struct intel_display *display,
struct intel_dpll_hw_state *hw_state);
int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder);
+void intel_cx0pll_verify_plls(struct intel_display *display);
void intel_cx0_pll_power_save_wa(struct intel_display *display);
void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
index 658890f73515..152a4e751bdc 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -78,10 +78,10 @@
#define XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US 3200
#define XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US 20
#define XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US 100
-#define XELPDP_PORT_RESET_START_TIMEOUT_US 5
+#define XELPDP_PORT_RESET_START_TIMEOUT_US 10
#define XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_MS 2
#define XELPDP_PORT_RESET_END_TIMEOUT_MS 15
-#define XELPDP_REFCLK_ENABLE_TIMEOUT_US 1
+#define XELPDP_REFCLK_ENABLE_TIMEOUT_US 10
#define _XELPDP_PORT_BUF_CTL1_LN0_A 0x64004
#define _XELPDP_PORT_BUF_CTL1_LN0_B 0x64104
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index cb91d07cdaa6..7f1576bfe4b0 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -34,7 +34,6 @@
#include <drm/drm_print.h>
#include <drm/drm_privacy_screen_consumer.h>
-#include "i915_reg.h"
#include "icl_dsi.h"
#include "intel_alpm.h"
#include "intel_audio.h"
@@ -53,6 +52,7 @@
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
+#include "intel_display_wa.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dp.h"
@@ -1402,8 +1402,7 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
int level;
/* Wa_16011342517:adl-p */
- if (display->platform.alderlake_p &&
- IS_DISPLAY_STEP(display, STEP_A0, STEP_D0)) {
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_16011342517)) {
if ((intel_encoder_is_hdmi(encoder) &&
crtc_state->port_clock == 594000) ||
(intel_encoder_is_dp(encoder) &&
@@ -4247,13 +4246,15 @@ void intel_ddi_get_clock(struct intel_encoder *encoder,
static void xe3plpd_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
+
intel_lt_phy_pll_readout_hw_state(encoder, crtc_state, &crtc_state->dpll_hw_state.ltpll);
if (crtc_state->dpll_hw_state.ltpll.tbt_mode)
crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder);
else
crtc_state->port_clock =
- intel_lt_phy_calc_port_clock(encoder, crtc_state);
+ intel_lt_phy_calc_port_clock(display, &crtc_state->dpll_hw_state.ltpll);
intel_ddi_get_config(encoder, crtc_state);
}
@@ -4586,8 +4587,10 @@ intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
/*
* We don't enable port sync on BDW due to missing w/as and
* due to not having adjusted the modeset sequence appropriately.
+ * From, xe3lpd onwards we have defeatured this with reference to
+ * Wa_16024710867
*/
- if (DISPLAY_VER(display) < 9)
+ if (!IS_DISPLAY_VER(display, 9, 20))
return 0;
if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP))
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index a7ce3b875e06..f30f3f8ebee1 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -6,6 +6,8 @@
#ifndef __INTEL_DE_H__
#define __INTEL_DE_H__
+#include <drm/drm_print.h>
+
#include "intel_display_core.h"
#include "intel_dmc_wl.h"
#include "intel_dsb.h"
@@ -34,15 +36,18 @@ intel_de_read(struct intel_display *display, i915_reg_t reg)
static inline u8
intel_de_read8(struct intel_display *display, i915_reg_t reg)
{
- u8 val;
-
- intel_dmc_wl_get(display, reg);
+ /* this is only used on VGA registers (possible on pre-g4x) */
+ drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 5 || display->platform.g4x);
- val = intel_uncore_read8(__to_uncore(display), reg);
+ return intel_uncore_read8(__to_uncore(display), reg);
+}
- intel_dmc_wl_put(display, reg);
+static inline void
+intel_de_write8(struct intel_display *display, i915_reg_t reg, u8 val)
+{
+ drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 5 || display->platform.g4x);
- return val;
+ intel_uncore_write8(__to_uncore(display), reg, val);
}
static inline u64
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 3b8ba8ab76a1..b18ce0c36a64 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -50,7 +50,6 @@
#include "g4x_hdmi.h"
#include "hsw_ips.h"
#include "i915_config.h"
-#include "i915_reg.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "i9xx_wm.h"
@@ -86,7 +85,6 @@
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_dpt.h"
-#include "intel_dpt_common.h"
#include "intel_drrs.h"
#include "intel_dsb.h"
#include "intel_dsi.h"
@@ -455,7 +453,7 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
}
/* Wa_22012358565:adl-p */
- if (DISPLAY_VER(display) == 13)
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_22012358565))
intel_de_rmw(display, PIPE_ARB_CTL(display, pipe),
0, PIPE_ARB_USE_PROG_SLOTS);
@@ -709,7 +707,7 @@ static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
/* Wa_14010547955:dg2 */
- if (display->platform.dg2)
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14010547955))
tmp |= DG2_RENDER_CCSTAG_4_3_EN;
intel_de_write(display, PIPE_CHICKEN(pipe), tmp);
@@ -1008,6 +1006,28 @@ static bool intel_casf_disabling(const struct intel_crtc_state *old_crtc_state,
return is_disabling(hw.casf_params.casf_enable, old_crtc_state, new_crtc_state);
}
+static bool intel_crtc_lobf_enabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ if (!new_crtc_state->hw.active)
+ return false;
+
+ return is_enabling(has_lobf, old_crtc_state, new_crtc_state) ||
+ (new_crtc_state->has_lobf &&
+ (new_crtc_state->update_lrr || new_crtc_state->update_m_n));
+}
+
+static bool intel_crtc_lobf_disabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ if (!old_crtc_state->hw.active)
+ return false;
+
+ return is_disabling(has_lobf, old_crtc_state, new_crtc_state) ||
+ (old_crtc_state->has_lobf &&
+ (new_crtc_state->update_lrr || new_crtc_state->update_m_n));
+}
+
#undef is_disabling
#undef is_enabling
@@ -1050,12 +1070,13 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (audio_enabling(old_crtc_state, new_crtc_state))
intel_encoders_audio_enable(state, crtc);
- if (intel_display_wa(display, 14011503117)) {
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14011503117)) {
if (old_crtc_state->pch_pfit.enabled != new_crtc_state->pch_pfit.enabled)
adl_scaler_ecc_unmask(new_crtc_state);
}
- intel_alpm_post_plane_update(state, crtc);
+ if (intel_crtc_lobf_enabling(old_crtc_state, new_crtc_state))
+ intel_alpm_lobf_enable(new_crtc_state);
intel_psr_post_plane_update(state, crtc);
}
@@ -1152,7 +1173,9 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
- intel_alpm_pre_plane_update(state, crtc);
+ if (intel_crtc_lobf_disabling(old_crtc_state, new_crtc_state))
+ intel_alpm_lobf_disable(new_crtc_state);
+
intel_psr_pre_plane_update(state, crtc);
if (intel_crtc_vrr_disabling(state, crtc)) {
@@ -1614,7 +1637,6 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta
}
intel_set_transcoder_timings(crtc_state);
- intel_vrr_set_transcoder_timings(crtc_state);
if (cpu_transcoder != TRANSCODER_EDP)
intel_de_write(display, TRANS_MULT(display, cpu_transcoder),
@@ -4325,43 +4347,58 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
return 0;
}
-static int
-compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
- struct intel_crtc_state *crtc_state)
+static int bpc_to_bpp(int bpc)
{
- struct intel_display *display = to_intel_display(crtc_state);
- struct drm_connector *connector = conn_state->connector;
- const struct drm_display_info *info = &connector->display_info;
- int bpp;
-
- switch (conn_state->max_bpc) {
+ switch (bpc) {
case 6 ... 7:
- bpp = 6 * 3;
- break;
+ return 6 * 3;
case 8 ... 9:
- bpp = 8 * 3;
- break;
+ return 8 * 3;
case 10 ... 11:
- bpp = 10 * 3;
- break;
+ return 10 * 3;
case 12 ... 16:
- bpp = 12 * 3;
- break;
+ return 12 * 3;
default:
- MISSING_CASE(conn_state->max_bpc);
+ MISSING_CASE(bpc);
return -EINVAL;
}
+}
- if (bpp < crtc_state->pipe_bpp) {
+static int
+compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct drm_connector *connector = conn_state->connector;
+ const struct drm_display_info *info = &connector->display_info;
+ int edid_bpc = info->bpc ? : 8;
+ int target_pipe_bpp;
+ int max_edid_bpp;
+
+ max_edid_bpp = bpc_to_bpp(edid_bpc);
+ if (max_edid_bpp < 0)
+ return max_edid_bpp;
+
+ target_pipe_bpp = bpc_to_bpp(conn_state->max_bpc);
+ if (target_pipe_bpp < 0)
+ return target_pipe_bpp;
+
+ /*
+ * The maximum pipe BPP is the minimum of the max platform BPP and
+ * the max EDID BPP.
+ */
+ crtc_state->max_pipe_bpp = min(crtc_state->pipe_bpp, max_edid_bpp);
+
+ if (target_pipe_bpp < crtc_state->pipe_bpp) {
drm_dbg_kms(display->drm,
- "[CONNECTOR:%d:%s] Limiting display bpp to %d "
+ "[CONNECTOR:%d:%s] Limiting target display pipe bpp to %d "
"(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n",
connector->base.id, connector->name,
- bpp, 3 * info->bpc,
+ target_pipe_bpp, 3 * info->bpc,
3 * conn_state->max_requested_bpc,
crtc_state->pipe_bpp);
- crtc_state->pipe_bpp = bpp;
+ crtc_state->pipe_bpp = target_pipe_bpp;
}
return 0;
@@ -5459,7 +5496,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(dsc.config.nsl_bpg_offset);
PIPE_CONF_CHECK_BOOL(dsc.compression_enable);
- PIPE_CONF_CHECK_I(dsc.num_streams);
+ PIPE_CONF_CHECK_I(dsc.slice_config.streams_per_pipe);
PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16);
PIPE_CONF_CHECK_BOOL(splitter.enable);
@@ -7357,9 +7394,6 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
intel_psr_trigger_frame_change_event(new_crtc_state->dsb_commit,
state, crtc);
- intel_psr_wait_for_idle_dsb(new_crtc_state->dsb_commit,
- new_crtc_state);
-
if (new_crtc_state->use_dsb)
intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit);
@@ -7390,9 +7424,37 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
new_crtc_state->dsb_color);
if (new_crtc_state->use_dsb && !intel_color_uses_chained_dsb(new_crtc_state)) {
- intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);
+ /*
+ * Dsb wait vblank may or may not skip. Let's remove it for PSR
+ * trans push case to ensure we are not waiting two vblanks
+ */
+ if (!intel_psr_use_trans_push(new_crtc_state))
+ intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);
intel_vrr_send_push(new_crtc_state->dsb_commit, new_crtc_state);
+
+ /*
+ * Wait for idle is needed for corner case where PSR HW
+ * is transitioning into DEEP_SLEEP/SRDENT_OFF when
+ * new Frame Change event comes in. It is ok to do it
+ * here for both Frame Change mechanism (trans push
+ * and register write).
+ */
+ intel_psr_wait_for_idle_dsb(new_crtc_state->dsb_commit,
+ new_crtc_state);
+
+ /*
+ * In case PSR uses trans push as a "frame change" event and
+ * VRR is not in use we need to wait vblank. Otherwise we may
+ * miss selective updates. DSB skips all waits while PSR is
+ * active. Check push send is skipped as well because trans push
+ * send bit is not reset by the HW if VRR is not
+ * enabled -> we may start configuring new selective
+ * update while previous is not complete.
+ */
+ if (intel_psr_use_trans_push(new_crtc_state))
+ intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);
+
intel_dsb_wait_for_delayed_vblank(state, new_crtc_state->dsb_commit);
intel_vrr_check_push_sent(new_crtc_state->dsb_commit,
new_crtc_state);
@@ -8001,6 +8063,25 @@ void intel_setup_outputs(struct intel_display *display)
drm_helper_move_panel_connectors_to_head(display->drm);
}
+int intel_max_uncompressed_dotclock(struct intel_display *display)
+{
+ int max_dotclock = display->cdclk.max_dotclk_freq;
+ int limit = max_dotclock;
+
+ if (DISPLAY_VERx100(display) == 3002)
+ limit = 937500;
+ else if (DISPLAY_VER(display) >= 30)
+ limit = 1350000;
+ /*
+ * Note: For other platforms though there are limits given
+ * in the Bspec, however the limit is intentionally not
+ * enforced to avoid regressions, unless real issues are
+ * observed.
+ */
+
+ return min(max_dotclock, limit);
+}
+
static int max_dotclock(struct intel_display *display)
{
int max_dotclock = display->cdclk.max_dotclk_freq;
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index f8e6e4e82722..552a59d19e0f 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -135,32 +135,6 @@ enum tc_port {
I915_MAX_TC_PORTS
};
-enum aux_ch {
- AUX_CH_NONE = -1,
-
- AUX_CH_A,
- AUX_CH_B,
- AUX_CH_C,
- AUX_CH_D,
- AUX_CH_E, /* ICL+ */
- AUX_CH_F,
- AUX_CH_G,
- AUX_CH_H,
- AUX_CH_I,
-
- /* tgl+ */
- AUX_CH_USBC1 = AUX_CH_D,
- AUX_CH_USBC2,
- AUX_CH_USBC3,
- AUX_CH_USBC4,
- AUX_CH_USBC5,
- AUX_CH_USBC6,
-
- /* XE_LPD repositions D/E offsets and bitfields */
- AUX_CH_D_XELPD = AUX_CH_USBC5,
- AUX_CH_E_XELPD,
-};
-
enum phy {
PHY_NONE = -1,
@@ -488,6 +462,7 @@ void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
struct intel_link_m_n *m_n);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config);
+int intel_max_uncompressed_dotclock(struct intel_display *display);
enum intel_display_power_domain intel_port_to_power_domain(struct intel_digital_port *dig_port);
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index aba13e8a9051..2614c4863c87 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -13,9 +13,9 @@
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_gmd_misc_regs.h>
#include "hsw_ips.h"
-#include "i915_reg.h"
#include "i9xx_wm_regs.h"
#include "intel_alpm.h"
#include "intel_bo.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index c32d65727642..a8ef1e6193b8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -10,7 +10,6 @@
#include <drm/drm_print.h>
#include <drm/intel/pciids.h>
-#include "i915_reg.h"
#include "intel_cx0_phy_regs.h"
#include "intel_de.h"
#include "intel_display.h"
@@ -20,6 +19,7 @@
#include "intel_display_reg_defs.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_wa.h"
#include "intel_fbc.h"
#include "intel_step.h"
@@ -1540,9 +1540,9 @@ probe_gmdid_display(struct intel_display *display, struct intel_display_ip_ver *
return NULL;
}
- gmd_id.ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val);
- gmd_id.rel = REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
- gmd_id.step = REG_FIELD_GET(GMD_ID_STEP, val);
+ gmd_id.ver = REG_FIELD_GET(GMD_ID_DISPLAY_ARCH_MASK, val);
+ gmd_id.rel = REG_FIELD_GET(GMD_ID_DISPLAY_RELEASE_MASK, val);
+ gmd_id.step = REG_FIELD_GET(GMD_ID_DISPLAY_STEP, val);
for (i = 0; i < ARRAY_SIZE(gmdid_display_map); i++) {
if (gmd_id.ver == gmdid_display_map[i].ver &&
@@ -1774,7 +1774,7 @@ static void __intel_display_device_info_runtime_init(struct intel_display *displ
display_runtime->port_mask |= BIT(PORT_F);
/* Wa_14011765242: adl-s A0,A1 */
- if (display->platform.alderlake_s && IS_DISPLAY_STEP(display, STEP_A0, STEP_A2))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14011765242))
for_each_pipe(display, pipe)
display_runtime->num_scalers[pipe] = 0;
else if (DISPLAY_VER(display) >= 11) {
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 13f2a629981f..e84c190dcc4f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -197,6 +197,7 @@ struct intel_display_platforms {
#define HAS_PSR(__display) (DISPLAY_INFO(__display)->has_psr)
#define HAS_PSR_HW_TRACKING(__display) (DISPLAY_INFO(__display)->has_psr_hw_tracking)
#define HAS_PSR2_SEL_FETCH(__display) (DISPLAY_VER(__display) >= 12)
+#define HAS_PSR_TRANS_PUSH_FRAME_CHANGE(__display) (DISPLAY_VER(__display) >= 20)
#define HAS_SAGV(__display) (DISPLAY_VER(__display) >= 9 && \
!(__display)->platform.broxton && !(__display)->platform.geminilake)
#define HAS_TRANSCODER(__display, trans) ((DISPLAY_RUNTIME_INFO(__display)->cpu_transcoder_mask & \
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 268b1de45b81..23bfecc983e8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -208,16 +208,12 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
intel_bios_init(display);
- ret = intel_vga_register(display);
- if (ret)
- goto cleanup_bios;
-
intel_psr_dc5_dc6_wa_init(display);
/* FIXME: completely on the wrong abstraction layer */
ret = intel_power_domains_init(display);
if (ret < 0)
- goto cleanup_vga;
+ goto cleanup_bios;
intel_pmdemand_init_early(display);
@@ -229,7 +225,7 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
display->hotplug.dp_wq = alloc_ordered_workqueue("intel-dp", 0);
if (!display->hotplug.dp_wq) {
ret = -ENOMEM;
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_pw_domain_dmc;
}
display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
@@ -245,13 +241,13 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
goto cleanup_wq_modeset;
}
- display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI, 0);
+ display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI | WQ_PERCPU, 0);
if (!display->wq.cleanup) {
ret = -ENOMEM;
goto cleanup_wq_flip;
}
- display->wq.unordered = alloc_workqueue("display_unordered", 0, 0);
+ display->wq.unordered = alloc_workqueue("display_unordered", WQ_PERCPU, 0);
if (!display->wq.unordered) {
ret = -ENOMEM;
goto cleanup_wq_cleanup;
@@ -301,11 +297,9 @@ cleanup_wq_modeset:
destroy_workqueue(display->wq.modeset);
cleanup_wq_dp:
destroy_workqueue(display->hotplug.dp_wq);
-cleanup_vga_client_pw_domain_dmc:
+cleanup_pw_domain_dmc:
intel_dmc_fini(display);
intel_power_domains_driver_remove(display);
-cleanup_vga:
- intel_vga_unregister(display);
cleanup_bios:
intel_bios_driver_remove(display);
@@ -554,6 +548,8 @@ void intel_display_driver_register(struct intel_display *display)
if (!HAS_DISPLAY(display))
return;
+ intel_vga_register(display);
+
/* Must be done after probing outputs */
intel_opregion_register(display);
intel_acpi_video_register(display);
@@ -646,8 +642,6 @@ void intel_display_driver_remove_nogem(struct intel_display *display)
intel_power_domains_driver_remove(display);
- intel_vga_unregister(display);
-
intel_bios_driver_remove(display);
}
@@ -675,6 +669,8 @@ void intel_display_driver_unregister(struct intel_display *display)
acpi_video_unregister();
intel_opregion_unregister(display);
+
+ intel_vga_unregister(display);
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index 6e7e4654eb79..70c1bba7c0a8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -5,8 +5,8 @@
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
+#include <drm/intel/intel_gmd_interrupt_regs.h>
-#include "i915_reg.h"
#include "icl_dsi_regs.h"
#include "intel_crtc.h"
#include "intel_de.h"
@@ -1619,7 +1619,7 @@ static void i915gm_irq_cstate_wa_enable(struct intel_display *display)
*/
if (display->irq.vblank_enabled++ == 0)
intel_de_write(display, SCPD0,
- _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ REG_MASKED_FIELD_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
static void i915gm_irq_cstate_wa_disable(struct intel_display *display)
@@ -1628,7 +1628,7 @@ static void i915gm_irq_cstate_wa_disable(struct intel_display *display)
if (--display->irq.vblank_enabled == 0)
intel_de_write(display, SCPD0,
- _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ REG_MASKED_FIELD_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
void i915gm_irq_cstate_wa(struct intel_display *display, bool enable)
@@ -2472,6 +2472,7 @@ void intel_display_irq_init(struct intel_display *display)
struct intel_display_irq_snapshot {
u32 derrmr;
+ u32 err_int;
};
struct intel_display_irq_snapshot *
@@ -2486,6 +2487,9 @@ intel_display_irq_snapshot_capture(struct intel_display *display)
if (DISPLAY_VER(display) >= 6 && DISPLAY_VER(display) < 20 && !HAS_GMCH(display))
snapshot->derrmr = intel_de_read(display, DERRMR);
+ if (DISPLAY_VER(display) == 7)
+ snapshot->err_int = intel_de_read(display, GEN7_ERR_INT);
+
return snapshot;
}
@@ -2496,4 +2500,5 @@ void intel_display_irq_snapshot_print(const struct intel_display_irq_snapshot *s
return;
drm_printf(p, "DERRMR: 0x%08x\n", snapshot->derrmr);
+ drm_printf(p, "ERR_INT: 0x%08x\n", snapshot->err_int);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_limits.h b/drivers/gpu/drm/i915/display/intel_display_limits.h
index cb3c9c665c44..453f7b720815 100644
--- a/drivers/gpu/drm/i915/display/intel_display_limits.h
+++ b/drivers/gpu/drm/i915/display/intel_display_limits.h
@@ -138,6 +138,32 @@ enum hpd_pin {
HPD_NUM_PINS
};
+enum aux_ch {
+ AUX_CH_NONE = -1,
+
+ AUX_CH_A,
+ AUX_CH_B,
+ AUX_CH_C,
+ AUX_CH_D,
+ AUX_CH_E, /* ICL+ */
+ AUX_CH_F,
+ AUX_CH_G,
+ AUX_CH_H,
+ AUX_CH_I,
+
+ /* tgl+ */
+ AUX_CH_USBC1 = AUX_CH_D,
+ AUX_CH_USBC2,
+ AUX_CH_USBC3,
+ AUX_CH_USBC4,
+ AUX_CH_USBC5,
+ AUX_CH_USBC6,
+
+ /* XE_LPD repositions D/E offsets and bitfields */
+ AUX_CH_D_XELPD = AUX_CH_USBC5,
+ AUX_CH_E_XELPD,
+};
+
enum intel_color_block {
INTEL_PLANE_CB_PRE_CSC_LUT,
INTEL_PLANE_CB_CSC,
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index d27397f43863..ec96b141c74c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -7,8 +7,8 @@
#include <linux/string_helpers.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_pcode_regs.h>
-#include "i915_reg.h"
#include "intel_backlight_regs.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
@@ -21,12 +21,12 @@
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
+#include "intel_display_wa.h"
#include "intel_dmc.h"
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
#include "intel_parent.h"
#include "intel_pch_refclk.h"
-#include "intel_pcode.h"
#include "intel_pmdemand.h"
#include "intel_pps_regs.h"
#include "intel_snps_phy.h"
@@ -646,7 +646,7 @@ queue_async_put_domains_work(struct i915_power_domains *power_domains,
power.domains);
drm_WARN_ON(display->drm, power_domains->async_put_wakeref);
power_domains->async_put_wakeref = wakeref;
- drm_WARN_ON(display->drm, !queue_delayed_work(system_unbound_wq,
+ drm_WARN_ON(display->drm, !queue_delayed_work(system_dfl_wq,
&power_domains->async_put_work,
msecs_to_jiffies(delay_ms)));
}
@@ -1260,7 +1260,7 @@ static u32 hsw_read_dcomp(struct intel_display *display)
static void hsw_write_dcomp(struct intel_display *display, u32 val)
{
if (display->platform.haswell) {
- if (intel_pcode_write(display->drm, GEN6_PCODE_WRITE_D_COMP, val))
+ if (intel_parent_pcode_write(display, GEN6_PCODE_WRITE_D_COMP, val))
drm_dbg_kms(display->drm, "Failed to write to D_COMP\n");
} else {
intel_de_write(display, D_COMP_BDW, val);
@@ -1622,8 +1622,7 @@ static void tgl_bw_buddy_init(struct intel_display *display)
if (display->platform.dgfx && !display->platform.dg1)
return;
- if (display->platform.alderlake_s ||
- (display->platform.rocketlake && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_1409767108))
/* Wa_1409767108 */
table = wa_1409767108_buddy_page_masks;
else
@@ -1646,7 +1645,7 @@ static void tgl_bw_buddy_init(struct intel_display *display)
table[config].page_mask);
/* Wa_22010178259:tgl,dg1,rkl,adl-s */
- if (DISPLAY_VER(display) == 12)
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_22010178259))
intel_de_rmw(display, BW_BUDDY_CTL(i),
BW_BUDDY_TLB_REQ_TIMER_MASK,
BW_BUDDY_TLB_REQ_TIMER(0x8));
@@ -1663,8 +1662,7 @@ static void icl_display_core_init(struct intel_display *display,
gen9_set_dc_state(display, DC_STATE_DISABLE);
/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
- if (INTEL_PCH_TYPE(display) >= PCH_TGP &&
- INTEL_PCH_TYPE(display) < PCH_DG1)
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14011294188))
intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, 0,
PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
@@ -1718,17 +1716,17 @@ static void icl_display_core_init(struct intel_display *display,
intel_dmc_load_program(display);
/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */
- if (IS_DISPLAY_VERx100(display, 1200, 1300))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14011508470))
intel_de_rmw(display, GEN11_CHICKEN_DCPR_2, 0,
DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);
/* Wa_14011503030:xelpd */
- if (DISPLAY_VER(display) == 13)
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14011503030))
intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
/* Wa_15013987218 */
- if (DISPLAY_VER(display) == 20) {
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_15013987218)) {
intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
0, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE);
intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
@@ -2267,8 +2265,9 @@ void intel_display_power_suspend_late(struct intel_display *display, bool s2idle
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
- if (INTEL_PCH_TYPE(display) >= PCH_CNP && INTEL_PCH_TYPE(display) < PCH_DG1)
- intel_de_rmw(display, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14010685332))
+ intel_de_rmw(display, SOUTH_CHICKEN1,
+ SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}
void intel_display_power_resume_early(struct intel_display *display)
@@ -2282,7 +2281,7 @@ void intel_display_power_resume_early(struct intel_display *display)
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
- if (INTEL_PCH_TYPE(display) >= PCH_CNP && INTEL_PCH_TYPE(display) < PCH_DG1)
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14010685332))
intel_de_rmw(display, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
intel_power_domains_resume(display);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index c559ff000e67..65204d68a759 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -112,7 +112,6 @@ static const struct i915_power_well_desc hsw_power_wells_main[] = {
.id = HSW_DISP_PW_GLOBAL),
),
.ops = &hsw_power_well_ops,
- .has_vga = true,
},
};
@@ -146,7 +145,6 @@ static const struct i915_power_well_desc bdw_power_wells_main[] = {
.id = HSW_DISP_PW_GLOBAL),
),
.ops = &hsw_power_well_ops,
- .has_vga = true,
.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
},
};
@@ -390,7 +388,6 @@ static const struct i915_power_well_desc skl_power_wells_main[] = {
.id = SKL_DISP_PW_2),
),
.ops = &hsw_power_well_ops,
- .has_vga = true,
.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
.has_fuses = true,
}, {
@@ -469,7 +466,6 @@ static const struct i915_power_well_desc bxt_power_wells_main[] = {
.id = SKL_DISP_PW_2),
),
.ops = &hsw_power_well_ops,
- .has_vga = true,
.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
.has_fuses = true,
}, {
@@ -572,7 +568,6 @@ static const struct i915_power_well_desc glk_power_wells_main[] = {
.id = SKL_DISP_PW_2),
),
.ops = &hsw_power_well_ops,
- .has_vga = true,
.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
.has_fuses = true,
}, {
@@ -748,7 +743,6 @@ static const struct i915_power_well_desc icl_power_wells_main[] = {
.id = ICL_DISP_PW_3),
),
.ops = &hsw_power_well_ops,
- .has_vga = true,
.irq_pipe_mask = BIT(PIPE_B),
.has_fuses = true,
}, {
@@ -914,7 +908,6 @@ static const struct i915_power_well_desc tgl_power_wells_main[] = {
.id = ICL_DISP_PW_3),
),
.ops = &hsw_power_well_ops,
- .has_vga = true,
.irq_pipe_mask = BIT(PIPE_B),
.has_fuses = true,
}, {
@@ -1071,7 +1064,6 @@ static const struct i915_power_well_desc rkl_power_wells_main[] = {
),
.ops = &hsw_power_well_ops,
.irq_pipe_mask = BIT(PIPE_B),
- .has_vga = true,
.has_fuses = true,
}, {
.instances = &I915_PW_INSTANCES(
@@ -1166,7 +1158,6 @@ static const struct i915_power_well_desc dg1_power_wells_main[] = {
),
.ops = &hsw_power_well_ops,
.irq_pipe_mask = BIT(PIPE_B),
- .has_vga = true,
.has_fuses = true,
}, {
.instances = &I915_PW_INSTANCES(
@@ -1325,7 +1316,6 @@ static const struct i915_power_well_desc xelpd_power_wells_main[] = {
.id = SKL_DISP_PW_2),
),
.ops = &hsw_power_well_ops,
- .has_vga = true,
.has_fuses = true,
}, {
.instances = &I915_PW_INSTANCES(
@@ -1482,7 +1472,6 @@ static const struct i915_power_well_desc xelpdp_power_wells_main[] = {
.id = SKL_DISP_PW_2),
),
.ops = &hsw_power_well_ops,
- .has_vga = true,
.has_fuses = true,
}, {
.instances = &I915_PW_INSTANCES(
@@ -1649,7 +1638,6 @@ static const struct i915_power_well_desc xe3lpd_power_wells_main[] = {
.id = SKL_DISP_PW_2),
),
.ops = &hsw_power_well_ops,
- .has_vga = true,
.has_fuses = true,
}, {
.instances = &I915_PW_INSTANCES(
@@ -1722,7 +1710,6 @@ static const struct i915_power_well_desc wcl_power_wells_main[] = {
.id = SKL_DISP_PW_2),
),
.ops = &hsw_power_well_ops,
- .has_vga = true,
.has_fuses = true,
}, {
.instances = &I915_PW_INSTANCES(
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index db185a859133..f855f0f88694 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -6,8 +6,8 @@
#include <linux/iopoll.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_pcode_regs.h>
-#include "i915_reg.h"
#include "intel_backlight_regs.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
@@ -18,6 +18,7 @@
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_wa.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dmc.h"
@@ -27,7 +28,6 @@
#include "intel_dpll.h"
#include "intel_hotplug.h"
#include "intel_parent.h"
-#include "intel_pcode.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_tc.h"
@@ -195,6 +195,48 @@ int intel_power_well_refcount(struct i915_power_well *power_well)
return power_well->count;
}
+static u32 dss_pipe_gating_bits(u8 irq_pipe_mask)
+{
+ u32 bits = 0;
+
+ if (irq_pipe_mask & BIT(PIPE_A))
+ bits |= DSS_PIPE_A_GATING_DISABLED;
+ if (irq_pipe_mask & BIT(PIPE_B))
+ bits |= DSS_PIPE_B_GATING_DISABLED;
+ if (irq_pipe_mask & BIT(PIPE_C))
+ bits |= DSS_PIPE_C_GATING_DISABLED;
+ if (irq_pipe_mask & BIT(PIPE_D))
+ bits |= DSS_PIPE_D_GATING_DISABLED;
+
+ return bits;
+}
+
+static void dss_pipe_gating_enable_disable(struct intel_display *display,
+ u8 irq_pipe_mask,
+ bool disable)
+{
+ u32 bits = dss_pipe_gating_bits(irq_pipe_mask);
+ u32 clear, set;
+
+ if (!bits)
+ return;
+
+ /*
+ * Single intel_de_rmw() for both enable/disable:
+ * - disable == true, set bits (disable clock gating)
+ * - disable == false, clear bits (re-enable clock gating)
+ */
+ set = disable ? bits : 0;
+ clear = disable ? 0 : bits;
+
+ intel_de_rmw(display, CLKGATE_DIS_DSSDSC, clear, set);
+
+ drm_dbg_kms(display->drm,
+ "DSS clock gating %sd for pipe_mask=0x%x (CLKGATE_DIS_DSSDSC=0x%08x)\n",
+ str_enable_disable(!disable), irq_pipe_mask,
+ intel_de_read(display, CLKGATE_DIS_DSSDSC));
+}
+
/*
* Starting with Haswell, we have a "Power Down Well" that can be turned off
* when not needed anymore. We have 4 registers that can request the power well
@@ -202,20 +244,25 @@ int intel_power_well_refcount(struct i915_power_well *power_well)
* requesting it to be enabled.
*/
static void hsw_power_well_post_enable(struct intel_display *display,
- u8 irq_pipe_mask, bool has_vga)
+ u8 irq_pipe_mask)
{
- if (has_vga)
- intel_vga_reset_io_mem(display);
-
- if (irq_pipe_mask)
+ if (irq_pipe_mask) {
gen8_irq_power_well_post_enable(display, irq_pipe_mask);
+
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_22021048059))
+ dss_pipe_gating_enable_disable(display, irq_pipe_mask, false);
+ }
}
static void hsw_power_well_pre_disable(struct intel_display *display,
u8 irq_pipe_mask)
{
- if (irq_pipe_mask)
+ if (irq_pipe_mask) {
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_22021048059))
+ dss_pipe_gating_enable_disable(display, irq_pipe_mask, true);
+
gen8_irq_power_well_pre_disable(display, irq_pipe_mask);
+ }
}
#define ICL_AUX_PW_TO_PHY(pw_idx) \
@@ -418,8 +465,7 @@ static void hsw_power_well_enable(struct intel_display *display,
}
hsw_power_well_post_enable(display,
- power_well->desc->irq_pipe_mask,
- power_well->desc->has_vga);
+ power_well->desc->irq_pipe_mask);
}
static void hsw_power_well_disable(struct intel_display *display,
@@ -522,7 +568,7 @@ static void icl_tc_cold_exit(struct intel_display *display)
int ret, tries = 0;
while (1) {
- ret = intel_pcode_write(display->drm, ICL_PCODE_EXIT_TCCOLD, 0);
+ ret = intel_parent_pcode_write(display, ICL_PCODE_EXIT_TCCOLD, 0);
if (ret != -EAGAIN || ++tries == 3)
break;
msleep(1);
@@ -806,7 +852,7 @@ void gen9_set_dc_state(struct intel_display *display, u32 state)
power_domains->dc_state, val & mask);
enable_dc6 = state & DC_STATE_EN_UPTO_DC6;
- dc6_was_enabled = val & DC_STATE_EN_UPTO_DC6;
+ dc6_was_enabled = power_domains->dc_state & DC_STATE_EN_UPTO_DC6;
if (!dc6_was_enabled && enable_dc6)
intel_dmc_update_dc6_allowed_count(display, true);
@@ -1230,7 +1276,7 @@ static void vlv_init_display_clock_gating(struct intel_display *display)
* Disable trickle feed and enable pnd deadline calculation
*/
intel_de_write(display, MI_ARB_VLV,
- MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+ MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE_VLV);
intel_de_write(display, CBR1_VLV, 0);
drm_WARN_ON(display->drm, DISPLAY_RUNTIME_INFO(display)->rawclk_freq == 0);
@@ -1795,7 +1841,7 @@ tgl_tc_cold_request(struct intel_display *display, bool block)
* Spec states that we should timeout the request after 200us
* but the function below will timeout after 500us
*/
- ret = intel_pcode_read(display->drm, TGL_PCODE_TCCOLD, &low_val, &high_val);
+ ret = intel_parent_pcode_read(display, TGL_PCODE_TCCOLD, &low_val, &high_val);
if (ret == 0) {
if (block &&
(low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h
index ec8e508d0593..8f5524da2d06 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h
@@ -103,8 +103,6 @@ struct i915_power_well_desc {
* the well enabled.
*/
u16 fixed_enable_delay:1;
- /* The pw is backing the VGA functionality */
- u16 has_vga:1;
u16 has_fuses:1;
/*
* The pw is for an ICL+ TypeC PHY port in
diff --git a/drivers/gpu/drm/i915/display/intel_display_regs.h b/drivers/gpu/drm/i915/display/intel_display_regs.h
index 9e0d853f4b61..4746e9ebd920 100644
--- a/drivers/gpu/drm/i915/display/intel_display_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_regs.h
@@ -6,6 +6,9 @@
#include "intel_display_reg_defs.h"
+#define GU_CNTL_PROTECTED _MMIO(0x10100C)
+#define DEPRESENT REG_BIT(9)
+
#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
#define GEN7_PIPE_DE_LOAD_SL(pipe) _MMIO_PIPE(pipe, _GEN7_PIPEA_DE_LOAD_SL, _GEN7_PIPEB_DE_LOAD_SL)
@@ -79,6 +82,29 @@
#define DERRMR_PIPEC_VBLANK (1 << 21)
#define DERRMR_PIPEC_HBLANK (1 << 22)
+#define GEN7_ERR_INT _MMIO(0x44040)
+#define ERR_INT_POISON (1 << 31)
+#define ERR_INT_INVALID_GTT_PTE (1 << 29)
+#define ERR_INT_INVALID_PTE_DATA (1 << 28)
+#define ERR_INT_SPRITE_C_FAULT (1 << 23)
+#define ERR_INT_PRIMARY_C_FAULT (1 << 22)
+#define ERR_INT_CURSOR_C_FAULT (1 << 21)
+#define ERR_INT_SPRITE_B_FAULT (1 << 20)
+#define ERR_INT_PRIMARY_B_FAULT (1 << 19)
+#define ERR_INT_CURSOR_B_FAULT (1 << 18)
+#define ERR_INT_SPRITE_A_FAULT (1 << 17)
+#define ERR_INT_PRIMARY_A_FAULT (1 << 16)
+#define ERR_INT_CURSOR_A_FAULT (1 << 15)
+#define ERR_INT_MMIO_UNCLAIMED (1 << 13)
+#define ERR_INT_PIPE_CRC_DONE_C (1 << 8)
+#define ERR_INT_FIFO_UNDERRUN_C (1 << 6)
+#define ERR_INT_PIPE_CRC_DONE_B (1 << 5)
+#define ERR_INT_FIFO_UNDERRUN_B (1 << 3)
+#define ERR_INT_PIPE_CRC_DONE_A (1 << 2)
+#define ERR_INT_PIPE_CRC_DONE(pipe) (1 << (2 + (pipe) * 3))
+#define ERR_INT_FIFO_UNDERRUN_A (1 << 0)
+#define ERR_INT_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3))
+
#define VLV_IRQ_REGS I915_IRQ_REGS(VLV_IMR, \
VLV_IER, \
VLV_IIR)
@@ -160,6 +186,47 @@
#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
+#define DSPCLK_GATE_D _MMIO(0x6200)
+#define VLV_DSPCLK_GATE_D _MMIO(VLV_DISPLAY_BASE + 0x6200)
+# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
+# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
+# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
+# define VRDUNIT_CLOCK_GATE_DISABLE (1 << 27) /* 965 */
+# define AUDUNIT_CLOCK_GATE_DISABLE (1 << 26) /* 965 */
+# define DPUNIT_A_CLOCK_GATE_DISABLE (1 << 25) /* 965 */
+# define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24) /* 965 */
+# define PNV_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 24) /* pnv */
+# define TVRUNIT_CLOCK_GATE_DISABLE (1 << 23) /* 915-945 */
+# define TVCUNIT_CLOCK_GATE_DISABLE (1 << 22) /* 915-945 */
+# define TVFUNIT_CLOCK_GATE_DISABLE (1 << 21) /* 915-945 */
+# define TVEUNIT_CLOCK_GATE_DISABLE (1 << 20) /* 915-945 */
+# define DVSUNIT_CLOCK_GATE_DISABLE (1 << 19) /* 915-945 */
+# define DSSUNIT_CLOCK_GATE_DISABLE (1 << 18) /* 915-945 */
+# define DDBUNIT_CLOCK_GATE_DISABLE (1 << 17) /* 915-945 */
+# define DPRUNIT_CLOCK_GATE_DISABLE (1 << 16) /* 915-945 */
+# define DPFUNIT_CLOCK_GATE_DISABLE (1 << 15) /* 915-945 */
+# define DPBMUNIT_CLOCK_GATE_DISABLE (1 << 14) /* 915-945 */
+# define DPLSUNIT_CLOCK_GATE_DISABLE (1 << 13) /* 915-945 */
+# define DPLUNIT_CLOCK_GATE_DISABLE (1 << 12) /* 915-945 */
+# define DPOUNIT_CLOCK_GATE_DISABLE (1 << 11)
+# define DPBUNIT_CLOCK_GATE_DISABLE (1 << 10)
+# define DCUNIT_CLOCK_GATE_DISABLE (1 << 9)
+# define DPUNIT_CLOCK_GATE_DISABLE (1 << 8)
+# define VRUNIT_CLOCK_GATE_DISABLE (1 << 7) /* 915+: reserved */
+# define OVHUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 830-865 */
+# define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 915-945 */
+# define OVFUNIT_CLOCK_GATE_DISABLE (1 << 5)
+# define OVBUNIT_CLOCK_GATE_DISABLE (1 << 4)
+/*
+ * This bit must be set on the 830 to prevent hangs when turning off the
+ * overlay scaler.
+ */
+# define OVRUNIT_CLOCK_GATE_DISABLE (1 << 3)
+# define OVCUNIT_CLOCK_GATE_DISABLE (1 << 2)
+# define OVUUNIT_CLOCK_GATE_DISABLE (1 << 1)
+# define ZVUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 830 */
+# define OVLUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 845,865 */
+
/* Additional CHV pll/phy registers */
#define DPIO_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x6240)
#define DPLL_PORTD_READY_MASK (0xf)
@@ -281,6 +348,7 @@
#define FW_CSPWRDWNEN (1 << 15)
#define MI_ARB_VLV _MMIO(VLV_DISPLAY_BASE + 0x6504)
+#define MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE_VLV (1 << 2)
#define CZCLK_CDCLK_FREQ_RATIO _MMIO(VLV_DISPLAY_BASE + 0x6508)
#define CDCLK_FREQ_SHIFT 4
@@ -311,6 +379,46 @@
#define OGAMC1 _MMIO(0x30020)
#define OGAMC0 _MMIO(0x30024)
+#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
+#define _LATENCY_REPORTING_REMOVED_PIPE_D REG_BIT(31)
+#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30)
+#define _LATENCY_REPORTING_REMOVED_PIPE_C REG_BIT(25)
+#define _LATENCY_REPORTING_REMOVED_PIPE_B REG_BIT(24)
+#define _LATENCY_REPORTING_REMOVED_PIPE_A REG_BIT(23)
+#define LATENCY_REPORTING_REMOVED(pipe) _PICK((pipe), \
+ _LATENCY_REPORTING_REMOVED_PIPE_A, \
+ _LATENCY_REPORTING_REMOVED_PIPE_B, \
+ _LATENCY_REPORTING_REMOVED_PIPE_C, \
+ _LATENCY_REPORTING_REMOVED_PIPE_D)
+#define ICL_DELAY_PMRSP REG_BIT(22)
+#define DISABLE_FLR_SRC REG_BIT(15)
+#define MASK_WAKEMEM REG_BIT(13)
+#define DDI_CLOCK_REG_ACCESS REG_BIT(7)
+
+#define CHICKEN_PAR1_1 _MMIO(0x42080)
+#define IGNORE_KVMR_PIPE_A REG_BIT(23)
+#define KBL_ARB_FILL_SPARE_22 REG_BIT(22)
+#define DIS_RAM_BYPASS_PSR2_MAN_TRACK REG_BIT(16)
+#define SKL_DE_COMPRESSED_HASH_MODE REG_BIT(15)
+#define HSW_MASK_VBL_TO_PIPE_IN_SRD REG_BIT(15) /* hsw/bdw */
+#define FORCE_ARB_IDLE_PLANES REG_BIT(14)
+#define SKL_EDP_PSR_FIX_RDWRAP REG_BIT(3)
+#define IGNORE_PSR2_HW_TRACKING REG_BIT(1)
+
+/*
+ * GEN9 clock gating regs
+ */
+#define GEN9_CLKGATE_DIS_0 _MMIO(0x46530)
+#define DARBF_GATING_DIS REG_BIT(27)
+#define DMG_GATING_DIS REG_BIT(21)
+#define MTL_PIPEDMC_GATING_DIS(pipe) REG_BIT(15 - (pipe))
+#define PWM2_GATING_DIS REG_BIT(14)
+#define PWM1_GATING_DIS REG_BIT(13)
+
+#define GEN9_CLKGATE_DIS_3 _MMIO(0x46538)
+#define TGL_VRH_GATING_DIS REG_BIT(31)
+#define DPT_GATING_DIS REG_BIT(22)
+
#define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C)
#define BXT_GMBUS_GATING_DIS (1 << 14)
#define DG2_DPFC_GATING_DIS REG_BIT(31)
@@ -1003,6 +1111,15 @@
#define SWF3(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4)
#define SWF_ILK(i) _MMIO(0x4F000 + (i) * 4)
+#define DEISR _MMIO(0x44000)
+#define DEIMR _MMIO(0x44004)
+#define DEIIR _MMIO(0x44008)
+#define DEIER _MMIO(0x4400c)
+
+#define DE_IRQ_REGS I915_IRQ_REGS(DEIMR, \
+ DEIER, \
+ DEIIR)
+
#define DIGITAL_PORT_HOTPLUG_CNTRL _MMIO(0x44030)
#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
#define DIGITAL_PORTA_PULSE_DURATION_2ms (0 << 2) /* pre-HSW */
@@ -1333,6 +1450,44 @@
GEN8_DE_PORT_IER, \
GEN8_DE_PORT_IIR)
+/* interrupts */
+#define DE_MASTER_IRQ_CONTROL (1 << 31)
+#define DE_SPRITEB_FLIP_DONE (1 << 29)
+#define DE_SPRITEA_FLIP_DONE (1 << 28)
+#define DE_PLANEB_FLIP_DONE (1 << 27)
+#define DE_PLANEA_FLIP_DONE (1 << 26)
+#define DE_PLANE_FLIP_DONE(plane) (1 << (26 + (plane)))
+#define DE_PCU_EVENT (1 << 25)
+#define DE_GTT_FAULT (1 << 24)
+#define DE_POISON (1 << 23)
+#define DE_PERFORM_COUNTER (1 << 22)
+#define DE_PCH_EVENT (1 << 21)
+#define DE_AUX_CHANNEL_A (1 << 20)
+#define DE_DP_A_HOTPLUG (1 << 19)
+#define DE_GSE (1 << 18)
+#define DE_PIPEB_VBLANK (1 << 15)
+#define DE_PIPEB_EVEN_FIELD (1 << 14)
+#define DE_PIPEB_ODD_FIELD (1 << 13)
+#define DE_PIPEB_LINE_COMPARE (1 << 12)
+#define DE_PIPEB_VSYNC (1 << 11)
+#define DE_PIPEB_CRC_DONE (1 << 10)
+#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
+#define DE_PIPEA_VBLANK (1 << 7)
+#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8 * (pipe)))
+#define DE_PIPEA_EVEN_FIELD (1 << 6)
+#define DE_PIPEA_ODD_FIELD (1 << 5)
+#define DE_PIPEA_LINE_COMPARE (1 << 4)
+#define DE_PIPEA_VSYNC (1 << 3)
+#define DE_PIPEA_CRC_DONE (1 << 2)
+#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8 * (pipe)))
+#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
+#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8 * (pipe)))
+
+/* Display Internal Timeout Register */
+#define RM_TIMEOUT _MMIO(0x42060)
+#define RM_TIMEOUT_REG_CAPTURE _MMIO(0x420E0)
+#define MMIO_TIMEOUT_US(us) ((us) << 0)
+
#define GEN8_DE_MISC_ISR _MMIO(0x44460)
#define GEN8_DE_MISC_IMR _MMIO(0x44464)
#define GEN8_DE_MISC_IIR _MMIO(0x44468)
@@ -1466,6 +1621,29 @@
#define CHICKEN_FBC_STRIDE_MASK REG_GENMASK(12, 0)
#define CHICKEN_FBC_STRIDE(x) REG_FIELD_PREP(CHICKEN_FBC_STRIDE_MASK, (x))
+#define _CHICKEN_PIPESL_1_A 0x420b0
+#define _CHICKEN_PIPESL_1_B 0x420b4
+#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
+#define HSW_PRI_STRETCH_MAX_MASK REG_GENMASK(28, 27)
+#define HSW_PRI_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 0)
+#define HSW_PRI_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 1)
+#define HSW_PRI_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 2)
+#define HSW_PRI_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 3)
+#define HSW_SPR_STRETCH_MAX_MASK REG_GENMASK(26, 25)
+#define HSW_SPR_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 0)
+#define HSW_SPR_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 1)
+#define HSW_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 2)
+#define HSW_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 3)
+#define HSW_FBCQ_DIS REG_BIT(22)
+#define HSW_UNMASK_VBL_TO_REGS_IN_SRD REG_BIT(15) /* hsw */
+#define SKL_PSR_MASK_PLANE_FLIP REG_BIT(11) /* skl+ */
+#define SKL_PLANE1_STRETCH_MAX_MASK REG_GENMASK(1, 0)
+#define SKL_PLANE1_STRETCH_MAX_X8 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 0)
+#define SKL_PLANE1_STRETCH_MAX_X4 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 1)
+#define SKL_PLANE1_STRETCH_MAX_X2 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 2)
+#define SKL_PLANE1_STRETCH_MAX_X1 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 3)
+#define BDW_UNMASK_VBL_TO_REGS_IN_SRD REG_BIT(0) /* bdw */
+
#define _CHICKEN_TRANS_A 0x420c0
#define _CHICKEN_TRANS_B 0x420c4
#define _CHICKEN_TRANS_C 0x420c8
@@ -1552,6 +1730,11 @@
#define GLK_DFSM_DISPLAY_DSC_DISABLE (1 << 7)
#define XE2LPD_DFSM_DBUF_OVERLAP_DISABLE (1 << 3)
+#define GMD_ID_DISPLAY _MMIO(0x510a0)
+#define GMD_ID_DISPLAY_ARCH_MASK REG_GENMASK(31, 22)
+#define GMD_ID_DISPLAY_RELEASE_MASK REG_GENMASK(21, 14)
+#define GMD_ID_DISPLAY_STEP REG_GENMASK(5, 0)
+
#define XE2LPD_DE_CAP _MMIO(0x41100)
#define XE2LPD_DE_CAP_3DLUT_MASK REG_GENMASK(31, 30)
#define XE2LPD_DE_CAP_DSC_MASK REG_GENMASK(29, 28)
@@ -1685,6 +1868,13 @@
SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2) | \
SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1))
+/* PCH */
+
+#define SDEISR _MMIO(0xc4000)
+#define SDEIMR _MMIO(0xc4004)
+#define SDEIIR _MMIO(0xc4008)
+#define SDEIER _MMIO(0xc400c)
+
#define SDE_IRQ_REGS I915_IRQ_REGS(SDEIMR, \
SDEIER, \
SDEIIR)
@@ -2021,6 +2211,28 @@
#define TRANS_BPC_6 REG_FIELD_PREP(TRANS_BPC_MASK, 2)
#define TRANS_BPC_12 REG_FIELD_PREP(TRANS_BPC_MASK, 3)
+/* Icelake PPS_DATA and _ECC DIP Registers.
+ * These are available for transcoders B,C and eDP.
+ * Adding the _A so as to reuse the _MMIO_TRANS2
+ * definition, with which it offsets to the right location.
+ */
+
+#define _TRANSA_CHICKEN1 0xf0060
+#define _TRANSB_CHICKEN1 0xf1060
+#define TRANS_CHICKEN1(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
+#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE REG_BIT(10)
+#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE REG_BIT(4)
+
+#define _TRANSA_CHICKEN2 0xf0064
+#define _TRANSB_CHICKEN2 0xf1064
+#define TRANS_CHICKEN2(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
+#define TRANS_CHICKEN2_TIMING_OVERRIDE REG_BIT(31)
+#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED REG_BIT(29)
+#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
+#define TRANS_CHICKEN2_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANS_CHICKEN2_FRAME_START_DELAY_MASK, (x)) /* 0-3 */
+#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER REG_BIT(26)
+#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH REG_BIT(25)
+
#define PCH_DP_B _MMIO(0xe4100)
#define PCH_DP_C _MMIO(0xe4200)
#define PCH_DP_D _MMIO(0xe4300)
@@ -2211,6 +2423,13 @@
#define HSW_PWR_WELL_FORCE_ON (1 << 19)
#define HSW_PWR_WELL_CTL6 _MMIO(0x45414)
+/* clock gating DSS DSC disable register */
+#define CLKGATE_DIS_DSSDSC _MMIO(0x46548)
+#define DSS_PIPE_D_GATING_DISABLED REG_BIT(31)
+#define DSS_PIPE_C_GATING_DISABLED REG_BIT(29)
+#define DSS_PIPE_B_GATING_DISABLED REG_BIT(27)
+#define DSS_PIPE_A_GATING_DISABLED REG_BIT(25)
+
/* SKL Fuse Status */
enum skl_power_gate {
SKL_PG0,
@@ -2854,6 +3073,42 @@ enum skl_power_gate {
#define SFUSE_STRAP_DDIC_DETECTED (1 << 1)
#define SFUSE_STRAP_DDID_DETECTED (1 << 0)
+#define SOUTH_CHICKEN1 _MMIO(0xc2000)
+#define FDIA_PHASE_SYNC_SHIFT_OVR 19
+#define FDIA_PHASE_SYNC_SHIFT_EN 18
+#define INVERT_DDIE_HPD REG_BIT(28)
+#define INVERT_DDID_HPD_MTP REG_BIT(27)
+#define INVERT_TC4_HPD REG_BIT(26)
+#define INVERT_TC3_HPD REG_BIT(25)
+#define INVERT_TC2_HPD REG_BIT(24)
+#define INVERT_TC1_HPD REG_BIT(23)
+#define INVERT_DDID_HPD (1 << 18)
+#define INVERT_DDIC_HPD (1 << 17)
+#define INVERT_DDIB_HPD (1 << 16)
+#define INVERT_DDIA_HPD (1 << 15)
+#define FDI_PHASE_SYNC_OVR(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_EN(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define FDI_BC_BIFURCATION_SELECT (1 << 12)
+#define CHASSIS_CLK_REQ_DURATION_MASK (0xf << 8)
+#define CHASSIS_CLK_REQ_DURATION(x) ((x) << 8)
+#define SBCLK_RUN_REFCLK_DIS (1 << 7)
+#define ICP_SECOND_PPS_IO_SELECT REG_BIT(2)
+#define SPT_PWM_GRANULARITY (1 << 0)
+#define SOUTH_CHICKEN2 _MMIO(0xc2004)
+#define FDI_MPHY_IOSFSB_RESET_STATUS (1 << 13)
+#define FDI_MPHY_IOSFSB_RESET_CTL (1 << 12)
+#define LPT_PWM_GRANULARITY (1 << 5)
+#define DPLS_EDP_PPS_FIX_DIS (1 << 0)
+
+#define SOUTH_DSPCLK_GATE_D _MMIO(0xc2020)
+#define PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 31)
+#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1 << 30)
+#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1 << 29)
+#define PCH_DPMGUNIT_CLOCK_GATE_DISABLE (1 << 15)
+#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1 << 14)
+#define CNP_PWM_CGE_GATING_DISABLE (1 << 13)
+#define PCH_LP_PARTITION_LEVEL_DISABLE (1 << 12)
+
/* Gen4+ Timestamp and Pipe Frame time stamp registers */
#define GEN4_TIMESTAMP _MMIO(0x2358)
#define ILK_TIMESTAMP_HI _MMIO(0x70070)
@@ -2940,6 +3195,12 @@ enum skl_power_gate {
#define MTL_PIPE_CLKGATE_DIS2(pipe) _MMIO_PIPE(pipe, _MTL_PIPE_CLKGATE_DIS2_A, _MTL_PIPE_CLKGATE_DIS2_B)
#define MTL_DPFC_GATING_DIS REG_BIT(6)
+#define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700)
+#define XE3P_ECC_IMPACTING_DE REG_BIT(12)
+#define MTL_N_OF_ENABLED_QGV_POINTS_MASK REG_GENMASK(11, 8)
+#define MTL_N_OF_POPULATED_CH_MASK REG_GENMASK(7, 4)
+#define MTL_DDR_TYPE_MASK REG_GENMASK(3, 0)
+
#define MTL_MEM_SS_INFO_QGV_POINT_OFFSET 0x45710
#define MTL_MEM_SS_INFO_QGV_POINT_LOW(point) _MMIO(MTL_MEM_SS_INFO_QGV_POINT_OFFSET + (point) * 8)
#define MTL_TRCD_MASK REG_GENMASK(31, 24)
@@ -2950,6 +3211,11 @@ enum skl_power_gate {
#define MTL_TRAS_MASK REG_GENMASK(16, 8)
#define MTL_TRDPRE_MASK REG_GENMASK(7, 0)
-
+#define FW_BLC _MMIO(0x20d8)
+#define FW_BLC2 _MMIO(0x20dc)
+#define FW_BLC_SELF _MMIO(0x20e0) /* 915+ only */
+#define FW_BLC_SELF_EN_MASK REG_BIT(31)
+#define FW_BLC_SELF_FIFO_MASK REG_BIT(16) /* 945 only */
+#define FW_BLC_SELF_EN REG_BIT(15) /* 945 only */
#endif /* __INTEL_DISPLAY_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.c b/drivers/gpu/drm/i915/display/intel_display_rps.c
index b58281edc563..2f8248458826 100644
--- a/drivers/gpu/drm/i915/display/intel_display_rps.c
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.c
@@ -8,8 +8,8 @@
#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>
-#include "i915_reg.h"
#include "intel_display_core.h"
+#include "intel_display_regs.h"
#include "intel_display_irq.h"
#include "intel_display_rps.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_snapshot.c b/drivers/gpu/drm/i915/display/intel_display_snapshot.c
index f650f15ad394..7f423182aa29 100644
--- a/drivers/gpu/drm/i915/display/intel_display_snapshot.c
+++ b/drivers/gpu/drm/i915/display/intel_display_snapshot.c
@@ -19,7 +19,6 @@ struct intel_display_snapshot {
struct intel_display_device_info info;
struct intel_display_runtime_info runtime_info;
struct intel_display_params params;
- struct intel_overlay_snapshot *overlay;
struct intel_dmc_snapshot *dmc;
struct intel_display_irq_snapshot *irq;
};
@@ -41,7 +40,6 @@ struct intel_display_snapshot *intel_display_snapshot_capture(struct intel_displ
intel_display_params_copy(&snapshot->params);
snapshot->irq = intel_display_irq_snapshot_capture(display);
- snapshot->overlay = intel_overlay_snapshot_capture(display);
snapshot->dmc = intel_dmc_snapshot_capture(display);
return snapshot;
@@ -61,7 +59,6 @@ void intel_display_snapshot_print(const struct intel_display_snapshot *snapshot,
intel_display_params_dump(&snapshot->params, display->drm->driver->name, p);
intel_display_irq_snapshot_print(snapshot->irq, p);
- intel_overlay_snapshot_print(snapshot->overlay, p);
intel_dmc_snapshot_print(snapshot->dmc, p);
}
@@ -73,7 +70,6 @@ void intel_display_snapshot_free(struct intel_display_snapshot *snapshot)
intel_display_params_free(&snapshot->params);
kfree(snapshot->irq);
- kfree(snapshot->overlay);
kfree(snapshot->dmc);
kfree(snapshot);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 6b92f333e18b..e189f8c39ccb 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -145,7 +145,7 @@ struct intel_framebuffer {
struct intel_fb_view remapped_view;
};
- struct i915_address_space *dpt_vm;
+ struct intel_dpt *dpt;
unsigned int min_alignment;
unsigned int vtd_guard;
@@ -351,6 +351,7 @@ struct intel_vbt_panel_data {
bool low_vswing;
bool hobl;
bool dsc_disable;
+ bool pipe_joiner_enable;
} edp;
struct {
@@ -1162,6 +1163,7 @@ struct intel_crtc_state {
} dsi_pll;
int max_link_bpp_x16; /* in 1/16 bpp units */
+ int max_pipe_bpp; /* in 1 bpp units */
int pipe_bpp; /* in 1 bpp units */
int min_hblank;
struct intel_link_m_n dp_m_n;
@@ -1333,10 +1335,13 @@ struct intel_crtc_state {
/* Only used for state computation, not read out from the HW. */
bool compression_enabled_on_link;
bool compression_enable;
- int num_streams;
+ struct intel_dsc_slice_config {
+ int pipes_per_line;
+ int streams_per_pipe;
+ int slices_per_stream;
+ } slice_config;
/* Compressed Bpp in U6.4 format (first 4 bits for fractional part) */
u16 compressed_bpp_x16;
- u8 slice_count;
struct drm_dsc_config config;
} dsc;
@@ -1791,6 +1796,7 @@ struct intel_dp {
int link_rate;
u8 lane_count;
u8 sink_count;
+ bool downstream_port_changed;
bool needs_modeset_retry;
bool use_max_params;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.c b/drivers/gpu/drm/i915/display/intel_display_wa.c
index 581d943b9bdc..081a4092cd13 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.c
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.c
@@ -5,11 +5,11 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_core.h"
#include "intel_display_regs.h"
#include "intel_display_wa.h"
+#include "intel_step.h"
static void gen11_display_wa_apply(struct intel_display *display)
{
@@ -32,9 +32,17 @@ static void adlp_display_wa_apply(struct intel_display *display)
intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0);
}
+static void xe3plpd_display_wa_apply(struct intel_display *display)
+{
+ /* Wa_22021451799 */
+ intel_de_rmw(display, GEN9_CLKGATE_DIS_0, 0, DMG_GATING_DIS);
+}
+
void intel_display_wa_apply(struct intel_display *display)
{
- if (display->platform.alderlake_p)
+ if (DISPLAY_VER(display) == 35)
+ xe3plpd_display_wa_apply(display);
+ else if (display->platform.alderlake_p)
adlp_display_wa_apply(display);
else if (DISPLAY_VER(display) == 12)
xe_d_display_wa_apply(display);
@@ -62,22 +70,89 @@ static bool intel_display_needs_wa_16025573575(struct intel_display *display)
bool __intel_display_wa(struct intel_display *display, enum intel_display_wa wa, const char *name)
{
switch (wa) {
+ case INTEL_DISPLAY_WA_1409120013:
+ return IS_DISPLAY_VER(display, 11, 12);
+ case INTEL_DISPLAY_WA_1409767108:
+ return (display->platform.alderlake_s ||
+ (display->platform.rocketlake &&
+ IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)));
case INTEL_DISPLAY_WA_13012396614:
- return DISPLAY_VERx100(display) == 3000;
+ return DISPLAY_VERx100(display) == 3000 ||
+ DISPLAY_VERx100(display) == 3500;
+ case INTEL_DISPLAY_WA_14010477008:
+ return display->platform.dg1 || display->platform.rocketlake ||
+ (display->platform.tigerlake &&
+ IS_DISPLAY_STEP(display, STEP_A0, STEP_D0));
+ case INTEL_DISPLAY_WA_14010480278:
+ return (IS_DISPLAY_VER(display, 10, 12));
+ case INTEL_DISPLAY_WA_14010547955:
+ return display->platform.dg2;
+ case INTEL_DISPLAY_WA_14010685332:
+ return INTEL_PCH_TYPE(display) >= PCH_CNP &&
+ INTEL_PCH_TYPE(display) < PCH_DG1;
+ case INTEL_DISPLAY_WA_14011294188:
+ return INTEL_PCH_TYPE(display) >= PCH_TGP &&
+ INTEL_PCH_TYPE(display) < PCH_DG1;
+ case INTEL_DISPLAY_WA_14011503030:
case INTEL_DISPLAY_WA_14011503117:
+ case INTEL_DISPLAY_WA_22012358565:
return DISPLAY_VER(display) == 13;
+ case INTEL_DISPLAY_WA_14011508470:
+ return (IS_DISPLAY_VERx100(display, 1200, 1300));
+ case INTEL_DISPLAY_WA_14011765242:
+ return display->platform.alderlake_s &&
+ IS_DISPLAY_STEP(display, STEP_A0, STEP_A2);
+ case INTEL_DISPLAY_WA_14014143976:
+ return IS_DISPLAY_STEP(display, STEP_E0, STEP_FOREVER);
+ case INTEL_DISPLAY_WA_14016740474:
+ return IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_C0);
+ case INTEL_DISPLAY_WA_14020863754:
+ return DISPLAY_VERx100(display) == 3000 ||
+ DISPLAY_VERx100(display) == 2000 ||
+ DISPLAY_VERx100(display) == 1401;
case INTEL_DISPLAY_WA_14025769978:
return DISPLAY_VER(display) == 35;
+ case INTEL_DISPLAY_WA_15013987218:
+ return DISPLAY_VER(display) == 20;
case INTEL_DISPLAY_WA_15018326506:
return display->platform.battlemage;
+ case INTEL_DISPLAY_WA_16011303918:
+ case INTEL_DISPLAY_WA_22011320316:
+ return display->platform.alderlake_p &&
+ IS_DISPLAY_STEP(display, STEP_A0, STEP_B0);
+ case INTEL_DISPLAY_WA_16011181250:
+ return display->platform.rocketlake || display->platform.alderlake_s ||
+ display->platform.dg2;
+ case INTEL_DISPLAY_WA_16011342517:
+ return display->platform.alderlake_p &&
+ IS_DISPLAY_STEP(display, STEP_A0, STEP_D0);
+ case INTEL_DISPLAY_WA_16011863758:
+ return DISPLAY_VER(display) >= 11;
case INTEL_DISPLAY_WA_16023588340:
return intel_display_needs_wa_16023588340(display);
case INTEL_DISPLAY_WA_16025573575:
return intel_display_needs_wa_16025573575(display);
+ case INTEL_DISPLAY_WA_16025596647:
+ return DISPLAY_VER(display) == 20 &&
+ IS_DISPLAY_VERx100_STEP(display, 3000,
+ STEP_A0, STEP_B0);
+ case INTEL_DISPLAY_WA_18034343758:
+ return DISPLAY_VER(display) == 20 ||
+ (display->platform.pantherlake &&
+ IS_DISPLAY_STEP(display, STEP_A0, STEP_B0));
+ case INTEL_DISPLAY_WA_22010178259:
+ return DISPLAY_VER(display) == 12;
+ case INTEL_DISPLAY_WA_22010947358:
+ return display->platform.alderlake_p;
+ case INTEL_DISPLAY_WA_22012278275:
+ return display->platform.alderlake_p &&
+ IS_DISPLAY_STEP(display, STEP_A0, STEP_E0);
case INTEL_DISPLAY_WA_22014263786:
return IS_DISPLAY_VERx100(display, 1100, 1400);
+ case INTEL_DISPLAY_WA_22021048059:
+ return IS_DISPLAY_VER(display, 14, 35);
default:
- drm_WARN(display->drm, 1, "Missing Wa number: %s\n", name);
+ drm_WARN(display->drm, 1, "Missing Wa: %s\n", name);
break;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.h b/drivers/gpu/drm/i915/display/intel_display_wa.h
index 40f989f19df1..15fec843f15e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.h
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.h
@@ -27,18 +27,44 @@ bool intel_display_needs_wa_16023588340(struct intel_display *display);
* number.
*/
enum intel_display_wa {
+ INTEL_DISPLAY_WA_1409120013,
+ INTEL_DISPLAY_WA_1409767108,
INTEL_DISPLAY_WA_13012396614,
+ INTEL_DISPLAY_WA_14010477008,
+ INTEL_DISPLAY_WA_14010480278,
+ INTEL_DISPLAY_WA_14010547955,
+ INTEL_DISPLAY_WA_14010685332,
+ INTEL_DISPLAY_WA_14011294188,
+ INTEL_DISPLAY_WA_14011503030,
INTEL_DISPLAY_WA_14011503117,
+ INTEL_DISPLAY_WA_14011508470,
+ INTEL_DISPLAY_WA_14011765242,
+ INTEL_DISPLAY_WA_14014143976,
+ INTEL_DISPLAY_WA_14016740474,
+ INTEL_DISPLAY_WA_14020863754,
INTEL_DISPLAY_WA_14025769978,
+ INTEL_DISPLAY_WA_15013987218,
INTEL_DISPLAY_WA_15018326506,
+ INTEL_DISPLAY_WA_16011181250,
+ INTEL_DISPLAY_WA_16011303918,
+ INTEL_DISPLAY_WA_16011342517,
+ INTEL_DISPLAY_WA_16011863758,
INTEL_DISPLAY_WA_16023588340,
INTEL_DISPLAY_WA_16025573575,
+ INTEL_DISPLAY_WA_16025596647,
+ INTEL_DISPLAY_WA_18034343758,
+ INTEL_DISPLAY_WA_22010178259,
+ INTEL_DISPLAY_WA_22010947358,
+ INTEL_DISPLAY_WA_22011320316,
+ INTEL_DISPLAY_WA_22012278275,
+ INTEL_DISPLAY_WA_22012358565,
INTEL_DISPLAY_WA_22014263786,
+ INTEL_DISPLAY_WA_22021048059,
};
bool __intel_display_wa(struct intel_display *display, enum intel_display_wa wa, const char *name);
#define intel_display_wa(__display, __wa) \
- __intel_display_wa((__display), INTEL_DISPLAY_WA_##__wa, __stringify(__wa))
+ __intel_display_wa((__display), __wa, __stringify(__wa))
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 1006b060c3f3..90ba932d940a 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -29,7 +29,6 @@
#include <drm/drm_file.h>
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_power_well.h"
@@ -1599,8 +1598,7 @@ static bool intel_dmc_get_dc6_allowed_count(struct intel_display *display, u32 *
return false;
mutex_lock(&power_domains->lock);
- dc6_enabled = intel_de_read(display, DC_STATE_EN) &
- DC_STATE_EN_UPTO_DC6;
+ dc6_enabled = power_domains->dc_state & DC_STATE_EN_UPTO_DC6;
if (dc6_enabled)
intel_dmc_update_dc6_allowed_count(display, false);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 559cf3bb23fd..4955bd8b11d7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -107,20 +107,6 @@
/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
-/*
- * With Single pipe configuration, HW is capable of supporting maximum of:
- * 2 slices per line for ICL, BMG
- * 4 slices per line for other platforms.
- * For now consider a max of 2 slices per line, which works for all platforms.
- * With this we can have max of 4 DSC Slices per pipe.
- *
- * For higher resolutions where 12 slice support is required with
- * ultrajoiner, only then each pipe can support 3 slices.
- *
- * #TODO Split this better to use 4 slices/dsc engine where supported.
- */
-static const u8 valid_dsc_slicecount[] = {1, 2, 3, 4};
-
/**
* intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
@@ -508,11 +494,16 @@ bool intel_dp_has_joiner(struct intel_dp *intel_dp)
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
+ struct intel_connector *connector = intel_dp->attached_connector;
/* eDP MSO is not compatible with joiner */
if (intel_dp->mso_link_count)
return false;
+ if (intel_dp_is_edp(intel_dp) &&
+ !connector->panel.vbt.edp.pipe_joiner_enable)
+ return false;
+
return DISPLAY_VER(display) >= 12 ||
(DISPLAY_VER(display) == 11 &&
encoder->port != PORT_A);
@@ -959,19 +950,25 @@ u32 get_max_compressed_bpp_with_joiner(struct intel_display *display,
return max_bpp;
}
-u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
- int mode_clock, int mode_hdisplay,
- int num_joined_pipes)
+static int intel_dp_dsc_min_slice_count(const struct intel_connector *connector,
+ int mode_clock, int mode_hdisplay)
{
struct intel_display *display = to_intel_display(connector);
- u32 sink_slice_count_mask =
- drm_dp_dsc_sink_slice_count_mask(connector->dp.dsc_dpcd, false);
- u8 min_slice_count, i;
+ bool is_edp =
+ connector->base.connector_type == DRM_MODE_CONNECTOR_eDP;
+ int min_slice_count;
int max_slice_width;
int tp_rgb_yuv444;
int tp_yuv422_420;
/*
+ * TODO: allow using less than the maximum number of slices
+ * supported by the eDP sink, to allow using fewer DSC engines.
+ */
+ if (is_edp)
+ return drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd, true);
+
+ /*
* TODO: Use the throughput value specific to the actual RGB/YUV
* format of the output.
* The RGB/YUV444 throughput value should be always either equal
@@ -1011,7 +1008,7 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
* slice and VDSC engine, whenever we approach close enough to max CDCLK
*/
if (mode_clock >= ((display->cdclk.max_cdclk_freq * 85) / 100))
- min_slice_count = max_t(u8, min_slice_count, 2);
+ min_slice_count = max(min_slice_count, 2);
max_slice_width = drm_dp_dsc_sink_max_slice_width(connector->dp.dsc_dpcd);
if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
@@ -1021,39 +1018,64 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
return 0;
}
/* Also take into account max slice width */
- min_slice_count = max_t(u8, min_slice_count,
- DIV_ROUND_UP(mode_hdisplay,
- max_slice_width));
+ min_slice_count = max(min_slice_count,
+ DIV_ROUND_UP(mode_hdisplay, max_slice_width));
- /* Find the closest match to the valid slice count values */
- for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
- u8 test_slice_count = valid_dsc_slicecount[i] * num_joined_pipes;
+ return min_slice_count;
+}
- /*
- * 3 DSC Slices per pipe need 3 DSC engines, which is supported only
- * with Ultrajoiner only for some platforms.
- */
- if (valid_dsc_slicecount[i] == 3 &&
- (!HAS_DSC_3ENGINES(display) || num_joined_pipes != 4))
+static bool
+intel_dp_dsc_get_slice_config(const struct intel_connector *connector,
+ int mode_clock, int mode_hdisplay,
+ int num_joined_pipes,
+ struct intel_dsc_slice_config *config_ret)
+{
+ struct intel_display *display = to_intel_display(connector);
+ int min_slice_count =
+ intel_dp_dsc_min_slice_count(connector, mode_clock, mode_hdisplay);
+ bool is_edp =
+ connector->base.connector_type == DRM_MODE_CONNECTOR_eDP;
+ u32 sink_slice_count_mask =
+ drm_dp_dsc_sink_slice_count_mask(connector->dp.dsc_dpcd, is_edp);
+ int slices_per_pipe;
+
+ /*
+ * Find the closest match to the valid slice count values
+ *
+ * Max HW DSC-per-pipe x slice-per-DSC (= slice-per-pipe) capability:
+ * ICL: 2x2
+ * BMG: 2x2, or for ultrajoined 4 pipes: 3x1
+ * TGL+: 2x4 (TODO: Add support for this)
+ *
+ * TODO: Explore if it's worth increasing the number of slices (from 1
+ * to 2 or 3), so that multiple VDSC engines can be used, thus
+ * reducing the minimum CDCLK requirement, which in turn is determined
+ * by the 1 pixel per clock VDSC engine throughput in
+ * intel_vdsc_min_cdclk().
+ */
+ for (slices_per_pipe = 1; slices_per_pipe <= 4; slices_per_pipe++) {
+ struct intel_dsc_slice_config config;
+ int slices_per_line;
+
+ if (!intel_dsc_get_slice_config(display,
+ num_joined_pipes, slices_per_pipe,
+ &config))
continue;
- if (!(drm_dp_dsc_slice_count_to_mask(test_slice_count) &
+ slices_per_line = intel_dsc_line_slice_count(&config);
+
+ if (!(drm_dp_dsc_slice_count_to_mask(slices_per_line) &
sink_slice_count_mask))
continue;
- /*
- * Bigjoiner needs small joiner to be enabled.
- * So there should be at least 2 dsc slices per pipe,
- * whenever bigjoiner is enabled.
- */
- if (num_joined_pipes > 1 && valid_dsc_slicecount[i] < 2)
+ if (mode_hdisplay % slices_per_line)
continue;
- if (mode_hdisplay % test_slice_count)
- continue;
+ if (min_slice_count <= slices_per_line) {
+ *config_ret = config;
- if (min_slice_count <= test_slice_count)
- return test_slice_count;
+ return true;
+ }
}
/* Print slice count 1,2,4,..24 if bit#0,1,3,..23 is set in the mask. */
@@ -1064,7 +1086,21 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
min_slice_count,
(int)BITS_PER_TYPE(sink_slice_count_mask), &sink_slice_count_mask);
- return 0;
+ return false;
+}
+
+u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
+ int mode_clock, int mode_hdisplay,
+ int num_joined_pipes)
+{
+ struct intel_dsc_slice_config config;
+
+ if (!intel_dp_dsc_get_slice_config(connector,
+ mode_clock, mode_hdisplay,
+ num_joined_pipes, &config))
+ return 0;
+
+ return intel_dsc_line_slice_count(&config);
}
static bool source_can_output(struct intel_dp *intel_dp,
@@ -1335,44 +1371,9 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
return MODE_OK;
}
-static
-bool intel_dp_needs_joiner(struct intel_dp *intel_dp,
- struct intel_connector *connector,
- int hdisplay, int clock,
- int num_joined_pipes)
-{
- struct intel_display *display = to_intel_display(intel_dp);
- int hdisplay_limit;
-
- if (!intel_dp_has_joiner(intel_dp))
- return false;
-
- num_joined_pipes /= 2;
-
- hdisplay_limit = DISPLAY_VER(display) >= 30 ? 6144 : 5120;
-
- return clock > num_joined_pipes * display->cdclk.max_dotclk_freq ||
- hdisplay > num_joined_pipes * hdisplay_limit;
-}
-
-int intel_dp_num_joined_pipes(struct intel_dp *intel_dp,
- struct intel_connector *connector,
- int hdisplay, int clock)
+int intel_dp_max_hdisplay_per_pipe(struct intel_display *display)
{
- struct intel_display *display = to_intel_display(intel_dp);
-
- if (connector->force_joined_pipes)
- return connector->force_joined_pipes;
-
- if (HAS_ULTRAJOINER(display) &&
- intel_dp_needs_joiner(intel_dp, connector, hdisplay, clock, 4))
- return 4;
-
- if ((HAS_BIGJOINER(display) || HAS_UNCOMPRESSED_JOINER(display)) &&
- intel_dp_needs_joiner(intel_dp, connector, hdisplay, clock, 2))
- return 2;
-
- return 1;
+ return DISPLAY_VER(display) >= 30 ? 6144 : 5120;
}
bool intel_dp_has_dsc(const struct intel_connector *connector)
@@ -1395,6 +1396,51 @@ bool intel_dp_has_dsc(const struct intel_connector *connector)
return true;
}
+static
+bool intel_dp_can_join(struct intel_dp *intel_dp,
+ int num_joined_pipes)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ if (num_joined_pipes > 1 && !intel_dp_has_joiner(intel_dp))
+ return false;
+
+ switch (num_joined_pipes) {
+ case 1:
+ return true;
+ case 2:
+ return HAS_BIGJOINER(display) ||
+ HAS_UNCOMPRESSED_JOINER(display);
+ case 4:
+ return HAS_ULTRAJOINER(display);
+ default:
+ return false;
+ }
+}
+
+bool intel_dp_dotclk_valid(struct intel_display *display,
+ int target_clock,
+ int htotal,
+ int dsc_slice_count,
+ int num_joined_pipes)
+{
+ int max_dotclk = display->cdclk.max_dotclk_freq;
+ int effective_dotclk_limit;
+
+ effective_dotclk_limit = max_dotclk * num_joined_pipes;
+
+ if (dsc_slice_count)
+ target_clock = intel_dsc_get_pixel_rate_with_dsc_bubbles(display,
+ target_clock,
+ htotal,
+ dsc_slice_count);
+ else
+ effective_dotclk_limit =
+ intel_max_uncompressed_dotclock(display) * num_joined_pipes;
+
+ return target_clock <= effective_dotclk_limit;
+}
+
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *_connector,
const struct drm_display_mode *mode)
@@ -1406,9 +1452,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
const struct drm_display_mode *fixed_mode;
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
- int max_dotclk = display->cdclk.max_dotclk_freq;
u16 dsc_max_compressed_bpp = 0;
- u8 dsc_slice_count = 0;
enum drm_mode_status status;
bool dsc = false;
int num_joined_pipes;
@@ -1424,6 +1468,9 @@ intel_dp_mode_valid(struct drm_connector *_connector,
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
+ if (intel_dp_hdisplay_bad(display, mode->hdisplay))
+ return MODE_H_ILLEGAL;
+
fixed_mode = intel_panel_fixed_mode(connector, mode);
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
status = intel_panel_mode_valid(connector, mode);
@@ -1433,23 +1480,9 @@ intel_dp_mode_valid(struct drm_connector *_connector,
target_clock = fixed_mode->clock;
}
- num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector,
- mode->hdisplay, target_clock);
- max_dotclk *= num_joined_pipes;
-
sink_format = intel_dp_sink_format(connector, mode);
output_format = intel_dp_output_format(connector, sink_format);
- status = intel_pfit_mode_valid(display, mode, output_format, num_joined_pipes);
- if (status != MODE_OK)
- return status;
-
- if (target_clock > max_dotclk)
- return MODE_CLOCK_HIGH;
-
- if (intel_dp_hdisplay_bad(display, mode->hdisplay))
- return MODE_H_ILLEGAL;
-
max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
@@ -1460,52 +1493,92 @@ intel_dp_mode_valid(struct drm_connector *_connector,
target_clock, mode->hdisplay,
link_bpp_x16, 0);
- if (intel_dp_has_dsc(connector)) {
- int pipe_bpp;
+ /*
+ * We cannot determine the required pipe-join count before knowing whether
+ * DSC is needed, nor can we determine DSC need without knowing the pipe
+ * count.
+ * Because of this dependency cycle, the only correct approach is to iterate
+ * over candidate pipe counts and evaluate each combination.
+ */
+ status = MODE_CLOCK_HIGH;
+ for_each_joiner_candidate(connector, mode, num_joined_pipes) {
+ int dsc_slice_count = 0;
- /*
- * TBD pass the connector BPC,
- * for now U8_MAX so that max BPC on that platform would be picked
- */
- pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, U8_MAX);
+ status = intel_pfit_mode_valid(display, mode, output_format, num_joined_pipes);
+ if (status != MODE_OK)
+ continue;
- /*
- * Output bpp is stored in 6.4 format so right shift by 4 to get the
- * integer value since we support only integer values of bpp.
- */
- if (intel_dp_is_edp(intel_dp)) {
- dsc_max_compressed_bpp =
- drm_edp_dsc_sink_output_bpp(connector->dp.dsc_dpcd) >> 4;
- dsc_slice_count =
- drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd,
- true);
- dsc = dsc_max_compressed_bpp && dsc_slice_count;
- } else if (drm_dp_sink_supports_fec(connector->dp.fec_capability)) {
- unsigned long bw_overhead_flags = 0;
-
- if (!drm_dp_is_uhbr_rate(max_link_clock))
- bw_overhead_flags |= DRM_DP_BW_OVERHEAD_FEC;
-
- dsc = intel_dp_mode_valid_with_dsc(connector,
- max_link_clock, max_lanes,
- target_clock, mode->hdisplay,
- num_joined_pipes,
- output_format, pipe_bpp,
- bw_overhead_flags);
+ if (intel_dp_has_dsc(connector)) {
+ int pipe_bpp;
+
+ dsc_slice_count = intel_dp_dsc_get_slice_count(connector,
+ target_clock,
+ mode->hdisplay,
+ num_joined_pipes);
+
+ /*
+ * TBD pass the connector BPC,
+ * for now U8_MAX so that max BPC on that platform would be picked
+ */
+ pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, U8_MAX);
+
+ /*
+ * Output bpp is stored in 6.4 format so right shift by 4 to get the
+ * integer value since we support only integer values of bpp.
+ */
+ if (intel_dp_is_edp(intel_dp)) {
+ dsc_max_compressed_bpp =
+ drm_edp_dsc_sink_output_bpp(connector->dp.dsc_dpcd) >> 4;
+
+ dsc = dsc_max_compressed_bpp && dsc_slice_count;
+ } else if (drm_dp_sink_supports_fec(connector->dp.fec_capability)) {
+ unsigned long bw_overhead_flags = 0;
+
+ if (!drm_dp_is_uhbr_rate(max_link_clock))
+ bw_overhead_flags |= DRM_DP_BW_OVERHEAD_FEC;
+
+ dsc = intel_dp_mode_valid_with_dsc(connector,
+ max_link_clock, max_lanes,
+ target_clock, mode->hdisplay,
+ num_joined_pipes,
+ output_format, pipe_bpp,
+ bw_overhead_flags);
+ }
}
- }
- if (intel_dp_joiner_needs_dsc(display, num_joined_pipes) && !dsc)
- return MODE_CLOCK_HIGH;
+ if (intel_dp_joiner_needs_dsc(display, num_joined_pipes) && !dsc) {
+ status = MODE_CLOCK_HIGH;
+ continue;
+ }
- if (mode_rate > max_rate && !dsc)
- return MODE_CLOCK_HIGH;
+ if (mode_rate > max_rate && !dsc) {
+ status = MODE_CLOCK_HIGH;
+ continue;
+ }
+
+ status = intel_mode_valid_max_plane_size(display, mode, num_joined_pipes);
+ if (status != MODE_OK)
+ continue;
+
+ if (!dsc)
+ dsc_slice_count = 0;
+
+ if (!intel_dp_dotclk_valid(display,
+ target_clock,
+ mode->htotal,
+ dsc_slice_count,
+ num_joined_pipes)) {
+ status = MODE_CLOCK_HIGH;
+ continue;
+ }
+
+ break;
+ }
- status = intel_dp_mode_valid_downstream(connector, mode, target_clock);
if (status != MODE_OK)
return status;
- return intel_mode_valid_max_plane_size(display, mode, num_joined_pipes);
+ return intel_dp_mode_valid_downstream(connector, mode, target_clock);
}
bool intel_dp_source_supports_tps3(struct intel_display *display)
@@ -1696,7 +1769,7 @@ static int intel_dp_max_bpp(struct intel_dp *intel_dp,
struct intel_connector *connector = intel_dp->attached_connector;
int bpp, bpc;
- bpc = crtc_state->pipe_bpp / 3;
+ bpc = crtc_state->max_pipe_bpp / 3;
if (intel_dp->dfp.max_bpc)
bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);
@@ -2032,12 +2105,14 @@ static int dsc_compute_link_config(struct intel_dp *intel_dp,
} else {
unsigned long bw_overhead_flags =
pipe_config->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0;
+ int line_slice_count =
+ intel_dsc_line_slice_count(&pipe_config->dsc.slice_config);
if (!is_bw_sufficient_for_dsc_config(intel_dp,
link_rate, lane_count,
adjusted_mode->crtc_clock,
adjusted_mode->hdisplay,
- pipe_config->dsc.slice_count,
+ line_slice_count,
dsc_bpp_x16,
bw_overhead_flags))
continue;
@@ -2344,6 +2419,17 @@ bool intel_dp_needs_8b10b_fec(const struct intel_crtc_state *crtc_state,
return dsc_enabled_on_crtc || intel_dsc_enabled_on_link(crtc_state);
}
+void intel_dp_dsc_reset_config(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->fec_enable = false;
+
+ crtc_state->dsc.compression_enable = false;
+ crtc_state->dsc.compressed_bpp_x16 = 0;
+
+ memset(&crtc_state->dsc.slice_config, 0, sizeof(crtc_state->dsc.slice_config));
+ memset(&crtc_state->dsc.config, 0, sizeof(crtc_state->dsc.config));
+}
+
int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state,
@@ -2382,47 +2468,10 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
}
}
- /* Calculate Slice count */
- if (intel_dp_is_edp(intel_dp)) {
- pipe_config->dsc.slice_count =
- drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd,
- true);
- if (!pipe_config->dsc.slice_count) {
- drm_dbg_kms(display->drm,
- "Unsupported Slice Count %d\n",
- pipe_config->dsc.slice_count);
- return -EINVAL;
- }
- } else {
- u8 dsc_dp_slice_count;
-
- dsc_dp_slice_count =
- intel_dp_dsc_get_slice_count(connector,
- adjusted_mode->crtc_clock,
- adjusted_mode->crtc_hdisplay,
- num_joined_pipes);
- if (!dsc_dp_slice_count) {
- drm_dbg_kms(display->drm,
- "Compressed Slice Count not supported\n");
- return -EINVAL;
- }
-
- pipe_config->dsc.slice_count = dsc_dp_slice_count;
- }
- /*
- * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
- * is greater than the maximum Cdclock and if slice count is even
- * then we need to use 2 VDSC instances.
- * In case of Ultrajoiner along with 12 slices we need to use 3
- * VDSC instances.
- */
- if (pipe_config->joiner_pipes && num_joined_pipes == 4 &&
- pipe_config->dsc.slice_count == 12)
- pipe_config->dsc.num_streams = 3;
- else if (pipe_config->joiner_pipes || pipe_config->dsc.slice_count > 1)
- pipe_config->dsc.num_streams = 2;
- else
- pipe_config->dsc.num_streams = 1;
+ if (!intel_dp_dsc_get_slice_config(connector, adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_hdisplay, num_joined_pipes,
+ &pipe_config->dsc.slice_config))
+ return -EINVAL;
ret = intel_dp_dsc_compute_params(connector, pipe_config);
if (ret < 0) {
@@ -2440,7 +2489,7 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
"Compressed Bpp = " FXP_Q4_FMT " Slice Count = %d\n",
pipe_config->pipe_bpp,
FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16),
- pipe_config->dsc.slice_count);
+ intel_dsc_line_slice_count(&pipe_config->dsc.slice_config));
return 0;
}
@@ -2476,8 +2525,8 @@ dsc_throughput_quirk_max_bpp_x16(const struct intel_connector *connector,
return fxp_q4_from_int(12);
}
-static int compute_min_compressed_bpp_x16(struct intel_connector *connector,
- enum intel_output_format output_format)
+int intel_dp_compute_min_compressed_bpp_x16(struct intel_connector *connector,
+ enum intel_output_format output_format)
{
int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
int min_bpp_x16;
@@ -2543,7 +2592,8 @@ bool intel_dp_mode_valid_with_dsc(struct intel_connector *connector,
int pipe_bpp, unsigned long bw_overhead_flags)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- int min_bpp_x16 = compute_min_compressed_bpp_x16(connector, output_format);
+ int min_bpp_x16 = intel_dp_compute_min_compressed_bpp_x16(connector,
+ output_format);
int max_bpp_x16 = compute_max_compressed_bpp_x16(connector,
mode_clock, mode_hdisplay,
num_joined_pipes,
@@ -2597,7 +2647,8 @@ intel_dp_compute_config_link_bpp_limits(struct intel_connector *connector,
limits->link.min_bpp_x16 = fxp_q4_from_int(limits->pipe.min_bpp);
} else {
limits->link.min_bpp_x16 =
- compute_min_compressed_bpp_x16(connector, crtc_state->output_format);
+ intel_dp_compute_min_compressed_bpp_x16(connector,
+ crtc_state->output_format);
max_link_bpp_x16 =
compute_max_compressed_bpp_x16(connector,
@@ -2691,7 +2742,7 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
* previously. This hack should be removed once we have the
* proper retry logic in place.
*/
- limits->pipe.max_bpp = min(crtc_state->pipe_bpp, 24);
+ limits->pipe.max_bpp = min(crtc_state->max_pipe_bpp, 24);
} else {
limits->pipe.max_bpp = intel_dp_max_bpp(intel_dp, crtc_state,
respect_downstream_limits);
@@ -2710,9 +2761,39 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
crtc_state)));
}
+ if (limits->pipe.min_bpp <= 0 ||
+ limits->pipe.min_bpp > limits->pipe.max_bpp) {
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] Invalid pipe bpp range: %d-%d\n",
+ connector->base.base.id, connector->base.name,
+ limits->pipe.min_bpp, limits->pipe.max_bpp);
+
+ return false;
+ }
+
if (dsc && !intel_dp_dsc_compute_pipe_bpp_limits(connector, limits))
return false;
+ /*
+ * crtc_state->pipe_bpp is the non-DP specific baseline (platform /
+ * EDID) maximum pipe BPP limited by the max-BPC connector property
+ * request. Since by now pipe.max_bpp is <= the above baseline
+ * maximum BPP, the only remaining reason for adjusting pipe.max_bpp
+ * is the max-BPC connector property request. Adjust pipe.max_bpp to
+ * this request within the current valid pipe.min_bpp .. pipe.max_bpp
+ * range.
+ */
+ limits->pipe.max_bpp = clamp(crtc_state->pipe_bpp, limits->pipe.min_bpp,
+ limits->pipe.max_bpp);
+ if (dsc)
+ limits->pipe.max_bpp = align_max_sink_dsc_input_bpp(connector,
+ limits->pipe.max_bpp);
+
+ if (limits->pipe.max_bpp != crtc_state->pipe_bpp)
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] Adjusting requested max pipe bpp %d -> %d\n",
+ connector->base.base.id, connector->base.name,
+ crtc_state->pipe_bpp, limits->pipe.max_bpp);
+
if (is_mst || intel_dp->use_max_params) {
/*
* For MST we always configure max link bw - the spec doesn't
@@ -2764,13 +2845,13 @@ bool intel_dp_joiner_needs_dsc(struct intel_display *display,
}
static int
-intel_dp_compute_link_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state,
- bool respect_downstream_limits)
+intel_dp_compute_link_for_joined_pipes(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state,
+ bool respect_downstream_limits)
{
struct intel_display *display = to_intel_display(encoder);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ int num_joined_pipes = intel_crtc_num_joined_pipes(pipe_config);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
@@ -2778,18 +2859,9 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct link_config_limits limits;
bool dsc_needed, joiner_needs_dsc;
- int num_joined_pipes;
int ret = 0;
- if (pipe_config->fec_enable &&
- !intel_dp_supports_fec(intel_dp, connector, pipe_config))
- return -EINVAL;
-
- num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector,
- adjusted_mode->crtc_hdisplay,
- adjusted_mode->crtc_clock);
- if (num_joined_pipes > 1)
- pipe_config->joiner_pipes = GENMASK(crtc->pipe + num_joined_pipes - 1, crtc->pipe);
+ intel_dp_dsc_reset_config(pipe_config);
joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);
@@ -2813,7 +2885,13 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
fxp_q4_from_int(pipe_config->pipe_bpp),
fxp_q4_from_int(pipe_config->pipe_bpp),
0, false);
- if (ret)
+
+ if (ret ||
+ !intel_dp_dotclk_valid(display,
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_htotal,
+ 0,
+ num_joined_pipes))
dsc_needed = true;
}
@@ -2823,6 +2901,8 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
}
if (dsc_needed) {
+ int dsc_slice_count;
+
drm_dbg_kms(display->drm,
"Try DSC (fallback=%s, joiner=%s, force=%s)\n",
str_yes_no(ret), str_yes_no(joiner_needs_dsc),
@@ -2838,6 +2918,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
conn_state, &limits, 64);
if (ret < 0)
return ret;
+
+ dsc_slice_count = intel_dsc_line_slice_count(&pipe_config->dsc.slice_config);
+
+ if (!intel_dp_dotclk_valid(display,
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_htotal,
+ dsc_slice_count,
+ num_joined_pipes))
+ return -EINVAL;
}
drm_dbg_kms(display->drm,
@@ -2854,6 +2943,55 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
return 0;
}
+static int
+intel_dp_compute_link_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ bool respect_downstream_limits)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ int num_joined_pipes;
+ int ret = -EINVAL;
+
+ if (crtc_state->fec_enable &&
+ !intel_dp_supports_fec(intel_dp, connector, crtc_state))
+ return -EINVAL;
+
+ for_each_joiner_candidate(connector, adjusted_mode, num_joined_pipes) {
+ /*
+ * NOTE:
+ * The crtc_state->joiner_pipes should have been set at the end
+ * only if all the conditions are met. However that would mean
+ * that num_joined_pipes is passed around to all helpers and
+ * make them use it instead of using crtc_state->joiner_pipes
+ * directly or indirectly (via intel_crtc_num_joined_pipes()).
+ *
+ * For now, setting crtc_state->joiner_pipes to the candidate
+ * value to avoid the above churn and resetting it to 0, in case
+ * no joiner candidate is found to be suitable for the given
+ * configuration.
+ */
+ if (num_joined_pipes > 1)
+ crtc_state->joiner_pipes = GENMASK(crtc->pipe + num_joined_pipes - 1,
+ crtc->pipe);
+
+ ret = intel_dp_compute_link_for_joined_pipes(encoder, crtc_state, conn_state,
+ respect_downstream_limits);
+ if (ret == 0 || ret == -EDEADLK)
+ break;
+ }
+
+ if (ret < 0)
+ crtc_state->joiner_pipes = 0;
+
+ return ret;
+}
+
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -4293,20 +4431,24 @@ static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}
-static void intel_dp_read_dsc_dpcd(struct drm_dp_aux *aux,
- u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+static int intel_dp_read_dsc_dpcd(struct drm_dp_aux *aux,
+ u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
{
- if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd,
- DP_DSC_RECEIVER_CAP_SIZE) < 0) {
- drm_err(aux->drm_dev,
- "Failed to read DPCD register 0x%x\n",
- DP_DSC_SUPPORT);
- return;
+ int ret;
+
+ ret = drm_dp_dpcd_read_data(aux, DP_DSC_SUPPORT, dsc_dpcd,
+ DP_DSC_RECEIVER_CAP_SIZE);
+ if (ret) {
+ drm_dbg_kms(aux->drm_dev,
+ "Could not read DSC DPCD register 0x%x Error: %pe\n",
+ DP_DSC_SUPPORT, ERR_PTR(ret));
+ return ret;
}
drm_dbg_kms(aux->drm_dev, "DSC DPCD: %*ph\n",
DP_DSC_RECEIVER_CAP_SIZE,
dsc_dpcd);
+ return 0;
}
static void init_dsc_overall_throughput_limits(struct intel_connector *connector, bool is_branch)
@@ -4357,8 +4499,9 @@ void intel_dp_get_dsc_sink_cap(u8 dpcd_rev,
if (dpcd_rev < DP_DPCD_REV_14)
return;
- intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux,
- connector->dp.dsc_dpcd);
+ if (intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux,
+ connector->dp.dsc_dpcd) < 0)
+ return;
if (drm_dp_dpcd_readb(connector->dp.dsc_decompression_aux, DP_FEC_CAPABILITY,
&connector->dp.fec_capability) < 0) {
@@ -4388,7 +4531,9 @@ static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector *
if (edp_dpcd_rev < DP_EDP_14)
return;
- intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux, connector->dp.dsc_dpcd);
+ if (intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux,
+ connector->dp.dsc_dpcd) < 0)
+ return;
if (connector->dp.dsc_dpcd[0] & DP_DSC_DECOMPRESSION_IS_SUPPORTED)
init_dsc_overall_throughput_limits(connector, false);
@@ -4577,6 +4722,7 @@ static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(intel_dp);
+ int ret;
/* this function is meant to be called only once */
drm_WARN_ON(display->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
@@ -4616,6 +4762,12 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector
*/
intel_dp_init_source_oui(intel_dp);
+ /* Read the ALPM DPCD caps */
+ ret = drm_dp_dpcd_read_byte(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
+ &intel_dp->alpm_dpcd);
+ if (ret < 0)
+ return false;
+
/*
* This has to be called after intel_dp->edp_dpcd is filled, PSR checks
* for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
@@ -4784,6 +4936,24 @@ intel_dp_mst_disconnect(struct intel_dp *intel_dp)
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst.mgr, intel_dp->is_mst);
}
+#define INTEL_DP_DEVICE_SERVICE_IRQ_MASK_SST (DP_AUTOMATED_TEST_REQUEST | \
+ DP_CP_IRQ | \
+ DP_SINK_SPECIFIC_IRQ)
+
+#define INTEL_DP_DEVICE_SERVICE_IRQ_MASK_MST (DP_CP_IRQ | \
+ DP_DOWN_REP_MSG_RDY | \
+ DP_UP_REQ_MSG_RDY)
+
+#define INTEL_DP_LINK_SERVICE_IRQ_MASK_SST (RX_CAP_CHANGED | \
+ LINK_STATUS_CHANGED | \
+ HDMI_LINK_STATUS_CHANGED | \
+ CONNECTED_OFF_ENTRY_REQUESTED | \
+ DP_TUNNELING_IRQ)
+
+#define INTEL_DP_LINK_SERVICE_IRQ_MASK_MST (RX_CAP_CHANGED | \
+ LINK_STATUS_CHANGED | \
+ DP_TUNNELING_IRQ)
+
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi)
{
@@ -4820,6 +4990,79 @@ static bool intel_dp_ack_sink_irq_esi(struct intel_dp *intel_dp, u8 esi[4])
return false;
}
+/* Return %true if reading the ESI vector succeeded, %false otherwise. */
+static bool intel_dp_get_sink_irq_esi_sst(struct intel_dp *intel_dp, u8 esi[4])
+{
+ memset(esi, 0, 4);
+
+ /*
+ * TODO: For DP_DPCD_REV >= 0x12 read
+ * DP_SINK_COUNT_ESI and DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0.
+ */
+ if (drm_dp_dpcd_read_data(&intel_dp->aux, DP_SINK_COUNT, esi, 2) != 0)
+ return false;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
+ return true;
+
+ /* TODO: Read DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 as well */
+ if (drm_dp_dpcd_read_byte(&intel_dp->aux, DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &esi[3]) != 0)
+ return false;
+
+ return true;
+}
+
+/* Return %true if acking the ESI vector IRQ events succeeded, %false otherwise. */
+static bool intel_dp_ack_sink_irq_esi_sst(struct intel_dp *intel_dp, u8 esi[4])
+{
+ /*
+ * TODO: For DP_DPCD_REV >= 0x12 write
+ * DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0
+ */
+ if (drm_dp_dpcd_write_byte(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, esi[1]) != 0)
+ return false;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
+ return true;
+
+ /* TODO: Read DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 as well */
+ if (drm_dp_dpcd_write_byte(&intel_dp->aux, DP_LINK_SERVICE_IRQ_VECTOR_ESI0, esi[3]) != 0)
+ return false;
+
+ return true;
+}
+
+/*
+ * Return %true if reading the ESI vector and acking the ESI IRQ events succeeded,
+ * %false otherwise.
+ */
+static bool intel_dp_get_and_ack_sink_irq_esi_sst(struct intel_dp *intel_dp, u8 esi[4])
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+ if (!intel_dp_get_sink_irq_esi_sst(intel_dp, esi))
+ return false;
+
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s] DPRX ESI: %4ph\n",
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name,
+ esi);
+
+ esi[1] &= INTEL_DP_DEVICE_SERVICE_IRQ_MASK_SST;
+ esi[3] &= INTEL_DP_LINK_SERVICE_IRQ_MASK_SST;
+
+ if (mem_is_zero(&esi[1], 3))
+ return true;
+
+ if (!intel_dp_ack_sink_irq_esi_sst(intel_dp, esi))
+ return false;
+
+ return true;
+}
+
bool
intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@@ -5312,23 +5555,7 @@ intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
}
}
-static bool intel_dp_mst_link_status(struct intel_dp *intel_dp)
-{
- struct intel_display *display = to_intel_display(intel_dp);
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- u8 link_status[DP_LINK_STATUS_SIZE] = {};
- const size_t esi_link_status_size = DP_LINK_STATUS_SIZE - 2;
-
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS_ESI, link_status,
- esi_link_status_size) != esi_link_status_size) {
- drm_err(display->drm,
- "[ENCODER:%d:%s] Failed to read link status\n",
- encoder->base.base.id, encoder->base.name);
- return false;
- }
-
- return intel_dp_link_ok(intel_dp, link_status);
-}
+static bool intel_dp_handle_link_service_irq(struct intel_dp *intel_dp, u8 irq_mask);
/**
* intel_dp_check_mst_status - service any pending MST interrupts, check link status
@@ -5348,53 +5575,51 @@ static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *encoder = &dig_port->base;
- bool link_ok = true;
+ bool force_retrain = intel_dp->link.force_retrain;
bool reprobe_needed = false;
for (;;) {
u8 esi[4] = {};
u8 ack[4] = {};
+ bool new_irqs;
if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
drm_dbg_kms(display->drm,
"failed to get ESI - device may have failed\n");
- link_ok = false;
+ reprobe_needed = true;
break;
}
drm_dbg_kms(display->drm, "DPRX ESI: %4ph\n", esi);
- if (intel_dp_mst_active_streams(intel_dp) > 0 && link_ok &&
- esi[3] & LINK_STATUS_CHANGED) {
- if (!intel_dp_mst_link_status(intel_dp))
- link_ok = false;
- ack[3] |= LINK_STATUS_CHANGED;
- }
+ ack[3] |= esi[3] & INTEL_DP_LINK_SERVICE_IRQ_MASK_MST;
intel_dp_mst_hpd_irq(intel_dp, esi, ack);
- if (esi[3] & DP_TUNNELING_IRQ) {
- if (drm_dp_tunnel_handle_irq(display->dp_tunnel_mgr,
- &intel_dp->aux))
- reprobe_needed = true;
- ack[3] |= DP_TUNNELING_IRQ;
- }
+ new_irqs = !mem_is_zero(ack, sizeof(ack));
- if (mem_is_zero(ack, sizeof(ack)))
- break;
+ drm_WARN_ON(display->drm, ack[1] & ~INTEL_DP_DEVICE_SERVICE_IRQ_MASK_MST);
+ drm_WARN_ON(display->drm, ack[3] & ~INTEL_DP_LINK_SERVICE_IRQ_MASK_MST);
- if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
+ if (new_irqs && !intel_dp_ack_sink_irq_esi(intel_dp, ack))
drm_dbg_kms(display->drm, "Failed to ack ESI\n");
if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY))
drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst.mgr);
- }
- if (!link_ok || intel_dp->link.force_retrain)
- intel_encoder_link_check_queue_work(encoder, 0);
+ if (force_retrain) {
+ /* Defer forced retraining to the regular link status check. */
+ ack[3] |= LINK_STATUS_CHANGED;
+ force_retrain = false;
+ }
+
+ if (intel_dp_handle_link_service_irq(intel_dp, ack[3]))
+ reprobe_needed = true;
+
+ if (!new_irqs)
+ break;
+ }
return !reprobe_needed;
}
@@ -5423,6 +5648,30 @@ intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
}
}
+static int
+intel_dp_read_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ int err;
+
+ memset(link_status, 0, DP_LINK_STATUS_SIZE);
+
+ if (intel_dp_mst_active_streams(intel_dp) > 0)
+ err = drm_dp_dpcd_read_data(&intel_dp->aux, DP_LANE0_1_STATUS_ESI,
+ link_status, DP_LINK_STATUS_SIZE - 2);
+ else
+ err = drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
+ link_status);
+
+ if (err)
+ return err;
+
+ if (link_status[DP_LANE_ALIGN_STATUS_UPDATED - DP_LANE0_1_STATUS] &
+ DP_DOWNSTREAM_PORT_STATUS_CHANGED)
+ WRITE_ONCE(intel_dp->downstream_port_changed, true);
+
+ return 0;
+}
+
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
@@ -5445,8 +5694,7 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
if (intel_dp->link.force_retrain)
return true;
- if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
- link_status) < 0)
+ if (intel_dp_read_link_status(intel_dp, link_status) < 0)
return false;
/*
@@ -5643,55 +5891,57 @@ void intel_dp_check_link_state(struct intel_dp *intel_dp)
intel_encoder_link_check_queue_work(encoder, 0);
}
-static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
+static void intel_dp_handle_device_service_irq(struct intel_dp *intel_dp, u8 irq_mask)
{
struct intel_display *display = to_intel_display(intel_dp);
- u8 val;
- if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
- return;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
- return;
-
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
+ drm_WARN_ON(display->drm, irq_mask & ~INTEL_DP_DEVICE_SERVICE_IRQ_MASK_SST);
- if (val & DP_AUTOMATED_TEST_REQUEST)
+ if (irq_mask & DP_AUTOMATED_TEST_REQUEST)
intel_dp_test_request(intel_dp);
- if (val & DP_CP_IRQ)
+ if (irq_mask & DP_CP_IRQ)
intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
- if (val & DP_SINK_SPECIFIC_IRQ)
+ if (irq_mask & DP_SINK_SPECIFIC_IRQ)
drm_dbg_kms(display->drm, "Sink specific irq unhandled\n");
}
-static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
+
+/*
+ * Return %true if a full connector reprobe is required after handling a link
+ * service IRQ event.
+ */
+static bool intel_dp_handle_link_service_irq(struct intel_dp *intel_dp, u8 irq_mask)
{
struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool reprobe_needed = false;
- u8 val;
- if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
- return false;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
- return false;
+ drm_WARN_ON(display->drm, irq_mask & ~(INTEL_DP_LINK_SERVICE_IRQ_MASK_SST |
+ INTEL_DP_LINK_SERVICE_IRQ_MASK_MST));
- if ((val & DP_TUNNELING_IRQ) &&
- drm_dp_tunnel_handle_irq(display->dp_tunnel_mgr,
- &intel_dp->aux))
+ if (irq_mask & RX_CAP_CHANGED)
reprobe_needed = true;
- if (drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
- return reprobe_needed;
+ if (irq_mask & LINK_STATUS_CHANGED)
+ intel_dp_check_link_state(intel_dp);
- if (val & HDMI_LINK_STATUS_CHANGED)
+ if (irq_mask & HDMI_LINK_STATUS_CHANGED)
intel_dp_handle_hdmi_link_status_change(intel_dp);
+ if (irq_mask & CONNECTED_OFF_ENTRY_REQUESTED)
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s] Allowing connected off request\n",
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name);
+
+ if ((irq_mask & DP_TUNNELING_IRQ) &&
+ drm_dp_tunnel_handle_irq(display->dp_tunnel_mgr,
+ &intel_dp->aux))
+ reprobe_needed = true;
+
return reprobe_needed;
}
@@ -5711,32 +5961,42 @@ static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
- u8 old_sink_count = intel_dp->sink_count;
bool reprobe_needed = false;
- bool ret;
+ u8 esi[4] = {};
intel_dp_test_reset(intel_dp);
+ if (!intel_dp_get_and_ack_sink_irq_esi_sst(intel_dp, esi))
+ return false;
+
/*
- * Now read the DPCD to see if it's actually running
* If the current value of sink count doesn't match with
- * the value that was stored earlier or dpcd read failed
- * we need to do full detection
+ * the value that was stored earlier we need to do full
+ * detection.
*/
- ret = intel_dp_get_dpcd(intel_dp);
-
- if ((old_sink_count != intel_dp->sink_count) || !ret) {
+ if (intel_dp_has_sink_count(intel_dp) &&
+ DP_GET_SINK_COUNT(esi[0]) != intel_dp->sink_count)
/* No need to proceed if we are going to do full detect */
return false;
- }
- intel_dp_check_device_service_irq(intel_dp);
- reprobe_needed = intel_dp_check_link_service_irq(intel_dp);
+ intel_dp_handle_device_service_irq(intel_dp, esi[1]);
+
+ /*
+ * Force checking the link status for DPCD_REV < 1.2
+ * TODO: let the link status check depend on LINK_STATUS_CHANGED
+ * or intel_dp->link.force_retrain for DPCD_REV >= 1.2
+ */
+ esi[3] |= LINK_STATUS_CHANGED;
+ if (intel_dp_handle_link_service_irq(intel_dp, esi[3]))
+ reprobe_needed = true;
/* Handle CEC interrupts, if any */
drm_dp_cec_irq(&intel_dp->aux);
- intel_dp_check_link_state(intel_dp);
+ if (READ_ONCE(intel_dp->downstream_port_changed)) {
+ WRITE_ONCE(intel_dp->downstream_port_changed, false);
+ reprobe_needed = true;
+ }
intel_psr_short_pulse(intel_dp);
@@ -5763,6 +6023,8 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
if (drm_WARN_ON(display->drm, intel_dp_is_edp(intel_dp)))
return connector_status_connected;
+ WRITE_ONCE(intel_dp->downstream_port_changed, false);
+
intel_lspcon_resume(dig_port);
if (!intel_dp_get_dpcd(intel_dp))
@@ -6189,8 +6451,6 @@ intel_dp_detect(struct drm_connector *_connector,
if (intel_dp_is_edp(intel_dp) || connector->detect_edid)
status = connector_status_connected;
- intel_dp_check_device_service_irq(intel_dp);
-
out_unset_edid:
if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
@@ -7086,6 +7346,8 @@ int intel_dp_compute_config_late(struct intel_encoder *encoder,
if (ret)
return ret;
+ intel_alpm_lobf_compute_config_late(intel_dp, crtc_state);
+
return 0;
}
@@ -7134,3 +7396,22 @@ int intel_dp_sdp_min_guardband(const struct intel_crtc_state *crtc_state,
return sdp_guardband;
}
+
+bool intel_dp_joiner_candidate_valid(struct intel_connector *connector,
+ int hdisplay,
+ int num_joined_pipes)
+{
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+
+ if (!intel_dp_can_join(intel_dp, num_joined_pipes))
+ return false;
+
+ if (hdisplay > num_joined_pipes * intel_dp_max_hdisplay_per_pipe(display))
+ return false;
+
+ if (connector->force_joined_pipes && connector->force_joined_pipes != num_joined_pipes)
+ return false;
+
+ return true;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 25bfbfd291b0..2849b9ecdc71 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -75,6 +75,7 @@ int intel_dp_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state);
bool intel_dp_needs_8b10b_fec(const struct intel_crtc_state *crtc_state,
bool dsc_enabled_on_crtc);
+void intel_dp_dsc_reset_config(struct intel_crtc_state *crtc_state);
int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state,
@@ -143,6 +144,8 @@ bool intel_digital_port_connected(struct intel_encoder *encoder);
bool intel_digital_port_connected_locked(struct intel_encoder *encoder);
int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
u8 dsc_max_bpc);
+int intel_dp_compute_min_compressed_bpp_x16(struct intel_connector *connector,
+ enum intel_output_format output_format);
bool intel_dp_mode_valid_with_dsc(struct intel_connector *connector,
int link_clock, int lane_count,
int mode_clock, int mode_hdisplay,
@@ -153,10 +156,6 @@ bool intel_dp_dsc_valid_compressed_bpp(struct intel_dp *intel_dp, int bpp_x16);
u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
int mode_clock, int mode_hdisplay,
int num_joined_pipes);
-int intel_dp_num_joined_pipes(struct intel_dp *intel_dp,
- struct intel_connector *connector,
- int hdisplay, int clock);
-
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
{
return ~((1 << lane_count) - 1) & 0xf;
@@ -225,5 +224,18 @@ int intel_dp_compute_config_late(struct intel_encoder *encoder,
struct drm_connector_state *conn_state);
int intel_dp_sdp_min_guardband(const struct intel_crtc_state *crtc_state,
bool assume_all_enabled);
+int intel_dp_max_hdisplay_per_pipe(struct intel_display *display);
+bool intel_dp_dotclk_valid(struct intel_display *display,
+ int target_clock,
+ int htotal,
+ int dsc_slice_count,
+ int num_joined_pipes);
+bool intel_dp_joiner_candidate_valid(struct intel_connector *connector,
+ int hdisplay,
+ int num_joined_pipes);
+
+#define for_each_joiner_candidate(__connector, __mode, __num_joined_pipes) \
+ for ((__num_joined_pipes) = 1; (__num_joined_pipes) <= (I915_MAX_PIPES); (__num_joined_pipes)++) \
+ for_each_if(intel_dp_joiner_candidate_valid(__connector, (__mode)->hdisplay, __num_joined_pipes))
#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index eb05ef4bd9f6..a7b186d0e3c4 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -368,6 +368,16 @@ static const char *dpcd_vs_pwm_str(bool aux)
return aux ? "DPCD" : "PWM";
}
+static const char *backlight_unit_str(struct intel_panel *panel)
+{
+ if (panel->backlight.edp.vesa.info.luminance_set)
+ return "NITS";
+ else if (panel->backlight.edp.vesa.info.aux_set)
+ return "Brightness %";
+ else
+ return "PWM";
+}
+
static void
intel_dp_aux_write_panel_luminance_override(struct intel_connector *connector)
{
@@ -459,7 +469,7 @@ static u32 intel_dp_aux_vesa_get_backlight(struct intel_connector *connector, en
return val / 1000;
}
- return connector->panel.backlight.level;
+ return panel->backlight.level;
}
static void
@@ -486,7 +496,8 @@ intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_panel *panel = &connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- if (!panel->backlight.edp.vesa.info.aux_enable) {
+ if (!(panel->backlight.edp.vesa.info.aux_enable ||
+ panel->backlight.edp.vesa.info.luminance_set)) {
u32 pwm_level;
if (!panel->backlight.edp.vesa.info.aux_set)
@@ -510,7 +521,8 @@ static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state
drm_edp_backlight_disable(&intel_dp->aux, &panel->backlight.edp.vesa.info);
- if (!panel->backlight.edp.vesa.info.aux_enable)
+ if (!(panel->backlight.edp.vesa.info.aux_enable ||
+ panel->backlight.edp.vesa.info.luminance_set))
panel->backlight.pwm_funcs->disable(old_conn_state,
intel_backlight_invert_pwm_level(connector, 0));
}
@@ -537,11 +549,14 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n",
connector->base.base.id, connector->base.name,
- dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable));
+ dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable ||
+ panel->backlight.edp.vesa.info.luminance_set));
drm_dbg_kms(display->drm,
- "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n",
+ "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s using %s values\n",
connector->base.base.id, connector->base.name,
- dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set));
+ dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set ||
+ panel->backlight.edp.vesa.info.luminance_set),
+ backlight_unit_str(panel));
if (!panel->backlight.edp.vesa.info.aux_set ||
!panel->backlight.edp.vesa.info.aux_enable) {
@@ -564,9 +579,6 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
}
panel->backlight.level = intel_dp_aux_vesa_get_backlight(connector, 0);
panel->backlight.enabled = panel->backlight.level != 0;
- drm_dbg_kms(display->drm,
- "[CONNECTOR:%d:%s] AUX VESA Nits backlight level is controlled through DPCD\n",
- connector->base.base.id, connector->base.name);
} else if (panel->backlight.edp.vesa.info.aux_set) {
panel->backlight.max = panel->backlight.edp.vesa.info.max;
panel->backlight.min = 0;
@@ -644,9 +656,10 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_device *dev = connector->base.dev;
struct intel_panel *panel = &connector->panel;
- bool try_intel_interface = false, try_vesa_interface = false;
+ bool try_intel_interface = false;
- /* Check the VBT and user's module parameters to figure out which
+ /*
+ * Check the VBT and user's module parameters to figure out which
* interfaces to probe
*/
switch (display->params.enable_dpcd_backlight) {
@@ -655,7 +668,6 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
case INTEL_DP_AUX_BACKLIGHT_AUTO:
switch (panel->vbt.backlight.type) {
case INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE:
- try_vesa_interface = true;
break;
case INTEL_BACKLIGHT_DISPLAY_DDI:
try_intel_interface = true;
@@ -668,20 +680,12 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
if (panel->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE)
try_intel_interface = true;
- try_vesa_interface = true;
- break;
- case INTEL_DP_AUX_BACKLIGHT_FORCE_VESA:
- try_vesa_interface = true;
break;
case INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL:
try_intel_interface = true;
break;
}
- /* For eDP 1.5 and above we are supposed to use VESA interface for brightness control */
- if (intel_dp->edp_dpcd[0] >= DP_EDP_15)
- try_vesa_interface = true;
-
/*
* Since Intel has their own backlight control interface, the majority of machines out there
* using DPCD backlight controls with Intel GPUs will be using this interface as opposed to
@@ -694,16 +698,19 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
* panel with Intel's OUI - which is also required for us to be able to detect Intel's
* backlight interface at all. This means that the only sensible way for us to detect both
* interfaces is to probe for Intel's first, and VESA's second.
+ *
+ * Also there is a chance some VBTs may advertise false Intel backlight support even if the
+ * TCON DPCD says otherwise. This means we keep VESA interface as fallback in that case.
*/
- if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector) &&
- intel_dp->edp_dpcd[0] <= DP_EDP_14b) {
+ if (try_intel_interface && intel_dp->edp_dpcd[0] <= DP_EDP_14b &&
+ intel_dp_aux_supports_hdr_backlight(connector)) {
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Using Intel proprietary eDP backlight controls\n",
connector->base.base.id, connector->base.name);
panel->backlight.funcs = &intel_dp_hdr_bl_funcs;
return 0;
}
- if (try_vesa_interface && intel_dp_aux_supports_vesa_backlight(connector)) {
+ if (intel_dp_aux_supports_vesa_backlight(connector)) {
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Using VESA eDP backlight controls\n",
connector->base.base.id, connector->base.name);
panel->backlight.funcs = &intel_dp_vesa_bl_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 70a21685a3e1..887b6de14e46 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -43,6 +43,7 @@
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
+#include "intel_display_wa.h"
#include "intel_dp.h"
#include "intel_dp_hdcp.h"
#include "intel_dp_link_training.h"
@@ -595,39 +596,22 @@ mst_stream_compute_config_limits(struct intel_dp *intel_dp,
dsc);
}
-static int mst_stream_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+static int mst_stream_compute_link_for_joined_pipes(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state,
+ int num_joined_pipes)
{
struct intel_display *display = to_intel_display(encoder);
- struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dp *intel_dp = to_primary_dp(encoder);
- struct intel_connector *connector =
- to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
struct link_config_limits limits;
bool dsc_needed, joiner_needs_dsc;
- int num_joined_pipes;
int ret = 0;
- if (pipe_config->fec_enable &&
- !intel_dp_supports_fec(intel_dp, connector, pipe_config))
- return -EINVAL;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return -EINVAL;
-
- num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector,
- adjusted_mode->crtc_hdisplay,
- adjusted_mode->crtc_clock);
- if (num_joined_pipes > 1)
- pipe_config->joiner_pipes = GENMASK(crtc->pipe + num_joined_pipes - 1, crtc->pipe);
-
- pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
- pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
- pipe_config->has_pch_encoder = false;
+ intel_dp_dsc_reset_config(pipe_config);
joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);
@@ -642,7 +626,12 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
if (ret == -EDEADLK)
return ret;
- if (ret)
+ if (ret ||
+ !intel_dp_dotclk_valid(display,
+ adjusted_mode->clock,
+ adjusted_mode->htotal,
+ 0,
+ num_joined_pipes))
dsc_needed = true;
}
@@ -653,6 +642,8 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
/* enable compression if the mode doesn't fit available BW */
if (dsc_needed) {
+ int dsc_slice_count;
+
drm_dbg_kms(display->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
str_yes_no(ret), str_yes_no(joiner_needs_dsc),
str_yes_no(intel_dp->force_dsc_en));
@@ -683,6 +674,66 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
conn_state, &limits,
pipe_config->dp_m_n.tu);
+ if (ret)
+ return ret;
+
+ dsc_slice_count = intel_dp_mst_dsc_get_slice_count(connector, pipe_config);
+
+ if (!intel_dp_dotclk_valid(display,
+ adjusted_mode->clock,
+ adjusted_mode->htotal,
+ dsc_slice_count,
+ num_joined_pipes))
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ ret = intel_dp_compute_min_hblank(pipe_config, conn_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mst_stream_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
+ int num_joined_pipes;
+ int ret = -EINVAL;
+
+ if (pipe_config->fec_enable &&
+ !intel_dp_supports_fec(intel_dp, connector, pipe_config))
+ return -EINVAL;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return -EINVAL;
+
+ pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ pipe_config->has_pch_encoder = false;
+
+ for_each_joiner_candidate(connector, adjusted_mode, num_joined_pipes) {
+ if (num_joined_pipes > 1)
+ pipe_config->joiner_pipes = GENMASK(crtc->pipe + num_joined_pipes - 1,
+ crtc->pipe);
+
+ ret = mst_stream_compute_link_for_joined_pipes(encoder,
+ pipe_config,
+ conn_state,
+ num_joined_pipes);
+ if (ret == 0 || ret == -EDEADLK)
+ break;
}
if (ret)
@@ -695,10 +746,6 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
- ret = intel_dp_compute_min_hblank(pipe_config, conn_state);
- if (ret)
- return ret;
-
intel_vrr_compute_config(pipe_config, conn_state);
intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
@@ -1230,7 +1277,7 @@ static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
set |= DP_MST_FEC_BS_JITTER_WA(crtc_state->cpu_transcoder);
/* Wa_14014143976:adlp */
- if (IS_DISPLAY_STEP(display, STEP_E0, STEP_FOREVER)) {
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14014143976)) {
if (intel_dp_is_uhbr(crtc_state))
set |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);
else if (crtc_state->fec_enable)
@@ -1419,11 +1466,11 @@ mst_connector_mode_valid_ctx(struct drm_connector *_connector,
struct intel_dp *intel_dp = connector->mst.dp;
struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst.mgr;
struct drm_dp_mst_port *port = connector->mst.port;
- const int min_bpp = 18;
- int max_dotclk = display->cdclk.max_dotclk_freq;
int max_rate, mode_rate, max_lanes, max_link_clock;
unsigned long bw_overhead_flags =
DRM_DP_BW_OVERHEAD_MST | DRM_DP_BW_OVERHEAD_SSC_REF_CLK;
+ int min_link_bpp_x16 = fxp_q4_from_int(18);
+ static bool supports_dsc;
int ret;
bool dsc = false;
int target_clock = mode->clock;
@@ -1448,6 +1495,13 @@ mst_connector_mode_valid_ctx(struct drm_connector *_connector,
return 0;
}
+ supports_dsc = intel_dp_has_dsc(connector) &&
+ drm_dp_sink_supports_fec(connector->dp.fec_capability);
+
+ if (supports_dsc && connector->mst.port->passthrough_aux)
+ min_link_bpp_x16 = intel_dp_compute_min_compressed_bpp_x16(connector,
+ INTEL_OUTPUT_FORMAT_RGB);
+
max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
@@ -1455,12 +1509,19 @@ mst_connector_mode_valid_ctx(struct drm_connector *_connector,
max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(max_link_clock, max_lanes,
mode->clock, mode->hdisplay,
- fxp_q4_from_int(min_bpp),
+ min_link_bpp_x16,
bw_overhead_flags);
/*
* TODO:
* - Also check if compression would allow for the mode
+ * in non-passthrough mode, i.e. the last branch device
+ * decompressing the stream. This makes a difference only if
+ * the BW on the link between the last branch device and the
+ * sink is higher than the BW on the whole MST path from the
+ * source to the last branch device. Relying on the extra BW
+ * this provides also requires the
+ * DFP_Link_Available_Payload_Bandwidth_Number described below.
* - Calculate the overhead using drm_dp_bw_overhead() /
* drm_dp_bw_channel_coding_efficiency(), similarly to the
* compute config code, as drm_dp_calc_pbn_mode() doesn't
@@ -1470,49 +1531,73 @@ mst_connector_mode_valid_ctx(struct drm_connector *_connector,
* corresponding link capabilities of the sink) in case the
* stream is uncompressed for it by the last branch device.
*/
- num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector,
- mode->hdisplay, target_clock);
- max_dotclk *= num_joined_pipes;
-
ret = drm_modeset_lock(&mgr->base.lock, ctx);
if (ret)
return ret;
- if (mode_rate > max_rate || mode->clock > max_dotclk ||
- drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) {
+ if (mode_rate > max_rate ||
+ drm_dp_calc_pbn_mode(mode->clock, min_link_bpp_x16) > port->full_pbn) {
*status = MODE_CLOCK_HIGH;
return 0;
}
- if (intel_dp_has_dsc(connector) && drm_dp_sink_supports_fec(connector->dp.fec_capability)) {
- /*
- * TBD pass the connector BPC,
- * for now U8_MAX so that max BPC on that platform would be picked
- */
- int pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, U8_MAX);
+ *status = MODE_CLOCK_HIGH;
+ for_each_joiner_candidate(connector, mode, num_joined_pipes) {
+ int dsc_slice_count = 0;
- if (!drm_dp_is_uhbr_rate(max_link_clock))
- bw_overhead_flags |= DRM_DP_BW_OVERHEAD_FEC;
+ if (supports_dsc) {
+ /*
+			 * TBD: pass the actual connector BPC; for now use U8_MAX
+			 * so that the maximum BPC for the platform is picked
+ */
+ int pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, U8_MAX);
+
+ dsc_slice_count = intel_dp_dsc_get_slice_count(connector,
+ mode->clock,
+ mode->hdisplay,
+ num_joined_pipes);
+
+ if (!drm_dp_is_uhbr_rate(max_link_clock))
+ bw_overhead_flags |= DRM_DP_BW_OVERHEAD_FEC;
+
+ dsc = intel_dp_mode_valid_with_dsc(connector,
+ max_link_clock, max_lanes,
+ target_clock, mode->hdisplay,
+ num_joined_pipes,
+ INTEL_OUTPUT_FORMAT_RGB, pipe_bpp,
+ bw_overhead_flags);
+ }
- dsc = intel_dp_mode_valid_with_dsc(connector,
- max_link_clock, max_lanes,
- target_clock, mode->hdisplay,
- num_joined_pipes,
- INTEL_OUTPUT_FORMAT_RGB, pipe_bpp,
- bw_overhead_flags);
- }
+ if (intel_dp_joiner_needs_dsc(display, num_joined_pipes) && !dsc) {
+ *status = MODE_CLOCK_HIGH;
+ continue;
+ }
- if (intel_dp_joiner_needs_dsc(display, num_joined_pipes) && !dsc) {
- *status = MODE_CLOCK_HIGH;
- return 0;
- }
+ if (mode_rate > max_rate && !dsc) {
+ *status = MODE_CLOCK_HIGH;
+ continue;
+ }
- if (mode_rate > max_rate && !dsc) {
- *status = MODE_CLOCK_HIGH;
- return 0;
+ *status = intel_mode_valid_max_plane_size(display, mode, num_joined_pipes);
+
+ if (*status != MODE_OK)
+ continue;
+
+ if (!dsc)
+ dsc_slice_count = 0;
+
+ if (!intel_dp_dotclk_valid(display,
+ mode->clock,
+ mode->htotal,
+ dsc_slice_count,
+ num_joined_pipes)) {
+ *status = MODE_CLOCK_HIGH;
+ continue;
+ }
+
+ break;
}
- *status = intel_mode_valid_max_plane_size(display, mode, num_joined_pipes);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
index 83865c02d477..1fd1ac8d556d 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
@@ -54,31 +54,20 @@ static int kbytes_to_mbits(int kbytes)
return DIV_ROUND_UP(kbytes * 8, 1000);
}
-static int get_current_link_bw(struct intel_dp *intel_dp,
- bool *below_dprx_bw)
+static int get_current_link_bw(struct intel_dp *intel_dp)
{
int rate = intel_dp_max_common_rate(intel_dp);
int lane_count = intel_dp_max_common_lane_count(intel_dp);
- int bw;
- bw = intel_dp_max_link_data_rate(intel_dp, rate, lane_count);
- *below_dprx_bw = bw < drm_dp_max_dprx_data_rate(rate, lane_count);
-
- return bw;
+ return intel_dp_max_link_data_rate(intel_dp, rate, lane_count);
}
-static int update_tunnel_state(struct intel_dp *intel_dp)
+static int __update_tunnel_state(struct intel_dp *intel_dp, bool force_sink_update)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- bool old_bw_below_dprx;
- bool new_bw_below_dprx;
- int old_bw;
- int new_bw;
int ret;
- old_bw = get_current_link_bw(intel_dp, &old_bw_below_dprx);
-
ret = drm_dp_tunnel_update_state(intel_dp->tunnel);
if (ret < 0) {
drm_dbg_kms(display->drm,
@@ -90,18 +79,26 @@ static int update_tunnel_state(struct intel_dp *intel_dp)
return ret;
}
- if (ret == 0 ||
- !drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel))
+ if (!force_sink_update &&
+ (ret == 0 || !drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel)))
return 0;
intel_dp_update_sink_caps(intel_dp);
- new_bw = get_current_link_bw(intel_dp, &new_bw_below_dprx);
+ return 0;
+}
+
+static bool has_tunnel_bw_changed(struct intel_dp *intel_dp, int old_bw)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ int new_bw;
+
+ new_bw = get_current_link_bw(intel_dp);
/* Suppress the notification if the mode list can't change due to bw. */
- if (old_bw_below_dprx == new_bw_below_dprx &&
- !new_bw_below_dprx)
- return 0;
+ if (old_bw == new_bw)
+ return false;
drm_dbg_kms(display->drm,
"[DPTUN %s][ENCODER:%d:%s] Notify users about BW change: %d -> %d\n",
@@ -109,7 +106,29 @@ static int update_tunnel_state(struct intel_dp *intel_dp)
encoder->base.base.id, encoder->base.name,
kbytes_to_mbits(old_bw), kbytes_to_mbits(new_bw));
- return 1;
+ return true;
+}
+
+/*
+ * Returns:
+ * - 0 in case of success - if there wasn't any change in the tunnel state
+ * requiring a user notification
+ * - 1 in case of success - if there was a change in the tunnel state
+ * requiring a user notification
+ * - Negative error code if updating the tunnel state failed
+ */
+static int update_tunnel_state(struct intel_dp *intel_dp)
+{
+ int old_bw;
+ int err;
+
+ old_bw = get_current_link_bw(intel_dp);
+
+ err = __update_tunnel_state(intel_dp, false);
+ if (err)
+ return err;
+
+ return has_tunnel_bw_changed(intel_dp, old_bw) ? 1 : 0;
}
/*
@@ -150,11 +169,9 @@ static int allocate_initial_tunnel_bw_for_pipes(struct intel_dp *intel_dp, u8 pi
drm_dp_tunnel_name(intel_dp->tunnel),
encoder->base.base.id, encoder->base.name,
ERR_PTR(err));
-
- return err;
}
- return update_tunnel_state(intel_dp);
+ return err;
}
static int allocate_initial_tunnel_bw(struct intel_dp *intel_dp,
@@ -170,13 +187,24 @@ static int allocate_initial_tunnel_bw(struct intel_dp *intel_dp,
return allocate_initial_tunnel_bw_for_pipes(intel_dp, pipe_mask);
}
+/*
+ * Returns:
+ * - 0 in case of success - after any tunnel detected and added to @intel_dp
+ * - 1 in case of success - after a tunnel detected and added to @intel_dp,
+ * where the link BW via the tunnel changed in a way requiring a user
+ * notification
+ * - Negative error code if the tunnel detection failed
+ */
static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_dp_tunnel *tunnel;
+ int old_bw;
int ret;
+ old_bw = get_current_link_bw(intel_dp);
+
tunnel = drm_dp_tunnel_detect(display->dp_tunnel_mgr,
&intel_dp->aux);
if (IS_ERR(tunnel))
@@ -200,10 +228,17 @@ static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acqui
}
ret = allocate_initial_tunnel_bw(intel_dp, ctx);
- if (ret < 0)
+ if (ret < 0) {
intel_dp_tunnel_destroy(intel_dp);
- return ret;
+ return ret;
+ }
+
+ ret = __update_tunnel_state(intel_dp, true);
+ if (ret)
+ return ret;
+
+ return has_tunnel_bw_changed(intel_dp, old_bw) ? 1 : 0;
}
/**
@@ -221,9 +256,12 @@ static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acqui
* tunnel. If the tunnel's state change requires this - for instance the
* tunnel's group ID has changed - the tunnel will be dropped and recreated.
*
- * Return 0 in case of success - after any tunnel detected and added to
- * @intel_dp - 1 in case the BW on an already existing tunnel has changed in a
- * way that requires notifying user space.
+ * Returns:
+ * - 0 in case of success - after any tunnel detected and added to @intel_dp
+ * - 1 in case the link BW via the new or an already existing tunnel has changed
+ * in a way that requires notifying user space
+ * - Negative error code if creating a new tunnel or updating the
+ *   tunnel state failed
*/
int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index a4f372c9e6fc..8433e3ff0319 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -1219,6 +1219,7 @@ static int xe3plpd_crtc_compute_clock(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
+ struct intel_display *display = to_intel_display(encoder);
int ret;
ret = intel_lt_phy_pll_calc_state(crtc_state, encoder);
@@ -1227,7 +1228,7 @@ static int xe3plpd_crtc_compute_clock(struct intel_atomic_state *state,
/* TODO: Do the readback via intel_compute_shared_dplls() */
crtc_state->port_clock =
- intel_lt_phy_calc_port_clock(encoder, crtc_state);
+ intel_lt_phy_calc_port_clock(display, &crtc_state->dpll_hw_state.ltpll);
crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
@@ -2333,3 +2334,8 @@ void assert_pll_disabled(struct intel_display *display, enum pipe pipe)
{
assert_pll(display, pipe, false);
}
+
+bool intel_dpll_clock_matches(int clock1, int clock2)
+{
+ return abs(clock1 - clock2) <= 1;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index 3444a2dd3166..8cd0d17e974e 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -48,5 +48,6 @@ void chv_crtc_clock_get(struct intel_crtc_state *crtc_state);
void assert_pll_enabled(struct intel_display *display, enum pipe pipe);
void assert_pll_disabled(struct intel_display *display, enum pipe pipe);
+bool intel_dpll_clock_matches(int clock1, int clock2);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 9aa84a430f09..f35a9252f4e1 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -38,6 +38,7 @@
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_hti.h"
+#include "intel_lt_phy.h"
#include "intel_mg_phy_regs.h"
#include "intel_pch_refclk.h"
#include "intel_step.h"
@@ -4613,7 +4614,7 @@ void intel_dpll_init(struct intel_display *display)
dpll_mgr = &pch_pll_mgr;
if (!dpll_mgr)
- return;
+ goto out_verify;
dpll_info = dpll_mgr->dpll_info;
@@ -4632,6 +4633,14 @@ void intel_dpll_init(struct intel_display *display)
display->dpll.mgr = dpll_mgr;
display->dpll.num_dpll = i;
+
+out_verify:
+ /*
+	 * TODO: Convert these to a KUnit test or make them dependent on
+	 * a kconfig debug option.
+ */
+ intel_cx0pll_verify_plls(display);
+ intel_lt_phy_verify_plls(display);
}
/**
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 5b71c860515f..4cc14ce5eebe 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -241,14 +241,12 @@ struct intel_mpllb_state {
};
struct intel_c10pll_state {
- u32 clock; /* in KHz */
u8 tx;
u8 cmn;
u8 pll[20];
};
struct intel_c20pll_state {
- u32 clock; /* in kHz */
u16 tx[3];
u16 cmn[4];
union {
@@ -274,7 +272,6 @@ struct intel_cx0pll_state {
};
struct intel_lt_phy_pll_state {
- u32 clock; /* in kHz */
u8 addr_msb[13];
u8 addr_lsb[13];
u8 data[13][4];
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index da472371c7d7..145dc9511116 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -1,202 +1,51 @@
// SPDX-License-Identifier: MIT
/*
- * Copyright © 2021 Intel Corporation
+ * Copyright © 2023 Intel Corporation
*/
-#include <drm/drm_print.h>
-
-#include "gem/i915_gem_domain.h"
-#include "gem/i915_gem_internal.h"
-#include "gem/i915_gem_lmem.h"
-#include "gt/gen8_ppgtt.h"
-
-#include "i915_drv.h"
-#include "intel_display_core.h"
-#include "intel_display_rpm.h"
+#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
-#include "intel_fb.h"
-
-struct i915_dpt {
- struct i915_address_space vm;
-
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- void __iomem *iomem;
-};
-
-#define i915_is_dpt(vm) ((vm)->is_dpt)
-
-static inline struct i915_dpt *
-i915_vm_to_dpt(struct i915_address_space *vm)
-{
- BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
- drm_WARN_ON(&vm->i915->drm, !i915_is_dpt(vm));
- return container_of(vm, struct i915_dpt, vm);
-}
-
-static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
-{
- writeq(pte, addr);
-}
-
-static void dpt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- unsigned int pat_index,
- u32 flags)
-{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
- gen8_pte_t __iomem *base = dpt->iomem;
-
- gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
- vm->pte_encode(addr, pat_index, flags));
-}
-
-static void dpt_insert_entries(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res,
- unsigned int pat_index,
- u32 flags)
-{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
- gen8_pte_t __iomem *base = dpt->iomem;
- const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags);
- struct sgt_iter sgt_iter;
- dma_addr_t addr;
- int i;
-
- /*
- * Note that we ignore PTE_READ_ONLY here. The caller must be careful
- * not to allow the user to override access to a read only page.
- */
-
- i = vma_res->start / I915_GTT_PAGE_SIZE;
- for_each_sgt_daddr(addr, sgt_iter, vma_res->bi.pages)
- gen8_set_pte(&base[i++], pte_encode | addr);
-}
-
-static void dpt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
-}
-
-static void dpt_bind_vma(struct i915_address_space *vm,
- struct i915_vm_pt_stash *stash,
- struct i915_vma_resource *vma_res,
- unsigned int pat_index,
- u32 flags)
-{
- u32 pte_flags;
-
- if (vma_res->bound_flags)
- return;
-
- /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
- pte_flags = 0;
- if (vm->has_read_only && vma_res->bi.readonly)
- pte_flags |= PTE_READ_ONLY;
- if (vma_res->bi.lmem)
- pte_flags |= PTE_LM;
+#include "intel_parent.h"
+#include "skl_universal_plane_regs.h"
- vm->insert_entries(vm, vma_res, pat_index, pte_flags);
-
- vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
-
- /*
- * Without aliasing PPGTT there's no difference between
- * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
- * upgrade to both bound if we bind either to avoid double-binding.
- */
- vma_res->bound_flags = I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
-}
-
-static void dpt_unbind_vma(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res)
-{
- vm->clear_range(vm, vma_res->start, vma_res->vma_size);
-}
-
-static void dpt_cleanup(struct i915_address_space *vm)
+void intel_dpt_configure(struct intel_crtc *crtc)
{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
-
- i915_gem_object_put(dpt->obj);
-}
-
-struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
- unsigned int alignment)
-{
- struct drm_i915_private *i915 = vm->i915;
- struct intel_display *display = i915->display;
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
- struct ref_tracker *wakeref;
- struct i915_vma *vma;
- void __iomem *iomem;
- struct i915_gem_ww_ctx ww;
- u64 pin_flags = 0;
- int err;
-
- if (i915_gem_object_is_stolen(dpt->obj))
- pin_flags |= PIN_MAPPABLE;
-
- wakeref = intel_display_rpm_get(display);
- atomic_inc(&display->restore.pending_fb_pin);
-
- for_i915_gem_ww(&ww, err, true) {
- err = i915_gem_object_lock(dpt->obj, &ww);
- if (err)
- continue;
+ struct intel_display *display = to_intel_display(crtc);
- vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0,
- alignment, pin_flags);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- continue;
- }
+ if (DISPLAY_VER(display) == 14) {
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
- iomem = i915_vma_pin_iomap(vma);
- i915_vma_unpin(vma);
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ if (plane_id == PLANE_CURSOR)
+ continue;
- if (IS_ERR(iomem)) {
- err = PTR_ERR(iomem);
- continue;
+ intel_de_rmw(display, PLANE_CHICKEN(pipe, plane_id),
+ PLANE_CHICKEN_DISABLE_DPT,
+ display->params.enable_dpt ? 0 :
+ PLANE_CHICKEN_DISABLE_DPT);
}
-
- dpt->vma = vma;
- dpt->iomem = iomem;
-
- i915_vma_get(vma);
+ } else if (DISPLAY_VER(display) == 13) {
+ intel_de_rmw(display, CHICKEN_MISC_2,
+ CHICKEN_MISC_DISABLE_DPT,
+ display->params.enable_dpt ? 0 :
+ CHICKEN_MISC_DISABLE_DPT);
}
-
- dpt->obj->mm.dirty = true;
-
- atomic_dec(&display->restore.pending_fb_pin);
- intel_display_rpm_put(display, wakeref);
-
- return err ? ERR_PTR(err) : vma;
-}
-
-void intel_dpt_unpin_from_ggtt(struct i915_address_space *vm)
-{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
-
- i915_vma_unpin_iomap(dpt->vma);
- i915_vma_put(dpt->vma);
}
/**
- * intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume
+ * intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend
* @display: display device instance
*
- * Restore the memory mapping during system resume for all framebuffers which
- * are mapped to HW via a GGTT->DPT page table. The content of these page
- * tables are not stored in the hibernation image during S4 and S3RST->S4
- * transitions, so here we reprogram the PTE entries in those tables.
+ * Suspend the memory mapping during system suspend for all framebuffers which
+ * are mapped to HW via a GGTT->DPT page table.
*
- * This function must be called after the mappings in GGTT have been restored calling
- * i915_ggtt_resume().
+ * This function must be called before the mappings in GGTT are suspended calling
+ * i915_ggtt_suspend().
*/
-void intel_dpt_resume(struct intel_display *display)
+void intel_dpt_suspend(struct intel_display *display)
{
struct drm_framebuffer *drm_fb;
@@ -204,26 +53,30 @@ void intel_dpt_resume(struct intel_display *display)
return;
mutex_lock(&display->drm->mode_config.fb_lock);
+
drm_for_each_fb(drm_fb, display->drm) {
struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
- if (fb->dpt_vm)
- i915_ggtt_resume_vm(fb->dpt_vm, true);
+ if (fb->dpt)
+ intel_parent_dpt_suspend(display, fb->dpt);
}
+
mutex_unlock(&display->drm->mode_config.fb_lock);
}
/**
- * intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend
+ * intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume
* @display: display device instance
*
- * Suspend the memory mapping during system suspend for all framebuffers which
- * are mapped to HW via a GGTT->DPT page table.
+ * Restore the memory mapping during system resume for all framebuffers which
+ * are mapped to HW via a GGTT->DPT page table. The content of these page
+ * tables are not stored in the hibernation image during S4 and S3RST->S4
+ * transitions, so here we reprogram the PTE entries in those tables.
*
- * This function must be called before the mappings in GGTT are suspended calling
- * i915_ggtt_suspend().
+ * This function must be called after the mappings in GGTT have been restored calling
+ * i915_ggtt_resume().
*/
-void intel_dpt_suspend(struct intel_display *display)
+void intel_dpt_resume(struct intel_display *display)
{
struct drm_framebuffer *drm_fb;
@@ -231,96 +84,11 @@ void intel_dpt_suspend(struct intel_display *display)
return;
mutex_lock(&display->drm->mode_config.fb_lock);
-
drm_for_each_fb(drm_fb, display->drm) {
struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
- if (fb->dpt_vm)
- i915_ggtt_suspend_vm(fb->dpt_vm, true);
+ if (fb->dpt)
+ intel_parent_dpt_resume(display, fb->dpt);
}
-
mutex_unlock(&display->drm->mode_config.fb_lock);
}
-
-struct i915_address_space *
-intel_dpt_create(struct intel_framebuffer *fb)
-{
- struct drm_gem_object *obj = intel_fb_bo(&fb->base);
- struct drm_i915_private *i915 = to_i915(obj->dev);
- struct drm_i915_gem_object *dpt_obj;
- struct i915_address_space *vm;
- struct i915_dpt *dpt;
- size_t size;
- int ret;
-
- if (intel_fb_needs_pot_stride_remap(fb))
- size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
- else
- size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);
-
- size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);
-
- dpt_obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
- if (IS_ERR(dpt_obj) && i915_ggtt_has_aperture(to_gt(i915)->ggtt))
- dpt_obj = i915_gem_object_create_stolen(i915, size);
- if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) {
- drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n");
- dpt_obj = i915_gem_object_create_shmem(i915, size);
- }
- if (IS_ERR(dpt_obj))
- return ERR_CAST(dpt_obj);
-
- ret = i915_gem_object_lock_interruptible(dpt_obj, NULL);
- if (!ret) {
- ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
- i915_gem_object_unlock(dpt_obj);
- }
- if (ret) {
- i915_gem_object_put(dpt_obj);
- return ERR_PTR(ret);
- }
-
- dpt = kzalloc_obj(*dpt);
- if (!dpt) {
- i915_gem_object_put(dpt_obj);
- return ERR_PTR(-ENOMEM);
- }
-
- vm = &dpt->vm;
-
- vm->gt = to_gt(i915);
- vm->i915 = i915;
- vm->dma = i915->drm.dev;
- vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
- vm->is_dpt = true;
-
- i915_address_space_init(vm, VM_CLASS_DPT);
-
- vm->insert_page = dpt_insert_page;
- vm->clear_range = dpt_clear_range;
- vm->insert_entries = dpt_insert_entries;
- vm->cleanup = dpt_cleanup;
-
- vm->vma_ops.bind_vma = dpt_bind_vma;
- vm->vma_ops.unbind_vma = dpt_unbind_vma;
-
- vm->pte_encode = vm->gt->ggtt->vm.pte_encode;
-
- dpt->obj = dpt_obj;
- dpt->obj->is_dpt = true;
-
- return &dpt->vm;
-}
-
-void intel_dpt_destroy(struct i915_address_space *vm)
-{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
-
- dpt->obj->is_dpt = false;
- i915_vm_put(&dpt->vm);
-}
-
-u64 intel_dpt_offset(struct i915_vma *dpt_vma)
-{
- return i915_vma_offset(dpt_vma);
-}
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.h b/drivers/gpu/drm/i915/display/intel_dpt.h
index db521401b828..11bd495693b2 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.h
+++ b/drivers/gpu/drm/i915/display/intel_dpt.h
@@ -1,26 +1,16 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2021 Intel Corporation
+ * Copyright © 2023 Intel Corporation
*/
-#ifndef __INTEL_DPT_H__
-#define __INTEL_DPT_H__
+#ifndef __INTEL_DPT_COMMON_H__
+#define __INTEL_DPT_COMMON_H__
-#include <linux/types.h>
-
-struct i915_address_space;
-struct i915_vma;
+struct intel_crtc;
struct intel_display;
-struct intel_framebuffer;
-void intel_dpt_destroy(struct i915_address_space *vm);
-struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
- unsigned int alignment);
-void intel_dpt_unpin_from_ggtt(struct i915_address_space *vm);
+void intel_dpt_configure(struct intel_crtc *crtc);
void intel_dpt_suspend(struct intel_display *display);
void intel_dpt_resume(struct intel_display *display);
-struct i915_address_space *
-intel_dpt_create(struct intel_framebuffer *fb);
-u64 intel_dpt_offset(struct i915_vma *dpt_vma);
-#endif /* __INTEL_DPT_H__ */
+#endif /* __INTEL_DPT_COMMON_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpt_common.c b/drivers/gpu/drm/i915/display/intel_dpt_common.c
deleted file mode 100644
index 5eb88d51dba1..000000000000
--- a/drivers/gpu/drm/i915/display/intel_dpt_common.c
+++ /dev/null
@@ -1,35 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "intel_de.h"
-#include "intel_display_regs.h"
-#include "intel_display_types.h"
-#include "intel_dpt_common.h"
-#include "skl_universal_plane_regs.h"
-
-void intel_dpt_configure(struct intel_crtc *crtc)
-{
- struct intel_display *display = to_intel_display(crtc);
-
- if (DISPLAY_VER(display) == 14) {
- enum pipe pipe = crtc->pipe;
- enum plane_id plane_id;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- if (plane_id == PLANE_CURSOR)
- continue;
-
- intel_de_rmw(display, PLANE_CHICKEN(pipe, plane_id),
- PLANE_CHICKEN_DISABLE_DPT,
- display->params.enable_dpt ? 0 :
- PLANE_CHICKEN_DISABLE_DPT);
- }
- } else if (DISPLAY_VER(display) == 13) {
- intel_de_rmw(display, CHICKEN_MISC_2,
- CHICKEN_MISC_DISABLE_DPT,
- display->params.enable_dpt ? 0 :
- CHICKEN_MISC_DISABLE_DPT);
- }
-}
diff --git a/drivers/gpu/drm/i915/display/intel_dpt_common.h b/drivers/gpu/drm/i915/display/intel_dpt_common.h
deleted file mode 100644
index 6d7de405126a..000000000000
--- a/drivers/gpu/drm/i915/display/intel_dpt_common.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef __INTEL_DPT_COMMON_H__
-#define __INTEL_DPT_COMMON_H__
-
-struct intel_crtc;
-
-void intel_dpt_configure(struct intel_crtc *crtc);
-
-#endif /* __INTEL_DPT_COMMON_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dram.c b/drivers/gpu/drm/i915/display/intel_dram.c
index 170de304fe96..bd281d4b4c05 100644
--- a/drivers/gpu/drm/i915/display/intel_dram.c
+++ b/drivers/gpu/drm/i915/display/intel_dram.c
@@ -7,13 +7,14 @@
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_pcode_regs.h>
-#include "i915_reg.h"
#include "intel_display_core.h"
#include "intel_display_utils.h"
+#include "intel_display_regs.h"
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
-#include "intel_pcode.h"
+#include "intel_parent.h"
#include "intel_uncore.h"
#include "vlv_iosf_sb.h"
@@ -692,8 +693,8 @@ static int icl_pcode_read_mem_global_info(struct intel_display *display,
u32 val = 0;
int ret;
- ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
- ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL);
+ ret = intel_parent_pcode_read(display, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index b5d774706fec..c8d3968f659f 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -8,6 +8,7 @@
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
+#include <drm/intel/display_parent_interface.h>
#include "intel_crtc.h"
#include "intel_de.h"
@@ -15,8 +16,8 @@
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dsb.h"
-#include "intel_dsb_buffer.h"
#include "intel_dsb_regs.h"
+#include "intel_psr.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
#include "skl_watermark.h"
@@ -75,6 +76,57 @@ struct intel_dsb {
* writes). There are no registers reads possible with DSB HW engine.
*/
+/*
+ * DSB buffer parent interface calls are here instead of intel_parent.[ch]
+ * because they're not used outside of intel_dsb.c.
+ */
+static u32 dsb_buffer_ggtt_offset(struct intel_dsb *dsb)
+{
+ struct intel_display *display = to_intel_display(dsb->crtc);
+
+ return display->parent->dsb->ggtt_offset(dsb->dsb_buf);
+}
+
+static void dsb_buffer_write(struct intel_dsb *dsb, u32 idx, u32 val)
+{
+ struct intel_display *display = to_intel_display(dsb->crtc);
+
+ display->parent->dsb->write(dsb->dsb_buf, idx, val);
+}
+
+static u32 dsb_buffer_read(struct intel_dsb *dsb, u32 idx)
+{
+ struct intel_display *display = to_intel_display(dsb->crtc);
+
+ return display->parent->dsb->read(dsb->dsb_buf, idx);
+}
+
+static void dsb_buffer_fill(struct intel_dsb *dsb, u32 idx, u32 val, size_t size)
+{
+ struct intel_display *display = to_intel_display(dsb->crtc);
+
+ display->parent->dsb->fill(dsb->dsb_buf, idx, val, size);
+}
+
+static struct intel_dsb_buffer *dsb_buffer_create(struct intel_display *display, size_t size)
+{
+ return display->parent->dsb->create(display->drm, size);
+}
+
+static void dsb_buffer_cleanup(struct intel_dsb *dsb)
+{
+ struct intel_display *display = to_intel_display(dsb->crtc);
+
+ display->parent->dsb->cleanup(dsb->dsb_buf);
+}
+
+static void dsb_buffer_flush_map(struct intel_dsb *dsb)
+{
+ struct intel_display *display = to_intel_display(dsb->crtc);
+
+ display->parent->dsb->flush_map(dsb->dsb_buf);
+}
+
/* DSB opcodes. */
#define DSB_OPCODE_SHIFT 24
#define DSB_OPCODE_NOOP 0x0
@@ -166,18 +218,24 @@ static int dsb_scanline_to_hw(struct intel_atomic_state *state,
* definitely do not want to skip vblank wait. We also have concern what comes
* to skipping vblank evasion. I.e. arming registers are latched before we have
* managed writing them. Due to these reasons we are not setting
- * DSB_SKIP_WAITS_EN.
+ * DSB_SKIP_WAITS_EN except when using TRANS_PUSH mechanism to trigger
+ * "frame change" event.
*/
static u32 dsb_chicken(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u32 chicken = intel_psr_use_trans_push(new_crtc_state) ?
+ DSB_SKIP_WAITS_EN : 0;
+
if (pre_commit_is_vrr_active(state, crtc))
- return DSB_CTRL_WAIT_SAFE_WINDOW |
+ chicken |= DSB_CTRL_WAIT_SAFE_WINDOW |
DSB_CTRL_NO_WAIT_VBLANK |
DSB_INST_WAIT_SAFE_WINDOW |
DSB_INST_NO_WAIT_VBLANK;
- else
- return 0;
+
+ return chicken;
}
static bool assert_dsb_has_room(struct intel_dsb *dsb)
@@ -211,10 +269,10 @@ static void intel_dsb_dump(struct intel_dsb *dsb)
for (i = 0; i < ALIGN(dsb->free_pos, 64 / 4); i += 4)
drm_dbg_kms(display->drm,
" 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i * 4,
- intel_dsb_buffer_read(dsb->dsb_buf, i),
- intel_dsb_buffer_read(dsb->dsb_buf, i + 1),
- intel_dsb_buffer_read(dsb->dsb_buf, i + 2),
- intel_dsb_buffer_read(dsb->dsb_buf, i + 3));
+ dsb_buffer_read(dsb, i),
+ dsb_buffer_read(dsb, i + 1),
+ dsb_buffer_read(dsb, i + 2),
+ dsb_buffer_read(dsb, i + 3));
drm_dbg_kms(display->drm, "}\n");
}
@@ -231,12 +289,12 @@ unsigned int intel_dsb_size(struct intel_dsb *dsb)
unsigned int intel_dsb_head(struct intel_dsb *dsb)
{
- return intel_dsb_buffer_ggtt_offset(dsb->dsb_buf);
+ return dsb_buffer_ggtt_offset(dsb);
}
static unsigned int intel_dsb_tail(struct intel_dsb *dsb)
{
- return intel_dsb_buffer_ggtt_offset(dsb->dsb_buf) + intel_dsb_size(dsb);
+ return dsb_buffer_ggtt_offset(dsb) + intel_dsb_size(dsb);
}
static void intel_dsb_ins_align(struct intel_dsb *dsb)
@@ -263,8 +321,8 @@ static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
dsb->ins[0] = ldw;
dsb->ins[1] = udw;
- intel_dsb_buffer_write(dsb->dsb_buf, dsb->free_pos++, dsb->ins[0]);
- intel_dsb_buffer_write(dsb->dsb_buf, dsb->free_pos++, dsb->ins[1]);
+ dsb_buffer_write(dsb, dsb->free_pos++, dsb->ins[0]);
+ dsb_buffer_write(dsb, dsb->free_pos++, dsb->ins[1]);
}
static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
@@ -335,13 +393,12 @@ void intel_dsb_reg_write_indexed(struct intel_dsb *dsb,
/* Update the count */
dsb->ins[0]++;
- intel_dsb_buffer_write(dsb->dsb_buf, dsb->ins_start_offset + 0,
- dsb->ins[0]);
+ dsb_buffer_write(dsb, dsb->ins_start_offset + 0, dsb->ins[0]);
- intel_dsb_buffer_write(dsb->dsb_buf, dsb->free_pos++, val);
+ dsb_buffer_write(dsb, dsb->free_pos++, val);
/* if number of data words is odd, then the last dword should be 0.*/
if (dsb->free_pos & 0x1)
- intel_dsb_buffer_write(dsb->dsb_buf, dsb->free_pos, 0);
+ dsb_buffer_write(dsb, dsb->free_pos, 0);
}
void intel_dsb_reg_write(struct intel_dsb *dsb,
@@ -521,8 +578,7 @@ static void intel_dsb_align_tail(struct intel_dsb *dsb)
aligned_tail = ALIGN(tail, CACHELINE_BYTES);
if (aligned_tail > tail)
- intel_dsb_buffer_memset(dsb->dsb_buf, dsb->free_pos, 0,
- aligned_tail - tail);
+ dsb_buffer_fill(dsb, dsb->free_pos, 0, aligned_tail - tail);
dsb->free_pos = aligned_tail / 4;
}
@@ -541,8 +597,7 @@ static void intel_dsb_gosub_align(struct intel_dsb *dsb)
* "Ensure GOSUB is not placed in cacheline QW slot 6 or 7 (numbered 0-7)"
*/
if (aligned_tail - tail <= 2 * 8)
- intel_dsb_buffer_memset(dsb->dsb_buf, dsb->free_pos, 0,
- aligned_tail - tail);
+ dsb_buffer_fill(dsb, dsb->free_pos, 0, aligned_tail - tail);
dsb->free_pos = aligned_tail / 4;
}
@@ -606,14 +661,14 @@ void intel_dsb_gosub_finish(struct intel_dsb *dsb)
*/
intel_dsb_noop(dsb, 8);
- intel_dsb_buffer_flush_map(dsb->dsb_buf);
+ dsb_buffer_flush_map(dsb);
}
void intel_dsb_finish(struct intel_dsb *dsb)
{
intel_dsb_align_tail(dsb);
- intel_dsb_buffer_flush_map(dsb->dsb_buf);
+ dsb_buffer_flush_map(dsb);
}
static u32 dsb_error_int_status(struct intel_display *display)
@@ -917,7 +972,7 @@ void intel_dsb_wait(struct intel_dsb *dsb)
!is_busy,
100, 1000, false);
if (ret) {
- u32 offset = intel_dsb_buffer_ggtt_offset(dsb->dsb_buf);
+ u32 offset = dsb_buffer_ggtt_offset(dsb);
intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
DSB_ENABLE | DSB_HALT);
@@ -983,7 +1038,7 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
/* ~1 qword per instruction, full cachelines */
size = ALIGN(max_cmds * 8, CACHELINE_BYTES);
- dsb_buf = intel_dsb_buffer_create(display->drm, size);
+ dsb_buf = dsb_buffer_create(display, size);
if (IS_ERR(dsb_buf))
goto out_put_rpm;
@@ -1021,7 +1076,7 @@ out:
*/
void intel_dsb_cleanup(struct intel_dsb *dsb)
{
- intel_dsb_buffer_cleanup(dsb->dsb_buf);
+ dsb_buffer_cleanup(dsb);
kfree(dsb);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb_buffer.h b/drivers/gpu/drm/i915/display/intel_dsb_buffer.h
deleted file mode 100644
index f4577d1f25cd..000000000000
--- a/drivers/gpu/drm/i915/display/intel_dsb_buffer.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _INTEL_DSB_BUFFER_H
-#define _INTEL_DSB_BUFFER_H
-
-#include <linux/types.h>
-
-struct drm_device;
-struct intel_dsb_buffer;
-
-u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf);
-void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val);
-u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx);
-void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size);
-struct intel_dsb_buffer *intel_dsb_buffer_create(struct drm_device *drm, size_t size);
-void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf);
-void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf);
-
-#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 38c33f2ca05c..5768619f840f 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -16,9 +16,7 @@
#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
-#include "intel_dpt.h"
#include "intel_fb.h"
-#include "intel_fb_bo.h"
#include "intel_frontbuffer.h"
#include "intel_parent.h"
#include "intel_plane.h"
@@ -2104,16 +2102,17 @@ int intel_plane_compute_gtt(struct intel_plane_state *plane_state)
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
+ struct intel_display *display = to_intel_display(fb->dev);
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
drm_framebuffer_cleanup(fb);
if (intel_fb_uses_dpt(fb))
- intel_dpt_destroy(intel_fb->dpt_vm);
+ intel_parent_dpt_destroy(display, intel_fb->dpt);
- intel_fb_bo_framebuffer_fini(intel_fb_bo(fb));
+ intel_bo_framebuffer_fini(intel_fb_bo(fb));
- intel_frontbuffer_put(intel_fb->frontbuffer);
+ intel_parent_frontbuffer_put(display, intel_fb->frontbuffer);
kfree(intel_fb->panic);
kfree(intel_fb);
@@ -2221,16 +2220,16 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
return -ENOMEM;
/*
- * intel_frontbuffer_get() must be done before
- * intel_fb_bo_framebuffer_init() to avoid set_tiling vs. addfb race.
+ * intel_parent_frontbuffer_get() must be done before
+ * intel_bo_framebuffer_init() to avoid set_tiling vs. addfb race.
*/
- intel_fb->frontbuffer = intel_frontbuffer_get(obj);
+ intel_fb->frontbuffer = intel_parent_frontbuffer_get(display, obj);
if (!intel_fb->frontbuffer) {
ret = -ENOMEM;
goto err_free_panic;
}
- ret = intel_fb_bo_framebuffer_init(obj, mode_cmd);
+ ret = intel_bo_framebuffer_init(obj, mode_cmd);
if (ret)
goto err_frontbuffer_put;
@@ -2304,16 +2303,21 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
goto err_bo_framebuffer_fini;
if (intel_fb_uses_dpt(fb)) {
- struct i915_address_space *vm;
+ struct drm_gem_object *obj = intel_fb_bo(&intel_fb->base);
+ struct intel_dpt *dpt;
+ size_t size = 0;
+
+ if (intel_fb_needs_pot_stride_remap(intel_fb))
+ size = intel_remapped_info_size(&intel_fb->remapped_view.gtt.remapped);
- vm = intel_dpt_create(intel_fb);
- if (IS_ERR(vm)) {
+ dpt = intel_parent_dpt_create(display, obj, size);
+ if (IS_ERR(dpt)) {
drm_dbg_kms(display->drm, "failed to create DPT\n");
- ret = PTR_ERR(vm);
+ ret = PTR_ERR(dpt);
goto err_frontbuffer_put;
}
- intel_fb->dpt_vm = vm;
+ intel_fb->dpt = dpt;
}
ret = drm_framebuffer_init(display->drm, fb, &intel_fb_funcs);
@@ -2326,11 +2330,11 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
err_free_dpt:
if (intel_fb_uses_dpt(fb))
- intel_dpt_destroy(intel_fb->dpt_vm);
+ intel_parent_dpt_destroy(display, intel_fb->dpt);
err_bo_framebuffer_fini:
- intel_fb_bo_framebuffer_fini(obj);
+ intel_bo_framebuffer_fini(obj);
err_frontbuffer_put:
- intel_frontbuffer_put(intel_fb->frontbuffer);
+ intel_parent_frontbuffer_put(display, intel_fb->frontbuffer);
err_free_panic:
kfree(intel_fb->panic);
@@ -2343,11 +2347,12 @@ intel_user_framebuffer_create(struct drm_device *dev,
const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
+ struct intel_display *display = to_intel_display(dev);
struct drm_framebuffer *fb;
struct drm_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
- obj = intel_fb_bo_lookup_valid_bo(dev, filp, &mode_cmd);
+ obj = intel_bo_framebuffer_lookup(display, filp, &mode_cmd);
if (IS_ERR(obj))
return ERR_CAST(obj);
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.c b/drivers/gpu/drm/i915/display/intel_fb_bo.c
deleted file mode 100644
index bfecd73d5fa0..000000000000
--- a/drivers/gpu/drm/i915/display/intel_fb_bo.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2021 Intel Corporation
- */
-
-#include <drm/drm_framebuffer.h>
-#include <drm/drm_print.h>
-
-#include "gem/i915_gem_object.h"
-
-#include "i915_drv.h"
-#include "intel_display_core.h"
-#include "intel_display_types.h"
-#include "intel_fb.h"
-#include "intel_fb_bo.h"
-
-void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj)
-{
- /* Nothing to do for i915 */
-}
-
-int intel_fb_bo_framebuffer_init(struct drm_gem_object *_obj,
- struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct drm_i915_gem_object *obj = to_intel_bo(_obj);
- struct intel_display *display = to_intel_display(obj->base.dev);
- unsigned int tiling, stride;
-
- i915_gem_object_lock(obj, NULL);
- tiling = i915_gem_object_get_tiling(obj);
- stride = i915_gem_object_get_stride(obj);
- i915_gem_object_unlock(obj);
-
- if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
- /*
- * If there's a fence, enforce that
- * the fb modifier and tiling mode match.
- */
- if (tiling != I915_TILING_NONE &&
- tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- drm_dbg_kms(display->drm,
- "tiling_mode doesn't match fb modifier\n");
- return -EINVAL;
- }
- } else {
- if (tiling == I915_TILING_X) {
- mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
- } else if (tiling == I915_TILING_Y) {
- drm_dbg_kms(display->drm,
- "No Y tiling for legacy addfb\n");
- return -EINVAL;
- }
- }
-
- /*
- * gen2/3 display engine uses the fence if present,
- * so the tiling mode must match the fb modifier exactly.
- */
- if (DISPLAY_VER(display) < 4 &&
- tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- drm_dbg_kms(display->drm,
- "tiling_mode must match fb modifier exactly on gen2/3\n");
- return -EINVAL;
- }
-
- /*
- * If there's a fence, enforce that
- * the fb pitch and fence stride match.
- */
- if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
- drm_dbg_kms(display->drm,
- "pitch (%d) must match tiling stride (%d)\n",
- mode_cmd->pitches[0], stride);
- return -EINVAL;
- }
-
- return 0;
-}
-
-struct drm_gem_object *
-intel_fb_bo_lookup_valid_bo(struct drm_device *drm,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct drm_i915_private *i915 = to_i915(drm);
- struct drm_i915_gem_object *obj;
-
- obj = i915_gem_object_lookup(filp, mode_cmd->handles[0]);
- if (!obj)
- return ERR_PTR(-ENOENT);
-
- /* object is backed with LMEM for discrete */
- if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) {
- /* object is "remote", not in local memory */
- i915_gem_object_put(obj);
- drm_dbg_kms(&i915->drm, "framebuffer must reside in local memory\n");
- return ERR_PTR(-EREMOTE);
- }
-
- return intel_bo_to_drm_bo(obj);
-}
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.h b/drivers/gpu/drm/i915/display/intel_fb_bo.h
deleted file mode 100644
index d775773c6c03..000000000000
--- a/drivers/gpu/drm/i915/display/intel_fb_bo.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2021 Intel Corporation
- */
-
-#ifndef __INTEL_FB_BO_H__
-#define __INTEL_FB_BO_H__
-
-struct drm_device;
-struct drm_file;
-struct drm_framebuffer;
-struct drm_gem_object;
-struct drm_mode_fb_cmd2;
-
-void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj);
-
-int intel_fb_bo_framebuffer_init(struct drm_gem_object *obj,
- struct drm_mode_fb_cmd2 *mode_cmd);
-
-struct drm_gem_object *
-intel_fb_bo_lookup_valid_bo(struct drm_device *drm,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *user_mode_cmd);
-
-#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 7249b784fbba..738d77a1468a 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -17,7 +17,7 @@
#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
-#include "intel_dpt.h"
+#include "i915_dpt.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_plane.h"
@@ -27,13 +27,14 @@ intel_fb_pin_to_dpt(const struct drm_framebuffer *fb,
const struct i915_gtt_view *view,
unsigned int alignment,
unsigned long *out_flags,
- struct i915_address_space *vm)
+ struct intel_dpt *dpt)
{
struct drm_device *dev = fb->dev;
struct intel_display *display = to_intel_display(dev);
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_gem_object *_obj = intel_fb_bo(fb);
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
+ struct i915_address_space *vm = i915_dpt_to_vm(dpt);
struct i915_gem_ww_ctx ww;
struct i915_vma *vma;
int ret;
@@ -284,7 +285,7 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state,
} else {
unsigned int alignment = intel_plane_fb_min_alignment(plane_state);
- vma = intel_dpt_pin_to_ggtt(fb->dpt_vm, alignment / 512);
+ vma = i915_dpt_pin_to_ggtt(fb->dpt, alignment / 512);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -292,9 +293,9 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state,
vma = intel_fb_pin_to_dpt(&fb->base, &plane_state->view.gtt,
alignment, &plane_state->flags,
- fb->dpt_vm);
+ fb->dpt);
if (IS_ERR(vma)) {
- intel_dpt_unpin_from_ggtt(fb->dpt_vm);
+ i915_dpt_unpin_from_ggtt(fb->dpt);
plane_state->ggtt_vma = NULL;
return PTR_ERR(vma);
}
@@ -307,7 +308,7 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state,
* The DPT object contains only one vma, and there is no VT-d
* guard, so the VMA's offset within the DPT is always 0.
*/
- drm_WARN_ON(display->drm, intel_dpt_offset(plane_state->dpt_vma));
+ drm_WARN_ON(display->drm, i915_dpt_offset(plane_state->dpt_vma));
}
/*
@@ -346,7 +347,7 @@ void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
vma = fetch_and_zero(&old_plane_state->ggtt_vma);
if (vma)
- intel_dpt_unpin_from_ggtt(fb->dpt_vm);
+ i915_dpt_unpin_from_ggtt(fb->dpt);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 91de38379282..ea0ce00c8474 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -45,7 +45,6 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
-#include "i915_vma.h"
#include "i9xx_plane_regs.h"
#include "intel_de.h"
#include "intel_display_device.h"
@@ -184,7 +183,7 @@ static unsigned int skl_fbc_min_cfb_stride(struct intel_display *display,
* Wa_16011863758: icl+
* Avoid some hardware segment address miscalculation.
*/
- if (DISPLAY_VER(display) >= 11)
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_16011863758))
stride += 64;
/*
@@ -950,7 +949,7 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
}
/* Wa_1409120013:icl,jsl,tgl,dg1 */
- if (IS_DISPLAY_VER(display, 11, 12))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_1409120013))
intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_CHICKEN_COMP_DUMMY_PIXEL);
/*
@@ -958,7 +957,7 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
* Fixes: Screen flicker with FBC and Package C state enabled
* Workaround: Forced SLB invalidation before start of new frame.
*/
- if (intel_display_wa(display, 22014263786))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_22014263786))
intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
@@ -980,7 +979,7 @@ static void fbc_sys_cache_update_config(struct intel_display *display, u32 reg,
* Fixes: SoC hardware issue in read caching
* Workaround: disable cache read setting which is enabled by default.
*/
- if (!intel_display_wa(display, 14025769978))
+ if (!intel_display_wa(display, INTEL_DISPLAY_WA_14025769978))
/* Cache read enable is set by default */
reg |= FBC_SYS_CACHE_READ_ENABLE;
@@ -1463,7 +1462,7 @@ static void intel_fbc_update_state(struct intel_atomic_state *state,
!intel_fbc_has_fences(display));
if (plane_state->flags & PLANE_HAS_FENCE)
- fbc_state->fence_id = i915_vma_fence_id(plane_state->ggtt_vma);
+ fbc_state->fence_id = intel_parent_vma_fence_id(display, plane_state->ggtt_vma);
else
fbc_state->fence_id = -1;
@@ -1490,7 +1489,7 @@ static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
*/
return DISPLAY_VER(display) >= 9 ||
(plane_state->flags & PLANE_HAS_FENCE &&
- i915_vma_fence_id(plane_state->ggtt_vma) != -1);
+ intel_parent_vma_fence_id(display, plane_state->ggtt_vma) != -1);
}
static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state)
@@ -1613,7 +1612,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- if (intel_display_wa(display, 16023588340)) {
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_16023588340)) {
plane_state->no_fbc_reason = "Wa_16023588340";
return 0;
}
@@ -1623,7 +1622,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
* Fixes: Underrun during media decode
* Workaround: Do not enable FBC
*/
- if (intel_display_wa(display, 15018326506)) {
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_15018326506)) {
plane_state->no_fbc_reason = "Wa_15018326506";
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 44f4fcce526e..bdaaf3edba0c 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -47,7 +47,6 @@
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
-#include "i915_vma.h"
#include "intel_bo.h"
#include "intel_display_core.h"
#include "intel_display_rpm.h"
@@ -343,9 +342,7 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- drm_dbg_kms(display->drm, "allocated %dx%d fb: 0x%08x\n",
- fb->base.width, fb->base.height,
- i915_ggtt_offset(vma));
+ drm_dbg_kms(display->drm, "allocated %dx%d fb\n", fb->base.width, fb->base.height);
ifbdev->fb = fb;
ifbdev->vma = vma;
ifbdev->vma_flags = flags;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
index c3202ba141c5..6f84eb6355de 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
@@ -17,28 +17,40 @@ u32 intel_fbdev_fb_pitch_align(u32 stride)
return ALIGN(stride, 64);
}
+bool intel_fbdev_fb_prefer_stolen(struct drm_device *drm, unsigned int size)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ /* Skip stolen on MTL as Wa_22018444074 mitigation. */
+ if (IS_METEORLAKE(i915))
+ return false;
+
+ /*
+ * If the FB is too big, just don't use it since fbdev is not very
+ * important and we should probably use that space with FBC or other
+ * features.
+ */
+ return i915->dsm.usable_size >= size * 2;
+}
+
struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size)
{
- struct drm_i915_private *dev_priv = to_i915(drm);
+ struct drm_i915_private *i915 = to_i915(drm);
struct drm_i915_gem_object *obj;
obj = ERR_PTR(-ENODEV);
- if (HAS_LMEM(dev_priv)) {
- obj = i915_gem_object_create_lmem(dev_priv, size,
+ if (HAS_LMEM(i915)) {
+ obj = i915_gem_object_create_lmem(i915, size,
I915_BO_ALLOC_CONTIGUOUS |
I915_BO_ALLOC_USER);
} else {
- /*
- * If the FB is too big, just don't use it since fbdev is not very
- * important and we should probably use that space with FBC or other
- * features.
- *
- * Also skip stolen on MTL as Wa_22018444074 mitigation.
- */
- if (!IS_METEORLAKE(dev_priv) && size * 2 < dev_priv->dsm.usable_size)
- obj = i915_gem_object_create_stolen(dev_priv, size);
+ if (intel_fbdev_fb_prefer_stolen(drm, size))
+ obj = i915_gem_object_create_stolen(i915, size);
+ else
+ drm_info(drm, "Allocating fbdev: Stolen memory not preferred.\n");
+
if (IS_ERR(obj))
- obj = i915_gem_object_create_shmem(dev_priv, size);
+ obj = i915_gem_object_create_shmem(i915, size);
}
if (IS_ERR(obj)) {
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
index fd0b3775dc1f..34ed2b9c2b4f 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
@@ -19,5 +19,6 @@ struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size
void intel_fbdev_fb_bo_destroy(struct drm_gem_object *obj);
int intel_fbdev_fb_fill_info(struct drm_device *drm, struct fb_info *info,
struct drm_gem_object *obj, struct i915_vma *vma);
+bool intel_fbdev_fb_prefer_stolen(struct drm_device *drm, unsigned int size);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 5bb0090dd5ed..24ce8a7842c7 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -8,7 +8,6 @@
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index b413b3e871d8..bf047180def9 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -29,7 +29,6 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_flipq.c b/drivers/gpu/drm/i915/display/intel_flipq.c
index 1e9550cb66a3..253dc2e96d2d 100644
--- a/drivers/gpu/drm/i915/display/intel_flipq.c
+++ b/drivers/gpu/drm/i915/display/intel_flipq.c
@@ -12,6 +12,7 @@
#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
+#include "intel_display_wa.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
#include "intel_dsb.h"
@@ -447,19 +448,11 @@ void intel_flipq_add(struct intel_crtc *crtc,
intel_flipq_sw_dmc_wake(crtc);
}
-/* Wa_18034343758 */
-static bool need_dmc_halt_wa(struct intel_display *display)
-{
- return DISPLAY_VER(display) == 20 ||
- (display->platform.pantherlake &&
- IS_DISPLAY_STEP(display, STEP_A0, STEP_B0));
-}
-
void intel_flipq_wait_dmc_halt(struct intel_dsb *dsb, struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- if (need_dmc_halt_wa(display))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_18034343758))
intel_dsb_wait_usec(dsb, 2);
}
@@ -467,6 +460,6 @@ void intel_flipq_unhalt_dmc(struct intel_dsb *dsb, struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- if (need_dmc_halt_wa(display))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_18034343758))
intel_dsb_reg_write(dsb, PIPEDMC_CTL(crtc->pipe), 0);
}
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 03c4978fa5ec..705742e117ca 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -58,13 +58,13 @@
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
-#include "intel_bo.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_drrs.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
+#include "intel_parent.h"
#include "intel_psr.h"
#include "intel_tdf.h"
@@ -123,9 +123,9 @@ void intel_frontbuffer_flip(struct intel_display *display,
frontbuffer_flush(display, frontbuffer_bits, ORIGIN_FLIP);
}
-void __intel_fb_invalidate(struct intel_frontbuffer *front,
- enum fb_op_origin origin,
- unsigned int frontbuffer_bits)
+void __intel_frontbuffer_invalidate(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits)
{
struct intel_display *display = front->display;
@@ -143,14 +143,14 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
intel_fbc_invalidate(display, frontbuffer_bits, origin);
}
-void __intel_fb_flush(struct intel_frontbuffer *front,
- enum fb_op_origin origin,
- unsigned int frontbuffer_bits)
+void __intel_frontbuffer_flush(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits)
{
struct intel_display *display = front->display;
if (origin == ORIGIN_DIRTYFB)
- intel_bo_frontbuffer_flush_for_display(front);
+ intel_parent_frontbuffer_flush_for_display(display, front);
if (origin == ORIGIN_CS) {
spin_lock(&display->fb_tracking.lock);
@@ -164,18 +164,13 @@ void __intel_fb_flush(struct intel_frontbuffer *front,
frontbuffer_flush(display, frontbuffer_bits, origin);
}
-static void intel_frontbuffer_ref(struct intel_frontbuffer *front)
-{
- intel_bo_frontbuffer_ref(front);
-}
-
static void intel_frontbuffer_flush_work(struct work_struct *work)
{
struct intel_frontbuffer *front =
container_of(work, struct intel_frontbuffer, flush_work);
intel_frontbuffer_flush(front, ORIGIN_DIRTYFB);
- intel_frontbuffer_put(front);
+ intel_parent_frontbuffer_put(front->display, front);
}
/**
@@ -190,9 +185,9 @@ void intel_frontbuffer_queue_flush(struct intel_frontbuffer *front)
if (!front)
return;
- intel_frontbuffer_ref(front);
+ intel_parent_frontbuffer_ref(front->display, front);
if (!schedule_work(&front->flush_work))
- intel_frontbuffer_put(front);
+ intel_parent_frontbuffer_put(front->display, front);
}
void intel_frontbuffer_init(struct intel_frontbuffer *front, struct drm_device *drm)
@@ -207,16 +202,6 @@ void intel_frontbuffer_fini(struct intel_frontbuffer *front)
drm_WARN_ON(front->display->drm, atomic_read(&front->bits));
}
-struct intel_frontbuffer *intel_frontbuffer_get(struct drm_gem_object *obj)
-{
- return intel_bo_frontbuffer_get(obj);
-}
-
-void intel_frontbuffer_put(struct intel_frontbuffer *front)
-{
- intel_bo_frontbuffer_put(front);
-}
-
/**
* intel_frontbuffer_track - update frontbuffer tracking
* @old: current buffer for the frontbuffer slots
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index 22677acb4c06..a89ce352b12b 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -66,14 +66,9 @@ struct intel_frontbuffer {
void intel_frontbuffer_flip(struct intel_display *display,
unsigned frontbuffer_bits);
-void intel_frontbuffer_put(struct intel_frontbuffer *front);
-
-struct intel_frontbuffer *
-intel_frontbuffer_get(struct drm_gem_object *obj);
-
-void __intel_fb_invalidate(struct intel_frontbuffer *front,
- enum fb_op_origin origin,
- unsigned int frontbuffer_bits);
+void __intel_frontbuffer_invalidate(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits);
/**
* intel_frontbuffer_invalidate - invalidate frontbuffer object
@@ -98,13 +93,13 @@ static inline bool intel_frontbuffer_invalidate(struct intel_frontbuffer *front,
if (!frontbuffer_bits)
return false;
- __intel_fb_invalidate(front, origin, frontbuffer_bits);
+ __intel_frontbuffer_invalidate(front, origin, frontbuffer_bits);
return true;
}
-void __intel_fb_flush(struct intel_frontbuffer *front,
- enum fb_op_origin origin,
- unsigned int frontbuffer_bits);
+void __intel_frontbuffer_flush(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits);
/**
* intel_frontbuffer_flush - flush frontbuffer object
@@ -126,7 +121,7 @@ static inline void intel_frontbuffer_flush(struct intel_frontbuffer *front,
if (!frontbuffer_bits)
return;
- __intel_fb_flush(front, origin, frontbuffer_bits);
+ __intel_frontbuffer_flush(front, origin, frontbuffer_bits);
}
void intel_frontbuffer_queue_flush(struct intel_frontbuffer *front);
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index a7bce0c6a17e..df48f27f1cc1 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -35,7 +35,6 @@
#include <drm/drm_print.h>
#include <drm/display/drm_hdcp_helper.h>
-#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
@@ -251,7 +250,7 @@ static u32 get_reserved(struct intel_gmbus *bus)
preserve_bits |= GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE;
/* Wa_16025573575: the masks bits need to be preserved through out */
- if (intel_display_wa(display, 16025573575))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_16025573575))
preserve_bits |= GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_VAL_MASK |
GPIO_DATA_DIR_MASK | GPIO_DATA_VAL_MASK;
@@ -343,7 +342,7 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
if (display->platform.pineview)
pnv_gmbus_clock_gating(display, false);
- if (intel_display_wa(display, 16025573575))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_16025573575))
ptl_handle_mask_bits(bus, true);
set_data(bus, 1);
@@ -364,7 +363,7 @@ intel_gpio_post_xfer(struct i2c_adapter *adapter)
if (display->platform.pineview)
pnv_gmbus_clock_gating(display, true);
- if (intel_display_wa(display, 16025573575))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_16025573575))
ptl_handle_mask_bits(bus, false);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 555d35bae887..892eab4b6f92 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -17,8 +17,8 @@
#include <drm/display/drm_hdcp_helper.h>
#include <drm/drm_print.h>
#include <drm/intel/i915_component.h>
+#include <drm/intel/intel_pcode_regs.h>
-#include "i915_reg.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_jiffies.h"
@@ -33,7 +33,6 @@
#include "intel_hdcp_regs.h"
#include "intel_hdcp_shim.h"
#include "intel_parent.h"
-#include "intel_pcode.h"
#include "intel_step.h"
#define USE_HDCP_GSC(__display) (DISPLAY_VER(__display) >= 14)
@@ -76,7 +75,6 @@ static int intel_conn_to_vcpi(struct intel_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr;
struct drm_dp_mst_atomic_payload *payload;
struct drm_dp_mst_topology_state *mst_state;
- int vcpi = 0;
/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
if (!connector->mst.port)
@@ -87,15 +85,9 @@ static int intel_conn_to_vcpi(struct intel_atomic_state *state,
mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
payload = drm_atomic_get_mst_payload_state(mst_state, connector->mst.port);
if (drm_WARN_ON(mgr->dev, !payload))
- goto out;
+ return 0;
- vcpi = payload->vcpi;
- if (drm_WARN_ON(mgr->dev, vcpi < 0)) {
- vcpi = 0;
- goto out;
- }
-out:
- return vcpi;
+ return payload->vcpi;
}
/*
@@ -398,7 +390,7 @@ static int intel_hdcp_load_keys(struct intel_display *display)
* Mailbox interface.
*/
if (DISPLAY_VER(display) == 9 && !display->platform.broxton) {
- ret = intel_pcode_write(display->drm, SKL_PCODE_LOAD_HDCP_KEYS, 1);
+ ret = intel_parent_pcode_write(display, SKL_PCODE_LOAD_HDCP_KEYS, 1);
if (ret) {
drm_err(display->drm,
"Failed to initiate HDCP key load (%d)\n",
@@ -2239,7 +2231,7 @@ static void intel_hdcp_check_work(struct work_struct *work)
if (drm_connector_is_unregistered(&connector->base))
return;
- if (!intel_hdcp2_check_link(connector))
+ if (!hdcp->force_hdcp14 && !intel_hdcp2_check_link(connector))
queue_delayed_work(display->wq.unordered, &hdcp->check_work,
DRM_HDCP2_CHECK_PERIOD_MS);
else if (!intel_hdcp_check_link(connector))
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 055e68810d0d..05e898d10a2b 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -56,6 +56,7 @@
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_dp.h"
+#include "intel_dpll.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hdcp_regs.h"
@@ -70,16 +71,14 @@
bool intel_hdmi_is_frl(u32 clock)
{
- switch (clock) {
- case 300000: /* 3 Gbps */
- case 600000: /* 6 Gbps */
- case 800000: /* 8 Gbps */
- case 1000000: /* 10 Gbps */
- case 1200000: /* 12 Gbps */
- return true;
- default:
- return false;
- }
+ u32 rates[] = { 300000, 600000, 800000, 1000000, 1200000 };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rates); i++)
+ if (intel_dpll_clock_matches(clock, rates[i]))
+ return true;
+
+ return false;
}
static void
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index 82c39e4ffa37..8865cb2ac569 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -5,7 +5,6 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lt_phy.c b/drivers/gpu/drm/i915/display/intel_lt_phy.c
index 6cdae03ee172..eced8493e566 100644
--- a/drivers/gpu/drm/i915/display/intel_lt_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_lt_phy.c
@@ -5,7 +5,6 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_cx0_phy.h"
#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
@@ -14,6 +13,7 @@
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
+#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_hdmi.h"
#include "intel_lt_phy.h"
@@ -60,7 +60,6 @@ struct lt_phy_params {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_rbr = {
- .clock = 162000,
.config = {
0x83,
0x2d,
@@ -114,7 +113,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_rbr = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr1 = {
- .clock = 270000,
.config = {
0x8b,
0x2d,
@@ -168,7 +166,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr1 = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr2 = {
- .clock = 540000,
.config = {
0x93,
0x2d,
@@ -222,7 +219,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr2 = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr3 = {
- .clock = 810000,
.config = {
0x9b,
0x2d,
@@ -276,7 +272,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr3 = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr10 = {
- .clock = 1000000,
.config = {
0x43,
0x2d,
@@ -330,7 +325,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr10 = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr13_5 = {
- .clock = 1350000,
.config = {
0xcb,
0x2d,
@@ -384,7 +378,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr13_5 = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr20 = {
- .clock = 2000000,
.config = {
0x53,
0x2d,
@@ -437,19 +430,35 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr20 = {
},
};
-static const struct intel_lt_phy_pll_state * const xe3plpd_lt_dp_tables[] = {
- &xe3plpd_lt_dp_rbr,
- &xe3plpd_lt_dp_hbr1,
- &xe3plpd_lt_dp_hbr2,
- &xe3plpd_lt_dp_hbr3,
- &xe3plpd_lt_dp_uhbr10,
- &xe3plpd_lt_dp_uhbr13_5,
- &xe3plpd_lt_dp_uhbr20,
- NULL,
+struct intel_lt_phy_pll_params {
+ const char *name;
+ bool is_hdmi;
+ int clock_rate;
+ const struct intel_lt_phy_pll_state *state;
+};
+
+#define __LT_PHY_PLL_PARAMS(__is_hdmi, __clock_rate, __state) { \
+ .name = __stringify(__state), \
+ .is_hdmi = __is_hdmi, \
+ .clock_rate = __clock_rate, \
+ .state = &__state, \
+}
+
+#define LT_PHY_PLL_HDMI_PARAMS(__clock_rate, __state) __LT_PHY_PLL_PARAMS(true, __clock_rate, __state)
+#define LT_PHY_PLL_DP_PARAMS(__clock_rate, __state) __LT_PHY_PLL_PARAMS(false, __clock_rate, __state)
+
+static const struct intel_lt_phy_pll_params xe3plpd_lt_dp_tables[] = {
+ LT_PHY_PLL_DP_PARAMS(162000, xe3plpd_lt_dp_rbr),
+ LT_PHY_PLL_DP_PARAMS(270000, xe3plpd_lt_dp_hbr1),
+ LT_PHY_PLL_DP_PARAMS(540000, xe3plpd_lt_dp_hbr2),
+ LT_PHY_PLL_DP_PARAMS(810000, xe3plpd_lt_dp_hbr3),
+ LT_PHY_PLL_DP_PARAMS(1000000, xe3plpd_lt_dp_uhbr10),
+ LT_PHY_PLL_DP_PARAMS(1350000, xe3plpd_lt_dp_uhbr13_5),
+ LT_PHY_PLL_DP_PARAMS(2000000, xe3plpd_lt_dp_uhbr20),
+ {}
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_2_16 = {
- .clock = 216000,
.config = {
0xa3,
0x2d,
@@ -503,7 +512,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_2_16 = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_2_43 = {
- .clock = 243000,
.config = {
0xab,
0x2d,
@@ -557,7 +565,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_2_43 = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_3_24 = {
- .clock = 324000,
.config = {
0xb3,
0x2d,
@@ -611,7 +618,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_3_24 = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_4_32 = {
- .clock = 432000,
.config = {
0xbb,
0x2d,
@@ -665,7 +671,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_4_32 = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_6_75 = {
- .clock = 675000,
.config = {
0xdb,
0x2d,
@@ -718,21 +723,20 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_6_75 = {
},
};
-static const struct intel_lt_phy_pll_state * const xe3plpd_lt_edp_tables[] = {
- &xe3plpd_lt_dp_rbr,
- &xe3plpd_lt_edp_2_16,
- &xe3plpd_lt_edp_2_43,
- &xe3plpd_lt_dp_hbr1,
- &xe3plpd_lt_edp_3_24,
- &xe3plpd_lt_edp_4_32,
- &xe3plpd_lt_dp_hbr2,
- &xe3plpd_lt_edp_6_75,
- &xe3plpd_lt_dp_hbr3,
- NULL,
+static const struct intel_lt_phy_pll_params xe3plpd_lt_edp_tables[] = {
+ LT_PHY_PLL_DP_PARAMS(162000, xe3plpd_lt_dp_rbr),
+ LT_PHY_PLL_DP_PARAMS(216000, xe3plpd_lt_edp_2_16),
+ LT_PHY_PLL_DP_PARAMS(243000, xe3plpd_lt_edp_2_43),
+ LT_PHY_PLL_DP_PARAMS(270000, xe3plpd_lt_dp_hbr1),
+ LT_PHY_PLL_DP_PARAMS(324000, xe3plpd_lt_edp_3_24),
+ LT_PHY_PLL_DP_PARAMS(432000, xe3plpd_lt_edp_4_32),
+ LT_PHY_PLL_DP_PARAMS(540000, xe3plpd_lt_dp_hbr2),
+ LT_PHY_PLL_DP_PARAMS(675000, xe3plpd_lt_edp_6_75),
+ LT_PHY_PLL_DP_PARAMS(810000, xe3plpd_lt_dp_hbr3),
+ {}
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_252 = {
- .clock = 25200,
.config = {
0x84,
0x2d,
@@ -785,62 +789,7 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_252 = {
},
};
-static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_272 = {
- .clock = 27200,
- .config = {
- 0x84,
- 0x2d,
- 0x0,
- },
- .addr_msb = {
- 0x87,
- 0x87,
- 0x87,
- 0x87,
- 0x88,
- 0x88,
- 0x88,
- 0x88,
- 0x88,
- 0x88,
- 0x88,
- 0x88,
- 0x88,
- },
- .addr_lsb = {
- 0x10,
- 0x0c,
- 0x14,
- 0xe4,
- 0x0c,
- 0x10,
- 0x14,
- 0x18,
- 0x48,
- 0x40,
- 0x4c,
- 0x24,
- 0x44,
- },
- .data = {
- { 0x0, 0x4c, 0x2, 0x0 },
- { 0x0b, 0x15, 0x26, 0xa0 },
- { 0x60, 0x0, 0x0, 0x0 },
- { 0x8, 0x4, 0x96, 0x28 },
- { 0xfa, 0x0c, 0x84, 0x11 },
- { 0x80, 0x0f, 0xd9, 0x53 },
- { 0x86, 0x0, 0x0, 0x0 },
- { 0x1, 0xa0, 0x1, 0x0 },
- { 0x4b, 0x0, 0x0, 0x0 },
- { 0x28, 0x0, 0x0, 0x0 },
- { 0x0, 0x14, 0x2a, 0x14 },
- { 0x0, 0x0, 0x0, 0x0 },
- { 0x0, 0x0, 0x0, 0x0 },
- },
-};
-
static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_742p5 = {
- .clock = 74250,
.config = {
0x84,
0x2d,
@@ -894,7 +843,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_742p5 = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_1p485 = {
- .clock = 148500,
.config = {
0x84,
0x2d,
@@ -948,7 +896,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_1p485 = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_5p94 = {
- .clock = 594000,
.config = {
0x84,
0x2d,
@@ -1001,13 +948,12 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_5p94 = {
},
};
-static const struct intel_lt_phy_pll_state * const xe3plpd_lt_hdmi_tables[] = {
- &xe3plpd_lt_hdmi_252,
- &xe3plpd_lt_hdmi_272,
- &xe3plpd_lt_hdmi_742p5,
- &xe3plpd_lt_hdmi_1p485,
- &xe3plpd_lt_hdmi_5p94,
- NULL,
+static const struct intel_lt_phy_pll_params xe3plpd_lt_hdmi_tables[] = {
+ LT_PHY_PLL_HDMI_PARAMS(25200, xe3plpd_lt_hdmi_252),
+ LT_PHY_PLL_HDMI_PARAMS(74250, xe3plpd_lt_hdmi_742p5),
+ LT_PHY_PLL_HDMI_PARAMS(148500, xe3plpd_lt_hdmi_1p485),
+ LT_PHY_PLL_HDMI_PARAMS(594000, xe3plpd_lt_hdmi_5p94),
+ {}
};
static u8 intel_lt_phy_get_owned_lane_mask(struct intel_encoder *encoder)
@@ -1106,7 +1052,7 @@ static int __intel_lt_phy_p2p_write_once(struct intel_encoder *encoder,
* This is the time PHY takes to settle down after programming the PHY.
*/
udelay(150);
- intel_clear_response_ready_flag(encoder, lane);
+ intel_cx0_clear_response_ready_flag(encoder, lane);
intel_lt_phy_clear_status_p2p(encoder, lane);
return 0;
@@ -1346,7 +1292,7 @@ static void intel_lt_phy_transaction_end(struct intel_encoder *encoder, struct r
intel_display_power_put(display, POWER_DOMAIN_DC_OFF, wakeref);
}
-static const struct intel_lt_phy_pll_state * const *
+static const struct intel_lt_phy_pll_params *
intel_lt_phy_pll_tables_get(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
@@ -1680,7 +1626,8 @@ intel_lt_phy_calculate_hdmi_state(struct intel_lt_phy_pll_state *lt_state,
}
static int
-intel_lt_phy_calc_hdmi_port_clock(const struct intel_crtc_state *crtc_state)
+intel_lt_phy_calc_hdmi_port_clock(struct intel_display *display,
+ const struct intel_lt_phy_pll_state *lt_state)
{
#define REGVAL(i) ( \
(lt_state->data[i][3]) | \
@@ -1689,9 +1636,6 @@ intel_lt_phy_calc_hdmi_port_clock(const struct intel_crtc_state *crtc_state)
(lt_state->data[i][0] << 24) \
)
- struct intel_display *display = to_intel_display(crtc_state);
- const struct intel_lt_phy_pll_state *lt_state =
- &crtc_state->dpll_hw_state.ltpll;
int clk = 0;
u32 d8, pll_reg_5, pll_reg_3, pll_reg_57, m2div_frac, m2div_int;
u64 temp0, temp1;
@@ -1737,7 +1681,7 @@ intel_lt_phy_calc_hdmi_port_clock(const struct intel_crtc_state *crtc_state)
if (d8 == 0) {
drm_WARN_ON(display->drm,
"Invalid port clock using lowest HDMI portclock\n");
- return xe3plpd_lt_hdmi_252.clock;
+ return xe3plpd_lt_hdmi_tables[0].clock_rate;
}
m2div_int = (pll_reg_3 & REG_GENMASK(14, 5)) >> 5;
temp0 = ((u64)m2div_frac * REF_CLK_KHZ) >> 32;
@@ -1749,13 +1693,10 @@ intel_lt_phy_calc_hdmi_port_clock(const struct intel_crtc_state *crtc_state)
}
int
-intel_lt_phy_calc_port_clock(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+intel_lt_phy_calc_port_clock(struct intel_display *display,
+ const struct intel_lt_phy_pll_state *lt_state)
{
- struct intel_display *display = to_intel_display(encoder);
int clk;
- const struct intel_lt_phy_pll_state *lt_state =
- &crtc_state->dpll_hw_state.ltpll;
u8 mode, rate;
mode = REG_FIELD_GET8(LT_PHY_VDR_MODE_ENCODING_MASK,
@@ -1771,10 +1712,10 @@ intel_lt_phy_calc_port_clock(struct intel_encoder *encoder,
lt_state->config[0]);
clk = intel_lt_phy_get_dp_clock(rate);
} else if (mode == MODE_HDMI_20) {
- clk = intel_lt_phy_calc_hdmi_port_clock(crtc_state);
+ clk = intel_lt_phy_calc_hdmi_port_clock(display, lt_state);
} else {
drm_WARN_ON(display->drm, "Unsupported LT PHY Mode!\n");
- clk = xe3plpd_lt_hdmi_252.clock;
+ clk = 25200;
}
return clk;
@@ -1784,16 +1725,20 @@ int
intel_lt_phy_pll_calc_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- const struct intel_lt_phy_pll_state * const *tables;
+ struct intel_display *display = to_intel_display(crtc_state);
+ const struct intel_lt_phy_pll_params *tables;
int i;
tables = intel_lt_phy_pll_tables_get(crtc_state, encoder);
if (!tables)
return -EINVAL;
- for (i = 0; tables[i]; i++) {
- if (crtc_state->port_clock == tables[i]->clock) {
- crtc_state->dpll_hw_state.ltpll = *tables[i];
+ for (i = 0; tables[i].name; i++) {
+ int clock = intel_lt_phy_calc_port_clock(display, tables[i].state);
+
+ drm_WARN_ON(display->drm, !intel_dpll_clock_matches(clock, tables[i].clock_rate));
+ if (intel_dpll_clock_matches(crtc_state->port_clock, clock)) {
+ crtc_state->dpll_hw_state.ltpll = *tables[i].state;
if (intel_crtc_has_dp_encoder(crtc_state)) {
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
crtc_state->dpll_hw_state.ltpll.config[2] = 1;
@@ -2212,6 +2157,9 @@ bool
intel_lt_phy_pll_compare_hw_state(const struct intel_lt_phy_pll_state *a,
const struct intel_lt_phy_pll_state *b)
{
+ if (a->tbt_mode || b->tbt_mode)
+ return true;
+
/*
* With LT PHY values other than VDR0_CONFIG and VDR2_CONFIG are
* unreliable. They cannot always be read back since internally
@@ -2254,8 +2202,6 @@ void intel_lt_phy_pll_readout_hw_state(struct intel_encoder *encoder,
LT_PHY_VDR_X_DATAY(i, j));
}
- pll_state->clock =
- intel_lt_phy_calc_port_clock(encoder, crtc_state);
intel_lt_phy_transaction_end(encoder, wakeref);
}
@@ -2319,3 +2265,66 @@ void intel_xe3plpd_pll_disable(struct intel_encoder *encoder)
intel_lt_phy_pll_disable(encoder);
}
+
+static void intel_lt_phy_pll_verify_clock(struct intel_display *display,
+ int precomputed_clock,
+ const char *pll_state_name,
+ const struct intel_lt_phy_pll_state *pll_state,
+ bool is_precomputed_state)
+{
+ struct drm_printer p;
+ int clock;
+
+ clock = intel_lt_phy_calc_port_clock(display, pll_state);
+
+ if (intel_dpll_clock_matches(clock, precomputed_clock))
+ return;
+
+ drm_warn(display->drm,
+ "PLL state %s (%s): clock difference too high: computed %d, pre-computed %d\n",
+ pll_state_name,
+ is_precomputed_state ? "precomputed" : "computed",
+ clock, precomputed_clock);
+
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ p = drm_dbg_printer(display->drm, DRM_UT_KMS, NULL);
+
+ drm_printf(&p, "PLL state %s (%s):\n",
+ pll_state_name,
+ is_precomputed_state ? "precomputed" : "computed");
+ intel_lt_phy_dump_hw_state(display, pll_state);
+}
+
+static void intel_lt_phy_pll_verify_params(struct intel_display *display,
+ const struct intel_lt_phy_pll_params *pll_params)
+{
+ struct intel_lt_phy_pll_state pll_state;
+
+ intel_lt_phy_pll_verify_clock(display, pll_params->clock_rate, pll_params->name, pll_params->state, true);
+
+ if (!pll_params->is_hdmi)
+ return;
+
+ if (intel_lt_phy_calculate_hdmi_state(&pll_state, pll_params->clock_rate) != 0)
+ return;
+
+ intel_lt_phy_pll_verify_clock(display, pll_params->clock_rate, pll_params->name, &pll_state, false);
+}
+
+static void intel_lt_phy_pll_verify_tables(struct intel_display *display,
+ const struct intel_lt_phy_pll_params *tables)
+{
+ int i;
+
+ for (i = 0; tables[i].name; i++)
+ intel_lt_phy_pll_verify_params(display, &tables[i]);
+}
+
+void intel_lt_phy_verify_plls(struct intel_display *display)
+{
+ intel_lt_phy_pll_verify_tables(display, xe3plpd_lt_dp_tables);
+ intel_lt_phy_pll_verify_tables(display, xe3plpd_lt_edp_tables);
+ intel_lt_phy_pll_verify_tables(display, xe3plpd_lt_hdmi_tables);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_lt_phy.h b/drivers/gpu/drm/i915/display/intel_lt_phy.h
index bf41858f1bc3..db905668f86d 100644
--- a/drivers/gpu/drm/i915/display/intel_lt_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_lt_phy.h
@@ -21,8 +21,8 @@ void intel_lt_phy_pll_disable(struct intel_encoder *encoder);
int
intel_lt_phy_pll_calc_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder);
-int intel_lt_phy_calc_port_clock(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
+int intel_lt_phy_calc_port_clock(struct intel_display *display,
+ const struct intel_lt_phy_pll_state *lt_state);
void intel_lt_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_lt_phy_dump_hw_state(struct intel_display *display,
@@ -41,5 +41,6 @@ intel_lt_phy_calculate_hdmi_state(struct intel_lt_phy_pll_state *lt_state,
void intel_xe3plpd_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_xe3plpd_pll_disable(struct intel_encoder *encoder);
+void intel_lt_phy_verify_plls(struct intel_display *display);
#endif /* __INTEL_LT_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h b/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h
index 37e46fb9abde..ff6d7829dbb9 100644
--- a/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h
@@ -6,12 +6,12 @@
#ifndef __INTEL_LT_PHY_REGS_H__
#define __INTEL_LT_PHY_REGS_H__
-#define XE3PLPD_MSGBUS_TIMEOUT_FAST_US 500
+#define XE3PLPD_MSGBUS_TIMEOUT_FAST_US 500
#define XE3PLPD_MACCLK_TURNON_LATENCY_MS 2
-#define XE3PLPD_MACCLK_TURNOFF_LATENCY_US 1
+#define XE3PLPD_MACCLK_TURNOFF_LATENCY_US 10
#define XE3PLPD_RATE_CALIB_DONE_LATENCY_MS 1
-#define XE3PLPD_RESET_START_LATENCY_US 10
-#define XE3PLPD_PWRDN_TO_RDY_LATENCY_US 4
+#define XE3PLPD_RESET_START_LATENCY_US 10
+#define XE3PLPD_PWRDN_TO_RDY_LATENCY_US 10
#define XE3PLPD_RESET_END_LATENCY_MS 2
/* LT Phy MAC Register */
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index d10cbf69a5f8..4086f16a12bf 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -11,7 +11,6 @@
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_reg.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_bw.h"
@@ -26,6 +25,7 @@
#include "intel_display_power.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_wa.h"
#include "intel_dmc.h"
#include "intel_fifo_underrun.h"
#include "intel_modeset_setup.h"
@@ -914,7 +914,7 @@ static void intel_early_display_was(struct intel_display *display)
* Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
* Also known as Wa_14010480278.
*/
- if (IS_DISPLAY_VER(display, 10, 12))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14010480278))
intel_de_rmw(display, GEN9_CLKGATE_DIS_0, 0, DARBF_GATING_DIS);
/*
diff --git a/drivers/gpu/drm/i915/display/intel_oprom_regs.h b/drivers/gpu/drm/i915/display/intel_oprom_regs.h
new file mode 100644
index 000000000000..e6a6fb51b90c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_oprom_regs.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef _INTEL_OPROM_REGS_H_
+#define _INTEL_OPROM_REGS_H_
+
+#define PRIMARY_SPI_TRIGGER _MMIO(0x102040)
+#define PRIMARY_SPI_ADDRESS _MMIO(0x102080)
+#define PRIMARY_SPI_REGIONID _MMIO(0x102084)
+#define SPI_STATIC_REGIONS _MMIO(0x102090)
+#define OPTIONROM_SPI_REGIONID_MASK REG_GENMASK(7, 0)
+#define OROM_OFFSET _MMIO(0x1020c0)
+#define OROM_OFFSET_MASK REG_GENMASK(20, 16)
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 05c7545c49e5..12a325ceae6f 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -27,24 +27,16 @@
*/
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem.h>
#include <drm/drm_print.h>
-#include "gem/i915_gem_internal.h"
-#include "gem/i915_gem_object_frontbuffer.h"
-#include "gem/i915_gem_pm.h"
-
-#include "gt/intel_gpu_commands.h"
-#include "gt/intel_ring.h"
-
-#include "i915_drv.h"
-#include "i915_reg.h"
#include "intel_color_regs.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_frontbuffer.h"
#include "intel_overlay.h"
-#include "intel_pci_config.h"
+#include "intel_parent.h"
#include "intel_pfit_regs.h"
/* Limits for overlay size. According to intel doc, the real limits are:
@@ -121,9 +113,6 @@
#define RGB8I_TO_COLORKEY(c) \
((((c) & 0xff) << 16) | (((c) & 0xff) << 8) | (((c) & 0xff) << 0))
-/* overlay flip addr flag */
-#define OFC_UPDATE 0x1
-
/* polyphase filter coefficients */
#define N_HORIZ_Y_TAPS 5
#define N_VERT_Y_TAPS 3
@@ -189,312 +178,16 @@ struct overlay_registers {
struct intel_overlay {
struct intel_display *display;
- struct intel_context *context;
struct intel_crtc *crtc;
- struct i915_vma *vma;
- struct i915_vma *old_vma;
- struct intel_frontbuffer *frontbuffer;
- bool active;
bool pfit_active;
u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
u32 color_key:24;
u32 color_key_enabled:1;
u32 brightness, contrast, saturation;
u32 old_xscale, old_yscale;
- /* register access */
- struct drm_i915_gem_object *reg_bo;
struct overlay_registers __iomem *regs;
- u32 flip_addr;
- /* flip handling */
- struct i915_active last_flip;
- void (*flip_complete)(struct intel_overlay *ovl);
};
-static void i830_overlay_clock_gating(struct intel_display *display,
- bool enable)
-{
- struct pci_dev *pdev = to_pci_dev(display->drm->dev);
- u8 val;
-
- /* WA_OVERLAY_CLKGATE:alm */
- if (enable)
- intel_de_write(display, DSPCLK_GATE_D, 0);
- else
- intel_de_write(display, DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
-
- /* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
- pci_bus_read_config_byte(pdev->bus,
- PCI_DEVFN(0, 0), I830_CLOCK_GATE, &val);
- if (enable)
- val &= ~I830_L2_CACHE_CLOCK_GATE_DISABLE;
- else
- val |= I830_L2_CACHE_CLOCK_GATE_DISABLE;
- pci_bus_write_config_byte(pdev->bus,
- PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
-}
-
-static struct i915_request *
-alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
-{
- struct i915_request *rq;
- int err;
-
- overlay->flip_complete = fn;
-
- rq = i915_request_create(overlay->context);
- if (IS_ERR(rq))
- return rq;
-
- err = i915_active_add_request(&overlay->last_flip, rq);
- if (err) {
- i915_request_add(rq);
- return ERR_PTR(err);
- }
-
- return rq;
-}
-
-/* overlay needs to be disable in OCMD reg */
-static int intel_overlay_on(struct intel_overlay *overlay)
-{
- struct intel_display *display = overlay->display;
- struct i915_request *rq;
- u32 *cs;
-
- drm_WARN_ON(display->drm, overlay->active);
-
- rq = alloc_request(overlay, NULL);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- return PTR_ERR(cs);
- }
-
- overlay->active = true;
-
- if (display->platform.i830)
- i830_overlay_clock_gating(display, false);
-
- *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
- *cs++ = overlay->flip_addr | OFC_UPDATE;
- *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- i915_request_add(rq);
-
- return i915_active_wait(&overlay->last_flip);
-}
-
-static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
- struct i915_vma *vma)
-{
- struct intel_display *display = overlay->display;
- enum pipe pipe = overlay->crtc->pipe;
- struct intel_frontbuffer *frontbuffer = NULL;
-
- drm_WARN_ON(display->drm, overlay->old_vma);
-
- if (vma)
- frontbuffer = intel_frontbuffer_get(intel_bo_to_drm_bo(vma->obj));
-
- intel_frontbuffer_track(overlay->frontbuffer, frontbuffer,
- INTEL_FRONTBUFFER_OVERLAY(pipe));
-
- if (overlay->frontbuffer)
- intel_frontbuffer_put(overlay->frontbuffer);
- overlay->frontbuffer = frontbuffer;
-
- overlay->old_vma = overlay->vma;
- if (vma)
- overlay->vma = i915_vma_get(vma);
- else
- overlay->vma = NULL;
-}
-
-/* overlay needs to be enabled in OCMD reg */
-static int intel_overlay_continue(struct intel_overlay *overlay,
- struct i915_vma *vma,
- bool load_polyphase_filter)
-{
- struct intel_display *display = overlay->display;
- struct i915_request *rq;
- u32 flip_addr = overlay->flip_addr;
- u32 tmp, *cs;
-
- drm_WARN_ON(display->drm, !overlay->active);
-
- if (load_polyphase_filter)
- flip_addr |= OFC_UPDATE;
-
- /* check for underruns */
- tmp = intel_de_read(display, DOVSTA);
- if (tmp & (1 << 17))
- drm_dbg(display->drm, "overlay underrun, DOVSTA: %x\n", tmp);
-
- rq = alloc_request(overlay, NULL);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- return PTR_ERR(cs);
- }
-
- *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
- *cs++ = flip_addr;
- intel_ring_advance(rq, cs);
-
- intel_overlay_flip_prepare(overlay, vma);
- i915_request_add(rq);
-
- return 0;
-}
-
-static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
-{
- struct intel_display *display = overlay->display;
- struct i915_vma *vma;
-
- vma = fetch_and_zero(&overlay->old_vma);
- if (drm_WARN_ON(display->drm, !vma))
- return;
-
- intel_frontbuffer_flip(display, INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
-
- i915_vma_unpin(vma);
- i915_vma_put(vma);
-}
-
-static void
-intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
-{
- intel_overlay_release_old_vma(overlay);
-}
-
-static void intel_overlay_off_tail(struct intel_overlay *overlay)
-{
- struct intel_display *display = overlay->display;
-
- intel_overlay_release_old_vma(overlay);
-
- overlay->crtc->overlay = NULL;
- overlay->crtc = NULL;
- overlay->active = false;
-
- if (display->platform.i830)
- i830_overlay_clock_gating(display, true);
-}
-
-static void intel_overlay_last_flip_retire(struct i915_active *active)
-{
- struct intel_overlay *overlay =
- container_of(active, typeof(*overlay), last_flip);
-
- if (overlay->flip_complete)
- overlay->flip_complete(overlay);
-}
-
-/* overlay needs to be disabled in OCMD reg */
-static int intel_overlay_off(struct intel_overlay *overlay)
-{
- struct intel_display *display = overlay->display;
- struct i915_request *rq;
- u32 *cs, flip_addr = overlay->flip_addr;
-
- drm_WARN_ON(display->drm, !overlay->active);
-
- /*
- * According to intel docs the overlay hw may hang (when switching
- * off) without loading the filter coeffs. It is however unclear whether
- * this applies to the disabling of the overlay or to the switching off
- * of the hw. Do it in both cases.
- */
- flip_addr |= OFC_UPDATE;
-
- rq = alloc_request(overlay, intel_overlay_off_tail);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- return PTR_ERR(cs);
- }
-
- /* wait for overlay to go idle */
- *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
- *cs++ = flip_addr;
- *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
-
- /* turn overlay off */
- *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_OFF;
- *cs++ = flip_addr;
- *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
-
- intel_ring_advance(rq, cs);
-
- intel_overlay_flip_prepare(overlay, NULL);
- i915_request_add(rq);
-
- return i915_active_wait(&overlay->last_flip);
-}
-
-/*
- * Recover from an interruption due to a signal.
- * We have to be careful not to repeat work forever an make forward progress.
- */
-static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
-{
- return i915_active_wait(&overlay->last_flip);
-}
-
-/*
- * Wait for pending overlay flip and release old frame.
- * Needs to be called before the overlay register are changed
- * via intel_overlay_(un)map_regs.
- */
-static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
-{
- struct intel_display *display = overlay->display;
- struct i915_request *rq;
- u32 *cs;
-
- /*
- * Only wait if there is actually an old frame to release to
- * guarantee forward progress.
- */
- if (!overlay->old_vma)
- return 0;
-
- if (!(intel_de_read(display, GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
- intel_overlay_release_old_vid_tail(overlay);
- return 0;
- }
-
- rq = alloc_request(overlay, intel_overlay_release_old_vid_tail);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- return PTR_ERR(cs);
- }
-
- *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- i915_request_add(rq);
-
- return i915_active_wait(&overlay->last_flip);
-}
-
void intel_overlay_reset(struct intel_display *display)
{
struct intel_overlay *overlay = display->overlay;
@@ -505,7 +198,8 @@ void intel_overlay_reset(struct intel_display *display)
overlay->old_xscale = 0;
overlay->old_yscale = 0;
overlay->crtc = NULL;
- overlay->active = false;
+
+ intel_parent_overlay_reset(display);
}
static int packed_depth_bytes(u32 format)
@@ -767,34 +461,8 @@ static u32 overlay_cmd_reg(struct drm_intel_overlay_put_image *params)
return cmd;
}
-static struct i915_vma *intel_overlay_pin_fb(struct drm_i915_gem_object *new_bo)
-{
- struct i915_gem_ww_ctx ww;
- struct i915_vma *vma;
- int ret;
-
- i915_gem_ww_ctx_init(&ww, true);
-retry:
- ret = i915_gem_object_lock(new_bo, &ww);
- if (!ret) {
- vma = i915_gem_object_pin_to_display_plane(new_bo, &ww, 0, 0,
- NULL, PIN_MAPPABLE);
- ret = PTR_ERR_OR_ZERO(vma);
- }
- if (ret == -EDEADLK) {
- ret = i915_gem_ww_ctx_backoff(&ww);
- if (!ret)
- goto retry;
- }
- i915_gem_ww_ctx_fini(&ww);
- if (ret)
- return ERR_PTR(ret);
-
- return vma;
-}
-
static int intel_overlay_do_put_image(struct intel_overlay *overlay,
- struct drm_i915_gem_object *new_bo,
+ struct drm_gem_object *obj,
struct drm_intel_overlay_put_image *params)
{
struct intel_display *display = overlay->display;
@@ -804,23 +472,24 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
bool scale_changed = false;
struct i915_vma *vma;
int ret, tmp_width;
+ u32 tmp, offset;
drm_WARN_ON(display->drm,
!drm_modeset_is_locked(&display->drm->mode_config.connection_mutex));
- ret = intel_overlay_release_old_vid(overlay);
+ ret = intel_parent_overlay_release_old_vid(display);
if (ret != 0)
return ret;
atomic_inc(&display->restore.pending_fb_pin);
- vma = intel_overlay_pin_fb(new_bo);
+ vma = intel_parent_overlay_pin_fb(display, obj, &offset);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out_pin_section;
}
- if (!overlay->active) {
+ if (!intel_parent_overlay_is_active(display)) {
const struct intel_crtc_state *crtc_state =
overlay->crtc->config;
u32 oconfig = 0;
@@ -836,7 +505,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
OCONF_PIPE_A : OCONF_PIPE_B;
iowrite32(oconfig, &regs->OCONFIG);
- ret = intel_overlay_on(overlay);
+ ret = intel_parent_overlay_on(display, INTEL_FRONTBUFFER_OVERLAY(pipe));
if (ret != 0)
goto out_unpin;
}
@@ -853,7 +522,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
swidth = params->src_width;
swidthsw = calc_swidthsw(display, params->offset_Y, tmp_width);
sheight = params->src_height;
- iowrite32(i915_ggtt_offset(vma) + params->offset_Y, &regs->OBUF_0Y);
+ iowrite32(offset + params->offset_Y, &regs->OBUF_0Y);
ostride = params->stride_Y;
if (params->flags & I915_OVERLAY_YUV_PLANAR) {
@@ -870,9 +539,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
params->src_width / uv_hscale);
swidthsw |= max(tmp_U, tmp_V) << 16;
- iowrite32(i915_ggtt_offset(vma) + params->offset_U,
+ iowrite32(offset + params->offset_U,
&regs->OBUF_0U);
- iowrite32(i915_ggtt_offset(vma) + params->offset_V,
+ iowrite32(offset + params->offset_V,
&regs->OBUF_0V);
ostride |= params->stride_UV << 16;
@@ -889,14 +558,19 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
iowrite32(overlay_cmd_reg(params), &regs->OCMD);
- ret = intel_overlay_continue(overlay, vma, scale_changed);
+ /* check for underruns */
+ tmp = intel_de_read(display, DOVSTA);
+ if (tmp & (1 << 17))
+ drm_dbg(display->drm, "overlay underrun, DOVSTA: %x\n", tmp);
+
+ ret = intel_parent_overlay_continue(display, vma, scale_changed);
if (ret)
goto out_unpin;
return 0;
out_unpin:
- i915_vma_unpin(vma);
+ intel_parent_overlay_unpin_fb(display, vma);
out_pin_section:
atomic_dec(&display->restore.pending_fb_pin);
@@ -911,20 +585,23 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
drm_WARN_ON(display->drm,
!drm_modeset_is_locked(&display->drm->mode_config.connection_mutex));
- ret = intel_overlay_recover_from_interrupt(overlay);
+ ret = intel_parent_overlay_recover_from_interrupt(display);
if (ret != 0)
return ret;
- if (!overlay->active)
+ if (!intel_parent_overlay_is_active(display))
return 0;
- ret = intel_overlay_release_old_vid(overlay);
+ ret = intel_parent_overlay_release_old_vid(display);
if (ret != 0)
return ret;
iowrite32(0, &overlay->regs->OCMD);
- return intel_overlay_off(overlay);
+ overlay->crtc->overlay = NULL;
+ overlay->crtc = NULL;
+
+ return intel_parent_overlay_off(display);
}
static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
@@ -1006,7 +683,7 @@ static int check_overlay_scaling(struct drm_intel_overlay_put_image *rec)
static int check_overlay_src(struct intel_display *display,
struct drm_intel_overlay_put_image *rec,
- struct drm_i915_gem_object *new_bo)
+ struct drm_gem_object *obj)
{
int uv_hscale = uv_hsubsampling(rec->flags);
int uv_vscale = uv_vsubsampling(rec->flags);
@@ -1091,7 +768,7 @@ static int check_overlay_src(struct intel_display *display,
return -EINVAL;
tmp = rec->stride_Y*rec->src_height;
- if (rec->offset_Y + tmp > new_bo->base.size)
+ if (rec->offset_Y + tmp > obj->size)
return -EINVAL;
break;
@@ -1102,12 +779,12 @@ static int check_overlay_src(struct intel_display *display,
return -EINVAL;
tmp = rec->stride_Y * rec->src_height;
- if (rec->offset_Y + tmp > new_bo->base.size)
+ if (rec->offset_Y + tmp > obj->size)
return -EINVAL;
tmp = rec->stride_UV * (rec->src_height / uv_vscale);
- if (rec->offset_U + tmp > new_bo->base.size ||
- rec->offset_V + tmp > new_bo->base.size)
+ if (rec->offset_U + tmp > obj->size ||
+ rec->offset_V + tmp > obj->size)
return -EINVAL;
break;
}
@@ -1122,8 +799,8 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
struct drm_intel_overlay_put_image *params = data;
struct intel_overlay *overlay;
struct drm_crtc *drmmode_crtc;
+ struct drm_gem_object *obj;
struct intel_crtc *crtc;
- struct drm_i915_gem_object *new_bo;
int ret;
overlay = display->overlay;
@@ -1145,20 +822,13 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
crtc = to_intel_crtc(drmmode_crtc);
- new_bo = i915_gem_object_lookup(file_priv, params->bo_handle);
- if (!new_bo)
- return -ENOENT;
+ obj = intel_parent_overlay_obj_lookup(display, file_priv, params->bo_handle);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
drm_modeset_lock_all(dev);
- if (i915_gem_object_is_tiled(new_bo)) {
- drm_dbg_kms(display->drm,
- "buffer used for overlay image can not be tiled\n");
- ret = -EINVAL;
- goto out_unlock;
- }
-
- ret = intel_overlay_recover_from_interrupt(overlay);
+ ret = intel_parent_overlay_recover_from_interrupt(display);
if (ret != 0)
goto out_unlock;
@@ -1201,7 +871,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = check_overlay_src(display, params, new_bo);
+ ret = check_overlay_src(display, params, obj);
if (ret != 0)
goto out_unlock;
@@ -1210,18 +880,18 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
if (ret != 0)
goto out_unlock;
- ret = intel_overlay_do_put_image(overlay, new_bo, params);
+ ret = intel_overlay_do_put_image(overlay, obj, params);
if (ret != 0)
goto out_unlock;
drm_modeset_unlock_all(dev);
- i915_gem_object_put(new_bo);
+ drm_gem_object_put(obj);
return 0;
out_unlock:
drm_modeset_unlock_all(dev);
- i915_gem_object_put(new_bo);
+ drm_gem_object_put(obj);
return ret;
}
@@ -1328,7 +998,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
if (DISPLAY_VER(display) == 2)
goto out_unlock;
- if (overlay->active) {
+ if (intel_parent_overlay_is_active(display)) {
ret = -EBUSY;
goto out_unlock;
}
@@ -1354,80 +1024,31 @@ out_unlock:
return ret;
}
-static int get_registers(struct intel_overlay *overlay, bool use_phys)
-{
- struct intel_display *display = overlay->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
- struct drm_i915_gem_object *obj = ERR_PTR(-ENODEV);
- struct i915_vma *vma;
- int err;
-
- if (!display->platform.meteorlake) /* Wa_22018444074 */
- obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
- if (IS_ERR(obj))
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_put_bo;
- }
-
- if (use_phys)
- overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
- else
- overlay->flip_addr = i915_ggtt_offset(vma);
- overlay->regs = i915_vma_pin_iomap(vma);
- i915_vma_unpin(vma);
-
- if (IS_ERR(overlay->regs)) {
- err = PTR_ERR(overlay->regs);
- goto err_put_bo;
- }
-
- overlay->reg_bo = obj;
- return 0;
-
-err_put_bo:
- i915_gem_object_put(obj);
- return err;
-}
-
void intel_overlay_setup(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_overlay *overlay;
- struct intel_engine_cs *engine;
- int ret;
+ void __iomem *regs;
if (!HAS_OVERLAY(display))
return;
- engine = to_gt(dev_priv)->engine[RCS0];
- if (!engine || !engine->kernel_context)
- return;
-
overlay = kzalloc_obj(*overlay);
if (!overlay)
return;
+ regs = intel_parent_overlay_setup(display,
+ OVERLAY_NEEDS_PHYSICAL(display));
+ if (IS_ERR(regs))
+ goto out_free;
+
overlay->display = display;
- overlay->context = engine->kernel_context;
+ overlay->regs = regs;
overlay->color_key = 0x0101fe;
overlay->color_key_enabled = true;
overlay->brightness = -19;
overlay->contrast = 75;
overlay->saturation = 146;
- i915_active_init(&overlay->last_flip,
- NULL, intel_overlay_last_flip_retire, 0);
-
- ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(display));
- if (ret)
- goto out_free;
-
memset_io(overlay->regs, 0, sizeof(struct overlay_registers));
update_polyphase_filter(overlay->regs);
update_reg_attrs(overlay, overlay->regs);
@@ -1447,110 +1068,11 @@ bool intel_overlay_available(struct intel_display *display)
void intel_overlay_cleanup(struct intel_display *display)
{
- struct intel_overlay *overlay;
-
- overlay = fetch_and_zero(&display->overlay);
- if (!overlay)
+ if (!display->overlay)
return;
- /*
- * The bo's should be free'd by the generic code already.
- * Furthermore modesetting teardown happens beforehand so the
- * hardware should be off already.
- */
- drm_WARN_ON(display->drm, overlay->active);
-
- i915_gem_object_put(overlay->reg_bo);
- i915_active_fini(&overlay->last_flip);
+ intel_parent_overlay_cleanup(display);
- kfree(overlay);
+ kfree(display->overlay);
+ display->overlay = NULL;
}
-
-#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-
-struct intel_overlay_snapshot {
- struct overlay_registers regs;
- unsigned long base;
- u32 dovsta;
- u32 isr;
-};
-
-struct intel_overlay_snapshot *
-intel_overlay_snapshot_capture(struct intel_display *display)
-{
- struct intel_overlay *overlay = display->overlay;
- struct intel_overlay_snapshot *error;
-
- if (!overlay || !overlay->active)
- return NULL;
-
- error = kmalloc_obj(*error, GFP_ATOMIC);
- if (error == NULL)
- return NULL;
-
- error->dovsta = intel_de_read(display, DOVSTA);
- error->isr = intel_de_read(display, GEN2_ISR);
- error->base = overlay->flip_addr;
-
- memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
-
- return error;
-}
-
-void
-intel_overlay_snapshot_print(const struct intel_overlay_snapshot *error,
- struct drm_printer *p)
-{
- if (!error)
- return;
-
- drm_printf(p, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
- error->dovsta, error->isr);
- drm_printf(p, " Register file at 0x%08lx:\n", error->base);
-
-#define P(x) drm_printf(p, " " #x ": 0x%08x\n", error->regs.x)
- P(OBUF_0Y);
- P(OBUF_1Y);
- P(OBUF_0U);
- P(OBUF_0V);
- P(OBUF_1U);
- P(OBUF_1V);
- P(OSTRIDE);
- P(YRGB_VPH);
- P(UV_VPH);
- P(HORZ_PH);
- P(INIT_PHS);
- P(DWINPOS);
- P(DWINSZ);
- P(SWIDTH);
- P(SWIDTHSW);
- P(SHEIGHT);
- P(YRGBSCALE);
- P(UVSCALE);
- P(OCLRC0);
- P(OCLRC1);
- P(DCLRKV);
- P(DCLRKM);
- P(SCLRKVH);
- P(SCLRKVL);
- P(SCLRKEN);
- P(OCONFIG);
- P(OCMD);
- P(OSTART_0Y);
- P(OSTART_1Y);
- P(OSTART_0U);
- P(OSTART_0V);
- P(OSTART_1U);
- P(OSTART_1V);
- P(OTILEOFF_0Y);
- P(OTILEOFF_1Y);
- P(OTILEOFF_0U);
- P(OTILEOFF_0V);
- P(OTILEOFF_1U);
- P(OTILEOFF_1V);
- P(FASTHSCALE);
- P(UVSCALEV);
-#undef P
-}
-
-#endif
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.h b/drivers/gpu/drm/i915/display/intel_overlay.h
index d259e4c74b03..a4291d6dd528 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.h
+++ b/drivers/gpu/drm/i915/display/intel_overlay.h
@@ -13,9 +13,7 @@ struct drm_file;
struct drm_printer;
struct intel_display;
struct intel_overlay;
-struct intel_overlay_snapshot;
-#ifdef I915
void intel_overlay_setup(struct intel_display *display);
bool intel_overlay_available(struct intel_display *display);
void intel_overlay_cleanup(struct intel_display *display);
@@ -25,51 +23,5 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_overlay_reset(struct intel_display *display);
-#else
-static inline void intel_overlay_setup(struct intel_display *display)
-{
-}
-static inline bool intel_overlay_available(struct intel_display *display)
-{
- return false;
-}
-static inline void intel_overlay_cleanup(struct intel_display *display)
-{
-}
-static inline int intel_overlay_switch_off(struct intel_overlay *overlay)
-{
- return 0;
-}
-static inline int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return 0;
-}
-static inline int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return 0;
-}
-static inline void intel_overlay_reset(struct intel_display *display)
-{
-}
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) && defined(I915)
-struct intel_overlay_snapshot *
-intel_overlay_snapshot_capture(struct intel_display *display);
-void intel_overlay_snapshot_print(const struct intel_overlay_snapshot *error,
- struct drm_printer *p);
-#else
-static inline struct intel_overlay_snapshot *
-intel_overlay_snapshot_capture(struct intel_display *display)
-{
- return NULL;
-}
-static inline void intel_overlay_snapshot_print(const struct intel_overlay_snapshot *error,
- struct drm_printer *p)
-{
-}
-#endif
#endif /* __INTEL_OVERLAY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_parent.c b/drivers/gpu/drm/i915/display/intel_parent.c
index 72ae553f79a4..2e3bad2b3e6b 100644
--- a/drivers/gpu/drm/i915/display/intel_parent.c
+++ b/drivers/gpu/drm/i915/display/intel_parent.c
@@ -23,6 +23,55 @@
#include "intel_display_core.h"
#include "intel_parent.h"
+/* dpt */
+struct intel_dpt *intel_parent_dpt_create(struct intel_display *display,
+ struct drm_gem_object *obj, size_t size)
+{
+ if (display->parent->dpt)
+ return display->parent->dpt->create(obj, size);
+
+ return NULL;
+}
+
+void intel_parent_dpt_destroy(struct intel_display *display, struct intel_dpt *dpt)
+{
+ if (display->parent->dpt)
+ display->parent->dpt->destroy(dpt);
+}
+
+void intel_parent_dpt_suspend(struct intel_display *display, struct intel_dpt *dpt)
+{
+ if (display->parent->dpt)
+ display->parent->dpt->suspend(dpt);
+}
+
+void intel_parent_dpt_resume(struct intel_display *display, struct intel_dpt *dpt)
+{
+ if (display->parent->dpt)
+ display->parent->dpt->resume(dpt);
+}
+
+/* frontbuffer */
+struct intel_frontbuffer *intel_parent_frontbuffer_get(struct intel_display *display, struct drm_gem_object *obj)
+{
+ return display->parent->frontbuffer->get(obj);
+}
+
+void intel_parent_frontbuffer_ref(struct intel_display *display, struct intel_frontbuffer *front)
+{
+ display->parent->frontbuffer->ref(front);
+}
+
+void intel_parent_frontbuffer_put(struct intel_display *display, struct intel_frontbuffer *front)
+{
+ display->parent->frontbuffer->put(front);
+}
+
+void intel_parent_frontbuffer_flush_for_display(struct intel_display *display, struct intel_frontbuffer *front)
+{
+ display->parent->frontbuffer->flush_for_display(front);
+}
+
/* hdcp */
ssize_t intel_parent_hdcp_gsc_msg_send(struct intel_display *display,
struct intel_hdcp_gsc_context *gsc_context,
@@ -59,6 +108,82 @@ void intel_parent_irq_synchronize(struct intel_display *display)
display->parent->irq->synchronize(display->drm);
}
+/* overlay */
+bool intel_parent_overlay_is_active(struct intel_display *display)
+{
+ return display->parent->overlay->is_active(display->drm);
+}
+
+int intel_parent_overlay_on(struct intel_display *display,
+ u32 frontbuffer_bits)
+{
+ return display->parent->overlay->overlay_on(display->drm,
+ frontbuffer_bits);
+}
+
+int intel_parent_overlay_continue(struct intel_display *display,
+ struct i915_vma *vma,
+ bool load_polyphase_filter)
+{
+ return display->parent->overlay->overlay_continue(display->drm, vma,
+ load_polyphase_filter);
+}
+
+int intel_parent_overlay_off(struct intel_display *display)
+{
+ return display->parent->overlay->overlay_off(display->drm);
+}
+
+int intel_parent_overlay_recover_from_interrupt(struct intel_display *display)
+{
+ return display->parent->overlay->recover_from_interrupt(display->drm);
+}
+
+int intel_parent_overlay_release_old_vid(struct intel_display *display)
+{
+ return display->parent->overlay->release_old_vid(display->drm);
+}
+
+void intel_parent_overlay_reset(struct intel_display *display)
+{
+ display->parent->overlay->reset(display->drm);
+}
+
+struct i915_vma *intel_parent_overlay_pin_fb(struct intel_display *display,
+ struct drm_gem_object *obj,
+ u32 *offset)
+{
+ return display->parent->overlay->pin_fb(display->drm, obj, offset);
+}
+
+void intel_parent_overlay_unpin_fb(struct intel_display *display,
+ struct i915_vma *vma)
+{
+ return display->parent->overlay->unpin_fb(display->drm, vma);
+}
+
+struct drm_gem_object *intel_parent_overlay_obj_lookup(struct intel_display *display,
+ struct drm_file *filp,
+ u32 handle)
+{
+ return display->parent->overlay->obj_lookup(display->drm,
+ filp, handle);
+}
+
+void __iomem *intel_parent_overlay_setup(struct intel_display *display,
+ bool needs_physical)
+{
+ if (drm_WARN_ON_ONCE(display->drm, !display->parent->overlay))
+ return ERR_PTR(-ENODEV);
+
+ return display->parent->overlay->setup(display->drm, needs_physical);
+}
+
+void intel_parent_overlay_cleanup(struct intel_display *display)
+{
+ display->parent->overlay->cleanup(display->drm);
+}
+
/* panic */
struct intel_panic *intel_parent_panic_alloc(struct intel_display *display)
{
@@ -92,6 +217,28 @@ void intel_parent_pc8_unblock(struct intel_display *display)
display->parent->pc8->unblock(display->drm);
}
+/* pcode */
+int intel_parent_pcode_read(struct intel_display *display, u32 mbox, u32 *val, u32 *val1)
+{
+ return display->parent->pcode->read(display->drm, mbox, val, val1);
+}
+
+int intel_parent_pcode_write_timeout(struct intel_display *display, u32 mbox, u32 val, int timeout_ms)
+{
+ return display->parent->pcode->write(display->drm, mbox, val, timeout_ms);
+}
+
+int intel_parent_pcode_write(struct intel_display *display, u32 mbox, u32 val)
+{
+ return intel_parent_pcode_write_timeout(display, mbox, val, 1);
+}
+
+int intel_parent_pcode_request(struct intel_display *display, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+ return display->parent->pcode->request(display->drm, mbox, request, reply_mask, reply, timeout_base_ms);
+}
+
/* rps */
bool intel_parent_rps_available(struct intel_display *display)
{
@@ -191,6 +338,15 @@ void intel_parent_stolen_node_free(struct intel_display *display, const struct i
display->parent->stolen->node_free(node);
}
+/* vma */
+int intel_parent_vma_fence_id(struct intel_display *display, const struct i915_vma *vma)
+{
+ if (!display->parent->vma)
+ return -1;
+
+ return display->parent->vma->fence_id(vma);
+}
+
/* generic */
void intel_parent_fence_priority_display(struct intel_display *display, struct dma_fence *fence)
{
diff --git a/drivers/gpu/drm/i915/display/intel_parent.h b/drivers/gpu/drm/i915/display/intel_parent.h
index 47cdc14f9aa2..2013e5ed5aa9 100644
--- a/drivers/gpu/drm/i915/display/intel_parent.h
+++ b/drivers/gpu/drm/i915/display/intel_parent.h
@@ -7,12 +7,30 @@
#include <linux/types.h>
struct dma_fence;
+struct drm_file;
+struct drm_gem_object;
struct drm_scanout_buffer;
+struct i915_vma;
struct intel_display;
+struct intel_dpt;
+struct intel_frontbuffer;
struct intel_hdcp_gsc_context;
struct intel_panic;
struct intel_stolen_node;
+/* dpt */
+struct intel_dpt *intel_parent_dpt_create(struct intel_display *display,
+ struct drm_gem_object *obj, size_t size);
+void intel_parent_dpt_destroy(struct intel_display *display, struct intel_dpt *dpt);
+void intel_parent_dpt_suspend(struct intel_display *display, struct intel_dpt *dpt);
+void intel_parent_dpt_resume(struct intel_display *display, struct intel_dpt *dpt);
+
+/* frontbuffer */
+struct intel_frontbuffer *intel_parent_frontbuffer_get(struct intel_display *display, struct drm_gem_object *obj);
+void intel_parent_frontbuffer_ref(struct intel_display *display, struct intel_frontbuffer *front);
+void intel_parent_frontbuffer_put(struct intel_display *display, struct intel_frontbuffer *front);
+void intel_parent_frontbuffer_flush_for_display(struct intel_display *display, struct intel_frontbuffer *front);
+
/* hdcp */
ssize_t intel_parent_hdcp_gsc_msg_send(struct intel_display *display,
struct intel_hdcp_gsc_context *gsc_context,
@@ -27,6 +45,29 @@ void intel_parent_hdcp_gsc_context_free(struct intel_display *display,
bool intel_parent_irq_enabled(struct intel_display *display);
void intel_parent_irq_synchronize(struct intel_display *display);
+/* overlay */
+bool intel_parent_overlay_is_active(struct intel_display *display);
+int intel_parent_overlay_on(struct intel_display *display,
+ u32 frontbuffer_bits);
+int intel_parent_overlay_continue(struct intel_display *display,
+ struct i915_vma *vma,
+ bool load_polyphase_filter);
+int intel_parent_overlay_off(struct intel_display *display);
+int intel_parent_overlay_recover_from_interrupt(struct intel_display *display);
+int intel_parent_overlay_release_old_vid(struct intel_display *display);
+void intel_parent_overlay_reset(struct intel_display *display);
+struct i915_vma *intel_parent_overlay_pin_fb(struct intel_display *display,
+ struct drm_gem_object *obj,
+ u32 *offset);
+void intel_parent_overlay_unpin_fb(struct intel_display *display,
+ struct i915_vma *vma);
+struct drm_gem_object *intel_parent_overlay_obj_lookup(struct intel_display *display,
+ struct drm_file *filp,
+ u32 handle);
+void __iomem *intel_parent_overlay_setup(struct intel_display *display,
+ bool needs_physical);
+void intel_parent_overlay_cleanup(struct intel_display *display);
+
/* panic */
struct intel_panic *intel_parent_panic_alloc(struct intel_display *display);
int intel_parent_panic_setup(struct intel_display *display, struct intel_panic *panic, struct drm_scanout_buffer *sb);
@@ -36,6 +77,13 @@ void intel_parent_panic_finish(struct intel_display *display, struct intel_panic
void intel_parent_pc8_block(struct intel_display *display);
void intel_parent_pc8_unblock(struct intel_display *display);
+/* pcode */
+int intel_parent_pcode_read(struct intel_display *display, u32 mbox, u32 *val, u32 *val1);
+int intel_parent_pcode_write_timeout(struct intel_display *display, u32 mbox, u32 val, int timeout_ms);
+int intel_parent_pcode_write(struct intel_display *display, u32 mbox, u32 val);
+int intel_parent_pcode_request(struct intel_display *display, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms);
+
/* rps */
bool intel_parent_rps_available(struct intel_display *display);
void intel_parent_rps_boost_if_not_started(struct intel_display *display, struct dma_fence *fence);
@@ -61,6 +109,9 @@ u64 intel_parent_stolen_node_size(struct intel_display *display, const struct in
struct intel_stolen_node *intel_parent_stolen_node_alloc(struct intel_display *display);
void intel_parent_stolen_node_free(struct intel_display *display, const struct intel_stolen_node *node);
+/* vma */
+int intel_parent_vma_fence_id(struct intel_display *display, const struct i915_vma *vma);
+
/* generic */
bool intel_parent_has_auxccs(struct intel_display *display);
bool intel_parent_has_fenced_regions(struct intel_display *display);
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index 16619f7be5f8..69c7952a1413 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -6,7 +6,6 @@
#include <drm/drm_print.h>
#include "g4x_dp.h"
-#include "i915_reg.h"
#include "intel_crt.h"
#include "intel_crt_regs.h"
#include "intel_de.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 9a89bb6dcf65..5f88663ef5e8 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -5,7 +5,6 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index f3db55710010..244806a26da3 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -15,6 +15,7 @@
#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_display_utils.h"
+#include "intel_display_wa.h"
#include "intel_pmdemand.h"
#include "intel_step.h"
#include "skl_watermark.h"
@@ -129,9 +130,10 @@ int intel_pmdemand_init(struct intel_display *display)
&pmdemand_state->base,
&intel_pmdemand_funcs);
- if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_C0))
- /* Wa_14016740474 */
- intel_de_rmw(display, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE);
+ /* Wa_14016740474 */
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14016740474))
+ intel_de_rmw(display, XELPD_CHICKEN_DCPR_3, 0,
+ DMD_RSP_TIMEOUT_DISABLE);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index b217ec7aa758..2d799af73bb7 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -9,7 +9,6 @@
#include <drm/drm_print.h>
#include "g4x_dp.h"
-#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_jiffies.h"
#include "intel_display_power_well.h"
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 4ce1173a2e91..5041a5a138d1 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -29,7 +29,6 @@
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_reg.h"
#include "intel_alpm.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
@@ -41,6 +40,7 @@
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
+#include "intel_display_wa.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
@@ -1083,7 +1083,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
}
/* Wa_22012278275:adl-p */
- if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_22012278275)) {
static const u8 map[] = {
2, /* 5 lines */
1, /* 6 lines */
@@ -1264,7 +1264,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
return;
/* Wa_16011303918:adl-p */
- if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_16011303918))
return;
/*
@@ -1546,8 +1546,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
}
/* Wa_16011181250 */
- if (display->platform.rocketlake || display->platform.alderlake_s ||
- display->platform.dg2) {
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_16011181250)) {
drm_dbg_kms(display->drm,
"PSR2 is defeatured for this platform\n");
return false;
@@ -1829,8 +1828,7 @@ void intel_psr_set_non_psr_pipes(struct intel_dp *intel_dp,
u8 active_pipes = 0;
/* Wa_16025596647 */
- if (DISPLAY_VER(display) != 20 &&
- !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ if (!intel_display_wa(display, INTEL_DISPLAY_WA_16025596647))
return;
/* Not needed by Panel Replay */
@@ -2130,6 +2128,9 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
intel_dmc_block_pkgc(display, intel_dp->psr.pipe, true);
intel_alpm_configure(intel_dp, crtc_state);
+
+ if (HAS_PSR_TRANS_PUSH_FRAME_CHANGE(display))
+ intel_vrr_psr_frame_change_enable(crtc_state);
}
static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
@@ -2521,9 +2522,11 @@ void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
intel_pre_commit_crtc_state(state, crtc);
struct intel_display *display = to_intel_display(crtc);
- if (crtc_state->has_psr)
- intel_de_write_dsb(display, dsb,
- CURSURFLIVE(display, crtc->pipe), 0);
+ if (!crtc_state->has_psr || intel_psr_use_trans_push(crtc_state))
+ return;
+
+ intel_de_write_dsb(display, dsb,
+ CURSURFLIVE(display, crtc->pipe), 0);
}
/**
@@ -2619,6 +2622,12 @@ void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
intel_de_write_dsb(display, dsb, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
crtc_state->pipe_srcsz_early_tpt);
+
+ if (!crtc_state->dsc.compression_enable)
+ return;
+
+ intel_dsc_su_et_parameters_configure(dsb, encoder, crtc_state,
+ drm_rect_height(&crtc_state->psr2_su_area));
}
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
@@ -2689,11 +2698,12 @@ static void clip_area_update(struct drm_rect *overlap_damage_area,
overlap_damage_area->y2 = damage_area->y2;
}
-static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
+static bool intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
u16 y_alignment;
+ bool su_area_changed = false;
/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
if (crtc_state->dsc.compression_enable &&
@@ -2702,10 +2712,18 @@ static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_st
else
y_alignment = crtc_state->su_y_granularity;
- crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
- if (crtc_state->psr2_su_area.y2 % y_alignment)
+ if (crtc_state->psr2_su_area.y1 % y_alignment) {
+ crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
+ su_area_changed = true;
+ }
+
+ if (crtc_state->psr2_su_area.y2 % y_alignment) {
crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
y_alignment) + 1) * y_alignment;
+ su_area_changed = true;
+ }
+
+ return su_area_changed;
}
/*
@@ -2839,7 +2857,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane_state *new_plane_state, *old_plane_state;
struct intel_plane *plane;
- bool full_update = false, cursor_in_su_area = false;
+ bool full_update = false, su_area_changed;
int i, ret;
if (!crtc_state->enable_psr2_sel_fetch)
@@ -2946,15 +2964,32 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (ret)
return ret;
- /*
- * Adjust su area to cover cursor fully as necessary (early
- * transport). This needs to be done after
- * drm_atomic_add_affected_planes to ensure visible cursor is added into
- * affected planes even when cursor is not updated by itself.
- */
- intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
+ do {
+ bool cursor_in_su_area;
- intel_psr2_sel_fetch_pipe_alignment(crtc_state);
+ /*
+ * Adjust su area to cover cursor fully as necessary
+ * (early transport). This needs to be done after
+ * drm_atomic_add_affected_planes to ensure visible
+ * cursor is added into affected planes even when
+ * cursor is not updated by itself.
+ */
+ intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
+
+ su_area_changed = intel_psr2_sel_fetch_pipe_alignment(crtc_state);
+
+ /*
+ * If the cursor was outside the SU area before
+ * alignment, the alignment step (which only expands
+ * SU) may pull the cursor partially inside, so we
+ * must run ET alignment again to fully cover it. But
+ * if the cursor was already fully inside before
+ * alignment, expanding the SU area won't change that,
+ * so no further work is needed.
+ */
+ if (cursor_in_su_area)
+ break;
+ } while (su_area_changed);
/*
* Now that we have the pipe damaged area check if it intersect with
@@ -3014,6 +3049,10 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
}
skip_sel_fetch_set_loop:
+ if (full_update)
+ clip_area_update(&crtc_state->psr2_su_area, &crtc_state->pipe_src,
+ &crtc_state->pipe_src);
+
psr2_man_trk_ctl_calc(crtc_state, full_update);
crtc_state->pipe_srcsz_early_tpt =
psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
@@ -3562,7 +3601,14 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
+ if (DISPLAY_VER(display) >= 20) {
+ /*
+ * We can use PSR exit on LunarLake onwards. Also
+ * using trans push mechanism to trigger Frame Change
+ * event requires using PSR exit.
+ */
+ intel_psr_exit(intel_dp);
+ } else if (intel_dp->psr.psr2_sel_fetch_enabled) {
/* Selective fetch prior LNL */
if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
/* can we turn CFF off? */
@@ -3582,16 +3628,11 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
intel_psr_configure_full_frame_update(intel_dp);
intel_psr_force_update(intel_dp);
- } else if (!intel_dp->psr.psr2_sel_fetch_enabled) {
+ } else {
/*
- * PSR1 on all platforms
- * PSR2 HW tracking
- * Panel Replay Full frame update
+ * On older platforms using PSR exit was seen causing problems
*/
intel_psr_force_update(intel_dp);
- } else {
- /* Selective update LNL onwards */
- intel_psr_exit(intel_dp);
}
if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
@@ -3972,8 +4013,7 @@ static void psr_dc5_dc6_wa_work(struct work_struct *work)
*/
void intel_psr_notify_dc5_dc6(struct intel_display *display)
{
- if (DISPLAY_VER(display) != 20 &&
- !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ if (!intel_display_wa(display, INTEL_DISPLAY_WA_16025596647))
return;
schedule_work(&display->psr_dc5_dc6_wa_work);
@@ -3988,8 +4028,7 @@ void intel_psr_notify_dc5_dc6(struct intel_display *display)
*/
void intel_psr_dc5_dc6_wa_init(struct intel_display *display)
{
- if (DISPLAY_VER(display) != 20 &&
- !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ if (!intel_display_wa(display, INTEL_DISPLAY_WA_16025596647))
return;
INIT_WORK(&display->psr_dc5_dc6_wa_work, psr_dc5_dc6_wa_work);
@@ -4010,8 +4049,7 @@ void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
struct intel_encoder *encoder;
- if (DISPLAY_VER(display) != 20 &&
- !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ if (!intel_display_wa(display, INTEL_DISPLAY_WA_16025596647))
return;
for_each_intel_encoder_with_psr(display->drm, encoder) {
@@ -4567,3 +4605,10 @@ int intel_psr_min_guardband(struct intel_crtc_state *crtc_state)
return psr_min_guardband;
}
+
+bool intel_psr_use_trans_push(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ return HAS_PSR_TRANS_PUSH_FRAME_CHANGE(display) && crtc_state->has_psr;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index b41dc4d44ff2..394b641840b3 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -85,5 +85,6 @@ bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
void intel_psr_compute_config_late(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state);
int intel_psr_min_guardband(struct intel_crtc_state *crtc_state);
+bool intel_psr_use_trans_push(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_rom.c b/drivers/gpu/drm/i915/display/intel_rom.c
index 05b6ea764ebb..d573059fb0d9 100644
--- a/drivers/gpu/drm/i915/display/intel_rom.c
+++ b/drivers/gpu/drm/i915/display/intel_rom.c
@@ -7,10 +7,9 @@
#include <drm/drm_device.h>
-#include "i915_reg.h"
-
#include "intel_rom.h"
#include "intel_uncore.h"
+#include "intel_oprom_regs.h"
struct intel_rom {
/* for PCI ROM */
diff --git a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
index a201edceee10..7fe6b4a18213 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
@@ -332,8 +332,6 @@ void intel_snps_hdmi_pll_compute_c10pll(struct intel_c10pll_state *pll_state, u6
c10_curve_1, c10_curve_2, prescaler_divider,
&pll_params);
- pll_state->clock = pixel_clock;
-
pll_state->tx = 0x10;
pll_state->cmn = 0x1;
pll_state->pll[0] = REG_FIELD_PREP(C10_PLL0_DIV5CLK_EN, pll_params.mpll_div5_en) |
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index bd12148e42f7..a21dd4e3fe4c 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -7,7 +7,6 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
@@ -1845,7 +1844,7 @@ bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
if (!intel_tc_port_link_needs_reset(dig_port))
return false;
- queue_delayed_work(system_unbound_wq,
+ queue_delayed_work(system_dfl_wq,
&to_tc_port(dig_port)->link_reset_work,
msecs_to_jiffies(2000));
@@ -1926,7 +1925,7 @@ void intel_tc_port_unlock(struct intel_digital_port *dig_port)
struct intel_tc_port *tc = to_tc_port(dig_port);
if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED)
- queue_delayed_work(system_unbound_wq, &tc->disconnect_phy_work,
+ queue_delayed_work(system_dfl_wq, &tc->disconnect_phy_work,
msecs_to_jiffies(1000));
mutex_unlock(&tc->lock);
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 57fda5824c9c..0dc13d080e8a 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -1109,6 +1109,7 @@ struct bdb_edp {
u16 edp_dsc_disable; /* 251+ */
u16 t6_delay_support; /* 260+ */
u16 link_idle_time[16]; /* 260+ */
+ u16 pipe_joiner_enable; /* 261+ */
} __packed;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 5493082f30a7..6c09c6d99ffe 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -35,6 +35,58 @@ bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state)
return true;
}
+int intel_dsc_line_slice_count(const struct intel_dsc_slice_config *config)
+{
+ return config->pipes_per_line * config->streams_per_pipe * config->slices_per_stream;
+}
+
+bool intel_dsc_get_slice_config(struct intel_display *display,
+ int pipes_per_line, int slices_per_pipe,
+ struct intel_dsc_slice_config *config)
+{
+ int streams_per_pipe;
+
+ /* TODO: Add support for 8 slices per pipe on TGL+. */
+ switch (slices_per_pipe) {
+ case 3:
+ /*
+ * 3 DSC Slices per pipe need 3 DSC engines, which is supported
+ * only with Ultrajoiner on some platforms.
+ */
+ if (!HAS_DSC_3ENGINES(display) || pipes_per_line != 4)
+ return false;
+
+ streams_per_pipe = 3;
+ break;
+ case 4:
+ /* TODO: Consider using 1 DSC engine stream x 4 slices instead. */
+ case 2:
+ /* TODO: Consider using 1 DSC engine stream x 2 slices instead. */
+ streams_per_pipe = 2;
+ break;
+ case 1:
+ /*
+ * Bigjoiner needs small joiner to be enabled.
+ * So there should be at least 2 dsc slices per pipe,
+ * whenever bigjoiner is enabled.
+ */
+ if (pipes_per_line > 1)
+ return false;
+
+ streams_per_pipe = 1;
+ break;
+ default:
+ MISSING_CASE(slices_per_pipe);
+ return false;
+ }
+
+ config->pipes_per_line = pipes_per_line;
+ config->streams_per_pipe = streams_per_pipe;
+ config->slices_per_stream = slices_per_pipe / streams_per_pipe;
+
+ return true;
+}
+
static bool is_pipe_dsc(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
{
struct intel_display *display = to_intel_display(crtc);
@@ -278,8 +330,9 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
int ret;
vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay;
- vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
- pipe_config->dsc.slice_count);
+ vdsc_cfg->slice_width =
+ DIV_ROUND_UP(vdsc_cfg->pic_width,
+ intel_dsc_line_slice_count(&pipe_config->dsc.slice_config));
err = intel_dsc_slice_dimensions_valid(pipe_config, vdsc_cfg);
@@ -416,7 +469,7 @@ intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
static int intel_dsc_get_vdsc_per_pipe(const struct intel_crtc_state *crtc_state)
{
- return crtc_state->dsc.num_streams;
+ return crtc_state->dsc.slice_config.streams_per_pipe;
}
int intel_dsc_get_num_vdsc_instances(const struct intel_crtc_state *crtc_state)
@@ -767,6 +820,29 @@ void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
sizeof(dp_dsc_pps_sdp));
}
+void intel_dsc_su_et_parameters_configure(struct intel_dsb *dsb, struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state, int su_lines)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ enum pipe pipe = crtc->pipe;
+ int vdsc_instances_per_pipe = intel_dsc_get_vdsc_per_pipe(crtc_state);
+ int slice_row_per_frame = su_lines / vdsc_cfg->slice_height;
+ u32 val;
+
+ drm_WARN_ON_ONCE(display->drm, su_lines % vdsc_cfg->slice_height);
+ drm_WARN_ON_ONCE(display->drm, vdsc_instances_per_pipe > 2);
+
+ val = DSC_SUPS0_SU_SLICE_ROW_PER_FRAME(slice_row_per_frame);
+ val |= DSC_SUPS0_SU_PIC_HEIGHT(su_lines);
+
+ intel_de_write_dsb(display, dsb, LNL_DSC0_SU_PARAMETER_SET_0(pipe), val);
+
+ if (vdsc_instances_per_pipe == 2)
+ intel_de_write_dsb(display, dsb, LNL_DSC1_SU_PARAMETER_SET_0(pipe), val);
+}
+
static i915_reg_t dss_ctl1_reg(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
{
return is_pipe_dsc(crtc, cpu_transcoder) ?
@@ -1018,12 +1094,13 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
if (!crtc_state->dsc.compression_enable)
goto out;
+ /* TODO: Read out slice_config.pipes_per_line/slices_per_stream as well */
if (dss_ctl1 & JOINER_ENABLE && dss_ctl2 & (VDSC2_ENABLE | SMALL_JOINER_CONFIG_3_ENGINES))
- crtc_state->dsc.num_streams = 3;
+ crtc_state->dsc.slice_config.streams_per_pipe = 3;
else if (dss_ctl1 & JOINER_ENABLE && dss_ctl2 & VDSC1_ENABLE)
- crtc_state->dsc.num_streams = 2;
+ crtc_state->dsc.slice_config.streams_per_pipe = 2;
else
- crtc_state->dsc.num_streams = 1;
+ crtc_state->dsc.slice_config.streams_per_pipe = 1;
intel_dsc_get_pps_config(crtc_state);
out:
@@ -1036,8 +1113,8 @@ static void intel_vdsc_dump_state(struct drm_printer *p, int indent,
drm_printf_indent(p, indent,
"dsc-dss: compressed-bpp:" FXP_Q4_FMT ", slice-count: %d, num_streams: %d\n",
FXP_Q4_ARGS(crtc_state->dsc.compressed_bpp_x16),
- crtc_state->dsc.slice_count,
- crtc_state->dsc.num_streams);
+ intel_dsc_line_slice_count(&crtc_state->dsc.slice_config),
+ crtc_state->dsc.slice_config.streams_per_pipe);
}
void intel_vdsc_state_dump(struct drm_printer *p, int indent,
@@ -1050,7 +1127,6 @@ void intel_vdsc_state_dump(struct drm_printer *p, int indent,
drm_dsc_dump_config(p, indent, &crtc_state->dsc.config);
}
-static
int intel_dsc_get_pixel_rate_with_dsc_bubbles(struct intel_display *display,
int pixel_rate, int htotal,
int dsc_horizontal_slices)
@@ -1072,7 +1148,7 @@ int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state)
struct intel_display *display = to_intel_display(crtc_state);
int num_vdsc_instances = intel_dsc_get_num_vdsc_instances(crtc_state);
int htotal = crtc_state->hw.adjusted_mode.crtc_htotal;
- int dsc_slices = crtc_state->dsc.slice_count;
+ int dsc_slices = intel_dsc_line_slice_count(&crtc_state->dsc.slice_config);
int pixel_rate;
int min_cdclk;
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index 99f64ac54b27..3372f8694054 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -13,9 +13,16 @@ struct drm_printer;
enum transcoder;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
+struct intel_dsb;
+struct intel_dsc_slice_config;
struct intel_encoder;
bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state);
+int intel_dsc_line_slice_count(const struct intel_dsc_slice_config *config);
+bool intel_dsc_get_slice_config(struct intel_display *display,
+ int num_joined_pipes, int slice_per_pipe,
+ struct intel_dsc_slice_config *config);
void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state);
void intel_dsc_enable(const struct intel_crtc_state *crtc_state);
void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
@@ -31,9 +38,14 @@ void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+void intel_dsc_su_et_parameters_configure(struct intel_dsb *dsb, struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state, int su_lines);
void intel_vdsc_state_dump(struct drm_printer *p, int indent,
const struct intel_crtc_state *crtc_state);
int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state);
unsigned int intel_vdsc_prefill_lines(const struct intel_crtc_state *crtc_state);
+int intel_dsc_get_pixel_rate_with_dsc_bubbles(struct intel_display *display,
+ int pixel_rate, int htotal,
+ int dsc_horizontal_slices);
#endif /* __INTEL_VDSC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
index 2d478a84b07c..2b2e3c1b8138 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
@@ -196,6 +196,18 @@
#define DSC_PPS18_NSL_BPG_OFFSET(offset) REG_FIELD_PREP(DSC_PPS18_NSL_BPG_OFFSET_MASK, offset)
#define DSC_PPS18_SL_OFFSET_ADJ(offset) REG_FIELD_PREP(DSC_PPS18_SL_OFFSET_ADJ_MASK, offset)
+#define _LNL_DSC0_SU_PARAMETER_SET_0_PA 0x78064
+#define _LNL_DSC1_SU_PARAMETER_SET_0_PA 0x78164
+#define _LNL_DSC0_SU_PARAMETER_SET_0_PB 0x78264
+#define _LNL_DSC1_SU_PARAMETER_SET_0_PB 0x78364
+#define LNL_DSC0_SU_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe), _LNL_DSC0_SU_PARAMETER_SET_0_PA, _LNL_DSC0_SU_PARAMETER_SET_0_PB)
+#define LNL_DSC1_SU_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe), _LNL_DSC1_SU_PARAMETER_SET_0_PA, _LNL_DSC1_SU_PARAMETER_SET_0_PB)
+
+#define DSC_SUPS0_SU_SLICE_ROW_PER_FRAME_MASK REG_GENMASK(31, 20)
+#define DSC_SUPS0_SU_SLICE_ROW_PER_FRAME(rows) REG_FIELD_PREP(DSC_SUPS0_SU_SLICE_ROW_PER_FRAME_MASK, (rows))
+#define DSC_SUPS0_SU_PIC_HEIGHT_MASK REG_GENMASK(15, 0)
+#define DSC_SUPS0_SU_PIC_HEIGHT(h) REG_FIELD_PREP(DSC_SUPS0_SU_PIC_HEIGHT_MASK, (h))
+
/* Icelake Rate Control Buffer Threshold Registers */
#define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230)
#define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4)
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
index c45c4bbc3f95..9832a4ade318 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.c
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -18,6 +18,23 @@
#include "intel_vga.h"
#include "intel_vga_regs.h"
+static unsigned int intel_gmch_ctrl_reg(struct intel_display *display)
+{
+ return DISPLAY_VER(display) >= 6 ? SNB_GMCH_CTRL : I830_GMCH_CTRL;
+}
+
+static bool intel_vga_decode_is_enabled(struct intel_display *display)
+{
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
+ u16 gmch_ctrl = 0;
+
+ if (pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0),
+ intel_gmch_ctrl_reg(display), &gmch_ctrl))
+ return false;
+
+ return !(gmch_ctrl & INTEL_GMCH_VGA_DISABLE);
+}
+
static i915_reg_t intel_vga_cntrl_reg(struct intel_display *display)
{
if (display->platform.valleyview || display->platform.cherryview)
@@ -41,101 +58,266 @@ static bool has_vga_pipe_sel(struct intel_display *display)
return DISPLAY_VER(display) < 7;
}
-/* Disable the VGA plane that we never use */
-void intel_vga_disable(struct intel_display *display)
+static bool has_vga_mmio_access(struct intel_display *display)
{
- struct pci_dev *pdev = to_pci_dev(display->drm->dev);
- i915_reg_t vga_reg = intel_vga_cntrl_reg(display);
- enum pipe pipe;
- u32 tmp;
- u8 sr1;
+ /* WaEnableVGAAccessThroughIOPort:ctg+ */
+ return DISPLAY_VER(display) < 5 && !display->platform.g4x;
+}
- tmp = intel_de_read(display, vga_reg);
- if (tmp & VGA_DISP_DISABLE)
- return;
+static bool intel_pci_has_vga_io_decode(struct pci_dev *pdev)
+{
+ u16 cmd = 0;
- if (display->platform.cherryview)
- pipe = REG_FIELD_GET(VGA_PIPE_SEL_MASK_CHV, tmp);
- else if (has_vga_pipe_sel(display))
- pipe = REG_FIELD_GET(VGA_PIPE_SEL_MASK, tmp);
- else
- pipe = PIPE_A;
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if ((cmd & PCI_COMMAND_IO) == 0)
+ return false;
- drm_dbg_kms(display->drm, "Disabling VGA plane on pipe %c\n",
- pipe_name(pipe));
+ pdev = pdev->bus->self;
+ while (pdev) {
+ u16 ctl = 0;
- /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
- vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
- outb(0x01, VGA_SEQ_I);
- sr1 = inb(VGA_SEQ_D);
- outb(sr1 | VGA_SR01_SCREEN_OFF, VGA_SEQ_D);
- vga_put(pdev, VGA_RSRC_LEGACY_IO);
- udelay(300);
+ pci_read_config_word(pdev, PCI_BRIDGE_CONTROL, &ctl);
+ if ((ctl & PCI_BRIDGE_CTL_VGA) == 0)
+ return false;
- intel_de_write(display, vga_reg, VGA_DISP_DISABLE);
- intel_de_posting_read(display, vga_reg);
+ pdev = pdev->bus->self;
+ }
+
+ return true;
+}
+
+static bool intel_pci_set_io_decode(struct pci_dev *pdev, bool enable)
+{
+ u16 old = 0, cmd;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &old);
+ cmd = old & ~PCI_COMMAND_IO;
+ if (enable)
+ cmd |= PCI_COMMAND_IO;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+
+ return old & PCI_COMMAND_IO;
}
-void intel_vga_reset_io_mem(struct intel_display *display)
+static bool intel_pci_bridge_set_vga(struct pci_dev *pdev, bool enable)
+{
+ u16 old = 0, ctl;
+
+ pci_read_config_word(pdev->bus->self, PCI_BRIDGE_CONTROL, &old);
+ ctl = old & ~PCI_BRIDGE_CTL_VGA;
+ if (enable)
+ ctl |= PCI_BRIDGE_CTL_VGA;
+ pci_write_config_word(pdev->bus->self, PCI_BRIDGE_CONTROL, ctl);
+
+ return old & PCI_BRIDGE_CTL_VGA;
+}
+
+static int intel_vga_get(struct intel_display *display, bool mmio,
+ bool *old_io_decode)
{
struct pci_dev *pdev = to_pci_dev(display->drm->dev);
+ int err;
+
+ if (mmio) {
+ *old_io_decode = false;
+ return 0;
+ }
/*
- * After we re-enable the power well, if we touch VGA register 0x3d5
- * we'll get unclaimed register interrupts. This stops after we write
- * anything to the VGA MSR register. The vgacon module uses this
- * register all the time, so if we unbind our driver and, as a
- * consequence, bind vgacon, we'll get stuck in an infinite loop at
- * console_unlock(). So make here we touch the VGA MSR register, making
- * sure vgacon can keep working normally without triggering interrupts
- * and error messages.
+ * Bypass the VGA arbiter on the iGPU and just enable
+ * IO decode by hand. This avoids clobbering the VGA
+ * routing for an external GPU when it's the current
+ * VGA device, and thus prevents the all 0xff/white
+ * readout from VGA memory when taking over from vgacon.
+ *
+ * The iGPU has the highest VGA decode priority so it will
+ * grab any VGA IO access when IO decode is enabled, regardless
+ * of how any other VGA routing bits are configured.
*/
- vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
- outb(inb(VGA_MIS_R), VGA_MIS_W);
- vga_put(pdev, VGA_RSRC_LEGACY_IO);
+ if (display->platform.dgfx) {
+ err = vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
+ if (err)
+ return err;
+ }
+
+ *old_io_decode = intel_pci_set_io_decode(pdev, true);
+
+ return 0;
}
-static int intel_gmch_vga_set_state(struct intel_display *display, bool enable_decode)
+static void intel_vga_put(struct intel_display *display, bool io_decode, bool mmio)
{
struct pci_dev *pdev = to_pci_dev(display->drm->dev);
- unsigned int reg = DISPLAY_VER(display) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
- u16 gmch_ctrl;
- if (pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0), reg, &gmch_ctrl)) {
- drm_err(display->drm, "failed to read control word\n");
- return -EIO;
- }
+ if (mmio)
+ return;
- if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !enable_decode)
- return 0;
+ /* see intel_vga_get() */
+ intel_pci_set_io_decode(pdev, io_decode);
+
+ if (display->platform.dgfx)
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
+}
- if (enable_decode)
- gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
+u8 intel_vga_read(struct intel_display *display, u16 reg, bool mmio)
+{
+ if (mmio)
+ return intel_de_read8(display, _MMIO(reg));
else
- gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
+ return inb(reg);
+}
+
+static void intel_vga_write(struct intel_display *display, u16 reg, u8 val, bool mmio)
+{
+ if (mmio)
+ intel_de_write8(display, _MMIO(reg), val);
+ else
+ outb(val, reg);
+}
+
+/* Disable the VGA plane that we never use */
+void intel_vga_disable(struct intel_display *display)
+{
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
+ i915_reg_t vga_reg = intel_vga_cntrl_reg(display);
+ bool mmio = has_vga_mmio_access(display);
+ bool io_decode;
+ u8 msr, sr1;
+ u32 tmp;
+ int err;
- if (pci_bus_write_config_word(pdev->bus, PCI_DEVFN(0, 0), reg, gmch_ctrl)) {
- drm_err(display->drm, "failed to write control word\n");
- return -EIO;
+ if (!intel_vga_decode_is_enabled(display)) {
+ drm_dbg_kms(display->drm, "VGA decode is disabled\n");
+
+ /*
+ * On older hardware VGA_DISP_DISABLE defaults to 0, but
+ * it *must* be set or else the pipe will be completely
+ * stuck (at least on g4x).
+ */
+ goto reset_vgacntr;
}
- return 0;
+ tmp = intel_de_read(display, vga_reg);
+
+ if ((tmp & VGA_DISP_DISABLE) == 0) {
+ enum pipe pipe;
+
+ if (display->platform.cherryview)
+ pipe = REG_FIELD_GET(VGA_PIPE_SEL_MASK_CHV, tmp);
+ else if (has_vga_pipe_sel(display))
+ pipe = REG_FIELD_GET(VGA_PIPE_SEL_MASK, tmp);
+ else
+ pipe = PIPE_A;
+
+ drm_dbg_kms(display->drm, "Disabling VGA plane on pipe %c\n",
+ pipe_name(pipe));
+ } else {
+ drm_dbg_kms(display->drm, "VGA plane is disabled\n");
+
+ /*
+ * Unfortunately at least some BIOSes (eg. HSW Lenovo
+ * ThinkCentre E73) set up the VGA registers even when
+ * in UEFI mode with the VGA plane disabled. So we need to
+ * always clean up the mess for iGPUs. For discrete GPUs we
+ * don't really care about the state of the VGA registers
+ * since all VGA accesses can be blocked via the bridge.
+ */
+ if (display->platform.dgfx)
+ goto reset_vgacntr;
+ }
+
+ /*
+ * This should not fail, because the vga_get() family of functions
+ * will only report errors for dGPUs that are unreachable via the
+ * bridge, and cannot be made reachable either. We shouldn't even
+ * get here for this case, but if we do, we assume that the bridge
+ * will also refuse future requests to forward VGA accesses.
+ */
+ err = intel_vga_get(display, mmio, &io_decode);
+ if (err)
+ goto reset_vgacntr;
+
+ drm_WARN_ON(display->drm, !mmio && !intel_pci_has_vga_io_decode(pdev));
+
+ intel_vga_write(display, VGA_SEQ_I, 0x01, mmio);
+ sr1 = intel_vga_read(display, VGA_SEQ_D, mmio);
+ sr1 |= VGA_SR01_SCREEN_OFF;
+ intel_vga_write(display, VGA_SEQ_D, sr1, mmio);
+
+ msr = intel_vga_read(display, VGA_MIS_R, mmio);
+ /*
+ * Always disable VGA memory decode for iGPU so that
+ * intel_vga_set_decode() doesn't need to access VGA registers.
+ * VGA_MIS_ENB_MEM_ACCESS=0 is also the reset value.
+ */
+ msr &= ~VGA_MIS_ENB_MEM_ACCESS;
+ /*
+ * VGA_MIS_COLOR controls both GPU level and display engine level
+ * MDA vs. CGA decode logic. But when the register gets reset
+ * (reset value has VGA_MIS_COLOR=0) by the power well, only the
+ * display engine level decode logic gets notified.
+ *
+ * Switch to MDA mode to make sure the GPU level decode logic will
+ * be in sync with the display engine level decode logic after the
+ * power well has been reset. Otherwise the GPU will claim CGA
+ * register accesses but the display engine will not, causing
+ * RMbus NoClaim errors.
+ */
+ msr &= ~VGA_MIS_COLOR;
+ intel_vga_write(display, VGA_MIS_W, msr, mmio);
+
+ intel_vga_put(display, io_decode, mmio);
+
+ /*
+ * Inform the arbiter about VGA memory decode being disabled so
+ * that it doesn't disable all memory decode for the iGPU when
+ * targeting another GPU.
+ */
+ if (!display->platform.dgfx)
+ vga_set_legacy_decoding(pdev, VGA_RSRC_LEGACY_IO);
+
+ udelay(300);
+
+reset_vgacntr:
+ intel_de_write(display, vga_reg, VGA_DISP_DISABLE);
+ intel_de_posting_read(display, vga_reg);
}
-static unsigned int intel_gmch_vga_set_decode(struct pci_dev *pdev, bool enable_decode)
+static unsigned int intel_vga_set_decode(struct pci_dev *pdev, bool enable_decode)
{
struct intel_display *display = to_intel_display(pdev);
+ unsigned int decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
- intel_gmch_vga_set_state(display, enable_decode);
+ drm_dbg_kms(display->drm, "%s VGA decode due to VGA arbitration\n",
+ str_enable_disable(enable_decode));
- if (enable_decode)
- return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
- VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
- else
- return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+ /*
+ * Can't use GMCH_CTRL INTEL_GMCH_VGA_DISABLE to disable VGA
+ * decode on ILK+ since the register is locked. Instead
+ * intel_vga_disable() will disable VGA memory decode for the
+ * iGPU, and here we just need to take care of the IO decode.
+ * For discrete GPUs we rely on the bridge VGA control.
+ *
+ * We can't disable IO decode already in intel_vga_disable()
+ * because at least some laptops (eg. CTG Dell Latitude E5400)
+ * will hang during reboot/shutdown with IO decode disabled.
+ */
+ if (display->platform.dgfx) {
+ if (!enable_decode)
+ intel_pci_bridge_set_vga(pdev, false);
+ else
+ decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
+ } else {
+ if (!enable_decode)
+ intel_pci_set_io_decode(pdev, false);
+ else
+ decodes |= VGA_RSRC_LEGACY_IO;
+ }
+
+ return decodes;
}
-int intel_vga_register(struct intel_display *display)
+void intel_vga_register(struct intel_display *display)
{
struct pci_dev *pdev = to_pci_dev(display->drm->dev);
@@ -149,11 +331,8 @@ int intel_vga_register(struct intel_display *display)
* then we do not take part in VGA arbitration and the
* vga_client_register() fails with -ENODEV.
*/
- ret = vga_client_register(pdev, intel_gmch_vga_set_decode);
- if (ret && ret != -ENODEV)
- return ret;
-
- return 0;
+ ret = vga_client_register(pdev, intel_vga_set_decode);
+ drm_WARN_ON(display->drm, ret && ret != -ENODEV);
}
void intel_vga_unregister(struct intel_display *display)
diff --git a/drivers/gpu/drm/i915/display/intel_vga.h b/drivers/gpu/drm/i915/display/intel_vga.h
index 16d699f3b641..72131cb536cd 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.h
+++ b/drivers/gpu/drm/i915/display/intel_vga.h
@@ -6,11 +6,14 @@
#ifndef __INTEL_VGA_H__
#define __INTEL_VGA_H__
+#include <linux/types.h>
+
struct intel_display;
+u8 intel_vga_read(struct intel_display *display, u16 reg, bool mmio);
void intel_vga_reset_io_mem(struct intel_display *display);
void intel_vga_disable(struct intel_display *display);
-int intel_vga_register(struct intel_display *display);
+void intel_vga_register(struct intel_display *display);
void intel_vga_unregister(struct intel_display *display);
#endif /* __INTEL_VGA_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index db74744ddb31..8a957804cb97 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -6,6 +6,7 @@
#include <drm/drm_print.h>
+#include "intel_alpm.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_regs.h"
@@ -520,6 +521,7 @@ int intel_vrr_compute_optimized_guardband(struct intel_crtc_state *crtc_state)
if (intel_crtc_has_dp_encoder(crtc_state)) {
guardband = max(guardband, intel_psr_min_guardband(crtc_state));
guardband = max(guardband, intel_dp_sdp_min_guardband(crtc_state, true));
+ guardband = max(guardband, intel_alpm_lobf_min_guardband(crtc_state));
}
return guardband;
@@ -598,6 +600,18 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
return;
/*
+ * Bspec says:
+ * "(note: VRR needs to be programmed after
+ * TRANS_DDI_FUNC_CTL and before TRANS_CONF)."
+ *
+ * In practice it turns out that ICL can hang if
+ * TRANS_VRR_VMAX/FLIPLINE are written before
+ * enabling TRANS_DDI_FUNC_CTL.
+ */
+ drm_WARN_ON(display->drm,
+ !(intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE));
+
+ /*
* This bit seems to have two meanings depending on the platform:
* TGL: generate VRR "safe window" for DSB vblank waits
* ADL/DG2: make TRANS_SET_CONTEXT_LATENCY effective with VRR
@@ -676,13 +690,32 @@ intel_vrr_dcb_reset(const struct intel_crtc_state *old_crtc_state,
intel_de_write(display, PIPEDMC_DCB_BALANCE_RESET(pipe), 0);
}
+static u32 trans_vrr_push(const struct intel_crtc_state *crtc_state,
+ bool send_push)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ u32 trans_vrr_push = 0;
+
+ if (intel_vrr_always_use_vrr_tg(display) ||
+ crtc_state->vrr.enable)
+ trans_vrr_push |= TRANS_PUSH_EN;
+
+ if (send_push)
+ trans_vrr_push |= TRANS_PUSH_SEND;
+
+ if (HAS_PSR_TRANS_PUSH_FRAME_CHANGE(display))
+ trans_vrr_push |= LNL_TRANS_PUSH_PSR_PR_EN;
+
+ return trans_vrr_push;
+}
+
void intel_vrr_send_push(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (!crtc_state->vrr.enable)
+ if (!crtc_state->vrr.enable && !intel_psr_use_trans_push(crtc_state))
return;
if (dsb)
@@ -690,8 +723,7 @@ void intel_vrr_send_push(struct intel_dsb *dsb,
intel_de_write_dsb(display, dsb,
TRANS_PUSH(display, cpu_transcoder),
- TRANS_PUSH_EN | TRANS_PUSH_SEND);
-
+ trans_vrr_push(crtc_state, true));
if (dsb)
intel_dsb_nonpost_end(dsb);
}
@@ -876,7 +908,8 @@ static void intel_vrr_tg_enable(const struct intel_crtc_state *crtc_state,
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 vrr_ctl;
- intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), TRANS_PUSH_EN);
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
+ trans_vrr_push(crtc_state, false));
vrr_ctl = VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state);
@@ -904,7 +937,8 @@ static void intel_vrr_tg_disable(const struct intel_crtc_state *old_crtc_state)
VRR_STATUS_VRR_EN_LIVE, 1000))
drm_err(display->drm, "Timed out waiting for VRR live status to clear\n");
- intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
+ intel_de_rmw(display, TRANS_PUSH(display, cpu_transcoder),
+ TRANS_PUSH_EN, 0);
}
void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
@@ -939,6 +973,8 @@ void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
+ intel_vrr_set_transcoder_timings(crtc_state);
+
if (!intel_vrr_possible(crtc_state))
return;
@@ -957,6 +993,15 @@ void intel_vrr_transcoder_disable(const struct intel_crtc_state *old_crtc_state)
intel_vrr_tg_disable(old_crtc_state);
}
+void intel_vrr_psr_frame_change_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
+ trans_vrr_push(crtc_state, false));
+}
+
bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state)
{
return crtc_state->vrr.flipline &&
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
index bedcc8c4bff2..4f16ca4af91f 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -33,6 +33,7 @@ void intel_vrr_dcb_increment_flip_count(struct intel_crtc_state *crtc_state,
struct intel_crtc *crtc);
bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state);
void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state);
+void intel_vrr_psr_frame_change_enable(const struct intel_crtc_state *crtc_state);
void intel_vrr_get_config(struct intel_crtc_state *crtc_state);
int intel_vrr_vmax_vtotal(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_vrr_regs.h b/drivers/gpu/drm/i915/display/intel_vrr_regs.h
index 427ada0d3973..9d4d6573a149 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr_regs.h
@@ -165,6 +165,7 @@
#define TRANS_PUSH(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_PUSH_A)
#define TRANS_PUSH_EN REG_BIT(31)
#define TRANS_PUSH_SEND REG_BIT(30)
+#define LNL_TRANS_PUSH_PSR_PR_EN REG_BIT(16)
#define _TRANS_VRR_VSYNC_A 0x60078
#define TRANS_VRR_VSYNC(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_VSYNC_A)
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index 4c4deac7f9c8..7c5cb188ebf0 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -823,7 +823,7 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
crtc_state->scaler_state.scaler_id < 0))
return;
- if (intel_display_wa(display, 14011503117))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14011503117))
adl_scaler_ecc_mask(crtc_state);
drm_rect_init(&src, 0, 0,
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 746e942cafd2..11ba42c67e3e 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -17,7 +17,7 @@
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
-#include "intel_dpt.h"
+#include "intel_display_wa.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
@@ -1217,7 +1217,7 @@ static u32 skl_plane_ctl(const struct intel_plane_state *plane_state)
plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
/* Wa_22012358565:adl-p */
- if (DISPLAY_VER(display) == 13)
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_22012358565))
plane_ctl |= adlp_plane_ctl_arb_slots(plane_state);
return plane_ctl;
@@ -2793,8 +2793,7 @@ static bool tgl_plane_has_mc_ccs(struct intel_display *display,
enum plane_id plane_id)
{
/* Wa_14010477008 */
- if (display->platform.dg1 || display->platform.rocketlake ||
- (display->platform.tigerlake && IS_DISPLAY_STEP(display, STEP_A0, STEP_D0)))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14010477008))
return false;
return plane_id < PLANE_6;
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index f5a6fae815d1..b1f9546b8cda 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -7,8 +7,8 @@
#include <drm/drm_blend.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_pcode_regs.h>
-#include "i915_reg.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_bw.h"
@@ -22,11 +22,12 @@
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
+#include "intel_display_wa.h"
#include "intel_dram.h"
#include "intel_fb.h"
#include "intel_fixed.h"
#include "intel_flipq.h"
-#include "intel_pcode.h"
+#include "intel_parent.h"
#include "intel_plane.h"
#include "intel_vblank.h"
#include "intel_wm.h"
@@ -115,9 +116,8 @@ intel_sagv_block_time(struct intel_display *display)
u32 val = 0;
int ret;
- ret = intel_pcode_read(display->drm,
- GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
- &val, NULL);
+ ret = intel_parent_pcode_read(display, GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
+ &val, NULL);
if (ret) {
drm_dbg_kms(display->drm, "Couldn't read SAGV block time!\n");
return 0;
@@ -184,8 +184,8 @@ static void skl_sagv_enable(struct intel_display *display)
return;
drm_dbg_kms(display->drm, "Enabling SAGV\n");
- ret = intel_pcode_write(display->drm, GEN9_PCODE_SAGV_CONTROL,
- GEN9_SAGV_ENABLE);
+ ret = intel_parent_pcode_write(display, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_ENABLE);
/* We don't need to wait for SAGV when enabling */
@@ -217,9 +217,9 @@ static void skl_sagv_disable(struct intel_display *display)
drm_dbg_kms(display->drm, "Disabling SAGV\n");
/* bspec says to keep retrying for at least 1 ms */
- ret = intel_pcode_request(display->drm, GEN9_PCODE_SAGV_CONTROL,
- GEN9_SAGV_DISABLE,
- GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, 1);
+ ret = intel_parent_pcode_request(display, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_DISABLE,
+ GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, 1);
/*
* Some skl systems, pre-release machines in particular,
* don't actually have SAGV.
@@ -3283,7 +3283,7 @@ static void skl_read_wm_latency(struct intel_display *display)
/* read the first set of memory latencies[0:3] */
val = 0; /* data0 to be programmed to 0 for first set */
- ret = intel_pcode_read(display->drm, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
+ ret = intel_parent_pcode_read(display, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
if (ret) {
drm_err(display->drm, "SKL Mailbox read error = %d\n", ret);
return;
@@ -3296,7 +3296,7 @@ static void skl_read_wm_latency(struct intel_display *display)
/* read the second set of memory latencies[4:7] */
val = 1; /* data0 to be programmed to 1 for second set */
- ret = intel_pcode_read(display->drm, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
+ ret = intel_parent_pcode_read(display, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
if (ret) {
drm_err(display->drm, "SKL Mailbox read error = %d\n", ret);
return;
@@ -3413,7 +3413,7 @@ static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc,
if (DISPLAY_VER(display) >= 14)
val |= dbuf_state->joined_mbus ?
MBUS_DBOX_A_CREDIT(12) : MBUS_DBOX_A_CREDIT(8);
- else if (display->platform.alderlake_p)
+ else if (intel_display_wa(display, INTEL_DISPLAY_WA_22010947358))
/* Wa_22010947358:adl-p */
val |= dbuf_state->joined_mbus ?
MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 60857d2afdb1..36591d724638 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -33,7 +33,6 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_backlight.h"
#include "intel_connector.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 30cc08583cbd..7782ba44fabd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -22,7 +22,7 @@ static void __do_clflush(struct drm_i915_gem_object *obj)
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
drm_clflush_sg(obj->mm.pages);
- i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
+ i915_gem_object_frontbuffer_flush(obj, ORIGIN_CPU);
}
static void clflush_work(struct dma_fence_work *base)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index ef3b14ae2e0d..df7502391b50 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -68,7 +68,7 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
i915_vma_flush_writes(vma);
spin_unlock(&obj->vma.lock);
- i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
+ i915_gem_object_frontbuffer_flush(obj, ORIGIN_CPU);
break;
case I915_GEM_DOMAIN_WC:
@@ -647,7 +647,7 @@ out_unlock:
i915_gem_object_unlock(obj);
if (!err && write_domain)
- i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
+ i915_gem_object_frontbuffer_invalidate(obj, ORIGIN_CPU);
out:
i915_gem_object_put(obj);
@@ -759,7 +759,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
}
out:
- i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
+ i915_gem_object_frontbuffer_invalidate(obj, ORIGIN_CPU);
obj->mm.dirty = true;
/* return with the pages pinned */
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 798c920160cf..5172d3982654 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -474,30 +474,6 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
queue_work(i915->wq, &i915->mm.free_work);
}
-void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin)
-{
- struct i915_frontbuffer *front;
-
- front = i915_gem_object_frontbuffer_lookup(obj);
- if (front) {
- intel_frontbuffer_flush(&front->base, origin);
- i915_gem_object_frontbuffer_put(front);
- }
-}
-
-void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin)
-{
- struct i915_frontbuffer *front;
-
- front = i915_gem_object_frontbuffer_lookup(obj);
- if (front) {
- intel_frontbuffer_invalidate(&front->base, origin);
- i915_gem_object_frontbuffer_put(front);
- }
-}
-
static void
i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c
index adba3fa96c05..f885c4fb1326 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2025 Intel Corporation */
+#include <drm/intel/display_parent_interface.h>
+
#include "i915_drv.h"
#include "i915_gem_object_frontbuffer.h"
@@ -101,3 +103,70 @@ void i915_gem_object_frontbuffer_put(struct i915_frontbuffer *front)
kref_put_lock(&front->ref, frontbuffer_release,
&i915->frontbuffer_lock);
}
+
+void __i915_gem_object_frontbuffer_flush(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin)
+{
+ struct i915_frontbuffer *front;
+
+ front = i915_gem_object_frontbuffer_lookup(obj);
+ if (front) {
+ intel_frontbuffer_flush(&front->base, origin);
+ i915_gem_object_frontbuffer_put(front);
+ }
+}
+
+void __i915_gem_object_frontbuffer_invalidate(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin)
+{
+ struct i915_frontbuffer *front;
+
+ front = i915_gem_object_frontbuffer_lookup(obj);
+ if (front) {
+ intel_frontbuffer_invalidate(&front->base, origin);
+ i915_gem_object_frontbuffer_put(front);
+ }
+}
+
+static struct intel_frontbuffer *i915_frontbuffer_get(struct drm_gem_object *_obj)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(_obj);
+ struct i915_frontbuffer *front;
+
+ front = i915_gem_object_frontbuffer_get(obj);
+ if (!front)
+ return NULL;
+
+ return &front->base;
+}
+
+static void i915_frontbuffer_ref(struct intel_frontbuffer *_front)
+{
+ struct i915_frontbuffer *front =
+ container_of(_front, typeof(*front), base);
+
+ i915_gem_object_frontbuffer_ref(front);
+}
+
+static void i915_frontbuffer_put(struct intel_frontbuffer *_front)
+{
+ struct i915_frontbuffer *front =
+ container_of(_front, typeof(*front), base);
+
+ return i915_gem_object_frontbuffer_put(front);
+}
+
+static void i915_frontbuffer_flush_for_display(struct intel_frontbuffer *_front)
+{
+ struct i915_frontbuffer *front =
+ container_of(_front, typeof(*front), base);
+
+ i915_gem_object_flush_if_display(front->obj);
+}
+
+const struct intel_display_frontbuffer_interface i915_display_frontbuffer_interface = {
+ .get = i915_frontbuffer_get,
+ .ref = i915_frontbuffer_ref,
+ .put = i915_frontbuffer_put,
+ .flush_for_display = i915_frontbuffer_flush_for_display,
+};
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
index 2133e29047c5..9c6d91f21c19 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
@@ -20,31 +20,41 @@ struct i915_frontbuffer {
struct kref ref;
};
-void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+void __i915_gem_object_frontbuffer_flush(struct drm_i915_gem_object *obj,
enum fb_op_origin origin);
-void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+void __i915_gem_object_frontbuffer_invalidate(struct drm_i915_gem_object *obj,
enum fb_op_origin origin);
static inline void
-i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+i915_gem_object_frontbuffer_flush(struct drm_i915_gem_object *obj,
enum fb_op_origin origin)
{
if (unlikely(rcu_access_pointer(obj->frontbuffer)))
- __i915_gem_object_flush_frontbuffer(obj, origin);
+ __i915_gem_object_frontbuffer_flush(obj, origin);
}
static inline void
-i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+i915_gem_object_frontbuffer_invalidate(struct drm_i915_gem_object *obj,
enum fb_op_origin origin)
{
if (unlikely(rcu_access_pointer(obj->frontbuffer)))
- __i915_gem_object_invalidate_frontbuffer(obj, origin);
+ __i915_gem_object_frontbuffer_invalidate(obj, origin);
}
struct i915_frontbuffer *i915_gem_object_frontbuffer_get(struct drm_i915_gem_object *obj);
void i915_gem_object_frontbuffer_ref(struct i915_frontbuffer *front);
void i915_gem_object_frontbuffer_put(struct i915_frontbuffer *front);
+static inline void i915_gem_object_frontbuffer_track(struct i915_frontbuffer *_old,
+ struct i915_frontbuffer *_new,
+ unsigned int frontbuffer_bits)
+{
+ struct intel_frontbuffer *old = _old ? &_old->base : NULL;
+ struct intel_frontbuffer *new = _new ? &_new->base : NULL;
+
+ intel_frontbuffer_track(old, new, frontbuffer_bits);
+}
+
/**
* i915_gem_object_frontbuffer_lookup - Look up the object's frontbuffer
* @obj: The object whose frontbuffer to look up.
@@ -81,4 +91,6 @@ i915_gem_object_frontbuffer_lookup(const struct drm_i915_gem_object *obj)
return front;
}
+extern const struct intel_display_frontbuffer_interface i915_display_frontbuffer_interface;
+
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index ce2780ef97ef..e375afbf458e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -155,7 +155,7 @@ int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
* We manually control the domain here and pretend that it
* remains coherent i.e. in the GTT domain, like shmem_pwrite.
*/
- i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
+ i915_gem_object_frontbuffer_invalidate(obj, ORIGIN_CPU);
if (copy_from_user(vaddr, user_data, args->size))
return -EFAULT;
@@ -163,7 +163,7 @@ int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
drm_clflush_virt_range(vaddr, args->size);
intel_gt_chipset_flush(to_gt(i915));
- i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
+ i915_gem_object_frontbuffer_flush(obj, ORIGIN_CPU);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index 3a7e202ae87d..56489cc127d6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -408,7 +408,7 @@ static void __memcpy_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
if (unlikely(fence->error || I915_SELFTEST_ONLY(fail_gpu_migration))) {
INIT_WORK(&copy_work->work, __memcpy_work);
- queue_work(system_unbound_wq, &copy_work->work);
+ queue_work(system_dfl_wq, &copy_work->work);
} else {
init_irq_work(&copy_work->irq_work, __memcpy_irq_work);
irq_work_queue(&copy_work->irq_work);
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index e8fab45759c3..438cd4724ac4 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -67,7 +67,7 @@ void gen6_ppgtt_enable(struct intel_gt *gt)
if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
intel_uncore_write(uncore,
GFX_MODE,
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ REG_MASKED_FIELD_ENABLE(GFX_PPGTT_ENABLE));
}
/* PPGTT support for Sandybdrige/Gen6 and later */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index d37966ec7a92..c0fd349a4600 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1233,7 +1233,7 @@ static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine)
engine->class == VIDEO_ENHANCEMENT_CLASS ||
engine->class == COMPUTE_CLASS ||
engine->class == OTHER_CLASS))
- engine->tlb_inv.request = _MASKED_BIT_ENABLE(val);
+ engine->tlb_inv.request = REG_MASKED_FIELD_ENABLE(val);
else
engine->tlb_inv.request = val;
@@ -1628,7 +1628,7 @@ static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
const i915_reg_t mode = RING_MI_MODE(engine->mmio_base);
int err;
- intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
+ intel_uncore_write_fw(uncore, mode, REG_MASKED_FIELD_ENABLE(STOP_RING));
/*
* Wa_22011802037: Prior to doing a reset, ensure CS is
@@ -1636,7 +1636,7 @@ static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
*/
if (intel_engine_reset_needs_wa_22011802037(engine->gt))
intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base),
- _MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE));
+ REG_MASKED_FIELD_ENABLE(GEN12_GFX_PREFETCH_DISABLE));
err = __intel_wait_for_register_fw(engine->uncore, mode,
MODE_IDLE, MODE_IDLE,
@@ -1692,7 +1692,7 @@ void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{
ENGINE_TRACE(engine, "\n");
- ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, REG_MASKED_FIELD_DISABLE(STOP_RING));
}
static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
@@ -1967,7 +1967,8 @@ void intel_engines_reset_default_submission(struct intel_gt *gt)
if (engine->sanitize)
engine->sanitize(engine);
- engine->set_default_submission(engine);
+ if (engine->set_default_submission)
+ engine->set_default_submission(engine);
}
}
@@ -2551,7 +2552,7 @@ void xehp_enable_ccs_engines(struct intel_engine_cs *engine)
return;
intel_uncore_write(engine->uncore, GEN12_RCU_MODE,
- _MASKED_BIT_ENABLE(GEN12_RCU_MODE_CCS_ENABLE));
+ REG_MASKED_FIELD_ENABLE(GEN12_RCU_MODE_CCS_ENABLE));
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index fb7bff27b45a..26196a57041e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -24,7 +24,7 @@ static void intel_gsc_idle_msg_enable(struct intel_engine_cs *engine)
if (MEDIA_VER(i915) >= 13 && engine->id == GSC0) {
intel_uncore_write(engine->gt->uncore,
RC_PSMI_CTRL_GSCCS,
- _MASKED_BIT_DISABLE(IDLE_MSG_DISABLE));
+ REG_MASKED_FIELD_DISABLE(IDLE_MSG_DISABLE));
/* hysteresis 0xA=5us as recommended in spec*/
intel_uncore_write(engine->gt->uncore,
PWRCTX_MAXCNT_GSCCS,
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index cafe0b8e6bdd..1359fc9cb88e 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -2934,12 +2934,12 @@ static void enable_execlists(struct intel_engine_cs *engine)
intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
if (GRAPHICS_VER(engine->i915) >= 11)
- mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
+ mode = REG_MASKED_FIELD_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
else
- mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
+ mode = REG_MASKED_FIELD_ENABLE(GFX_RUN_LIST_ENABLE);
ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode);
- ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, REG_MASKED_FIELD_DISABLE(STOP_RING));
ENGINE_WRITE_FW(engine,
RING_HWS_PGA,
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index db995dce914a..ac9aede82320 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -6,6 +6,7 @@
#include <linux/highmem.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_gmd_misc_regs.h>
#include "display/intel_display.h"
#include "i915_drv.h"
@@ -915,15 +916,15 @@ void intel_gt_init_swizzling(struct intel_gt *gt)
if (GRAPHICS_VER(i915) == 6)
intel_uncore_write(uncore,
ARB_MODE,
- _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
+ REG_MASKED_FIELD_ENABLE(ARB_MODE_SWIZZLE_SNB));
else if (GRAPHICS_VER(i915) == 7)
intel_uncore_write(uncore,
ARB_MODE,
- _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+ REG_MASKED_FIELD_ENABLE(ARB_MODE_SWIZZLE_IVB));
else if (GRAPHICS_VER(i915) == 8)
intel_uncore_write(uncore,
GAMTARBMODE,
- _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
+ REG_MASKED_FIELD_ENABLE(ARB_MODE_SWIZZLE_BDW));
else
MISSING_CASE(GRAPHICS_VER(i915));
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index ac527d878820..d76121e117e1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -5,6 +5,7 @@
#include <drm/drm_managed.h>
#include <drm/intel/intel-gtt.h>
+#include <drm/intel/intel_gmd_interrupt_regs.h>
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 75e802e10be2..d85c849c0081 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -5,6 +5,8 @@
#include <linux/sched/clock.h>
+#include <drm/intel/intel_gmd_interrupt_regs.h>
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index c0aff4b3cbba..babaf16e72f2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -8,6 +8,7 @@
#include <linux/string_helpers.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_pcode_regs.h>
#include "i915_drv.h"
#include "i915_reg.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index 7421ed18d8d1..3ba9b2206b79 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -61,6 +61,9 @@
#define GMD_ID_GRAPHICS _MMIO(0xd8c)
#define GMD_ID_MEDIA _MMIO(MTL_MEDIA_GSI_BASE + 0xd8c)
+#define GMD_ID_ARCH_MASK REG_GENMASK(31, 22)
+#define GMD_ID_RELEASE_MASK REG_GENMASK(21, 14)
+#define GMD_ID_STEP REG_GENMASK(5, 0)
#define MCFG_MCR_SELECTOR _MMIO(0xfd0)
#define MTL_STEER_SEMAPHORE _MMIO(0xfd0)
@@ -318,11 +321,9 @@
#define _RING_FAULT_REG_VCS 0x4194
#define _RING_FAULT_REG_BCS 0x4294
#define _RING_FAULT_REG_VECS 0x4394
-#define RING_FAULT_REG(engine) _MMIO(_PICK((engine)->class, \
- _RING_FAULT_REG_RCS, \
- _RING_FAULT_REG_VCS, \
- _RING_FAULT_REG_VECS, \
- _RING_FAULT_REG_BCS))
+#define RING_FAULT_REG(engine) _MMIO(_PICK_EVEN((engine)->class, \
+ _RING_FAULT_REG_RCS, \
+ _RING_FAULT_REG_VCS))
#define RING_FAULT_VADDR_MASK REG_GENMASK(31, 12) /* pre-bdw */
#define RING_FAULT_ENGINE_ID_MASK REG_GENMASK(16, 12) /* bdw+ */
#define RING_FAULT_GTTSEL_MASK REG_BIT(11) /* pre-bdw */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index 1154cd2b7c34..a48601395dce 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -7,6 +7,8 @@
#include <linux/sysfs.h>
#include <linux/printk.h>
+#include <drm/intel/intel_pcode_regs.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_sysfs.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index 1d19c073ba2e..bcd707e3d436 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -6,6 +6,8 @@
#include <asm/tsc.h>
#include <linux/cpufreq.h>
+#include <drm/intel/intel_pcode_regs.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_gt.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index d36e543e98df..147d22907960 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -846,16 +846,16 @@ static void init_common_regs(u32 * const regs,
u32 ctl;
int loc;
- ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
- ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ ctl = REG_MASKED_FIELD_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
+ ctl |= REG_MASKED_FIELD_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
if (inhibit)
ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
if (GRAPHICS_VER(engine->i915) < 11)
- ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
- CTX_CTRL_RS_CTX_ENABLE);
+ ctl |= REG_MASKED_FIELD_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
+ CTX_CTRL_RS_CTX_ENABLE);
/* Wa_14019159160 - Case 2.*/
if (ctx_needs_runalone(ce))
- ctl |= _MASKED_BIT_ENABLE(GEN12_CTX_CTRL_RUNALONE_MODE);
+ ctl |= REG_MASKED_FIELD_ENABLE(GEN12_CTX_CTRL_RUNALONE_MODE);
regs[CTX_CONTEXT_CONTROL] = ctl;
regs[CTX_TIMESTAMP] = ce->stats.runtime.last;
@@ -1344,7 +1344,7 @@ gen12_invalidate_state_cache(u32 *cs)
{
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(GEN12_CS_DEBUG_MODE2);
- *cs++ = _MASKED_BIT_ENABLE(INSTRUCTION_STATE_CACHE_INVALIDATE);
+ *cs++ = REG_MASKED_FIELD_ENABLE(INSTRUCTION_STATE_CACHE_INVALIDATE);
return cs;
}
@@ -1736,22 +1736,19 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
/* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
{
COMMON_SLICE_CHICKEN2,
- __MASKED_FIELD(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE,
- 0),
+ REG_MASKED_FIELD_DISABLE(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE),
},
/* BSpec: 11391 */
{
FF_SLICE_CHICKEN,
- __MASKED_FIELD(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX,
- FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX),
+ REG_MASKED_FIELD_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX),
},
/* BSpec: 11299 */
{
_3D_CHICKEN3,
- __MASKED_FIELD(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX,
- _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX),
+ REG_MASKED_FIELD_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX),
}
};
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 286d49ecc449..e91e5cdca26c 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -7,6 +7,8 @@
#include <linux/string_helpers.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_pcode_regs.h>
+#include <drm/intel/intel_gmd_interrupt_regs.h>
#include "display/vlv_clock.h"
#include "gem/i915_gem_region.h"
@@ -376,9 +378,9 @@ static void chv_rc6_enable(struct intel_rc6 *rc6)
/* Allows RC6 residency counter to work */
intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
- VLV_MEDIA_RC6_COUNT_EN |
- VLV_RENDER_RC6_COUNT_EN));
+ REG_MASKED_FIELD_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
/* 3: Enable RC6 */
rc6->ctl_enable = GEN7_RC_CTL_TO_MODE;
@@ -401,11 +403,11 @@ static void vlv_rc6_enable(struct intel_rc6 *rc6)
/* Allows RC6 residency counter to work */
intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
- VLV_MEDIA_RC0_COUNT_EN |
- VLV_RENDER_RC0_COUNT_EN |
- VLV_MEDIA_RC6_COUNT_EN |
- VLV_RENDER_RC6_COUNT_EN));
+ REG_MASKED_FIELD_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC0_COUNT_EN |
+ VLV_RENDER_RC0_COUNT_EN |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
rc6->ctl_enable =
GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
@@ -761,17 +763,17 @@ static u64 vlv_residency_raw(struct intel_uncore *uncore, const i915_reg_t reg)
* set the high bit to be safe.
*/
intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+ REG_MASKED_FIELD_ENABLE(VLV_COUNT_RANGE_HIGH));
upper = intel_uncore_read_fw(uncore, reg);
do {
tmp = upper;
intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
- _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
+ REG_MASKED_FIELD_DISABLE(VLV_COUNT_RANGE_HIGH));
lower = intel_uncore_read_fw(uncore, reg);
intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+ REG_MASKED_FIELD_ENABLE(VLV_COUNT_RANGE_HIGH));
upper = intel_uncore_read_fw(uncore, reg);
} while (upper != tmp && --loop);
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 41b5036dc538..984d0056c01c 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -586,7 +586,7 @@ static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
return 0;
}
- intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
+ intel_uncore_write_fw(uncore, reg, REG_MASKED_FIELD_ENABLE(request));
ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
700, 0, NULL);
if (ret)
@@ -602,7 +602,7 @@ static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
intel_uncore_write_fw(engine->uncore,
RING_RESET_CTL(engine->mmio_base),
- _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+ REG_MASKED_FIELD_DISABLE(RESET_CTL_REQUEST_RESET));
}
static int gen8_reset_engines(struct intel_gt *gt,
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 8314a4b0505e..064e7cce412f 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -4,6 +4,8 @@
*/
#include <drm/drm_cache.h>
+#include <drm/intel/intel_gmd_interrupt_regs.h>
+#include <drm/intel/intel_gmd_misc_regs.h>
#include "gem/i915_gem_internal.h"
@@ -126,8 +128,7 @@ static void flush_cs_tlb(struct intel_engine_cs *engine)
engine->name);
ENGINE_WRITE_FW(engine, RING_INSTPM,
- _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
- INSTPM_SYNC_FLUSH));
+ REG_MASKED_FIELD_ENABLE(INSTPM_TLB_INVALIDATE | INSTPM_SYNC_FLUSH));
if (__intel_wait_for_register_fw(engine->uncore,
RING_INSTPM(engine->mmio_base),
INSTPM_SYNC_FLUSH, 0,
@@ -170,7 +171,7 @@ static void set_pp_dir(struct intel_engine_cs *engine)
if (GRAPHICS_VER(engine->i915) >= 7) {
ENGINE_WRITE_FW(engine,
RING_MODE_GEN7,
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ REG_MASKED_FIELD_ENABLE(GFX_PPGTT_ENABLE));
}
}
@@ -274,7 +275,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
if (GRAPHICS_VER(engine->i915) > 2) {
ENGINE_WRITE_FW(engine,
- RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ RING_MI_MODE, REG_MASKED_FIELD_DISABLE(STOP_RING));
ENGINE_POSTING_READ(engine, RING_MI_MODE);
}
@@ -717,7 +718,7 @@ static int load_pd_dir(struct i915_request *rq,
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
- *cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);
+ *cs++ = REG_MASKED_FIELD_ENABLE(INSTPM_TLB_INVALIDATE);
intel_ring_advance(rq, cs);
@@ -766,8 +767,7 @@ static int mi_set_context(struct i915_request *rq,
*cs++ = i915_mmio_reg_offset(
RING_PSMI_CTL(signaller->mmio_base));
- *cs++ = _MASKED_BIT_ENABLE(
- GEN6_PSMI_SLEEP_MSG_DISABLE);
+ *cs++ = REG_MASKED_FIELD_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE);
}
}
} else if (GRAPHICS_VER(i915) == 5) {
@@ -820,8 +820,7 @@ static int mi_set_context(struct i915_request *rq,
last_reg = RING_PSMI_CTL(signaller->mmio_base);
*cs++ = i915_mmio_reg_offset(last_reg);
- *cs++ = _MASKED_BIT_DISABLE(
- GEN6_PSMI_SLEEP_MSG_DISABLE);
+ *cs++ = REG_MASKED_FIELD_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE);
}
/* Insert a delay before the next switch! */
@@ -1053,7 +1052,7 @@ static void gen6_bsd_submit_request(struct i915_request *request)
* will then assume that it is busy and bring it out of rc6.
*/
intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE),
- _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ REG_MASKED_FIELD_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
/* Clear the context id. Here be magic! */
intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);
@@ -1074,7 +1073,7 @@ static void gen6_bsd_submit_request(struct i915_request *request)
* and so let it sleep to conserve power when idle.
*/
intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE),
- _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ REG_MASKED_FIELD_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 90b7eee78f1f..844f2716a386 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -7,6 +7,7 @@
#include <drm/intel/i915_drm.h>
#include <drm/intel/display_parent_interface.h>
+#include <drm/intel/intel_pcode_regs.h>
#include "display/intel_display_rps.h"
#include "display/vlv_clock.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index b0ee3d0ae681..24ea5d8d529c 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -3,6 +3,8 @@
* Copyright © 2014-2018 Intel Corporation
*/
+#include <drm/intel/intel_gmd_misc_regs.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_mmio_range.h"
@@ -298,39 +300,39 @@ wa_mcr_write_clr(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clr)
static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
- wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
+ wa_add(wal, reg, 0, REG_MASKED_FIELD_ENABLE(val), val, true);
}
static void
wa_mcr_masked_en(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
{
- wa_mcr_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
+ wa_mcr_add(wal, reg, 0, REG_MASKED_FIELD_ENABLE(val), val, true);
}
static void
wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
- wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
+ wa_add(wal, reg, 0, REG_MASKED_FIELD_DISABLE(val), val, true);
}
static void
wa_mcr_masked_dis(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
{
- wa_mcr_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
+ wa_mcr_add(wal, reg, 0, REG_MASKED_FIELD_DISABLE(val), val, true);
}
static void
wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
u32 mask, u32 val)
{
- wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
+ wa_add(wal, reg, 0, REG_MASKED_FIELD(mask, val), mask, true);
}
static void
wa_mcr_masked_field_set(struct i915_wa_list *wal, i915_mcr_reg_t reg,
u32 mask, u32 val)
{
- wa_mcr_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
+ wa_mcr_add(wal, reg, 0, REG_MASKED_FIELD(mask, val), mask, true);
}
static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -664,7 +666,7 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
/* WaEnableFloatBlendOptimization:icl */
wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
- _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE),
+ REG_MASKED_FIELD_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE),
0 /* write-only, so skip validation */,
true);
@@ -1129,7 +1131,7 @@ hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
wa_add(wal,
HSW_ROW_CHICKEN3, 0,
- _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
+ REG_MASKED_FIELD_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
0 /* XXX does this reg exist? */, true);
/* WaVSRefCountFullforceMissDisable:hsw */
@@ -2270,7 +2272,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
IS_DG2(i915)) {
/* Wa_14015150844 */
wa_mcr_add(wal, XEHP_HDC_CHICKEN0, 0,
- _MASKED_BIT_ENABLE(DIS_ATOMIC_CHAINING_TYPED_WRITES),
+ REG_MASKED_FIELD_ENABLE(DIS_ATOMIC_CHAINING_TYPED_WRITES),
0, true);
}
@@ -2661,7 +2663,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
if (IS_GRAPHICS_VER(i915, 4, 6))
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
wa_add(wal, RING_MI_MODE(RENDER_RING_BASE),
- 0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
+ 0, REG_MASKED_FIELD_ENABLE(VS_TIMER_DISPATCH),
/* XXX bit doesn't stick on Broadwater */
IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH, true);
@@ -2677,7 +2679,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
* enabled.
*/
wa_add(wal, ECOSKPD(RENDER_RING_BASE),
- 0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
+ 0, REG_MASKED_FIELD_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
0 /* XXX bit doesn't stick on Broadwater */,
true);
}
@@ -2877,7 +2879,7 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
* we need to explicitly skip the readback.
*/
wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
- _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
+ REG_MASKED_FIELD_ENABLE(ENABLE_PREFETCH_INTO_IC),
0 /* write-only, so skip validation */,
true);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 52ec4421a211..1c2764440323 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -624,7 +624,7 @@ int intel_guc_crash_process_msg(struct intel_guc *guc, u32 action)
else
guc_err(guc, "Unknown crash notification: 0x%04X\n", action);
- queue_work(system_unbound_wq, &guc->dead_guc_worker);
+ queue_work(system_dfl_wq, &guc->dead_guc_worker);
return 0;
}
@@ -646,7 +646,7 @@ int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
guc_err(guc, "Received early exception notification!\n");
if (msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED | INTEL_GUC_RECV_MSG_EXCEPTION))
- queue_work(system_unbound_wq, &guc->dead_guc_worker);
+ queue_work(system_dfl_wq, &guc->dead_guc_worker);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 8c4da526d461..1c455d84bf9d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -31,7 +31,7 @@ static void ct_dead_ct_worker_func(struct work_struct *w);
do { \
if (!(ct)->dead_ct_reported) { \
(ct)->dead_ct_reason |= 1 << CT_DEAD_##reason; \
- queue_work(system_unbound_wq, &(ct)->dead_ct_worker); \
+ queue_work(system_dfl_wq, &(ct)->dead_ct_worker); \
} \
} while (0)
#else
@@ -1238,7 +1238,7 @@ static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *requ
list_add_tail(&request->link, &ct->requests.incoming);
spin_unlock_irqrestore(&ct->requests.lock, flags);
- queue_work(system_unbound_wq, &ct->requests.worker);
+ queue_work(system_dfl_wq, &ct->requests.worker);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 142183d3f7fb..788e59cdfac9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -3385,7 +3385,7 @@ static void guc_context_sched_disable(struct intel_context *ce)
} else if (!intel_context_is_closed(ce) && !guc_id_pressure(guc, ce) &&
delay) {
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- mod_delayed_work(system_unbound_wq,
+ mod_delayed_work(system_dfl_wq,
&ce->guc_state.sched_disable_delay_work,
msecs_to_jiffies(delay));
} else {
@@ -3611,7 +3611,7 @@ static void guc_context_destroy(struct kref *kref)
* take the GT PM for the first time which isn't allowed from an atomic
* context.
*/
- queue_work(system_unbound_wq, &guc->submission_state.destroyed_worker);
+ queue_work(system_dfl_wq, &guc->submission_state.destroyed_worker);
}
static int guc_context_alloc(struct intel_context *ce)
@@ -4414,9 +4414,9 @@ static void start_engine(struct intel_engine_cs *engine)
{
ENGINE_WRITE_FW(engine,
RING_MODE_GEN7,
- _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
+ REG_MASKED_FIELD_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
- ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, REG_MASKED_FIELD_DISABLE(STOP_RING));
ENGINE_POSTING_READ(engine, RING_MI_MODE);
}
@@ -5380,7 +5380,7 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
* A GT reset flushes this worker queue (G2H handler) so we must use
* another worker to trigger a GT reset.
*/
- queue_work(system_unbound_wq, &guc->submission_state.reset_fail_worker);
+ queue_work(system_dfl_wq, &guc->submission_state.reset_fail_worker);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 5a9f7749acff..7fac97fe30a6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -1065,7 +1065,7 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
/* Start the DMA */
intel_uncore_write_fw(uncore, DMA_CTRL,
- _MASKED_BIT_ENABLE(dma_flags | START_DMA));
+ REG_MASKED_FIELD_ENABLE(dma_flags | START_DMA));
/* Wait for DMA to finish */
ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100, NULL);
@@ -1075,7 +1075,7 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
intel_uncore_read_fw(uncore, DMA_CTRL));
/* Disable the bits once DMA is over */
- intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
+ intel_uncore_write_fw(uncore, DMA_CTRL, REG_MASKED_FIELD_DISABLE(dma_flags));
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index b868a0501886..e4ed47ffd2d8 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -37,6 +37,7 @@
#include <linux/slab.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_gmd_misc_regs.h>
#include "display/i9xx_plane_regs.h"
#include "display/intel_display_regs.h"
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index fe4302c8cae5..00451fcb1005 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -39,7 +39,6 @@
#include "display/i9xx_plane_regs.h"
#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
-#include "display/intel_display.h"
#include "display/intel_display_regs.h"
#include "display/intel_dpio_phy.h"
#include "display/intel_dpll_mgr.h"
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index b0d8d3e74ae7..a34f56630af9 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -40,16 +40,18 @@
#include <drm/display/drm_dp.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_pcode_regs.h>
+#include <drm/intel/intel_gmd_interrupt_regs.h>
#include "display/bxt_dpio_phy_regs.h"
#include "display/i9xx_plane_regs.h"
#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
#include "display/intel_display_regs.h"
-#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
#include "display/intel_dp_aux_regs.h"
#include "display/intel_dpio_phy.h"
+#include "display/intel_dpll_mgr.h"
#include "display/intel_fbc.h"
#include "display/intel_fdi_regs.h"
#include "display/intel_pps_regs.h"
@@ -79,6 +81,9 @@
#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
#define PCH_PP_DIVISOR _MMIO(0xc7210)
+#define pipe_name(p) ((p) + 'A')
+#define port_name(p) ((p) + 'A')
+
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
{
struct drm_i915_private *i915 = gvt->gt->i915;
@@ -558,7 +563,7 @@ static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
int refclk = 100000;
enum dpio_phy phy = DPIO_PHY0;
enum dpio_channel ch = DPIO_CH0;
- struct dpll clock = {};
+ int m1, m2, n, p1, p2, m, p, vco, dot;
u32 temp;
/* Port to PHY mapping is fixed, see bxt_ddi_phy_info{} */
@@ -587,30 +592,25 @@ static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
goto out;
}
- clock.m1 = 2;
- clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK,
- vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 0))) << 22;
+ m1 = 2;
+ m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 0))) << 22;
if (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 3)) & PORT_PLL_M2_FRAC_ENABLE)
- clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
- vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 2)));
- clock.n = REG_FIELD_GET(PORT_PLL_N_MASK,
- vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 1)));
- clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK,
- vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
- clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK,
- vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
- clock.m = clock.m1 * clock.m2;
- clock.p = clock.p1 * clock.p2 * 5;
-
- if (clock.n == 0 || clock.p == 0) {
+ m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 2)));
+ n = REG_FIELD_GET(PORT_PLL_N_MASK, vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 1)));
+ p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
+ p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
+ m = m1 * m2;
+ p = p1 * p2 * 5;
+
+ if (n == 0 || p == 0) {
gvt_dbg_dpy("vgpu-%d PORT_%c PLL has invalid divider\n", vgpu->id, port_name(port));
goto out;
}
- clock.vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock.m), clock.n << 22);
- clock.dot = DIV_ROUND_CLOSEST(clock.vco, clock.p);
+ vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, m), n << 22);
+ dot = DIV_ROUND_CLOSEST(vco, p);
- dp_br = clock.dot;
+ dp_br = dot;
out:
return dp_br;
@@ -2047,10 +2047,10 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
bool enable_execlist;
int ret;
- (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
+ (*(u32 *)p_data) &= ~REG_MASKED_FIELD_ENABLE(1);
if (IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
IS_COMETLAKE(vgpu->gvt->gt->i915))
- (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
+ (*(u32 *)p_data) &= ~REG_MASKED_FIELD_ENABLE(2);
write_vreg(vgpu, offset, p_data, bytes);
if (IS_MASKED_BITS_ENABLED(data, 1)) {
@@ -2139,7 +2139,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET))
data |= RESET_CTL_READY_TO_RESET;
- else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
+ else if (data & REG_MASKED_FIELD_DISABLE(RESET_CTL_REQUEST_RESET))
data &= ~RESET_CTL_READY_TO_RESET;
vgpu_vreg(vgpu, offset) = data;
@@ -2152,7 +2152,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
{
u32 data = *(u32 *)p_data;
- (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
+ (*(u32 *)p_data) &= ~REG_MASKED_FIELD_ENABLE(0x18);
write_vreg(vgpu, offset, p_data, bytes);
if (IS_MASKED_BITS_ENABLED(data, 0x10) ||
@@ -2534,7 +2534,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
#define RING_REG(base) _MMIO((base) + 0xd0)
MMIO_RING_F(RING_REG, 4, F_RO, 0,
- ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
+ ~REG_MASKED_FIELD_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
ring_reset_ctl_write);
#undef RING_REG
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 91d22b1c62e2..f85113218037 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -32,6 +32,7 @@
#include <linux/eventfd.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_gmd_interrupt_regs.h>
#include "display/intel_display_regs.h"
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index d4e9d485d382..a93999ba8092 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -34,6 +34,7 @@
*/
#include <drm/drm_print.h>
+#include <drm/intel/intel_gmd_misc_regs.h>
#include "gt/intel_context.h"
#include "gt/intel_engine_regs.h"
@@ -475,7 +476,7 @@ bool is_inhibit_context(struct intel_context *ce)
{
const u32 *reg_state = ce->lrc_reg_state;
u32 inhibit_mask =
- _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ REG_MASKED_FIELD_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
return inhibit_mask ==
(reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask);
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 90d8eb1761a3..a4cf15e43990 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -91,9 +91,9 @@
((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
#define IS_MASKED_BITS_ENABLED(_val, _b) \
- (((_val) & _MASKED_BIT_ENABLE(_b)) == _MASKED_BIT_ENABLE(_b))
+ (((_val) & REG_MASKED_FIELD_ENABLE(_b)) == REG_MASKED_FIELD_ENABLE(_b))
#define IS_MASKED_BITS_DISABLED(_val, _b) \
- ((_val) & _MASKED_BIT_DISABLE(_b))
+ ((_val) & REG_MASKED_FIELD_DISABLE(_b))
#define FORCEWAKE_RENDER_GEN9_REG 0xa278
#define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index cd44cbfb53b5..5cb7a72774a0 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -193,7 +193,7 @@ active_retire(struct i915_active *ref)
return;
if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
- queue_work(system_unbound_wq, &ref->work);
+ queue_work(system_dfl_wq, &ref->work);
return;
}
diff --git a/drivers/gpu/drm/i915/i915_bo.c b/drivers/gpu/drm/i915/i915_bo.c
new file mode 100644
index 000000000000..1789f7cab05c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_bo.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2024 Intel Corporation */
+
+#include <drm/drm_panic.h>
+#include <drm/drm_print.h>
+#include <drm/intel/display_parent_interface.h>
+
+#include "display/intel_fb.h"
+#include "gem/i915_gem_mman.h"
+#include "gem/i915_gem_object.h"
+#include "gem/i915_gem_object_frontbuffer.h"
+#include "pxp/intel_pxp.h"
+
+#include "i915_bo.h"
+#include "i915_debugfs.h"
+#include "i915_drv.h"
+
+static bool i915_bo_is_tiled(struct drm_gem_object *obj)
+{
+ return i915_gem_object_is_tiled(to_intel_bo(obj));
+}
+
+static bool i915_bo_is_userptr(struct drm_gem_object *obj)
+{
+ return i915_gem_object_is_userptr(to_intel_bo(obj));
+}
+
+static bool i915_bo_is_shmem(struct drm_gem_object *obj)
+{
+ return i915_gem_object_is_shmem(to_intel_bo(obj));
+}
+
+static bool i915_bo_is_protected(struct drm_gem_object *obj)
+{
+ return i915_gem_object_is_protected(to_intel_bo(obj));
+}
+
+static int i915_bo_key_check(struct drm_gem_object *obj)
+{
+ return intel_pxp_key_check(obj, false);
+}
+
+static int i915_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ return i915_gem_fb_mmap(to_intel_bo(obj), vma);
+}
+
+static int i915_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
+{
+ return i915_gem_object_read_from_page(to_intel_bo(obj), offset, dst, size);
+}
+
+static void i915_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
+{
+ i915_debugfs_describe_obj(m, to_intel_bo(obj));
+}
+
+static int i915_bo_framebuffer_init(struct drm_gem_object *_obj,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(_obj);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ unsigned int tiling, stride;
+
+ i915_gem_object_lock(obj, NULL);
+ tiling = i915_gem_object_get_tiling(obj);
+ stride = i915_gem_object_get_stride(obj);
+ i915_gem_object_unlock(obj);
+
+ if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+ /*
+ * If there's a fence, enforce that
+ * the fb modifier and tiling mode match.
+ */
+ if (tiling != I915_TILING_NONE &&
+ tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+ drm_dbg_kms(&i915->drm,
+ "tiling_mode doesn't match fb modifier\n");
+ return -EINVAL;
+ }
+ } else {
+ if (tiling == I915_TILING_X) {
+ mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ } else if (tiling == I915_TILING_Y) {
+ drm_dbg_kms(&i915->drm,
+ "No Y tiling for legacy addfb\n");
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * gen2/3 display engine uses the fence if present,
+ * so the tiling mode must match the fb modifier exactly.
+ */
+ if (GRAPHICS_VER(i915) < 4 &&
+ tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+ drm_dbg_kms(&i915->drm,
+ "tiling_mode must match fb modifier exactly on gen2/3\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If there's a fence, enforce that
+ * the fb pitch and fence stride match.
+ */
+ if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
+ drm_dbg_kms(&i915->drm,
+ "pitch (%d) must match tiling stride (%d)\n",
+ mode_cmd->pitches[0], stride);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void i915_bo_framebuffer_fini(struct drm_gem_object *obj)
+{
+	/* i915 keeps no extra per-framebuffer state, so there is nothing to release here */
+}
+
+static struct drm_gem_object *
+i915_bo_framebuffer_lookup(struct drm_device *drm,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_lookup(filp, mode_cmd->handles[0]);
+ if (!obj)
+ return ERR_PTR(-ENOENT);
+
+	/* on discrete GPUs the framebuffer object must be backed by (or migratable to) LMEM */
+ if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) {
+ /* object is "remote", not in local memory */
+ i915_gem_object_put(obj);
+ drm_dbg_kms(&i915->drm, "framebuffer must reside in local memory\n");
+ return ERR_PTR(-EREMOTE);
+ }
+
+ return intel_bo_to_drm_bo(obj);
+}
+
+const struct intel_display_bo_interface i915_display_bo_interface = {
+ .is_tiled = i915_bo_is_tiled,
+ .is_userptr = i915_bo_is_userptr,
+ .is_shmem = i915_bo_is_shmem,
+ .is_protected = i915_bo_is_protected,
+ .key_check = i915_bo_key_check,
+ .fb_mmap = i915_bo_fb_mmap,
+ .read_from_page = i915_bo_read_from_page,
+ .describe = i915_bo_describe,
+ .framebuffer_init = i915_bo_framebuffer_init,
+ .framebuffer_fini = i915_bo_framebuffer_fini,
+ .framebuffer_lookup = i915_bo_framebuffer_lookup,
+};
diff --git a/drivers/gpu/drm/i915/i915_bo.h b/drivers/gpu/drm/i915/i915_bo.h
new file mode 100644
index 000000000000..57255d052dd9
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_bo.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef __I915_BO_H__
+#define __I915_BO_H__
+
+extern const struct intel_display_bo_interface i915_display_bo_interface;
+
+#endif /* __I915_BO_H__ */
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 42f6b44f0027..4778ba664ec7 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -33,6 +33,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_gmd_misc_regs.h>
#include "gem/i915_gem_context.h"
#include "gt/intel_gt.h"
diff --git a/drivers/gpu/drm/i915/i915_dpt.c b/drivers/gpu/drm/i915/i915_dpt.c
new file mode 100644
index 000000000000..9f47bb563c85
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_dpt.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+#include <drm/intel/display_parent_interface.h>
+
+#include "display/intel_display_core.h"
+#include "gem/i915_gem_domain.h"
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_lmem.h"
+#include "gt/gen8_ppgtt.h"
+
+#include "i915_dpt.h"
+#include "i915_drv.h"
+
+struct intel_dpt {
+ struct i915_address_space vm;
+
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ void __iomem *iomem;
+};
+
+#define i915_is_dpt(vm) ((vm)->is_dpt)
+
+static inline struct intel_dpt *
+i915_vm_to_dpt(struct i915_address_space *vm)
+{
+ BUILD_BUG_ON(offsetof(struct intel_dpt, vm));
+ drm_WARN_ON(&vm->i915->drm, !i915_is_dpt(vm));
+ return container_of(vm, struct intel_dpt, vm);
+}
+
+struct i915_address_space *i915_dpt_to_vm(struct intel_dpt *dpt)
+{
+ return &dpt->vm;
+}
+
+static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
+{
+ writeq(pte, addr);
+}
+
+static void dpt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ unsigned int pat_index,
+ u32 flags)
+{
+ struct intel_dpt *dpt = i915_vm_to_dpt(vm);
+ gen8_pte_t __iomem *base = dpt->iomem;
+
+ gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
+ vm->pte_encode(addr, pat_index, flags));
+}
+
+static void dpt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ unsigned int pat_index,
+ u32 flags)
+{
+ struct intel_dpt *dpt = i915_vm_to_dpt(vm);
+ gen8_pte_t __iomem *base = dpt->iomem;
+ const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags);
+ struct sgt_iter sgt_iter;
+ dma_addr_t addr;
+ int i;
+
+ /*
+ * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+ * not to allow the user to override access to a read only page.
+ */
+
+ i = vma_res->start / I915_GTT_PAGE_SIZE;
+ for_each_sgt_daddr(addr, sgt_iter, vma_res->bi.pages)
+ gen8_set_pte(&base[i++], pte_encode | addr);
+}
+
+static void dpt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+}
+
+static void dpt_bind_vma(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma_resource *vma_res,
+ unsigned int pat_index,
+ u32 flags)
+{
+ u32 pte_flags;
+
+ if (vma_res->bound_flags)
+ return;
+
+ /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
+ pte_flags = 0;
+ if (vm->has_read_only && vma_res->bi.readonly)
+ pte_flags |= PTE_READ_ONLY;
+ if (vma_res->bi.lmem)
+ pte_flags |= PTE_LM;
+
+ vm->insert_entries(vm, vma_res, pat_index, pte_flags);
+
+ vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
+
+ /*
+ * Without aliasing PPGTT there's no difference between
+ * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
+ * upgrade to both bound if we bind either to avoid double-binding.
+ */
+ vma_res->bound_flags = I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+}
+
+static void dpt_unbind_vma(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res)
+{
+ vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+}
+
+static void dpt_cleanup(struct i915_address_space *vm)
+{
+ struct intel_dpt *dpt = i915_vm_to_dpt(vm);
+
+ i915_gem_object_put(dpt->obj);
+}
+
+struct i915_vma *i915_dpt_pin_to_ggtt(struct intel_dpt *dpt, unsigned int alignment)
+{
+ struct drm_i915_private *i915 = dpt->vm.i915;
+ struct intel_display *display = i915->display;
+ struct ref_tracker *wakeref;
+ struct i915_vma *vma;
+ void __iomem *iomem;
+ struct i915_gem_ww_ctx ww;
+ u64 pin_flags = 0;
+ int err;
+
+ if (i915_gem_object_is_stolen(dpt->obj))
+ pin_flags |= PIN_MAPPABLE;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ atomic_inc(&display->restore.pending_fb_pin);
+
+ for_i915_gem_ww(&ww, err, true) {
+ err = i915_gem_object_lock(dpt->obj, &ww);
+ if (err)
+ continue;
+
+ vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0,
+ alignment, pin_flags);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ continue;
+ }
+
+ iomem = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+
+ if (IS_ERR(iomem)) {
+ err = PTR_ERR(iomem);
+ continue;
+ }
+
+ dpt->vma = vma;
+ dpt->iomem = iomem;
+
+ i915_vma_get(vma);
+ }
+
+ dpt->obj->mm.dirty = true;
+
+ atomic_dec(&display->restore.pending_fb_pin);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ return err ? ERR_PTR(err) : vma;
+}
+
+void i915_dpt_unpin_from_ggtt(struct intel_dpt *dpt)
+{
+ i915_vma_unpin_iomap(dpt->vma);
+ i915_vma_put(dpt->vma);
+}
+
+static struct intel_dpt *i915_dpt_create(struct drm_gem_object *obj, size_t size)
+{
+ struct drm_i915_private *i915 = to_i915(obj->dev);
+ struct drm_i915_gem_object *dpt_obj;
+ struct i915_address_space *vm;
+ struct intel_dpt *dpt;
+ int ret;
+
+ if (!size)
+ size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);
+
+ size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);
+
+ dpt_obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(dpt_obj) && i915_ggtt_has_aperture(to_gt(i915)->ggtt))
+ dpt_obj = i915_gem_object_create_stolen(i915, size);
+ if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) {
+ drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n");
+ dpt_obj = i915_gem_object_create_shmem(i915, size);
+ }
+ if (IS_ERR(dpt_obj))
+ return ERR_CAST(dpt_obj);
+
+ ret = i915_gem_object_lock_interruptible(dpt_obj, NULL);
+ if (!ret) {
+ ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
+ i915_gem_object_unlock(dpt_obj);
+ }
+ if (ret) {
+ i915_gem_object_put(dpt_obj);
+ return ERR_PTR(ret);
+ }
+
+ dpt = kzalloc_obj(*dpt);
+ if (!dpt) {
+ i915_gem_object_put(dpt_obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ vm = &dpt->vm;
+
+ vm->gt = to_gt(i915);
+ vm->i915 = i915;
+ vm->dma = i915->drm.dev;
+ vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
+ vm->is_dpt = true;
+
+ i915_address_space_init(vm, VM_CLASS_DPT);
+
+ vm->insert_page = dpt_insert_page;
+ vm->clear_range = dpt_clear_range;
+ vm->insert_entries = dpt_insert_entries;
+ vm->cleanup = dpt_cleanup;
+
+ vm->vma_ops.bind_vma = dpt_bind_vma;
+ vm->vma_ops.unbind_vma = dpt_unbind_vma;
+
+ vm->pte_encode = vm->gt->ggtt->vm.pte_encode;
+
+ dpt->obj = dpt_obj;
+ dpt->obj->is_dpt = true;
+
+ return dpt;
+}
+
+static void i915_dpt_destroy(struct intel_dpt *dpt)
+{
+ dpt->obj->is_dpt = false;
+ i915_vm_put(&dpt->vm);
+}
+
+static void i915_dpt_suspend(struct intel_dpt *dpt)
+{
+ i915_ggtt_suspend_vm(&dpt->vm, true);
+}
+
+static void i915_dpt_resume(struct intel_dpt *dpt)
+{
+ i915_ggtt_resume_vm(&dpt->vm, true);
+}
+
+u64 i915_dpt_offset(struct i915_vma *dpt_vma)
+{
+ return i915_vma_offset(dpt_vma);
+}
+
+const struct intel_display_dpt_interface i915_display_dpt_interface = {
+ .create = i915_dpt_create,
+ .destroy = i915_dpt_destroy,
+ .suspend = i915_dpt_suspend,
+ .resume = i915_dpt_resume,
+};
diff --git a/drivers/gpu/drm/i915/i915_dpt.h b/drivers/gpu/drm/i915/i915_dpt.h
new file mode 100644
index 000000000000..08dbe444fe18
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_dpt.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2026 Intel Corporation */
+
+#ifndef __I915_DPT_H__
+#define __I915_DPT_H__
+
+#include <linux/types.h>
+
+struct i915_address_space;
+struct i915_vma;
+struct intel_dpt;
+
+struct i915_address_space *i915_dpt_to_vm(struct intel_dpt *dpt);
+struct i915_vma *i915_dpt_pin_to_ggtt(struct intel_dpt *dpt, unsigned int alignment);
+void i915_dpt_unpin_from_ggtt(struct intel_dpt *dpt);
+u64 i915_dpt_offset(struct i915_vma *dpt_vma);
+
+extern const struct intel_display_dpt_interface i915_display_dpt_interface;
+
+#endif /* __I915_DPT_H__ */
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index f0105c5b49a7..385a634c3ed0 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -48,6 +48,7 @@
#include <drm/drm_probe_helper.h>
#include <drm/intel/display_member.h>
#include <drm/intel/display_parent_interface.h>
+#include <drm/intel/intel_pcode_regs.h>
#include "display/i9xx_display_sr.h"
#include "display/intel_bw.h"
@@ -77,6 +78,7 @@
#include "gem/i915_gem_dmabuf.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
+#include "gem/i915_gem_object_frontbuffer.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
@@ -88,11 +90,14 @@
#include "pxp/intel_pxp_debugfs.h"
#include "pxp/intel_pxp_pm.h"
+#include "i915_bo.h"
#include "i915_debugfs.h"
#include "i915_display_pc8.h"
+#include "i915_dpt.h"
#include "i915_driver.h"
#include "i915_drm_client.h"
#include "i915_drv.h"
+#include "i915_dsb_buffer.h"
#include "i915_edram.h"
#include "i915_file_private.h"
#include "i915_getparam.h"
@@ -104,6 +109,7 @@
#include "i915_ioctl.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
+#include "i915_overlay.h"
#include "i915_panic.h"
#include "i915_perf.h"
#include "i915_query.h"
@@ -147,10 +153,11 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
/*
* The unordered i915 workqueue should be used for all work
* scheduling that do not require running in order, which used
- * to be scheduled on the system_wq before moving to a driver
+ * to be scheduled on the system_percpu_wq before moving to a driver
* instance due deprecation of flush_scheduled_work().
*/
- dev_priv->unordered_wq = alloc_workqueue("i915-unordered", 0, 0);
+ dev_priv->unordered_wq = alloc_workqueue("i915-unordered", WQ_PERCPU,
+ 0);
if (dev_priv->unordered_wq == NULL)
goto out_free_wq;
@@ -556,10 +563,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
drm_dbg(&dev_priv->drm, "can't enable MSI");
}
- ret = intel_gvt_init(dev_priv);
- if (ret)
- goto err_msi;
-
intel_opregion_setup(display);
ret = i915_pcode_init(dev_priv);
@@ -580,7 +583,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
err_opregion:
intel_opregion_cleanup(display);
-err_msi:
if (pdev->msi_enabled)
pci_disable_msi(pdev);
err_mem_regions:
@@ -764,14 +766,21 @@ static bool vgpu_active(struct drm_device *drm)
}
static const struct intel_display_parent_interface parent = {
+ .bo = &i915_display_bo_interface,
+ .dpt = &i915_display_dpt_interface,
+ .dsb = &i915_display_dsb_interface,
+ .frontbuffer = &i915_display_frontbuffer_interface,
.hdcp = &i915_display_hdcp_interface,
.initial_plane = &i915_display_initial_plane_interface,
.irq = &i915_display_irq_interface,
+ .overlay = &i915_display_overlay_interface,
.panic = &i915_display_panic_interface,
.pc8 = &i915_display_pc8_interface,
+ .pcode = &i915_display_pcode_interface,
.rpm = &i915_display_rpm_interface,
.rps = &i915_display_rps_interface,
.stolen = &i915_display_stolen_interface,
+ .vma = &i915_display_vma_interface,
.fence_priority_display = fence_priority_display,
.has_auxccs = has_auxccs,
@@ -868,9 +877,13 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto out_cleanup_mmio;
+ ret = intel_gvt_init(i915);
+ if (ret)
+ goto out_cleanup_hw;
+
ret = intel_display_driver_probe_noirq(display);
if (ret < 0)
- goto out_cleanup_hw;
+ goto out_cleanup_gvt;
ret = intel_irq_install(i915);
if (ret)
@@ -919,6 +932,8 @@ out_cleanup_irq:
intel_irq_uninstall(i915);
out_cleanup_modeset:
intel_display_driver_remove_nogem(display);
+out_cleanup_gvt:
+ intel_gvt_driver_remove(i915);
out_cleanup_hw:
i915_driver_hw_remove(i915);
intel_memory_regions_driver_release(i915);
@@ -926,7 +941,6 @@ out_cleanup_hw:
i915_gem_drain_freed_objects(i915);
i915_ggtt_driver_late_release(i915);
out_cleanup_mmio:
- intel_gvt_driver_remove(i915);
i915_driver_mmio_release(i915);
out_runtime_pm_put:
enable_rpm_wakeref_asserts(&i915->runtime_pm);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 44ba620325bc..dafee3dcd1c5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -61,6 +61,7 @@
#include "intel_uncore.h"
struct drm_i915_clock_gating_funcs;
+struct i915_overlay;
struct intel_display;
struct intel_pxp;
struct vlv_s0ix_state;
@@ -248,7 +249,7 @@ struct drm_i915_private {
*
* This workqueue should be used for all unordered work
* scheduling within i915, which used to be scheduled on the
- * system_wq before moving to a driver instance due
+ * system_percpu_wq before moving to a driver instance due
* deprecation of flush_scheduled_work().
*/
struct workqueue_struct *unordered_wq;
@@ -307,6 +308,8 @@ struct drm_i915_private {
struct intel_pxp *pxp;
+ struct i915_overlay *overlay;
+
struct i915_pmu pmu;
/* The TTM device structure. */
diff --git a/drivers/gpu/drm/i915/display/intel_dsb_buffer.c b/drivers/gpu/drm/i915/i915_dsb_buffer.c
index 9b6060af250d..b797e0ccb70c 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb_buffer.c
+++ b/drivers/gpu/drm/i915/i915_dsb_buffer.c
@@ -3,11 +3,13 @@
* Copyright 2023, Intel Corporation.
*/
+#include <drm/intel/display_parent_interface.h>
+
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
+#include "i915_dsb_buffer.h"
#include "i915_vma.h"
-#include "intel_dsb_buffer.h"
struct intel_dsb_buffer {
u32 *cmd_buf;
@@ -15,29 +17,29 @@ struct intel_dsb_buffer {
size_t buf_size;
};
-u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
+static u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
{
return i915_ggtt_offset(dsb_buf->vma);
}
-void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
+static void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
{
dsb_buf->cmd_buf[idx] = val;
}
-u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
+static u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
{
return dsb_buf->cmd_buf[idx];
}
-void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
+static void intel_dsb_buffer_fill(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
{
WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf));
memset(&dsb_buf->cmd_buf[idx], val, size);
}
-struct intel_dsb_buffer *intel_dsb_buffer_create(struct drm_device *drm, size_t size)
+static struct intel_dsb_buffer *intel_dsb_buffer_create(struct drm_device *drm, size_t size)
{
struct drm_i915_private *i915 = to_i915(drm);
struct intel_dsb_buffer *dsb_buf;
@@ -93,13 +95,23 @@ err:
return ERR_PTR(ret);
}
-void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
+static void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
{
i915_vma_unpin_and_release(&dsb_buf->vma, I915_VMA_RELEASE_MAP);
kfree(dsb_buf);
}
-void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
+static void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
{
i915_gem_object_flush_map(dsb_buf->vma->obj);
}
+
+const struct intel_display_dsb_interface i915_display_dsb_interface = {
+ .ggtt_offset = intel_dsb_buffer_ggtt_offset,
+ .write = intel_dsb_buffer_write,
+ .read = intel_dsb_buffer_read,
+ .fill = intel_dsb_buffer_fill,
+ .create = intel_dsb_buffer_create,
+ .cleanup = intel_dsb_buffer_cleanup,
+ .flush_map = intel_dsb_buffer_flush_map,
+};
diff --git a/drivers/gpu/drm/i915/i915_dsb_buffer.h b/drivers/gpu/drm/i915/i915_dsb_buffer.h
new file mode 100644
index 000000000000..a01b4d8de947
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_dsb_buffer.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef __I915_DSB_BUFFER_H__
+#define __I915_DSB_BUFFER_H__
+
+extern const struct intel_display_dsb_interface i915_display_dsb_interface;
+
+#endif /* __I915_DSB_BUFFER_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 160733619a4a..761491750914 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -579,7 +579,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
goto out_rpm;
}
- i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
+ i915_gem_object_frontbuffer_invalidate(obj, ORIGIN_CPU);
user_data = u64_to_user_ptr(args->data_ptr);
offset = args->offset;
@@ -626,7 +626,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
}
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
- i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
+ i915_gem_object_frontbuffer_flush(obj, ORIGIN_CPU);
i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
@@ -714,7 +714,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
offset = 0;
}
- i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
+ i915_gem_object_frontbuffer_flush(obj, ORIGIN_CPU);
i915_gem_object_unpin_pages(obj);
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index a99b4e45d26c..0469c4467f2b 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -824,9 +824,6 @@ static void err_print_gt_global(struct drm_i915_error_state_buf *m,
err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
gt->fault_data1, gt->fault_data0);
- if (GRAPHICS_VER(m->i915) == 7)
- err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);
-
if (IS_GRAPHICS_VER(m->i915, 8, 11))
err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);
@@ -1929,9 +1926,6 @@ static void gt_record_global_regs(struct intel_gt_coredump *gt)
if (IS_VALLEYVIEW(i915))
gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
- if (GRAPHICS_VER(i915) == 7)
- gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
-
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
gt->fault_data0 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
XEHP_FAULT_TLB_DATA0);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 91b3df621a49..26970c5e291e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -149,7 +149,6 @@ struct intel_gt_coredump {
u32 gtier[6], ngtier;
u32 forcewake;
u32 error; /* gen6+ */
- u32 err_int; /* gen7 */
u32 fault_data0; /* gen8, gen9 */
u32 fault_data1; /* gen8, gen9 */
u32 done_reg;
diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c
index 16e64d752e12..c4a799f5fe92 100644
--- a/drivers/gpu/drm/i915/i915_hwmon.c
+++ b/drivers/gpu/drm/i915/i915_hwmon.c
@@ -9,6 +9,8 @@
#include <linux/types.h>
#include <linux/units.h>
+#include <drm/intel/intel_pcode_regs.h>
+
#include "i915_drv.h"
#include "i915_hwmon.h"
#include "i915_reg.h"
diff --git a/drivers/gpu/drm/i915/i915_initial_plane.c b/drivers/gpu/drm/i915/i915_initial_plane.c
index 7fb52d81f7b6..5594548f51d8 100644
--- a/drivers/gpu/drm/i915/i915_initial_plane.c
+++ b/drivers/gpu/drm/i915/i915_initial_plane.c
@@ -9,6 +9,7 @@
#include "display/intel_crtc.h"
#include "display/intel_display_types.h"
#include "display/intel_fb.h"
+#include "display/intel_fbdev_fb.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
@@ -116,7 +117,7 @@ initial_plane_vma(struct drm_i915_private *i915,
*/
if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
mem == i915->mm.stolen_region &&
- size * 2 > i915->dsm.usable_size) {
+ !intel_fbdev_fb_prefer_stolen(&i915->drm, size)) {
drm_dbg_kms(&i915->drm, "Initial FB size exceeds half of stolen, discarding\n");
return NULL;
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3fe978d4ea53..d4d8dd0a4174 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -34,6 +34,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/intel/display_parent_interface.h>
+#include <drm/intel/intel_gmd_interrupt_regs.h>
#include "display/intel_display_irq.h"
#include "display/intel_hotplug.h"
diff --git a/drivers/gpu/drm/i915/i915_overlay.c b/drivers/gpu/drm/i915/i915_overlay.c
new file mode 100644
index 000000000000..c2d712bd2b0d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_overlay.c
@@ -0,0 +1,517 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2026, Intel Corporation.
+ */
+
+#include <drm/drm_print.h>
+
+#include <drm/intel/display_parent_interface.h>
+#include <drm/intel/intel_gmd_interrupt_regs.h>
+
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_object_frontbuffer.h"
+#include "gem/i915_gem_pm.h"
+
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_ring.h"
+
+#include "i915_drv.h"
+#include "i915_overlay.h"
+#include "i915_reg.h"
+#include "intel_pci_config.h"
+
+#include "display/intel_frontbuffer.h"
+
+/* overlay flip addr flag */
+#define OFC_UPDATE 0x1
+
+struct i915_overlay {
+ struct drm_i915_private *i915;
+ struct intel_context *context;
+ struct i915_vma *vma;
+ struct i915_vma *old_vma;
+ struct i915_frontbuffer *frontbuffer;
+ /* register access */
+ struct drm_i915_gem_object *reg_bo;
+ void __iomem *regs;
+ u32 flip_addr;
+ u32 frontbuffer_bits;
+ /* flip handling */
+ struct i915_active last_flip;
+ void (*flip_complete)(struct i915_overlay *overlay);
+};
+
+static void i830_overlay_clock_gating(struct drm_i915_private *i915,
+ bool enable)
+{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ u8 val;
+
+ /*
+ * WA_OVERLAY_CLKGATE:alm
+ *
+ * FIXME should perhaps be done on the display side?
+ */
+ if (enable)
+ intel_uncore_write(&i915->uncore, DSPCLK_GATE_D, 0);
+ else
+ intel_uncore_write(&i915->uncore, DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+
+ /* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
+ pci_bus_read_config_byte(pdev->bus,
+ PCI_DEVFN(0, 0), I830_CLOCK_GATE, &val);
+ if (enable)
+ val &= ~I830_L2_CACHE_CLOCK_GATE_DISABLE;
+ else
+ val |= I830_L2_CACHE_CLOCK_GATE_DISABLE;
+ pci_bus_write_config_byte(pdev->bus,
+ PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
+}
+
+static struct i915_request *
+alloc_request(struct i915_overlay *overlay, void (*fn)(struct i915_overlay *))
+{
+ struct i915_request *rq;
+ int err;
+
+ overlay->flip_complete = fn;
+
+ rq = i915_request_create(overlay->context);
+ if (IS_ERR(rq))
+ return rq;
+
+ err = i915_active_add_request(&overlay->last_flip, rq);
+ if (err) {
+ i915_request_add(rq);
+ return ERR_PTR(err);
+ }
+
+ return rq;
+}
+
+static bool i915_overlay_is_active(struct drm_device *drm)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct i915_overlay *overlay = i915->overlay;
+
+ return overlay->frontbuffer_bits;
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int i915_overlay_on(struct drm_device *drm,
+ u32 frontbuffer_bits)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct i915_overlay *overlay = i915->overlay;
+ struct i915_request *rq;
+ u32 *cs;
+
+ drm_WARN_ON(drm, i915_overlay_is_active(drm));
+
+ rq = alloc_request(overlay, NULL);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ overlay->frontbuffer_bits = frontbuffer_bits;
+
+ if (IS_I830(i915))
+ i830_overlay_clock_gating(i915, false);
+
+ *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
+ *cs++ = overlay->flip_addr | OFC_UPDATE;
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ i915_request_add(rq);
+
+ return i915_active_wait(&overlay->last_flip);
+}
+
+static void i915_overlay_flip_prepare(struct i915_overlay *overlay,
+ struct i915_vma *vma)
+{
+ struct drm_i915_private *i915 = overlay->i915;
+ struct i915_frontbuffer *frontbuffer = NULL;
+
+ drm_WARN_ON(&i915->drm, overlay->old_vma);
+
+ if (vma)
+ frontbuffer = i915_gem_object_frontbuffer_get(vma->obj);
+
+ i915_gem_object_frontbuffer_track(overlay->frontbuffer, frontbuffer,
+ overlay->frontbuffer_bits);
+
+ if (overlay->frontbuffer)
+ i915_gem_object_frontbuffer_put(overlay->frontbuffer);
+ overlay->frontbuffer = frontbuffer;
+
+ overlay->old_vma = overlay->vma;
+ if (vma)
+ overlay->vma = i915_vma_get(vma);
+ else
+ overlay->vma = NULL;
+}
+
+/* overlay needs to be enabled in OCMD reg */
+static int i915_overlay_continue(struct drm_device *drm,
+ struct i915_vma *vma,
+ bool load_polyphase_filter)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct i915_overlay *overlay = i915->overlay;
+ struct i915_request *rq;
+ u32 flip_addr = overlay->flip_addr;
+ u32 *cs;
+
+ drm_WARN_ON(drm, !i915_overlay_is_active(drm));
+
+ if (load_polyphase_filter)
+ flip_addr |= OFC_UPDATE;
+
+ rq = alloc_request(overlay, NULL);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+ *cs++ = flip_addr;
+ intel_ring_advance(rq, cs);
+
+ i915_overlay_flip_prepare(overlay, vma);
+ i915_request_add(rq);
+
+ return 0;
+}
+
+static void i915_overlay_release_old_vma(struct i915_overlay *overlay)
+{
+ struct drm_i915_private *i915 = overlay->i915;
+ struct intel_display *display = i915->display;
+ struct i915_vma *vma;
+
+ vma = fetch_and_zero(&overlay->old_vma);
+ if (drm_WARN_ON(&i915->drm, !vma))
+ return;
+
+ intel_frontbuffer_flip(display, overlay->frontbuffer_bits);
+
+ i915_vma_unpin(vma);
+ i915_vma_put(vma);
+}
+
+static void
+i915_overlay_release_old_vid_tail(struct i915_overlay *overlay)
+{
+ i915_overlay_release_old_vma(overlay);
+}
+
+static void i915_overlay_off_tail(struct i915_overlay *overlay)
+{
+ struct drm_i915_private *i915 = overlay->i915;
+
+ i915_overlay_release_old_vma(overlay);
+
+ overlay->frontbuffer_bits = 0;
+
+ if (IS_I830(i915))
+ i830_overlay_clock_gating(i915, true);
+}
+
+static void i915_overlay_last_flip_retire(struct i915_active *active)
+{
+ struct i915_overlay *overlay =
+ container_of(active, typeof(*overlay), last_flip);
+
+ if (overlay->flip_complete)
+ overlay->flip_complete(overlay);
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int i915_overlay_off(struct drm_device *drm)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct i915_overlay *overlay = i915->overlay;
+ struct i915_request *rq;
+ u32 *cs, flip_addr = overlay->flip_addr;
+
+ drm_WARN_ON(drm, !i915_overlay_is_active(drm));
+
+ /*
+ * According to intel docs the overlay hw may hang (when switching
+ * off) without loading the filter coeffs. It is however unclear whether
+ * this applies to the disabling of the overlay or to the switching off
+ * of the hw. Do it in both cases.
+ */
+ flip_addr |= OFC_UPDATE;
+
+ rq = alloc_request(overlay, i915_overlay_off_tail);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ /* wait for overlay to go idle */
+ *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+ *cs++ = flip_addr;
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+
+ /* turn overlay off */
+ *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_OFF;
+ *cs++ = flip_addr;
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+
+ intel_ring_advance(rq, cs);
+
+ i915_overlay_flip_prepare(overlay, NULL);
+ i915_request_add(rq);
+
+ return i915_active_wait(&overlay->last_flip);
+}
+
+/*
+ * Recover from an interruption due to a signal.
+ * We have to be careful not to repeat work forever and make forward progress.
+ */
+static int i915_overlay_recover_from_interrupt(struct drm_device *drm)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct i915_overlay *overlay = i915->overlay;
+
+ return i915_active_wait(&overlay->last_flip);
+}
+
+/*
+ * Wait for pending overlay flip and release old frame.
+ * Needs to be called before the overlay registers are changed
+ * via intel_overlay_(un)map_regs.
+ */
+static int i915_overlay_release_old_vid(struct drm_device *drm)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct i915_overlay *overlay = i915->overlay;
+ struct i915_request *rq;
+ u32 *cs;
+
+ /*
+ * Only wait if there is actually an old frame to release to
+ * guarantee forward progress.
+ */
+ if (!overlay->old_vma)
+ return 0;
+
+ if (!(intel_uncore_read(&i915->uncore, GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
+ i915_overlay_release_old_vid_tail(overlay);
+ return 0;
+ }
+
+ rq = alloc_request(overlay, i915_overlay_release_old_vid_tail);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ i915_request_add(rq);
+
+ return i915_active_wait(&overlay->last_flip);
+}
+
+static void i915_overlay_reset(struct drm_device *drm)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct i915_overlay *overlay = i915->overlay;
+
+ if (!overlay)
+ return;
+
+ overlay->frontbuffer_bits = 0;
+}
+
+static struct i915_vma *i915_overlay_pin_fb(struct drm_device *drm,
+ struct drm_gem_object *obj,
+ u32 *offset)
+{
+ struct drm_i915_gem_object *new_bo = to_intel_bo(obj);
+ struct i915_gem_ww_ctx ww;
+ struct i915_vma *vma;
+ int ret;
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ ret = i915_gem_object_lock(new_bo, &ww);
+ if (!ret) {
+ vma = i915_gem_object_pin_to_display_plane(new_bo, &ww, 0, 0,
+ NULL, PIN_MAPPABLE);
+ ret = PTR_ERR_OR_ZERO(vma);
+ }
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ if (ret)
+ return ERR_PTR(ret);
+
+ *offset = i915_ggtt_offset(vma);
+
+ return vma;
+}
+
+static void i915_overlay_unpin_fb(struct drm_device *drm,
+ struct i915_vma *vma)
+{
+ i915_vma_unpin(vma);
+}
+
+static struct drm_gem_object *
+i915_overlay_obj_lookup(struct drm_device *drm,
+ struct drm_file *file_priv,
+ u32 handle)
+{
+ struct drm_i915_gem_object *bo;
+
+ bo = i915_gem_object_lookup(file_priv, handle);
+ if (!bo)
+ return ERR_PTR(-ENOENT);
+
+ if (i915_gem_object_is_tiled(bo)) {
+ drm_dbg(drm, "buffer used for overlay image can not be tiled\n");
+ i915_gem_object_put(bo);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return intel_bo_to_drm_bo(bo);
+}
+
+static int get_registers(struct i915_overlay *overlay, bool use_phys)
+{
+ struct drm_i915_private *i915 = overlay->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_put_bo;
+ }
+
+ if (use_phys)
+ overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
+ else
+ overlay->flip_addr = i915_ggtt_offset(vma);
+ overlay->regs = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+
+ if (IS_ERR(overlay->regs)) {
+ err = PTR_ERR(overlay->regs);
+ goto err_put_bo;
+ }
+
+ overlay->reg_bo = obj;
+ return 0;
+
+err_put_bo:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static void __iomem *i915_overlay_setup(struct drm_device *drm,
+ bool needs_physical)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct intel_engine_cs *engine;
+ struct i915_overlay *overlay;
+ int ret;
+
+ engine = to_gt(i915)->engine[RCS0];
+ if (!engine || !engine->kernel_context)
+ return ERR_PTR(-ENOENT);
+
+ overlay = kzalloc_obj(*overlay);
+ if (!overlay)
+ return ERR_PTR(-ENOMEM);
+
+ overlay->i915 = i915;
+ overlay->context = engine->kernel_context;
+
+ i915_active_init(&overlay->last_flip,
+ NULL, i915_overlay_last_flip_retire, 0);
+
+ ret = get_registers(overlay, needs_physical);
+ if (ret) {
+ kfree(overlay);
+ return ERR_PTR(ret);
+ }
+
+ i915->overlay = overlay;
+
+ return overlay->regs;
+}
+
+static void i915_overlay_cleanup(struct drm_device *drm)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct i915_overlay *overlay = i915->overlay;
+
+ if (!overlay)
+ return;
+
+ /*
+ * The bo's should be free'd by the generic code already.
+ * Furthermore modesetting teardown happens beforehand so the
+ * hardware should be off already.
+ */
+ drm_WARN_ON(drm, i915_overlay_is_active(drm));
+
+ i915_gem_object_put(overlay->reg_bo);
+ i915_active_fini(&overlay->last_flip);
+
+ kfree(overlay);
+ i915->overlay = NULL;
+}
+
+const struct intel_display_overlay_interface i915_display_overlay_interface = {
+ .is_active = i915_overlay_is_active,
+ .overlay_on = i915_overlay_on,
+ .overlay_continue = i915_overlay_continue,
+ .overlay_off = i915_overlay_off,
+ .recover_from_interrupt = i915_overlay_recover_from_interrupt,
+ .release_old_vid = i915_overlay_release_old_vid,
+ .reset = i915_overlay_reset,
+ .obj_lookup = i915_overlay_obj_lookup,
+ .pin_fb = i915_overlay_pin_fb,
+ .unpin_fb = i915_overlay_unpin_fb,
+ .setup = i915_overlay_setup,
+ .cleanup = i915_overlay_cleanup,
+};
diff --git a/drivers/gpu/drm/i915/i915_overlay.h b/drivers/gpu/drm/i915/i915_overlay.h
new file mode 100644
index 000000000000..f8053eb8d189
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_overlay.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2026 Intel Corporation
+ */
+
+#ifndef __I915_OVERLAY_H__
+#define __I915_OVERLAY_H__
+
+extern const struct intel_display_overlay_interface i915_display_overlay_interface;
+
+#endif /* __I915_OVERLAY_H__ */
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 2820e8f0f765..19b82427aa41 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2635,10 +2635,9 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream,
{
RING_CONTEXT_CONTROL(ce->engine->mmio_base),
CTX_CONTEXT_CONTROL,
- _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
- active ?
- GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
- 0)
+ active ?
+ REG_MASKED_FIELD_ENABLE(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE) :
+ REG_MASKED_FIELD_DISABLE(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE),
},
};
@@ -2827,8 +2826,8 @@ gen8_enable_metric_set(struct i915_perf_stream *stream,
*/
if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)) {
intel_uncore_write(uncore, GEN8_OA_DEBUG,
- _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
- GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
+ REG_MASKED_FIELD_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
+ GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
}
/*
@@ -2847,9 +2846,10 @@ gen8_enable_metric_set(struct i915_perf_stream *stream,
static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
{
- return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
- (stream->sample_flags & SAMPLE_OA_REPORT) ?
- 0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
+ if (stream->sample_flags & SAMPLE_OA_REPORT)
+ return REG_MASKED_FIELD_DISABLE(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
+ else
+ return REG_MASKED_FIELD_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}
static int
@@ -2870,15 +2870,15 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
*/
if (IS_DG2(i915)) {
intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
- _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
+ REG_MASKED_FIELD_ENABLE(STALL_DOP_GATING_DISABLE));
intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(GEN12_DISABLE_DOP_GATING));
+ REG_MASKED_FIELD_ENABLE(GEN12_DISABLE_DOP_GATING));
}
intel_uncore_write(uncore, __oa_regs(stream)->oa_debug,
/* Disable clk ratio reports, like previous Gens. */
- _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
- GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
+ REG_MASKED_FIELD_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
+ GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
/*
* If the user didn't require OA reports, instruct
* the hardware not to emit ctx switch reports.
@@ -2949,9 +2949,9 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream)
*/
if (IS_DG2(i915)) {
intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
- _MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
+ REG_MASKED_FIELD_DISABLE(STALL_DOP_GATING_DISABLE));
intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_DISABLE(GEN12_DISABLE_DOP_GATING));
+ REG_MASKED_FIELD_DISABLE(GEN12_DISABLE_DOP_GATING));
}
/* disable the context save/restore or OAR counters */
@@ -4475,7 +4475,7 @@ static u32 mask_reg_value(u32 reg, u32 val)
* programmed by userspace doesn't change this.
*/
if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
- val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
+ val = val & ~REG_MASKED_FIELD_ENABLE(GEN8_ST_PO_DISABLE);
/*
* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
@@ -4483,7 +4483,7 @@ static u32 mask_reg_value(u32 reg, u32 val)
* configs.
*/
if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
- val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
+ val = val & ~REG_MASKED_FIELD_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
return val;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5bf3b4ab2baa..5d99b99b0c57 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -116,9 +116,6 @@
* #define GEN8_BAR _MMIO(0xb888)
*/
-#define GU_CNTL_PROTECTED _MMIO(0x10100C)
-#define DEPRESENT REG_BIT(9)
-
#define GU_CNTL _MMIO(0x101010)
#define LMEM_INIT REG_BIT(7)
#define DRIVERFLR REG_BIT(31)
@@ -328,29 +325,6 @@
#define GEN7_MEDIA_MAX_REQ_COUNT _MMIO(0x4070)
#define GEN7_GFX_MAX_REQ_COUNT _MMIO(0x4074)
-#define GEN7_ERR_INT _MMIO(0x44040)
-#define ERR_INT_POISON (1 << 31)
-#define ERR_INT_INVALID_GTT_PTE (1 << 29)
-#define ERR_INT_INVALID_PTE_DATA (1 << 28)
-#define ERR_INT_SPRITE_C_FAULT (1 << 23)
-#define ERR_INT_PRIMARY_C_FAULT (1 << 22)
-#define ERR_INT_CURSOR_C_FAULT (1 << 21)
-#define ERR_INT_SPRITE_B_FAULT (1 << 20)
-#define ERR_INT_PRIMARY_B_FAULT (1 << 19)
-#define ERR_INT_CURSOR_B_FAULT (1 << 18)
-#define ERR_INT_SPRITE_A_FAULT (1 << 17)
-#define ERR_INT_PRIMARY_A_FAULT (1 << 16)
-#define ERR_INT_CURSOR_A_FAULT (1 << 15)
-#define ERR_INT_MMIO_UNCLAIMED (1 << 13)
-#define ERR_INT_PIPE_CRC_DONE_C (1 << 8)
-#define ERR_INT_FIFO_UNDERRUN_C (1 << 6)
-#define ERR_INT_PIPE_CRC_DONE_B (1 << 5)
-#define ERR_INT_FIFO_UNDERRUN_B (1 << 3)
-#define ERR_INT_PIPE_CRC_DONE_A (1 << 2)
-#define ERR_INT_PIPE_CRC_DONE(pipe) (1 << (2 + (pipe) * 3))
-#define ERR_INT_FIFO_UNDERRUN_A (1 << 0)
-#define ERR_INT_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3))
-
#define FPGA_DBG _MMIO(0x42300)
#define FPGA_DBG_RM_NOCLAIM REG_BIT(31)
@@ -361,9 +335,6 @@
#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
-#define SCPD0 _MMIO(0x209c) /* 915+ only */
-#define SCPD_FBC_IGNORE_3D (1 << 6)
-#define CSTATE_RENDER_CLOCK_GATE_DISABLE (1 << 5)
#define GEN2_IER _MMIO(0x20a0)
#define GEN2_IIR _MMIO(0x20a4)
#define GEN2_IMR _MMIO(0x20a8)
@@ -377,13 +348,6 @@
#define GINT_DIS (1 << 22)
#define GCFG_DIS (1 << 8)
#define VLV_GUNIT_CLOCK_GATE2 _MMIO(VLV_DISPLAY_BASE + 0x2064)
-#define VLV_IIR_RW _MMIO(VLV_DISPLAY_BASE + 0x2084)
-#define VLV_IER _MMIO(VLV_DISPLAY_BASE + 0x20a0)
-#define VLV_IIR _MMIO(VLV_DISPLAY_BASE + 0x20a4)
-#define VLV_IMR _MMIO(VLV_DISPLAY_BASE + 0x20a8)
-#define VLV_ISR _MMIO(VLV_DISPLAY_BASE + 0x20ac)
-#define VLV_PCBR _MMIO(VLV_DISPLAY_BASE + 0x2120)
-#define VLV_PCBR_ADDR_SHIFT 12
#define EIR _MMIO(0x20b0)
#define EMR _MMIO(0x20b4)
@@ -397,24 +361,10 @@
#define GEN2_ERROR_REGS I915_ERROR_REGS(EMR, EIR)
-#define INSTPM _MMIO(0x20c0)
-#define INSTPM_SELF_EN (1 << 12) /* 915GM only */
-#define INSTPM_AGPBUSY_INT_EN (1 << 11) /* gen3: when disabled, pending interrupts
- will not assert AGPBUSY# and will only
- be delivered when out of C3. */
-#define INSTPM_FORCE_ORDERING (1 << 7) /* GEN6+ */
-#define INSTPM_TLB_INVALIDATE (1 << 9)
-#define INSTPM_SYNC_FLUSH (1 << 5)
#define MEM_MODE _MMIO(0x20cc)
#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1 << 3) /* 830 only */
#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1 << 2) /* 830/845 only */
#define MEM_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2) /* 85x only */
-#define FW_BLC _MMIO(0x20d8)
-#define FW_BLC2 _MMIO(0x20dc)
-#define FW_BLC_SELF _MMIO(0x20e0) /* 915+ only */
-#define FW_BLC_SELF_EN_MASK REG_BIT(31)
-#define FW_BLC_SELF_FIFO_MASK REG_BIT(16) /* 945 only */
-#define FW_BLC_SELF_EN REG_BIT(15) /* 945 only */
#define MM_BURST_LENGTH 0x00700000
#define MM_FIFO_WATERMARK 0x0001F000
#define LM_BURST_LENGTH 0x00000700
@@ -524,42 +474,6 @@
/* These are all the "old" interrupts */
#define ILK_BSD_USER_INTERRUPT (1 << 5)
-#define I915_PM_INTERRUPT (1 << 31)
-#define I915_ISP_INTERRUPT (1 << 22)
-#define I915_LPE_PIPE_B_INTERRUPT (1 << 21)
-#define I915_LPE_PIPE_A_INTERRUPT (1 << 20)
-#define I915_MIPIC_INTERRUPT (1 << 19)
-#define I915_MIPIA_INTERRUPT (1 << 18)
-#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 18)
-#define I915_DISPLAY_PORT_INTERRUPT (1 << 17)
-#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1 << 16)
-#define I915_MASTER_ERROR_INTERRUPT (1 << 15)
-#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1 << 14)
-#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1 << 14) /* p-state */
-#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1 << 13)
-#define I915_HWB_OOM_INTERRUPT (1 << 13)
-#define I915_LPE_PIPE_C_INTERRUPT (1 << 12)
-#define I915_SYNC_STATUS_INTERRUPT (1 << 12)
-#define I915_MISC_INTERRUPT (1 << 11)
-#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1 << 11)
-#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT (1 << 10)
-#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1 << 10)
-#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT (1 << 9)
-#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1 << 9)
-#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT (1 << 8)
-#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1 << 8)
-#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1 << 7)
-#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1 << 6)
-#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1 << 5)
-#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1 << 4)
-#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT (1 << 3)
-#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT (1 << 2)
-#define I915_DEBUG_INTERRUPT (1 << 2)
-#define I915_WINVALID_INTERRUPT (1 << 1)
-#define I915_USER_INTERRUPT (1 << 1)
-#define I915_ASLE_INTERRUPT (1 << 0)
-#define I915_BSD_USER_INTERRUPT (1 << 25)
-
#define GEN6_BSD_RNCID _MMIO(0x12198)
#define GEN7_FF_THREAD_MODE _MMIO(0x20a0)
@@ -613,47 +527,6 @@
#define DSTATE_GFX_CLOCK_GATING (1 << 1)
#define DSTATE_DOT_CLOCK_GATING (1 << 0)
-#define DSPCLK_GATE_D _MMIO(0x6200)
-#define VLV_DSPCLK_GATE_D _MMIO(VLV_DISPLAY_BASE + 0x6200)
-# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
-# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
-# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
-# define VRDUNIT_CLOCK_GATE_DISABLE (1 << 27) /* 965 */
-# define AUDUNIT_CLOCK_GATE_DISABLE (1 << 26) /* 965 */
-# define DPUNIT_A_CLOCK_GATE_DISABLE (1 << 25) /* 965 */
-# define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24) /* 965 */
-# define PNV_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 24) /* pnv */
-# define TVRUNIT_CLOCK_GATE_DISABLE (1 << 23) /* 915-945 */
-# define TVCUNIT_CLOCK_GATE_DISABLE (1 << 22) /* 915-945 */
-# define TVFUNIT_CLOCK_GATE_DISABLE (1 << 21) /* 915-945 */
-# define TVEUNIT_CLOCK_GATE_DISABLE (1 << 20) /* 915-945 */
-# define DVSUNIT_CLOCK_GATE_DISABLE (1 << 19) /* 915-945 */
-# define DSSUNIT_CLOCK_GATE_DISABLE (1 << 18) /* 915-945 */
-# define DDBUNIT_CLOCK_GATE_DISABLE (1 << 17) /* 915-945 */
-# define DPRUNIT_CLOCK_GATE_DISABLE (1 << 16) /* 915-945 */
-# define DPFUNIT_CLOCK_GATE_DISABLE (1 << 15) /* 915-945 */
-# define DPBMUNIT_CLOCK_GATE_DISABLE (1 << 14) /* 915-945 */
-# define DPLSUNIT_CLOCK_GATE_DISABLE (1 << 13) /* 915-945 */
-# define DPLUNIT_CLOCK_GATE_DISABLE (1 << 12) /* 915-945 */
-# define DPOUNIT_CLOCK_GATE_DISABLE (1 << 11)
-# define DPBUNIT_CLOCK_GATE_DISABLE (1 << 10)
-# define DCUNIT_CLOCK_GATE_DISABLE (1 << 9)
-# define DPUNIT_CLOCK_GATE_DISABLE (1 << 8)
-# define VRUNIT_CLOCK_GATE_DISABLE (1 << 7) /* 915+: reserved */
-# define OVHUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 830-865 */
-# define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 915-945 */
-# define OVFUNIT_CLOCK_GATE_DISABLE (1 << 5)
-# define OVBUNIT_CLOCK_GATE_DISABLE (1 << 4)
-/*
- * This bit must be set on the 830 to prevent hangs when turning off the
- * overlay scaler.
- */
-# define OVRUNIT_CLOCK_GATE_DISABLE (1 << 3)
-# define OVCUNIT_CLOCK_GATE_DISABLE (1 << 2)
-# define OVUUNIT_CLOCK_GATE_DISABLE (1 << 1)
-# define ZVUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 830 */
-# define OVLUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 845,865 */
-
#define RENCLK_GATE_D1 _MMIO(0x6204)
# define BLITTER_CLOCK_GATE_DISABLE (1 << 13) /* 945GM only */
# define MPEG_CLOCK_GATE_DISABLE (1 << 12) /* 945GM only */
@@ -758,19 +631,6 @@
#define VLV_CLK_CTL2 _MMIO(0x101104)
#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
-/*
- * GEN9 clock gating regs
- */
-#define GEN9_CLKGATE_DIS_0 _MMIO(0x46530)
-#define DARBF_GATING_DIS REG_BIT(27)
-#define MTL_PIPEDMC_GATING_DIS(pipe) REG_BIT(15 - (pipe))
-#define PWM2_GATING_DIS REG_BIT(14)
-#define PWM1_GATING_DIS REG_BIT(13)
-
-#define GEN9_CLKGATE_DIS_3 _MMIO(0x46538)
-#define TGL_VRH_GATING_DIS REG_BIT(31)
-#define DPT_GATING_DIS REG_BIT(22)
-
#define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028)
#define PIPEB_LINE_COMPARE_INT_EN REG_BIT(29)
#define PIPEB_HLINE_INT_EN REG_BIT(28)
@@ -799,56 +659,9 @@
#define PCH_3DCGDIS1 _MMIO(0x46024)
# define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11)
-/* Display Internal Timeout Register */
-#define RM_TIMEOUT _MMIO(0x42060)
-#define RM_TIMEOUT_REG_CAPTURE _MMIO(0x420E0)
-#define MMIO_TIMEOUT_US(us) ((us) << 0)
-
-/* interrupts */
-#define DE_MASTER_IRQ_CONTROL (1 << 31)
-#define DE_SPRITEB_FLIP_DONE (1 << 29)
-#define DE_SPRITEA_FLIP_DONE (1 << 28)
-#define DE_PLANEB_FLIP_DONE (1 << 27)
-#define DE_PLANEA_FLIP_DONE (1 << 26)
-#define DE_PLANE_FLIP_DONE(plane) (1 << (26 + (plane)))
-#define DE_PCU_EVENT (1 << 25)
-#define DE_GTT_FAULT (1 << 24)
-#define DE_POISON (1 << 23)
-#define DE_PERFORM_COUNTER (1 << 22)
-#define DE_PCH_EVENT (1 << 21)
-#define DE_AUX_CHANNEL_A (1 << 20)
-#define DE_DP_A_HOTPLUG (1 << 19)
-#define DE_GSE (1 << 18)
-#define DE_PIPEB_VBLANK (1 << 15)
-#define DE_PIPEB_EVEN_FIELD (1 << 14)
-#define DE_PIPEB_ODD_FIELD (1 << 13)
-#define DE_PIPEB_LINE_COMPARE (1 << 12)
-#define DE_PIPEB_VSYNC (1 << 11)
-#define DE_PIPEB_CRC_DONE (1 << 10)
-#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
-#define DE_PIPEA_VBLANK (1 << 7)
-#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8 * (pipe)))
-#define DE_PIPEA_EVEN_FIELD (1 << 6)
-#define DE_PIPEA_ODD_FIELD (1 << 5)
-#define DE_PIPEA_LINE_COMPARE (1 << 4)
-#define DE_PIPEA_VSYNC (1 << 3)
-#define DE_PIPEA_CRC_DONE (1 << 2)
-#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8 * (pipe)))
-#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
-#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8 * (pipe)))
-
#define VLV_MASTER_IER _MMIO(0x4400c) /* Gunit master IER */
#define MASTER_INTERRUPT_ENABLE (1 << 31)
-#define DEISR _MMIO(0x44000)
-#define DEIMR _MMIO(0x44004)
-#define DEIIR _MMIO(0x44008)
-#define DEIER _MMIO(0x4400c)
-
-#define DE_IRQ_REGS I915_IRQ_REGS(DEIMR, \
- DEIER, \
- DEIIR)
-
#define GTISR _MMIO(0x44010)
#define GTIMR _MMIO(0x44014)
#define GTIIR _MMIO(0x44018)
@@ -858,24 +671,6 @@
GTIER, \
GTIIR)
-#define GEN8_MASTER_IRQ _MMIO(0x44200)
-#define GEN8_MASTER_IRQ_CONTROL (1 << 31)
-#define GEN8_PCU_IRQ (1 << 30)
-#define GEN8_DE_PCH_IRQ (1 << 23)
-#define GEN8_DE_MISC_IRQ (1 << 22)
-#define GEN8_DE_PORT_IRQ (1 << 20)
-#define GEN8_DE_PIPE_C_IRQ (1 << 18)
-#define GEN8_DE_PIPE_B_IRQ (1 << 17)
-#define GEN8_DE_PIPE_A_IRQ (1 << 16)
-#define GEN8_DE_PIPE_IRQ(pipe) (1 << (16 + (pipe)))
-#define GEN8_GT_VECS_IRQ (1 << 6)
-#define GEN8_GT_GUC_IRQ (1 << 5)
-#define GEN8_GT_PM_IRQ (1 << 4)
-#define GEN8_GT_VCS1_IRQ (1 << 3) /* NB: VCS2 in bspec! */
-#define GEN8_GT_VCS0_IRQ (1 << 2) /* NB: VCS1 in bpsec! */
-#define GEN8_GT_BCS_IRQ (1 << 1)
-#define GEN8_GT_RCS_IRQ (1 << 0)
-
#define GEN8_GT_ISR(which) _MMIO(0x44300 + (0x10 * (which)))
#define GEN8_GT_IMR(which) _MMIO(0x44304 + (0x10 * (which)))
#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
@@ -901,25 +696,6 @@
GEN8_PCU_IER, \
GEN8_PCU_IIR)
-#define GEN11_GU_MISC_ISR _MMIO(0x444f0)
-#define GEN11_GU_MISC_IMR _MMIO(0x444f4)
-#define GEN11_GU_MISC_IIR _MMIO(0x444f8)
-#define GEN11_GU_MISC_IER _MMIO(0x444fc)
-#define GEN11_GU_MISC_GSE (1 << 27)
-
-#define GEN11_GU_MISC_IRQ_REGS I915_IRQ_REGS(GEN11_GU_MISC_IMR, \
- GEN11_GU_MISC_IER, \
- GEN11_GU_MISC_IIR)
-
-#define GEN11_GFX_MSTR_IRQ _MMIO(0x190010)
-#define GEN11_MASTER_IRQ (1 << 31)
-#define GEN11_PCU_IRQ (1 << 30)
-#define GEN11_GU_MISC_IRQ (1 << 29)
-#define GEN11_DISPLAY_IRQ (1 << 16)
-#define GEN11_GT_DW_IRQ(x) (1 << (x))
-#define GEN11_GT_DW1_IRQ (1 << 1)
-#define GEN11_GT_DW0_IRQ (1 << 0)
-
#define DG1_MSTR_TILE_INTR _MMIO(0x190008)
#define DG1_MSTR_IRQ REG_BIT(31)
#define DG1_MSTR_TILE(t) REG_BIT(t)
@@ -941,133 +717,9 @@
#define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE REG_BIT(5)
#define CHICKEN3_DGMG_DONE_FIX_DISABLE REG_BIT(2)
-#define CHICKEN_PAR1_1 _MMIO(0x42080)
-#define IGNORE_KVMR_PIPE_A REG_BIT(23)
-#define KBL_ARB_FILL_SPARE_22 REG_BIT(22)
-#define DIS_RAM_BYPASS_PSR2_MAN_TRACK REG_BIT(16)
-#define SKL_DE_COMPRESSED_HASH_MODE REG_BIT(15)
-#define HSW_MASK_VBL_TO_PIPE_IN_SRD REG_BIT(15) /* hsw/bdw */
-#define FORCE_ARB_IDLE_PLANES REG_BIT(14)
-#define SKL_EDP_PSR_FIX_RDWRAP REG_BIT(3)
-#define IGNORE_PSR2_HW_TRACKING REG_BIT(1)
-
#define CHICKEN_PAR2_1 _MMIO(0x42090)
#define KVM_CONFIG_CHANGE_NOTIFICATION_SELECT REG_BIT(14)
-#define _CHICKEN_PIPESL_1_A 0x420b0
-#define _CHICKEN_PIPESL_1_B 0x420b4
-#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
-#define HSW_PRI_STRETCH_MAX_MASK REG_GENMASK(28, 27)
-#define HSW_PRI_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 0)
-#define HSW_PRI_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 1)
-#define HSW_PRI_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 2)
-#define HSW_PRI_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 3)
-#define HSW_SPR_STRETCH_MAX_MASK REG_GENMASK(26, 25)
-#define HSW_SPR_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 0)
-#define HSW_SPR_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 1)
-#define HSW_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 2)
-#define HSW_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 3)
-#define HSW_FBCQ_DIS REG_BIT(22)
-#define HSW_UNMASK_VBL_TO_REGS_IN_SRD REG_BIT(15) /* hsw */
-#define SKL_PSR_MASK_PLANE_FLIP REG_BIT(11) /* skl+ */
-#define SKL_PLANE1_STRETCH_MAX_MASK REG_GENMASK(1, 0)
-#define SKL_PLANE1_STRETCH_MAX_X8 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 0)
-#define SKL_PLANE1_STRETCH_MAX_X4 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 1)
-#define SKL_PLANE1_STRETCH_MAX_X2 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 2)
-#define SKL_PLANE1_STRETCH_MAX_X1 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 3)
-#define BDW_UNMASK_VBL_TO_REGS_IN_SRD REG_BIT(0) /* bdw */
-
-#define DISP_ARB_CTL _MMIO(0x45000)
-#define DISP_FBC_MEMORY_WAKE REG_BIT(31)
-#define DISP_TILE_SURFACE_SWIZZLING REG_BIT(13)
-#define DISP_FBC_WM_DIS REG_BIT(15)
-
-#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
-#define _LATENCY_REPORTING_REMOVED_PIPE_D REG_BIT(31)
-#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30)
-#define _LATENCY_REPORTING_REMOVED_PIPE_C REG_BIT(25)
-#define _LATENCY_REPORTING_REMOVED_PIPE_B REG_BIT(24)
-#define _LATENCY_REPORTING_REMOVED_PIPE_A REG_BIT(23)
-#define LATENCY_REPORTING_REMOVED(pipe) _PICK((pipe), \
- _LATENCY_REPORTING_REMOVED_PIPE_A, \
- _LATENCY_REPORTING_REMOVED_PIPE_B, \
- _LATENCY_REPORTING_REMOVED_PIPE_C, \
- _LATENCY_REPORTING_REMOVED_PIPE_D)
-#define ICL_DELAY_PMRSP REG_BIT(22)
-#define DISABLE_FLR_SRC REG_BIT(15)
-#define MASK_WAKEMEM REG_BIT(13)
-#define DDI_CLOCK_REG_ACCESS REG_BIT(7)
-
-#define GMD_ID_DISPLAY _MMIO(0x510a0)
-#define GMD_ID_ARCH_MASK REG_GENMASK(31, 22)
-#define GMD_ID_RELEASE_MASK REG_GENMASK(21, 14)
-#define GMD_ID_STEP REG_GENMASK(5, 0)
-
-/* PCH */
-
-#define SDEISR _MMIO(0xc4000)
-#define SDEIMR _MMIO(0xc4004)
-#define SDEIIR _MMIO(0xc4008)
-#define SDEIER _MMIO(0xc400c)
-
-/* Icelake PPS_DATA and _ECC DIP Registers.
- * These are available for transcoders B,C and eDP.
- * Adding the _A so as to reuse the _MMIO_TRANS2
- * definition, with which it offsets to the right location.
- */
-
-#define _TRANSA_CHICKEN1 0xf0060
-#define _TRANSB_CHICKEN1 0xf1060
-#define TRANS_CHICKEN1(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
-#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE REG_BIT(10)
-#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE REG_BIT(4)
-
-#define _TRANSA_CHICKEN2 0xf0064
-#define _TRANSB_CHICKEN2 0xf1064
-#define TRANS_CHICKEN2(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
-#define TRANS_CHICKEN2_TIMING_OVERRIDE REG_BIT(31)
-#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED REG_BIT(29)
-#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
-#define TRANS_CHICKEN2_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANS_CHICKEN2_FRAME_START_DELAY_MASK, (x)) /* 0-3 */
-#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER REG_BIT(26)
-#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH REG_BIT(25)
-
-#define SOUTH_CHICKEN1 _MMIO(0xc2000)
-#define FDIA_PHASE_SYNC_SHIFT_OVR 19
-#define FDIA_PHASE_SYNC_SHIFT_EN 18
-#define INVERT_DDIE_HPD REG_BIT(28)
-#define INVERT_DDID_HPD_MTP REG_BIT(27)
-#define INVERT_TC4_HPD REG_BIT(26)
-#define INVERT_TC3_HPD REG_BIT(25)
-#define INVERT_TC2_HPD REG_BIT(24)
-#define INVERT_TC1_HPD REG_BIT(23)
-#define INVERT_DDID_HPD (1 << 18)
-#define INVERT_DDIC_HPD (1 << 17)
-#define INVERT_DDIB_HPD (1 << 16)
-#define INVERT_DDIA_HPD (1 << 15)
-#define FDI_PHASE_SYNC_OVR(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
-#define FDI_PHASE_SYNC_EN(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
-#define FDI_BC_BIFURCATION_SELECT (1 << 12)
-#define CHASSIS_CLK_REQ_DURATION_MASK (0xf << 8)
-#define CHASSIS_CLK_REQ_DURATION(x) ((x) << 8)
-#define SBCLK_RUN_REFCLK_DIS (1 << 7)
-#define ICP_SECOND_PPS_IO_SELECT REG_BIT(2)
-#define SPT_PWM_GRANULARITY (1 << 0)
-#define SOUTH_CHICKEN2 _MMIO(0xc2004)
-#define FDI_MPHY_IOSFSB_RESET_STATUS (1 << 13)
-#define FDI_MPHY_IOSFSB_RESET_CTL (1 << 12)
-#define LPT_PWM_GRANULARITY (1 << 5)
-#define DPLS_EDP_PPS_FIX_DIS (1 << 0)
-
-#define SOUTH_DSPCLK_GATE_D _MMIO(0xc2020)
-#define PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 31)
-#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1 << 30)
-#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1 << 29)
-#define PCH_DPMGUNIT_CLOCK_GATE_DISABLE (1 << 15)
-#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1 << 14)
-#define CNP_PWM_CGE_GATING_DISABLE (1 << 13)
-#define PCH_LP_PARTITION_LEVEL_DISABLE (1 << 12)
-
#define VLV_PMWGICZ _MMIO(0x1300a4)
#define HSW_EDRAM_CAP _MMIO(0x120010)
@@ -1076,106 +728,6 @@
#define EDRAM_WAYS_IDX(cap) (((cap) >> 5) & 0x7)
#define EDRAM_SETS_IDX(cap) (((cap) >> 8) & 0x3)
-#define GEN6_PCODE_MAILBOX _MMIO(0x138124)
-#define GEN6_PCODE_READY (1 << 31)
-#define GEN6_PCODE_MB_PARAM2 REG_GENMASK(23, 16)
-#define GEN6_PCODE_MB_PARAM1 REG_GENMASK(15, 8)
-#define GEN6_PCODE_MB_COMMAND REG_GENMASK(7, 0)
-#define GEN6_PCODE_ERROR_MASK 0xFF
-#define GEN6_PCODE_SUCCESS 0x0
-#define GEN6_PCODE_ILLEGAL_CMD 0x1
-#define GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x2
-#define GEN6_PCODE_TIMEOUT 0x3
-#define GEN6_PCODE_UNIMPLEMENTED_CMD 0xFF
-#define GEN7_PCODE_TIMEOUT 0x2
-#define GEN7_PCODE_ILLEGAL_DATA 0x3
-#define GEN11_PCODE_ILLEGAL_SUBCOMMAND 0x4
-#define GEN11_PCODE_LOCKED 0x6
-#define GEN11_PCODE_REJECTED 0x11
-#define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
-#define GEN6_PCODE_WRITE_RC6VIDS 0x4
-#define GEN6_PCODE_READ_RC6VIDS 0x5
-#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
-#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
-#define BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ 0x18
-#define GEN9_PCODE_READ_MEM_LATENCY 0x6
-#define GEN9_MEM_LATENCY_LEVEL_3_7_MASK REG_GENMASK(31, 24)
-#define GEN9_MEM_LATENCY_LEVEL_2_6_MASK REG_GENMASK(23, 16)
-#define GEN9_MEM_LATENCY_LEVEL_1_5_MASK REG_GENMASK(15, 8)
-#define GEN9_MEM_LATENCY_LEVEL_0_4_MASK REG_GENMASK(7, 0)
-#define SKL_PCODE_LOAD_HDCP_KEYS 0x5
-#define SKL_PCODE_CDCLK_CONTROL 0x7
-#define SKL_CDCLK_PREPARE_FOR_CHANGE 0x3
-#define SKL_CDCLK_READY_FOR_CHANGE 0x1
-#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
-#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
-#define GEN6_READ_OC_PARAMS 0xc
-#define ICL_PCODE_MEM_SUBSYSYSTEM_INFO 0xd
-#define ICL_PCODE_MEM_SS_READ_GLOBAL_INFO (0x0 << 8)
-#define ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point) (((point) << 16) | (0x1 << 8))
-#define ADL_PCODE_MEM_SS_READ_PSF_GV_INFO ((0) | (0x2 << 8))
-#define DISPLAY_TO_PCODE_CDCLK_MAX 0x28D
-#define DISPLAY_TO_PCODE_VOLTAGE_MASK REG_GENMASK(1, 0)
-#define DISPLAY_TO_PCODE_VOLTAGE_MAX DISPLAY_TO_PCODE_VOLTAGE_MASK
-#define DISPLAY_TO_PCODE_CDCLK_VALID REG_BIT(27)
-#define DISPLAY_TO_PCODE_PIPE_COUNT_VALID REG_BIT(31)
-#define DISPLAY_TO_PCODE_CDCLK_MASK REG_GENMASK(25, 16)
-#define DISPLAY_TO_PCODE_PIPE_COUNT_MASK REG_GENMASK(30, 28)
-#define DISPLAY_TO_PCODE_CDCLK(x) REG_FIELD_PREP(DISPLAY_TO_PCODE_CDCLK_MASK, (x))
-#define DISPLAY_TO_PCODE_PIPE_COUNT(x) REG_FIELD_PREP(DISPLAY_TO_PCODE_PIPE_COUNT_MASK, (x))
-#define DISPLAY_TO_PCODE_VOLTAGE(x) REG_FIELD_PREP(DISPLAY_TO_PCODE_VOLTAGE_MASK, (x))
-#define DISPLAY_TO_PCODE_UPDATE_MASK(cdclk, num_pipes, voltage_level) \
- ((DISPLAY_TO_PCODE_CDCLK(cdclk)) | \
- (DISPLAY_TO_PCODE_PIPE_COUNT(num_pipes)) | \
- (DISPLAY_TO_PCODE_VOLTAGE(voltage_level)))
-#define ICL_PCODE_SAGV_DE_MEM_SS_CONFIG 0xe
-#define ICL_PCODE_REP_QGV_MASK REG_GENMASK(1, 0)
-#define ICL_PCODE_REP_QGV_SAFE REG_FIELD_PREP(ICL_PCODE_REP_QGV_MASK, 0)
-#define ICL_PCODE_REP_QGV_POLL REG_FIELD_PREP(ICL_PCODE_REP_QGV_MASK, 1)
-#define ICL_PCODE_REP_QGV_REJECTED REG_FIELD_PREP(ICL_PCODE_REP_QGV_MASK, 2)
-#define ADLS_PCODE_REP_PSF_MASK REG_GENMASK(3, 2)
-#define ADLS_PCODE_REP_PSF_SAFE REG_FIELD_PREP(ADLS_PCODE_REP_PSF_MASK, 0)
-#define ADLS_PCODE_REP_PSF_POLL REG_FIELD_PREP(ADLS_PCODE_REP_PSF_MASK, 1)
-#define ADLS_PCODE_REP_PSF_REJECTED REG_FIELD_PREP(ADLS_PCODE_REP_PSF_MASK, 2)
-#define ICL_PCODE_REQ_QGV_PT_MASK REG_GENMASK(7, 0)
-#define ICL_PCODE_REQ_QGV_PT(x) REG_FIELD_PREP(ICL_PCODE_REQ_QGV_PT_MASK, (x))
-#define ADLS_PCODE_REQ_PSF_PT_MASK REG_GENMASK(10, 8)
-#define ADLS_PCODE_REQ_PSF_PT(x) REG_FIELD_PREP(ADLS_PCODE_REQ_PSF_PT_MASK, (x))
-#define GEN6_PCODE_READ_D_COMP 0x10
-#define GEN6_PCODE_WRITE_D_COMP 0x11
-#define ICL_PCODE_EXIT_TCCOLD 0x12
-#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
-#define DISPLAY_IPS_CONTROL 0x19
-#define TGL_PCODE_TCCOLD 0x26
-#define TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED REG_BIT(0)
-#define TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ 0
-#define TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ REG_BIT(0)
- /* See also IPS_CTL */
-#define IPS_PCODE_CONTROL (1 << 30)
-#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
-#define GEN9_PCODE_SAGV_CONTROL 0x21
-#define GEN9_SAGV_DISABLE 0x0
-#define GEN9_SAGV_IS_DISABLED 0x1
-#define GEN9_SAGV_ENABLE 0x3
-#define DG1_PCODE_STATUS 0x7E
-#define DG1_UNCORE_GET_INIT_STATUS 0x0
-#define DG1_UNCORE_INIT_STATUS_COMPLETE 0x1
-#define PCODE_POWER_SETUP 0x7C
-#define POWER_SETUP_SUBCOMMAND_READ_I1 0x4
-#define POWER_SETUP_SUBCOMMAND_WRITE_I1 0x5
-#define POWER_SETUP_I1_WATTS REG_BIT(31)
-#define POWER_SETUP_I1_SHIFT 6 /* 10.6 fixed point format */
-#define POWER_SETUP_I1_DATA_MASK REG_GENMASK(15, 0)
-#define POWER_SETUP_SUBCOMMAND_G8_ENABLE 0x6
-#define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23
-#define XEHP_PCODE_FREQUENCY_CONFIG 0x6e /* pvc */
-/* XEHP_PCODE_FREQUENCY_CONFIG sub-commands (param1) */
-#define PCODE_MBOX_FC_SC_READ_FUSED_P0 0x0
-#define PCODE_MBOX_FC_SC_READ_FUSED_PN 0x1
-/* PCODE_MBOX_DOMAIN_* - mailbox domain IDs */
-/* XEHP_PCODE_FREQUENCY_CONFIG param2 */
-#define PCODE_MBOX_DOMAIN_NONE 0x0
-#define PCODE_MBOX_DOMAIN_MEDIAFF 0x3
#define GEN6_PCODE_DATA _MMIO(0x138128)
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
@@ -1224,20 +776,9 @@
#define SGGI_DIS REG_BIT(15)
#define SGR_DIS REG_BIT(13)
-#define PRIMARY_SPI_TRIGGER _MMIO(0x102040)
-#define PRIMARY_SPI_ADDRESS _MMIO(0x102080)
-#define PRIMARY_SPI_REGIONID _MMIO(0x102084)
-#define SPI_STATIC_REGIONS _MMIO(0x102090)
-#define OPTIONROM_SPI_REGIONID_MASK REG_GENMASK(7, 0)
-#define OROM_OFFSET _MMIO(0x1020c0)
-#define OROM_OFFSET_MASK REG_GENMASK(20, 16)
-
-#define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700)
-#define XE3P_ECC_IMPACTING_DE REG_BIT(12)
-#define MTL_N_OF_ENABLED_QGV_POINTS_MASK REG_GENMASK(11, 8)
-#define MTL_N_OF_POPULATED_CH_MASK REG_GENMASK(7, 4)
-#define MTL_DDR_TYPE_MASK REG_GENMASK(3, 0)
-
#define MTL_MEDIA_GSI_BASE 0x380000
+#define DSPCLK_GATE_D _MMIO(0x6200)
+# define OVRUNIT_CLOCK_GATE_DISABLE (1 << 3)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h
index e81fac8ab51b..e897d3ccbf9e 100644
--- a/drivers/gpu/drm/i915/i915_reg_defs.h
+++ b/drivers/gpu/drm/i915/i915_reg_defs.h
@@ -6,183 +6,8 @@
#ifndef __I915_REG_DEFS__
#define __I915_REG_DEFS__
-#include <linux/bitfield.h>
-#include <linux/bits.h>
-
-/*
- * Wrappers over the generic fixed width BIT_U*() and GENMASK_U*()
- * implementations, for compatibility reasons with previous implementation.
- */
-#define REG_GENMASK(high, low) GENMASK_U32(high, low)
-#define REG_GENMASK64(high, low) GENMASK_U64(high, low)
-#define REG_GENMASK16(high, low) GENMASK_U16(high, low)
-#define REG_GENMASK8(high, low) GENMASK_U8(high, low)
-
-#define REG_BIT(n) BIT_U32(n)
-#define REG_BIT64(n) BIT_U64(n)
-#define REG_BIT16(n) BIT_U16(n)
-#define REG_BIT8(n) BIT_U8(n)
-
-/*
- * Local integer constant expression version of is_power_of_2().
- */
-#define IS_POWER_OF_2(__x) ((__x) && (((__x) & ((__x) - 1)) == 0))
-
-/**
- * REG_FIELD_PREP() - Prepare a u32 bitfield value
- * @__mask: shifted mask defining the field's length and position
- * @__val: value to put in the field
- *
- * Local copy of FIELD_PREP() to generate an integer constant expression, force
- * u32 and for consistency with REG_FIELD_GET(), REG_BIT() and REG_GENMASK().
- *
- * @return: @__val masked and shifted into the field defined by @__mask.
- */
-#define REG_FIELD_PREP(__mask, __val) \
- ((u32)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \
- BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \
- BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U32_MAX) + \
- BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
- BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
-
-/**
- * REG_FIELD_PREP8() - Prepare a u8 bitfield value
- * @__mask: shifted mask defining the field's length and position
- * @__val: value to put in the field
- *
- * Local copy of FIELD_PREP() to generate an integer constant expression, force
- * u8 and for consistency with REG_FIELD_GET8(), REG_BIT8() and REG_GENMASK8().
- *
- * @return: @__val masked and shifted into the field defined by @__mask.
- */
-#define REG_FIELD_PREP8(__mask, __val) \
- ((u8)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \
- BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \
- BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U8_MAX) + \
- BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
- BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
-
-/**
- * REG_FIELD_GET() - Extract a u32 bitfield value
- * @__mask: shifted mask defining the field's length and position
- * @__val: value to extract the bitfield value from
- *
- * Local wrapper for FIELD_GET() to force u32 and for consistency with
- * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK().
- *
- * @return: Masked and shifted value of the field defined by @__mask in @__val.
- */
-#define REG_FIELD_GET(__mask, __val) ((u32)FIELD_GET(__mask, __val))
-
-/**
- * REG_FIELD_GET64() - Extract a u64 bitfield value
- * @__mask: shifted mask defining the field's length and position
- * @__val: value to extract the bitfield value from
- *
- * Local wrapper for FIELD_GET() to force u64 and for consistency with
- * REG_GENMASK64().
- *
- * @return: Masked and shifted value of the field defined by @__mask in @__val.
- */
-#define REG_FIELD_GET64(__mask, __val) ((u64)FIELD_GET(__mask, __val))
-
-
-/**
- * REG_FIELD_PREP16() - Prepare a u16 bitfield value
- * @__mask: shifted mask defining the field's length and position
- * @__val: value to put in the field
- *
- * Local copy of FIELD_PREP16() to generate an integer constant
- * expression, force u8 and for consistency with
- * REG_FIELD_GET16(), REG_BIT16() and REG_GENMASK16().
- *
- * @return: @__val masked and shifted into the field defined by @__mask.
- */
-#define REG_FIELD_PREP16(__mask, __val) \
- ((u16)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \
- BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \
- BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U16_MAX) + \
- BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
- BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
-
-#define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
-#define _MASKED_FIELD(mask, value) ({ \
- if (__builtin_constant_p(mask)) \
- BUILD_BUG_ON_MSG(((mask) & 0xffff0000), "Incorrect mask"); \
- if (__builtin_constant_p(value)) \
- BUILD_BUG_ON_MSG((value) & 0xffff0000, "Incorrect value"); \
- if (__builtin_constant_p(mask) && __builtin_constant_p(value)) \
- BUILD_BUG_ON_MSG((value) & ~(mask), \
- "Incorrect value for mask"); \
- __MASKED_FIELD(mask, value); })
-#define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
-#define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0))
-
-/*
- * Given the first two numbers __a and __b of arbitrarily many evenly spaced
- * numbers, pick the 0-based __index'th value.
- *
- * Always prefer this over _PICK() if the numbers are evenly spaced.
- */
-#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))
-
-/*
- * Like _PICK_EVEN(), but supports 2 ranges of evenly spaced address offsets.
- * @__c_index corresponds to the index in which the second range starts to be
- * used. Using math interval notation, the first range is used for indexes [ 0,
- * @__c_index), while the second range is used for [ @__c_index, ... ). Example:
- *
- * #define _FOO_A 0xf000
- * #define _FOO_B 0xf004
- * #define _FOO_C 0xf008
- * #define _SUPER_FOO_A 0xa000
- * #define _SUPER_FOO_B 0xa100
- * #define FOO(x) _MMIO(_PICK_EVEN_2RANGES(x, 3, \
- * _FOO_A, _FOO_B, \
- * _SUPER_FOO_A, _SUPER_FOO_B))
- *
- * This expands to:
- * 0: 0xf000,
- * 1: 0xf004,
- * 2: 0xf008,
- * 3: 0xa000,
- * 4: 0xa100,
- * 5: 0xa200,
- * ...
- */
-#define _PICK_EVEN_2RANGES(__index, __c_index, __a, __b, __c, __d) \
- (BUILD_BUG_ON_ZERO(!__is_constexpr(__c_index)) + \
- ((__index) < (__c_index) ? _PICK_EVEN(__index, __a, __b) : \
- _PICK_EVEN((__index) - (__c_index), __c, __d)))
-
-/*
- * Given the arbitrary numbers in varargs, pick the 0-based __index'th number.
- *
- * Always prefer _PICK_EVEN() over this if the numbers are evenly spaced.
- */
-#define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])
-
-/**
- * REG_FIELD_GET8() - Extract a u8 bitfield value
- * @__mask: shifted mask defining the field's length and position
- * @__val: value to extract the bitfield value from
- *
- * Local wrapper for FIELD_GET() to force u8 and for consistency with
- * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK().
- *
- * @return: Masked and shifted value of the field defined by @__mask in @__val.
- */
-#define REG_FIELD_GET8(__mask, __val) ((u8)FIELD_GET(__mask, __val))
-
-/**
- * REG_FIELD_MAX() - produce the maximum value representable by a field
- * @__mask: shifted mask defining the field's length and position
- *
- * Local wrapper for FIELD_MAX() to return the maximum bit value that can
- * be held in the field specified by @_mask, cast to u32 for consistency
- * with other macros.
- */
-#define REG_FIELD_MAX(__mask) ((u32)FIELD_MAX(__mask))
+#include <drm/intel/pick.h>
+#include <drm/intel/reg_bits.h>
typedef struct {
u32 reg;
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c
index d2e56b387993..366418108f78 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence_work.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c
@@ -38,7 +38,7 @@ fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
if (test_bit(DMA_FENCE_WORK_IMM, &f->dma.flags))
fence_work(&f->work);
else
- queue_work(system_unbound_wq, &f->work);
+ queue_work(system_dfl_wq, &f->work);
} else {
fence_complete(f);
}
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index afc192d9931b..6a3a4d4244dc 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -27,6 +27,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
+#include <drm/intel/display_parent_interface.h>
#include "display/intel_fb.h"
#include "display/intel_frontbuffer.h"
@@ -2332,3 +2333,12 @@ int __init i915_vma_module_init(void)
return 0;
}
+
+static int i915_vma_fence_id(const struct i915_vma *vma)
+{
+ return vma->fence ? vma->fence->id : -1;
+}
+
+const struct intel_display_vma_interface i915_display_vma_interface = {
+ .fence_id = i915_vma_fence_id,
+};
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 8054047840aa..fa2d9b429db6 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -404,11 +404,6 @@ i915_vma_unpin_fence(struct i915_vma *vma)
__i915_vma_unpin_fence(vma);
}
-static inline int i915_vma_fence_id(const struct i915_vma *vma)
-{
- return vma->fence ? vma->fence->id : -1;
-}
-
void i915_vma_parked(struct intel_gt *gt);
static inline bool i915_vma_is_scanout(const struct i915_vma *vma)
@@ -481,4 +476,6 @@ int i915_vma_module_init(void);
I915_SELFTEST_DECLARE(int i915_vma_get_pages(struct i915_vma *vma));
I915_SELFTEST_DECLARE(void i915_vma_put_pages(struct i915_vma *vma));
+extern const struct intel_display_vma_interface i915_display_vma_interface;
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.c b/drivers/gpu/drm/i915/i915_vma_resource.c
index 53d619ef0c3d..a8f2112ce81f 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.c
+++ b/drivers/gpu/drm/i915/i915_vma_resource.c
@@ -202,7 +202,7 @@ i915_vma_resource_fence_notify(struct i915_sw_fence *fence,
i915_vma_resource_unbind_work(&vma_res->work);
} else {
INIT_WORK(&vma_res->work, i915_vma_resource_unbind_work);
- queue_work(system_unbound_wq, &vma_res->work);
+ queue_work(system_dfl_wq, &vma_res->work);
}
break;
case FENCE_FREE:
diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c
index 7336934bb934..68a6f94f2a37 100644
--- a/drivers/gpu/drm/i915/intel_clock_gating.c
+++ b/drivers/gpu/drm/i915/intel_clock_gating.c
@@ -26,11 +26,13 @@
*/
#include <drm/drm_print.h>
+#include <drm/intel/intel_gmd_misc_regs.h>
+#include <drm/intel/intel_gmd_interrupt_regs.h>
#include "display/i9xx_plane_regs.h"
#include "display/intel_display.h"
#include "display/intel_display_core.h"
-
+#include "display/intel_display_regs.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
@@ -452,7 +454,7 @@ static void bdw_init_clock_gating(struct drm_i915_private *i915)
GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME, 0);
intel_uncore_write(&i915->uncore, RING_PSMI_CTL(RENDER_RING_BASE),
- _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
+ REG_MASKED_FIELD_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
/* WaDisableSDEUnitClockGating:bdw */
intel_uncore_rmw(&i915->uncore, GEN8_UCGCTL6, 0, GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
@@ -517,13 +519,13 @@ static void ivb_init_clock_gating(struct drm_i915_private *i915)
if (INTEL_INFO(i915)->gt == 1)
intel_uncore_write(&i915->uncore, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ REG_MASKED_FIELD_ENABLE(DOP_CLOCK_GATING_DISABLE));
else {
/* must write both registers */
intel_uncore_write(&i915->uncore, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ REG_MASKED_FIELD_ENABLE(DOP_CLOCK_GATING_DISABLE));
intel_uncore_write(&i915->uncore, GEN7_ROW_CHICKEN2_GT2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ REG_MASKED_FIELD_ENABLE(DOP_CLOCK_GATING_DISABLE));
}
/*
@@ -557,7 +559,7 @@ static void vlv_init_clock_gating(struct drm_i915_private *i915)
/* WaDisableDopClockGating:vlv */
intel_uncore_write(&i915->uncore, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ REG_MASKED_FIELD_ENABLE(DOP_CLOCK_GATING_DISABLE));
/* This is required by WaCatErrorRejectionIssue:vlv */
intel_uncore_rmw(&i915->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
@@ -592,7 +594,7 @@ static void chv_init_clock_gating(struct drm_i915_private *i915)
/* WaDisableSemaphoreAndSyncFlipWait:chv */
intel_uncore_write(&i915->uncore, RING_PSMI_CTL(RENDER_RING_BASE),
- _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
+ REG_MASKED_FIELD_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
/* WaDisableCSUnitClockGating:chv */
intel_uncore_rmw(&i915->uncore, GEN6_UCGCTL1, 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
@@ -638,7 +640,7 @@ static void i965gm_init_clock_gating(struct drm_i915_private *i915)
intel_uncore_write16(uncore, DEUC, 0);
intel_uncore_write(uncore,
MI_ARB_STATE,
- _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+ REG_MASKED_FIELD_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i965g_init_clock_gating(struct drm_i915_private *i915)
@@ -650,7 +652,7 @@ static void i965g_init_clock_gating(struct drm_i915_private *i915)
I965_FBC_CLOCK_GATE_DISABLE);
intel_uncore_write(&i915->uncore, RENCLK_GATE_D2, 0);
intel_uncore_write(&i915->uncore, MI_ARB_STATE,
- _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+ REG_MASKED_FIELD_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void gen3_init_clock_gating(struct drm_i915_private *i915)
@@ -663,21 +665,21 @@ static void gen3_init_clock_gating(struct drm_i915_private *i915)
if (IS_PINEVIEW(i915))
intel_uncore_write(&i915->uncore, ECOSKPD(RENDER_RING_BASE),
- _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+ REG_MASKED_FIELD_ENABLE(ECO_GATING_CX_ONLY));
/* IIR "flip pending" means done if this bit is set */
intel_uncore_write(&i915->uncore, ECOSKPD(RENDER_RING_BASE),
- _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+ REG_MASKED_FIELD_DISABLE(ECO_FLIP_DONE));
/* interrupts should cause a wake up from C3 */
- intel_uncore_write(&i915->uncore, INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
+ intel_uncore_write(&i915->uncore, INSTPM, REG_MASKED_FIELD_ENABLE(INSTPM_AGPBUSY_INT_EN));
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
intel_uncore_write(&i915->uncore, MI_ARB_STATE,
- _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
+ REG_MASKED_FIELD_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
intel_uncore_write(&i915->uncore, MI_ARB_STATE,
- _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+ REG_MASKED_FIELD_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i85x_init_clock_gating(struct drm_i915_private *i915)
@@ -685,11 +687,11 @@ static void i85x_init_clock_gating(struct drm_i915_private *i915)
intel_uncore_write(&i915->uncore, RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
/* interrupts should cause a wake up from C3 */
- intel_uncore_write(&i915->uncore, MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
- _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
+ intel_uncore_write(&i915->uncore, MI_STATE, REG_MASKED_FIELD_ENABLE(MI_AGPBUSY_INT_EN) |
+ REG_MASKED_FIELD_DISABLE(MI_AGPBUSY_830_MODE));
intel_uncore_write(&i915->uncore, MEM_MODE,
- _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
+ REG_MASKED_FIELD_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
/*
* Have FBC ignore 3D activity since we use software
@@ -699,14 +701,14 @@ static void i85x_init_clock_gating(struct drm_i915_private *i915)
* until a 2D blit occurs.
*/
intel_uncore_write(&i915->uncore, SCPD0,
- _MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D));
+ REG_MASKED_FIELD_ENABLE(SCPD_FBC_IGNORE_3D));
}
static void i830_init_clock_gating(struct drm_i915_private *i915)
{
intel_uncore_write(&i915->uncore, MEM_MODE,
- _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
- _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
+ REG_MASKED_FIELD_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
+ REG_MASKED_FIELD_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
void intel_clock_gating_init(struct drm_device *drm)
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 478d00f89a4b..ae42818ab6e0 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -3,6 +3,11 @@
* Copyright © 2020 Intel Corporation
*/
+#include <drm/intel/intel_pcode_regs.h>
+#include <drm/intel/intel_gmd_misc_regs.h>
+
+#include <drm/intel/intel_gmd_interrupt_regs.h>
+
#include "display/bxt_dpio_phy_regs.h"
#include "display/i9xx_plane_regs.h"
#include "display/i9xx_wm_regs.h"
@@ -11,12 +16,12 @@
#include "display/intel_color_regs.h"
#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
-#include "display/intel_display_core.h"
+#include "display/intel_display_limits.h"
#include "display/intel_display_regs.h"
-#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
#include "display/intel_dp_aux_regs.h"
#include "display/intel_dpio_phy.h"
+#include "display/intel_fbc.h"
#include "display/intel_fbc_regs.h"
#include "display/intel_fdi_regs.h"
#include "display/intel_lvds_regs.h"
@@ -32,6 +37,7 @@
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt_regs.h"
+#include "gvt/display_helpers.h"
#include "gvt/reg.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c
index 756652b8ec97..c07d48fc1b35 100644
--- a/drivers/gpu/drm/i915/intel_pcode.c
+++ b/drivers/gpu/drm/i915/intel_pcode.c
@@ -4,6 +4,8 @@
*/
#include <drm/drm_print.h>
+#include <drm/intel/display_parent_interface.h>
+#include <drm/intel/intel_pcode_regs.h>
#include "i915_drv.h"
#include "i915_reg.h"
@@ -276,26 +278,31 @@ int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u3
return err;
}
-/* Helpers with drm device */
-int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1)
+static int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1)
{
struct drm_i915_private *i915 = to_i915(drm);
return snb_pcode_read(&i915->uncore, mbox, val, val1);
}
-int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms)
+static int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms)
{
struct drm_i915_private *i915 = to_i915(drm);
return snb_pcode_write_timeout(&i915->uncore, mbox, val, timeout_ms);
}
-int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
- u32 reply_mask, u32 reply, int timeout_base_ms)
+static int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
{
struct drm_i915_private *i915 = to_i915(drm);
return skl_pcode_request(&i915->uncore, mbox, request, reply_mask, reply,
timeout_base_ms);
}
+
+const struct intel_display_pcode_interface i915_display_pcode_interface = {
+ .read = intel_pcode_read,
+ .write = intel_pcode_write_timeout,
+ .request = intel_pcode_request,
+};
diff --git a/drivers/gpu/drm/i915/intel_pcode.h b/drivers/gpu/drm/i915/intel_pcode.h
index c91a821a88d4..19795ea8172e 100644
--- a/drivers/gpu/drm/i915/intel_pcode.h
+++ b/drivers/gpu/drm/i915/intel_pcode.h
@@ -27,13 +27,6 @@ int intel_pcode_init(struct intel_uncore *uncore);
int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val);
int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 val);
-/* Helpers with drm device */
-int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1);
-int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms);
-#define intel_pcode_write(drm, mbox, val) \
- intel_pcode_write_timeout((drm), (mbox), (val), 1)
-
-int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
- u32 reply_mask, u32 reply, int timeout_base_ms);
+extern const struct intel_display_pcode_interface i915_display_pcode_interface;
#endif /* _INTEL_PCODE_H */
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index bccedd59a114..5b698d4d7a7f 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -132,8 +132,8 @@ intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
}
#define fw_ack(d) readl((d)->reg_ack)
-#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
-#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
+#define fw_set(d, val) writel(REG_MASKED_FIELD_ENABLE((val)), (d)->reg_set)
+#define fw_clear(d, val) writel(REG_MASKED_FIELD_DISABLE((val)), (d)->reg_set)
static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index f7ed4e18a3ab..3d7f045f662d 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -66,8 +66,8 @@ bool intel_pxp_is_active(const struct intel_pxp *pxp)
static void kcr_pxp_set_status(const struct intel_pxp *pxp, bool enable)
{
- u32 val = enable ? _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
- _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
+ u32 val = enable ? REG_MASKED_FIELD_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
+ REG_MASKED_FIELD_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
intel_uncore_write(pxp->ctrl_gt->uncore, KCR_INIT(pxp->kcr_base), val);
}
@@ -278,7 +278,7 @@ static void pxp_queue_termination(struct intel_pxp *pxp)
spin_lock_irq(gt->irq_lock);
intel_pxp_mark_termination_in_progress(pxp);
pxp->session_events |= PXP_TERMINATION_REQUEST;
- queue_work(system_unbound_wq, &pxp->session_work);
+ queue_work(system_dfl_wq, &pxp->session_work);
spin_unlock_irq(gt->irq_lock);
}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
index d81750b9bdda..735325e828bc 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
@@ -48,7 +48,7 @@ void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
pxp->session_events |= PXP_TERMINATION_COMPLETE | PXP_EVENT_TYPE_IRQ;
if (pxp->session_events)
- queue_work(system_unbound_wq, &pxp->session_work);
+ queue_work(system_dfl_wq, &pxp->session_work);
}
static inline void __pxp_set_interrupts(struct intel_gt *gt, u32 interrupts)
diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
index 6c376338bb37..6a305322e30d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
@@ -526,7 +526,7 @@ static int test_ipc(void *arg)
struct workqueue_struct *wq;
int ret = 0;
- wq = alloc_workqueue("i1915-selftest", 0, 0);
+	wq = alloc_workqueue("i915-selftest", WQ_PERCPU, 0);
if (wq == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 210b9f8f7b61..27dc0e40a8d7 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -223,7 +223,7 @@ struct drm_i915_private *mock_gem_device(void)
if (!i915->wq)
goto err_drv;
- i915->unordered_wq = alloc_workqueue("mock-unordered", 0, 0);
+ i915->unordered_wq = alloc_workqueue("mock-unordered", WQ_PERCPU, 0);
if (!i915->unordered_wq)
goto err_wq;
diff --git a/drivers/gpu/drm/i915/vlv_suspend.c b/drivers/gpu/drm/i915/vlv_suspend.c
index dac4b9bac743..3612b03cabd1 100644
--- a/drivers/gpu/drm/i915/vlv_suspend.c
+++ b/drivers/gpu/drm/i915/vlv_suspend.c
@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_gmd_interrupt_regs.h>
#include "gt/intel_gt_regs.h"
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 3a3f9f22d42a..dab979287a96 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -211,14 +211,15 @@ $(obj)/i915-display/%.o: $(srctree)/drivers/gpu/drm/i915/display/%.c FORCE
# Display code specific to xe
xe-$(CONFIG_DRM_XE_DISPLAY) += \
- display/intel_bo.o \
- display/intel_fb_bo.o \
display/intel_fbdev_fb.o \
display/xe_display.o \
+ display/xe_display_bo.o \
+ display/xe_display_pcode.o \
display/xe_display_rpm.o \
display/xe_display_wa.o \
display/xe_dsb_buffer.o \
display/xe_fb_pin.o \
+ display/xe_frontbuffer.o \
display/xe_hdcp_gsc.o \
display/xe_initial_plane.o \
display/xe_panic.o \
@@ -233,6 +234,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_audio.o \
i915-display/intel_backlight.o \
i915-display/intel_bios.o \
+ i915-display/intel_bo.o \
i915-display/intel_bw.o \
i915-display/intel_casf.o \
i915-display/intel_cdclk.o \
@@ -275,7 +277,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_dp_test.o \
i915-display/intel_dpll.o \
i915-display/intel_dpll_mgr.o \
- i915-display/intel_dpt_common.o \
+ i915-display/intel_dpt.o \
i915-display/intel_dram.o \
i915-display/intel_drrs.o \
i915-display/intel_dsb.o \
@@ -304,6 +306,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_modeset_lock.o \
i915-display/intel_modeset_setup.o \
i915-display/intel_modeset_verify.o \
+ i915-display/intel_overlay.o \
i915-display/intel_panel.o \
i915-display/intel_parent.o \
i915-display/intel_pch.o \
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_reg.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_reg.h
deleted file mode 100644
index 8619ec015ad4..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_reg.h
+++ /dev/null
@@ -1,6 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "../../i915/i915_reg.h"
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
deleted file mode 100644
index c4b5adaaa99a..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef I915_VMA_H
-#define I915_VMA_H
-
-#include <uapi/drm/i915_drm.h>
-
-#include "xe_ggtt.h"
-
-#include <linux/refcount.h>
-
-/* We don't want these from i915_drm.h in case of Xe */
-#undef I915_TILING_X
-#undef I915_TILING_Y
-#define I915_TILING_X 0
-#define I915_TILING_Y 0
-
-struct xe_bo;
-
-struct i915_vma {
- refcount_t ref;
- struct xe_bo *bo, *dpt;
- struct xe_ggtt_node *node;
-};
-
-#define i915_vma_fence_id(vma) -1
-
-static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
-{
- return xe_ggtt_node_addr(vma->node);
-}
-
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
deleted file mode 100644
index 4fcd3bf6b76f..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef __INTEL_PCODE_H__
-#define __INTEL_PCODE_H__
-
-#include "xe_pcode.h"
-
-#endif /* __INTEL_PCODE_H__ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
index c05d4c4292d3..a8cfd65119e0 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
@@ -6,6 +6,7 @@
#ifndef __INTEL_UNCORE_H__
#define __INTEL_UNCORE_H__
+#include "i915_reg_defs.h"
#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_mmio.h"
@@ -38,6 +39,14 @@ static inline u8 intel_uncore_read8(struct intel_uncore *uncore,
return xe_mmio_read8(__compat_uncore_to_mmio(uncore), reg);
}
+static inline void intel_uncore_write8(struct intel_uncore *uncore,
+ i915_reg_t i915_reg, u8 val)
+{
+ struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
+
+ xe_mmio_write8(__compat_uncore_to_mmio(uncore), reg, val);
+}
+
static inline u16 intel_uncore_read16(struct intel_uncore *uncore,
i915_reg_t i915_reg)
{
diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c
deleted file mode 100644
index 05d5e5c0a0de..000000000000
--- a/drivers/gpu/drm/xe/display/intel_bo.c
+++ /dev/null
@@ -1,109 +0,0 @@
-// SPDX-License-Identifier: MIT
-/* Copyright © 2024 Intel Corporation */
-
-#include <drm/drm_gem.h>
-
-#include "intel_bo.h"
-#include "intel_frontbuffer.h"
-#include "xe_bo.h"
-#include "xe_pxp.h"
-
-bool intel_bo_is_tiled(struct drm_gem_object *obj)
-{
- /* legacy tiling is unused */
- return false;
-}
-
-bool intel_bo_is_userptr(struct drm_gem_object *obj)
-{
- /* xe does not have userptr bos */
- return false;
-}
-
-bool intel_bo_is_shmem(struct drm_gem_object *obj)
-{
- return false;
-}
-
-bool intel_bo_is_protected(struct drm_gem_object *obj)
-{
- return xe_bo_is_protected(gem_to_xe_bo(obj));
-}
-
-int intel_bo_key_check(struct drm_gem_object *obj)
-{
- return xe_pxp_obj_key_check(obj);
-}
-
-int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
- return drm_gem_prime_mmap(obj, vma);
-}
-
-int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
-{
- struct xe_bo *bo = gem_to_xe_bo(obj);
-
- return xe_bo_read(bo, offset, dst, size);
-}
-
-struct xe_frontbuffer {
- struct intel_frontbuffer base;
- struct drm_gem_object *obj;
- struct kref ref;
-};
-
-struct intel_frontbuffer *intel_bo_frontbuffer_get(struct drm_gem_object *obj)
-{
- struct xe_frontbuffer *front;
-
- front = kmalloc_obj(*front);
- if (!front)
- return NULL;
-
- intel_frontbuffer_init(&front->base, obj->dev);
-
- kref_init(&front->ref);
-
- drm_gem_object_get(obj);
- front->obj = obj;
-
- return &front->base;
-}
-
-void intel_bo_frontbuffer_ref(struct intel_frontbuffer *_front)
-{
- struct xe_frontbuffer *front =
- container_of(_front, typeof(*front), base);
-
- kref_get(&front->ref);
-}
-
-static void frontbuffer_release(struct kref *ref)
-{
- struct xe_frontbuffer *front =
- container_of(ref, typeof(*front), ref);
-
- intel_frontbuffer_fini(&front->base);
-
- drm_gem_object_put(front->obj);
-
- kfree(front);
-}
-
-void intel_bo_frontbuffer_put(struct intel_frontbuffer *_front)
-{
- struct xe_frontbuffer *front =
- container_of(_front, typeof(*front), base);
-
- kref_put(&front->ref, frontbuffer_release);
-}
-
-void intel_bo_frontbuffer_flush_for_display(struct intel_frontbuffer *front)
-{
-}
-
-void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
-{
- /* FIXME */
-}
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index 7ad76022cb14..87af5646c938 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -23,6 +23,29 @@ u32 intel_fbdev_fb_pitch_align(u32 stride)
return ALIGN(stride, XE_PAGE_SIZE);
}
+bool intel_fbdev_fb_prefer_stolen(struct drm_device *drm, unsigned int size)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct ttm_resource_manager *stolen;
+
+ stolen = ttm_manager_type(&xe->ttm, XE_PL_STOLEN);
+ if (!stolen)
+ return false;
+
+ if (IS_DGFX(xe))
+ return false;
+
+ if (XE_DEVICE_WA(xe, 22019338487_display))
+ return false;
+
+ /*
+ * If the FB is too big, just don't use it since fbdev is not very
+ * important and we should probably use that space with FBC or other
+ * features.
+ */
+	return stolen->size > ((size * 2) >> PAGE_SHIFT);
+}
+
struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size)
{
struct xe_device *xe = to_xe_device(drm);
@@ -30,7 +53,7 @@ struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size
obj = ERR_PTR(-ENODEV);
- if (!IS_DGFX(xe) && !XE_DEVICE_WA(xe, 22019338487_display)) {
+ if (intel_fbdev_fb_prefer_stolen(drm, size)) {
obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe),
size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
@@ -40,6 +63,8 @@ struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size
drm_info(&xe->drm, "Allocated fbdev into stolen\n");
else
drm_info(&xe->drm, "Allocated fbdev into stolen failed: %li\n", PTR_ERR(obj));
+ } else {
+ drm_info(&xe->drm, "Allocating fbdev: Stolen memory not preferred.\n");
}
if (IS_ERR(obj)) {
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index f8a831b5dc7d..49b6f98e7391 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -35,7 +35,11 @@
#include "intel_hotplug.h"
#include "intel_opregion.h"
#include "skl_watermark.h"
+#include "xe_display_bo.h"
+#include "xe_display_pcode.h"
#include "xe_display_rpm.h"
+#include "xe_dsb_buffer.h"
+#include "xe_frontbuffer.h"
#include "xe_hdcp_gsc.h"
#include "xe_initial_plane.h"
#include "xe_module.h"
@@ -538,10 +542,14 @@ static const struct intel_display_irq_interface xe_display_irq_interface = {
};
static const struct intel_display_parent_interface parent = {
+ .bo = &xe_display_bo_interface,
+ .dsb = &xe_display_dsb_interface,
+ .frontbuffer = &xe_display_frontbuffer_interface,
.hdcp = &xe_display_hdcp_interface,
.initial_plane = &xe_display_initial_plane_interface,
.irq = &xe_display_irq_interface,
.panic = &xe_display_panic_interface,
+ .pcode = &xe_display_pcode_interface,
.rpm = &xe_display_rpm_interface,
.stolen = &xe_display_stolen_interface,
};
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/xe_display_bo.c
index db8b1a27b4de..a689f71e7b14 100644
--- a/drivers/gpu/drm/xe/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/xe/display/xe_display_bo.c
@@ -1,31 +1,28 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2021 Intel Corporation
- */
+// SPDX-License-Identifier: MIT
+/* Copyright © 2024 Intel Corporation */
-#include <drm/drm_modeset_helper.h>
-#include <drm/ttm/ttm_bo.h>
+#include <drm/drm_gem.h>
+#include <drm/intel/display_parent_interface.h>
-#include "intel_display_types.h"
#include "intel_fb.h"
-#include "intel_fb_bo.h"
#include "xe_bo.h"
+#include "xe_display_bo.h"
+#include "xe_pxp.h"
-void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj)
+static bool xe_display_bo_is_protected(struct drm_gem_object *obj)
+{
+ return xe_bo_is_protected(gem_to_xe_bo(obj));
+}
+
+static int xe_display_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
{
struct xe_bo *bo = gem_to_xe_bo(obj);
- if (bo->flags & XE_BO_FLAG_PINNED) {
- /* Unpin our kernel fb first */
- xe_bo_lock(bo, false);
- xe_bo_unpin(bo);
- xe_bo_unlock(bo);
- }
- xe_bo_put(bo);
+ return xe_bo_read(bo, offset, dst, size);
}
-int intel_fb_bo_framebuffer_init(struct drm_gem_object *obj,
- struct drm_mode_fb_cmd2 *mode_cmd)
+static int xe_display_bo_framebuffer_init(struct drm_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd)
{
struct xe_bo *bo = gem_to_xe_bo(obj);
struct xe_device *xe = to_xe_device(bo->ttm.base.dev);
@@ -67,9 +64,23 @@ err:
return ret;
}
-struct drm_gem_object *intel_fb_bo_lookup_valid_bo(struct drm_device *drm,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd)
+static void xe_display_bo_framebuffer_fini(struct drm_gem_object *obj)
+{
+ struct xe_bo *bo = gem_to_xe_bo(obj);
+
+ if (bo->flags & XE_BO_FLAG_PINNED) {
+ /* Unpin our kernel fb first */
+ xe_bo_lock(bo, false);
+ xe_bo_unpin(bo);
+ xe_bo_unlock(bo);
+ }
+ xe_bo_put(bo);
+}
+
+static struct drm_gem_object *
+xe_display_bo_framebuffer_lookup(struct drm_device *drm,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct xe_device *xe = to_xe_device(drm);
struct xe_bo *bo;
@@ -89,3 +100,13 @@ struct drm_gem_object *intel_fb_bo_lookup_valid_bo(struct drm_device *drm,
return gem;
}
+
+const struct intel_display_bo_interface xe_display_bo_interface = {
+ .is_protected = xe_display_bo_is_protected,
+ .key_check = xe_pxp_obj_key_check,
+ .fb_mmap = drm_gem_prime_mmap,
+ .read_from_page = xe_display_bo_read_from_page,
+ .framebuffer_init = xe_display_bo_framebuffer_init,
+ .framebuffer_fini = xe_display_bo_framebuffer_fini,
+ .framebuffer_lookup = xe_display_bo_framebuffer_lookup,
+};
diff --git a/drivers/gpu/drm/xe/display/xe_display_bo.h b/drivers/gpu/drm/xe/display/xe_display_bo.h
new file mode 100644
index 000000000000..6879c104b0b1
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_bo.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef __XE_DISPLAY_BO_H__
+#define __XE_DISPLAY_BO_H__
+
+extern const struct intel_display_bo_interface xe_display_bo_interface;
+
+#endif
diff --git a/drivers/gpu/drm/xe/display/xe_display_pcode.c b/drivers/gpu/drm/xe/display/xe_display_pcode.c
new file mode 100644
index 000000000000..f6820ef7e666
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_pcode.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2026 Intel Corporation */
+
+#include <drm/intel/display_parent_interface.h>
+
+#include "xe_device.h"
+#include "xe_pcode.h"
+
+static int xe_display_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+
+ return xe_pcode_read(tile, mbox, val, val1);
+}
+
+static int xe_display_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+
+ return xe_pcode_write_timeout(tile, mbox, val, timeout_ms);
+}
+
+static int xe_display_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+
+ return xe_pcode_request(tile, mbox, request, reply_mask, reply, timeout_base_ms);
+}
+
+const struct intel_display_pcode_interface xe_display_pcode_interface = {
+ .read = xe_display_pcode_read,
+ .write = xe_display_pcode_write_timeout,
+ .request = xe_display_pcode_request,
+};
diff --git a/drivers/gpu/drm/xe/display/xe_display_pcode.h b/drivers/gpu/drm/xe/display/xe_display_pcode.h
new file mode 100644
index 000000000000..58bd2fb7fb79
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_pcode.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef __XE_DISPLAY_PCODE_H__
+#define __XE_DISPLAY_PCODE_H__
+
+extern const struct intel_display_pcode_interface xe_display_pcode_interface;
+
+#endif
diff --git a/drivers/gpu/drm/xe/display/xe_display_vma.h b/drivers/gpu/drm/xe/display/xe_display_vma.h
new file mode 100644
index 000000000000..28267be61ae0
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_vma.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef _XE_DISPLAY_VMA_H_
+#define _XE_DISPLAY_VMA_H_
+
+#include <linux/refcount.h>
+
+struct xe_bo;
+struct xe_ggtt_node;
+
+struct i915_vma {
+ refcount_t ref;
+ struct xe_bo *bo, *dpt;
+ struct xe_ggtt_node *node;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
index 8ffc13855ef7..1c67a950c6ad 100644
--- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
+++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
@@ -3,10 +3,12 @@
* Copyright 2023, Intel Corporation.
*/
-#include "intel_dsb_buffer.h"
+#include <drm/intel/display_parent_interface.h>
+
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_device_types.h"
+#include "xe_dsb_buffer.h"
struct intel_dsb_buffer {
u32 *cmd_buf;
@@ -14,29 +16,29 @@ struct intel_dsb_buffer {
size_t buf_size;
};
-u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
+static u32 xe_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
{
return xe_bo_ggtt_addr(dsb_buf->bo);
}
-void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
+static void xe_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
{
iosys_map_wr(&dsb_buf->bo->vmap, idx * 4, u32, val);
}
-u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
+static u32 xe_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
{
return iosys_map_rd(&dsb_buf->bo->vmap, idx * 4, u32);
}
-void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
+static void xe_dsb_buffer_fill(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
{
WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf));
iosys_map_memset(&dsb_buf->bo->vmap, idx * 4, val, size);
}
-struct intel_dsb_buffer *intel_dsb_buffer_create(struct drm_device *drm, size_t size)
+static struct intel_dsb_buffer *xe_dsb_buffer_create(struct drm_device *drm, size_t size)
{
struct xe_device *xe = to_xe_device(drm);
struct intel_dsb_buffer *dsb_buf;
@@ -69,13 +71,13 @@ err_pin_map:
return ERR_PTR(ret);
}
-void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
+static void xe_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
{
xe_bo_unpin_map_no_vm(dsb_buf->bo);
kfree(dsb_buf);
}
-void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
+static void xe_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
{
struct xe_device *xe = dsb_buf->bo->tile->xe;
@@ -86,3 +88,13 @@ void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
xe_device_wmb(xe);
xe_device_l2_flush(xe);
}
+
+const struct intel_display_dsb_interface xe_display_dsb_interface = {
+ .ggtt_offset = xe_dsb_buffer_ggtt_offset,
+ .write = xe_dsb_buffer_write,
+ .read = xe_dsb_buffer_read,
+ .fill = xe_dsb_buffer_fill,
+ .create = xe_dsb_buffer_create,
+ .cleanup = xe_dsb_buffer_cleanup,
+ .flush_map = xe_dsb_buffer_flush_map,
+};
diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.h b/drivers/gpu/drm/xe/display/xe_dsb_buffer.h
new file mode 100644
index 000000000000..2e4772187016
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef __XE_DSB_BUFFER_H__
+#define __XE_DSB_BUFFER_H__
+
+extern const struct intel_display_dsb_interface xe_display_dsb_interface;
+
+#endif
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index e1d29b6ba043..dbbc61032b7f 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -5,15 +5,14 @@
#include <drm/ttm/ttm_bo.h>
-#include "i915_vma.h"
#include "intel_display_core.h"
#include "intel_display_types.h"
-#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_fbdev.h"
#include "xe_bo.h"
#include "xe_device.h"
+#include "xe_display_vma.h"
#include "xe_ggtt.h"
#include "xe_pm.h"
#include "xe_vram_types.h"
@@ -409,7 +408,7 @@ found:
refcount_inc(&vma->ref);
new_plane_state->ggtt_vma = vma;
- new_plane_state->surf = i915_ggtt_offset(new_plane_state->ggtt_vma) +
+ new_plane_state->surf = xe_ggtt_node_addr(new_plane_state->ggtt_vma->node) +
plane->surf_offset(new_plane_state);
return true;
@@ -439,7 +438,7 @@ int intel_plane_pin_fb(struct intel_plane_state *new_plane_state,
new_plane_state->ggtt_vma = vma;
- new_plane_state->surf = i915_ggtt_offset(new_plane_state->ggtt_vma) +
+ new_plane_state->surf = xe_ggtt_node_addr(new_plane_state->ggtt_vma->node) +
plane->surf_offset(new_plane_state);
return 0;
@@ -451,25 +450,6 @@ void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
old_plane_state->ggtt_vma = NULL;
}
-/*
- * For Xe introduce dummy intel_dpt_create which just return NULL,
- * intel_dpt_destroy which does nothing, and fake intel_dpt_ofsset returning 0;
- */
-struct i915_address_space *intel_dpt_create(struct intel_framebuffer *fb)
-{
- return NULL;
-}
-
-void intel_dpt_destroy(struct i915_address_space *vm)
-{
- return;
-}
-
-u64 intel_dpt_offset(struct i915_vma *dpt_vma)
-{
- return 0;
-}
-
void intel_fb_get_map(struct i915_vma *vma, struct iosys_map *map)
{
*map = vma->bo->vmap;
diff --git a/drivers/gpu/drm/xe/display/xe_frontbuffer.c b/drivers/gpu/drm/xe/display/xe_frontbuffer.c
new file mode 100644
index 000000000000..113fc017ee94
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_frontbuffer.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2026 Intel Corporation */
+
+#include <drm/drm_gem.h>
+#include <drm/intel/display_parent_interface.h>
+
+#include "intel_frontbuffer.h"
+#include "xe_frontbuffer.h"
+
+struct xe_frontbuffer {
+ struct intel_frontbuffer base;
+ struct drm_gem_object *obj;
+ struct kref ref;
+};
+
+static struct intel_frontbuffer *xe_frontbuffer_get(struct drm_gem_object *obj)
+{
+ struct xe_frontbuffer *front;
+
+ front = kmalloc_obj(*front);
+ if (!front)
+ return NULL;
+
+ intel_frontbuffer_init(&front->base, obj->dev);
+
+ kref_init(&front->ref);
+
+ drm_gem_object_get(obj);
+ front->obj = obj;
+
+ return &front->base;
+}
+
+static void xe_frontbuffer_ref(struct intel_frontbuffer *_front)
+{
+ struct xe_frontbuffer *front =
+ container_of(_front, typeof(*front), base);
+
+ kref_get(&front->ref);
+}
+
+static void frontbuffer_release(struct kref *ref)
+{
+ struct xe_frontbuffer *front =
+ container_of(ref, typeof(*front), ref);
+
+ intel_frontbuffer_fini(&front->base);
+
+ drm_gem_object_put(front->obj);
+
+ kfree(front);
+}
+
+static void xe_frontbuffer_put(struct intel_frontbuffer *_front)
+{
+ struct xe_frontbuffer *front =
+ container_of(_front, typeof(*front), base);
+
+ kref_put(&front->ref, frontbuffer_release);
+}
+
+static void xe_frontbuffer_flush_for_display(struct intel_frontbuffer *front)
+{
+}
+
+const struct intel_display_frontbuffer_interface xe_display_frontbuffer_interface = {
+ .get = xe_frontbuffer_get,
+ .ref = xe_frontbuffer_ref,
+ .put = xe_frontbuffer_put,
+ .flush_for_display = xe_frontbuffer_flush_for_display,
+};
diff --git a/drivers/gpu/drm/xe/display/xe_frontbuffer.h b/drivers/gpu/drm/xe/display/xe_frontbuffer.h
new file mode 100644
index 000000000000..6b4f59b42ade
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_frontbuffer.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef _XE_FRONTBUFFER_H_
+#define _XE_FRONTBUFFER_H_
+
+extern const struct intel_display_frontbuffer_interface xe_display_frontbuffer_interface;
+
+#endif
diff --git a/drivers/gpu/drm/xe/display/xe_initial_plane.c b/drivers/gpu/drm/xe/display/xe_initial_plane.c
index 4cfeafcc158d..65cc0b0c934b 100644
--- a/drivers/gpu/drm/xe/display/xe_initial_plane.c
+++ b/drivers/gpu/drm/xe/display/xe_initial_plane.c
@@ -3,26 +3,21 @@
* Copyright © 2021 Intel Corporation
*/
-/* for ioread64 */
-#include <linux/io-64-nonatomic-lo-hi.h>
-
#include <drm/intel/display_parent_interface.h>
#include "regs/xe_gtt_defs.h"
-#include "xe_ggtt.h"
-#include "xe_mmio.h"
-#include "i915_vma.h"
#include "intel_crtc.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
+#include "intel_fbdev_fb.h"
#include "xe_bo.h"
+#include "xe_display_vma.h"
+#include "xe_ggtt.h"
+#include "xe_mmio.h"
#include "xe_vram_types.h"
-#include "xe_wa.h"
-
-#include <generated/xe_device_wa_oob.h>
/* Early xe has no irq */
static void xe_initial_plane_vblank_wait(struct drm_crtc *_crtc)
@@ -90,17 +85,11 @@ initial_plane_bo(struct xe_device *xe,
phys_base = base;
flags |= XE_BO_FLAG_STOLEN;
- if (XE_DEVICE_WA(xe, 22019338487_display))
- return NULL;
-
- /*
- * If the FB is too big, just don't use it since fbdev is not very
- * important and we should probably use that space with FBC or other
- * features.
- */
if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
- plane_config->size * 2 >> PAGE_SHIFT >= stolen->size)
+ !intel_fbdev_fb_prefer_stolen(&xe->drm, plane_config->size)) {
+		drm_info(&xe->drm, "Initial FB: stolen memory not preferred, discarding\n");
return NULL;
+ }
}
size = round_up(plane_config->base + plane_config->size,
@@ -170,7 +159,7 @@ xe_initial_plane_setup(struct drm_plane_state *_plane_state,
plane_state->ggtt_vma = vma;
- plane_state->surf = i915_ggtt_offset(plane_state->ggtt_vma);
+ plane_state->surf = xe_ggtt_node_addr(plane_state->ggtt_vma->node);
plane_config->vma = vma;
diff --git a/drivers/gpu/drm/xe/regs/xe_reg_defs.h b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
index c39aab843e35..27ac0bf1f6cd 100644
--- a/drivers/gpu/drm/xe/regs/xe_reg_defs.h
+++ b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
@@ -6,12 +6,13 @@
#ifndef _XE_REG_DEFS_H_
#define _XE_REG_DEFS_H_
+#include <drm/intel/pick.h>
+#include <drm/intel/reg_bits.h>
+
#include <linux/build_bug.h>
#include <linux/log2.h>
#include <linux/sizes.h>
-#include "compat-i915-headers/i915_reg_defs.h"
-
/**
* XE_REG_ADDR_MAX - The upper limit on MMIO register address
*
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c
index 39723928a019..c34408cfd292 100644
--- a/drivers/gpu/drm/xe/xe_eu_stall.c
+++ b/drivers/gpu/drm/xe/xe_eu_stall.c
@@ -442,9 +442,9 @@ static void clear_dropped_eviction_line_bit(struct xe_gt *gt, u16 group, u16 ins
* On Xe2 and later GPUs, the bit has to be cleared by writing 0 to it.
*/
if (GRAPHICS_VER(xe) >= 20)
- write_ptr_reg = _MASKED_BIT_DISABLE(XEHPC_EUSTALL_REPORT_OVERFLOW_DROP);
+ write_ptr_reg = REG_MASKED_FIELD_DISABLE(XEHPC_EUSTALL_REPORT_OVERFLOW_DROP);
else
- write_ptr_reg = _MASKED_BIT_ENABLE(XEHPC_EUSTALL_REPORT_OVERFLOW_DROP);
+ write_ptr_reg = REG_MASKED_FIELD_ENABLE(XEHPC_EUSTALL_REPORT_OVERFLOW_DROP);
xe_gt_mcr_unicast_write(gt, XEHPC_EUSTALL_REPORT, write_ptr_reg, group, instance);
}
@@ -504,7 +504,7 @@ static int xe_eu_stall_data_buf_read(struct xe_eu_stall_data_stream *stream,
/* Read pointer can overflow into one additional bit */
read_ptr &= (buf_size << 1) - 1;
read_ptr_reg = REG_FIELD_PREP(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, (read_ptr >> 6));
- read_ptr_reg = _MASKED_FIELD(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, read_ptr_reg);
+ read_ptr_reg = REG_MASKED_FIELD(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, read_ptr_reg);
xe_gt_mcr_unicast_write(gt, XEHPC_EUSTALL_REPORT1, read_ptr_reg, group, instance);
xecore_buf->read = read_ptr;
trace_xe_eu_stall_data_read(group, instance, read_ptr, write_ptr,
@@ -674,7 +674,7 @@ static int xe_eu_stall_stream_enable(struct xe_eu_stall_data_stream *stream)
if (XE_GT_WA(gt, 22016596838))
xe_gt_mcr_multicast_write(gt, ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DISABLE_DOP_GATING));
+ REG_MASKED_FIELD_ENABLE(DISABLE_DOP_GATING));
for_each_dss_steering(xecore, gt, group, instance) {
write_ptr_reg = xe_gt_mcr_unicast_read(gt, XEHPC_EUSTALL_REPORT, group, instance);
@@ -683,7 +683,7 @@ static int xe_eu_stall_stream_enable(struct xe_eu_stall_data_stream *stream)
clear_dropped_eviction_line_bit(gt, group, instance);
write_ptr = REG_FIELD_GET(XEHPC_EUSTALL_REPORT_WRITE_PTR_MASK, write_ptr_reg);
read_ptr_reg = REG_FIELD_PREP(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, write_ptr);
- read_ptr_reg = _MASKED_FIELD(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, read_ptr_reg);
+ read_ptr_reg = REG_MASKED_FIELD(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, read_ptr_reg);
/* Initialize the read pointer to the write pointer */
xe_gt_mcr_unicast_write(gt, XEHPC_EUSTALL_REPORT1, read_ptr_reg, group, instance);
write_ptr <<= 6;
@@ -695,10 +695,10 @@ static int xe_eu_stall_stream_enable(struct xe_eu_stall_data_stream *stream)
stream->data_drop.reported_to_user = false;
bitmap_zero(stream->data_drop.mask, XE_MAX_DSS_FUSE_BITS);
- reg_value = _MASKED_FIELD(EUSTALL_MOCS | EUSTALL_SAMPLE_RATE,
- REG_FIELD_PREP(EUSTALL_MOCS, gt->mocs.uc_index << 1) |
- REG_FIELD_PREP(EUSTALL_SAMPLE_RATE,
- stream->sampling_rate_mult));
+ reg_value = REG_MASKED_FIELD(EUSTALL_MOCS | EUSTALL_SAMPLE_RATE,
+ REG_FIELD_PREP(EUSTALL_MOCS, gt->mocs.uc_index << 1) |
+ REG_FIELD_PREP(EUSTALL_SAMPLE_RATE,
+ stream->sampling_rate_mult));
xe_gt_mcr_multicast_write(gt, XEHPC_EUSTALL_CTRL, reg_value);
/* GGTT addresses can never be > 32 bits */
xe_gt_mcr_multicast_write(gt, XEHPC_EUSTALL_BASE_UPPER, 0);
@@ -830,7 +830,7 @@ static int xe_eu_stall_disable_locked(struct xe_eu_stall_data_stream *stream)
if (XE_GT_WA(gt, 22016596838))
xe_gt_mcr_multicast_write(gt, ROW_CHICKEN2,
- _MASKED_BIT_DISABLE(DISABLE_DOP_GATING));
+ REG_MASKED_FIELD_DISABLE(DISABLE_DOP_GATING));
xe_force_wake_put(gt_to_fw(gt), stream->fw_ref);
xe_pm_runtime_put(gt_to_xe(gt));
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index 7e8a3a7db741..755a2bff5d7b 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -47,7 +47,7 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
struct xe_mmio *mmio = &gt->mmio;
struct xe_device *xe = gt_to_xe(gt);
u64 lrc_desc;
- u32 ring_mode = _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE);
+ u32 ring_mode = REG_MASKED_FIELD_ENABLE(GFX_DISABLE_LEGACY_MODE);
lrc_desc = xe_lrc_descriptor(lrc);
@@ -61,7 +61,7 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
if (hwe->class == XE_ENGINE_CLASS_COMPUTE)
xe_mmio_write32(mmio, RCU_MODE,
- _MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));
+ REG_MASKED_FIELD_ENABLE(RCU_MODE_CCS_ENABLE));
xe_lrc_write_ctx_reg(lrc, CTX_RING_TAIL, lrc->ring.tail);
lrc->ring.old_tail = lrc->ring.tail;
@@ -83,7 +83,7 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
xe_mmio_read32(mmio, RING_HWS_PGA(hwe->mmio_base));
if (xe_device_has_msix(gt_to_xe(hwe->gt)))
- ring_mode |= _MASKED_BIT_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
+ ring_mode |= REG_MASKED_FIELD_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
xe_mmio_write32(mmio, RING_MODE(hwe->mmio_base), ring_mode);
xe_mmio_write32(mmio, RING_EXECLIST_SQ_CONTENTS_LO(hwe->mmio_base),
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index ea3ad600d7c7..337baf0a6e87 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -327,21 +327,21 @@ void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
{
u32 ccs_mask =
xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);
- u32 ring_mode = _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE);
+ u32 ring_mode = REG_MASKED_FIELD_ENABLE(GFX_DISABLE_LEGACY_MODE);
if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask)
xe_mmio_write32(&hwe->gt->mmio, RCU_MODE,
- _MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));
+ REG_MASKED_FIELD_ENABLE(RCU_MODE_CCS_ENABLE));
xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
xe_hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
xe_bo_ggtt_addr(hwe->hwsp));
if (xe_device_has_msix(gt_to_xe(hwe->gt)))
- ring_mode |= _MASKED_BIT_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
+ ring_mode |= REG_MASKED_FIELD_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
xe_hw_engine_mmio_write32(hwe, RING_MODE(0), ring_mode);
xe_hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
- _MASKED_BIT_DISABLE(STOP_RING));
+ REG_MASKED_FIELD_DISABLE(STOP_RING));
xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
}
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 73a503d88217..aa26c71ae34f 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -642,12 +642,12 @@ static const u8 *reg_offsets(struct xe_device *xe, enum xe_engine_class class)
static void set_context_control(u32 *regs, struct xe_hw_engine *hwe)
{
- regs[CTX_CONTEXT_CONTROL] = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
- CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ regs[CTX_CONTEXT_CONTROL] = REG_MASKED_FIELD_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
+ CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
if (xe_gt_has_indirect_ring_state(hwe->gt))
regs[CTX_CONTEXT_CONTROL] |=
- _MASKED_BIT_ENABLE(CTX_CTRL_INDIRECT_RING_STATE_ENABLE);
+ REG_MASKED_FIELD_ENABLE(CTX_CTRL_INDIRECT_RING_STATE_ENABLE);
}
static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe)
@@ -1212,7 +1212,7 @@ static ssize_t setup_invalidate_state_cache_wa(struct xe_lrc *lrc,
*cmd++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1);
*cmd++ = CS_DEBUG_MODE2(0).addr;
- *cmd++ = _MASKED_BIT_ENABLE(INSTRUCTION_STATE_CACHE_INVALIDATE);
+ *cmd++ = REG_MASKED_FIELD_ENABLE(INSTRUCTION_STATE_CACHE_INVALIDATE);
return cmd - batch;
}
@@ -1515,12 +1515,12 @@ static int xe_lrc_ctx_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, struct
if (init_flags & XE_LRC_CREATE_RUNALONE)
xe_lrc_write_ctx_reg(lrc, CTX_CONTEXT_CONTROL,
xe_lrc_read_ctx_reg(lrc, CTX_CONTEXT_CONTROL) |
- _MASKED_BIT_ENABLE(CTX_CTRL_RUN_ALONE));
+ REG_MASKED_FIELD_ENABLE(CTX_CTRL_RUN_ALONE));
if (init_flags & XE_LRC_CREATE_PXP)
xe_lrc_write_ctx_reg(lrc, CTX_CONTEXT_CONTROL,
xe_lrc_read_ctx_reg(lrc, CTX_CONTEXT_CONTROL) |
- _MASKED_BIT_ENABLE(CTX_CTRL_PXP_ENABLE));
+ REG_MASKED_FIELD_ENABLE(CTX_CTRL_PXP_ENABLE));
lrc->ctx_timestamp = 0;
xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP, 0);
@@ -1551,7 +1551,7 @@ static int xe_lrc_ctx_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, struct
if (init_flags & XE_LRC_DISABLE_STATE_CACHE_PERF_FIX) {
state_cache_perf_fix[0] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1);
state_cache_perf_fix[1] = COMMON_SLICE_CHICKEN3.addr;
- state_cache_perf_fix[2] = _MASKED_BIT_ENABLE(DISABLE_STATE_CACHE_PERF_FIX);
+ state_cache_perf_fix[2] = REG_MASKED_FIELD_ENABLE(DISABLE_STATE_CACHE_PERF_FIX);
xe_lrc_write_ring(lrc, state_cache_perf_fix, sizeof(state_cache_perf_fix));
}
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index a1a05c68dc7d..78adb303b663 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -154,6 +154,15 @@ u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
return val;
}
+void xe_mmio_write8(struct xe_mmio *mmio, struct xe_reg reg, u8 val)
+{
+ u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
+
+ trace_xe_reg_rw(mmio, true, addr, val, sizeof(val));
+
+ writeb(val, mmio->regs + addr);
+}
+
u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
{
u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
index 41ae720acbc3..befe021f2215 100644
--- a/drivers/gpu/drm/xe/xe_mmio.h
+++ b/drivers/gpu/drm/xe/xe_mmio.h
@@ -17,6 +17,7 @@ int xe_mmio_probe_tiles(struct xe_device *xe);
void xe_mmio_init(struct xe_mmio *mmio, struct xe_tile *tile, void __iomem *ptr, u32 size);
u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg);
+void xe_mmio_write8(struct xe_mmio *mmio, struct xe_reg reg, u8 val);
u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg);
void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val);
u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg);
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index dcd393b0931a..c176a61febb2 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -758,8 +758,9 @@ static int xe_oa_configure_oar_context(struct xe_oa_stream *stream, bool enable)
},
{
RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
- _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
- enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0)
+ enable ?
+ REG_MASKED_FIELD_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE) :
+ REG_MASKED_FIELD_DISABLE(CTX_CTRL_OAC_CONTEXT_ENABLE)
},
};
@@ -782,9 +783,9 @@ static int xe_oa_configure_oac_context(struct xe_oa_stream *stream, bool enable)
},
{
RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
- _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
- enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0) |
- _MASKED_FIELD(CTX_CTRL_RUN_ALONE, enable ? CTX_CTRL_RUN_ALONE : 0),
+ enable ?
+ REG_MASKED_FIELD_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE | CTX_CTRL_RUN_ALONE) :
+ REG_MASKED_FIELD_DISABLE(CTX_CTRL_OAC_CONTEXT_ENABLE | CTX_CTRL_RUN_ALONE),
},
};
@@ -812,9 +813,10 @@ static int xe_oa_configure_oa_context(struct xe_oa_stream *stream, bool enable)
static u32 oag_configure_mmio_trigger(const struct xe_oa_stream *stream, bool enable)
{
- return _MASKED_FIELD(OAG_OA_DEBUG_DISABLE_MMIO_TRG,
- enable && stream && stream->sample ?
- 0 : OAG_OA_DEBUG_DISABLE_MMIO_TRG);
+ if (enable && stream && stream->sample)
+ return REG_MASKED_FIELD_DISABLE(OAG_OA_DEBUG_DISABLE_MMIO_TRG);
+ else
+ return REG_MASKED_FIELD_ENABLE(OAG_OA_DEBUG_DISABLE_MMIO_TRG);
}
static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
@@ -825,9 +827,9 @@ static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
/* Enable thread stall DOP gating and EU DOP gating. */
if (XE_GT_WA(stream->gt, 1508761755)) {
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN,
- _MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
+ REG_MASKED_FIELD_DISABLE(STALL_DOP_GATING_DISABLE));
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2,
- _MASKED_BIT_DISABLE(DISABLE_DOP_GATING));
+ REG_MASKED_FIELD_DISABLE(DISABLE_DOP_GATING));
}
xe_mmio_write32(mmio, __oa_regs(stream)->oa_debug,
@@ -1055,16 +1057,18 @@ exit:
static u32 oag_report_ctx_switches(const struct xe_oa_stream *stream)
{
/* If user didn't require OA reports, ask HW not to emit ctx switch reports */
- return _MASKED_FIELD(OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
- stream->sample ?
- 0 : OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
+ if (stream->sample)
+ return REG_MASKED_FIELD_DISABLE(OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
+ else
+ return REG_MASKED_FIELD_ENABLE(OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}
static u32 oag_buf_size_select(const struct xe_oa_stream *stream)
{
- return _MASKED_FIELD(OAG_OA_DEBUG_BUF_SIZE_SELECT,
- xe_bo_size(stream->oa_buffer.bo) > SZ_16M ?
- OAG_OA_DEBUG_BUF_SIZE_SELECT : 0);
+ if (xe_bo_size(stream->oa_buffer.bo) > SZ_16M)
+ return REG_MASKED_FIELD_ENABLE(OAG_OA_DEBUG_BUF_SIZE_SELECT);
+ else
+ return REG_MASKED_FIELD_DISABLE(OAG_OA_DEBUG_BUF_SIZE_SELECT);
}
static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
@@ -1079,9 +1083,9 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
*/
if (XE_GT_WA(stream->gt, 1508761755)) {
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN,
- _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
+ REG_MASKED_FIELD_ENABLE(STALL_DOP_GATING_DISABLE));
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DISABLE_DOP_GATING));
+ REG_MASKED_FIELD_ENABLE(DISABLE_DOP_GATING));
}
/* Disable clk ratio reports */
@@ -1096,7 +1100,7 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
OAG_OA_DEBUG_DISABLE_START_TRG_1_COUNT_QUAL;
xe_mmio_write32(mmio, __oa_regs(stream)->oa_debug,
- _MASKED_BIT_ENABLE(oa_debug) |
+ REG_MASKED_FIELD_ENABLE(oa_debug) |
oag_report_ctx_switches(stream) |
oag_buf_size_select(stream) |
oag_configure_mmio_trigger(stream, true));
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index 0d33c14ea0cf..dc66d0c7ee06 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -348,33 +348,3 @@ int xe_pcode_probe_early(struct xe_device *xe)
return xe_pcode_ready(xe, false);
}
ALLOW_ERROR_INJECTION(xe_pcode_probe_early, ERRNO); /* See xe_pci_probe */
-
-/* Helpers with drm device. These should only be called by the display side */
-#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
-
-int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1)
-{
- struct xe_device *xe = to_xe_device(drm);
- struct xe_tile *tile = xe_device_get_root_tile(xe);
-
- return xe_pcode_read(tile, mbox, val, val1);
-}
-
-int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms)
-{
- struct xe_device *xe = to_xe_device(drm);
- struct xe_tile *tile = xe_device_get_root_tile(xe);
-
- return xe_pcode_write_timeout(tile, mbox, val, timeout_ms);
-}
-
-int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
- u32 reply_mask, u32 reply, int timeout_base_ms)
-{
- struct xe_device *xe = to_xe_device(drm);
- struct xe_tile *tile = xe_device_get_root_tile(xe);
-
- return xe_pcode_request(tile, mbox, request, reply_mask, reply, timeout_base_ms);
-}
-
-#endif
diff --git a/drivers/gpu/drm/xe/xe_pcode.h b/drivers/gpu/drm/xe/xe_pcode.h
index a5584c1c75f9..490e4f269607 100644
--- a/drivers/gpu/drm/xe/xe_pcode.h
+++ b/drivers/gpu/drm/xe/xe_pcode.h
@@ -34,12 +34,4 @@ int xe_pcode_request(struct xe_tile *tile, u32 mbox, u32 request,
| FIELD_PREP(PCODE_MB_PARAM1, param1)\
| FIELD_PREP(PCODE_MB_PARAM2, param2))
-/* Helpers with drm device */
-int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1);
-int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms);
-#define intel_pcode_write(drm, mbox, val) \
- intel_pcode_write_timeout((drm), (mbox), (val), 1)
-int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
- u32 reply_mask, u32 reply, int timeout_base_ms);
-
#endif
diff --git a/drivers/gpu/drm/xe/xe_pxp.c b/drivers/gpu/drm/xe/xe_pxp.c
index d61446bf9c19..e2978e48f660 100644
--- a/drivers/gpu/drm/xe/xe_pxp.c
+++ b/drivers/gpu/drm/xe/xe_pxp.c
@@ -312,8 +312,8 @@ void xe_pxp_irq_handler(struct xe_device *xe, u16 iir)
static int kcr_pxp_set_status(const struct xe_pxp *pxp, bool enable)
{
- u32 val = enable ? _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
- _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
+ u32 val = enable ? REG_MASKED_FIELD_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
+ REG_MASKED_FIELD_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
CLASS(xe_force_wake, fw_ref)(gt_to_fw(pxp->gt), XE_FW_GT);
if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FW_GT))
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index d35bc4989144..9cebb2490245 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -881,7 +881,7 @@ static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags)
/* Start the DMA */
xe_mmio_write32(mmio, DMA_CTRL,
- _MASKED_BIT_ENABLE(dma_flags | START_DMA));
+ REG_MASKED_FIELD_ENABLE(dma_flags | START_DMA));
/* Wait for DMA to finish */
ret = xe_mmio_wait32(mmio, DMA_CTRL, START_DMA, 0, 100000, &dma_ctrl,
@@ -891,7 +891,7 @@ static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags)
xe_uc_fw_type_repr(uc_fw->type), dma_ctrl);
/* Disable the bits once DMA is over */
- xe_mmio_write32(mmio, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
+ xe_mmio_write32(mmio, DMA_CTRL, REG_MASKED_FIELD_DISABLE(dma_flags));
return ret;
}
diff --git a/include/drm/intel/display_parent_interface.h b/include/drm/intel/display_parent_interface.h
index ce946859a3a9..97ec94a2e749 100644
--- a/include/drm/intel/display_parent_interface.h
+++ b/include/drm/intel/display_parent_interface.h
@@ -9,19 +9,66 @@
struct dma_fence;
struct drm_crtc;
struct drm_device;
+struct drm_file;
struct drm_framebuffer;
struct drm_gem_object;
+struct drm_mode_fb_cmd2;
struct drm_plane_state;
struct drm_scanout_buffer;
struct i915_vma;
+struct intel_dpt;
+struct intel_dsb_buffer;
+struct intel_frontbuffer;
struct intel_hdcp_gsc_context;
struct intel_initial_plane_config;
struct intel_panic;
struct intel_stolen_node;
struct ref_tracker;
+struct seq_file;
+struct vm_area_struct;
/* Keep struct definitions sorted */
+struct intel_display_bo_interface {
+ bool (*is_tiled)(struct drm_gem_object *obj); /* Optional */
+ bool (*is_userptr)(struct drm_gem_object *obj); /* Optional */
+ bool (*is_shmem)(struct drm_gem_object *obj); /* Optional */
+ bool (*is_protected)(struct drm_gem_object *obj);
+ int (*key_check)(struct drm_gem_object *obj);
+ int (*fb_mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
+ int (*read_from_page)(struct drm_gem_object *obj, u64 offset, void *dst, int size);
+ void (*describe)(struct seq_file *m, struct drm_gem_object *obj); /* Optional */
+ int (*framebuffer_init)(struct drm_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd);
+ void (*framebuffer_fini)(struct drm_gem_object *obj);
+ struct drm_gem_object *(*framebuffer_lookup)(struct drm_device *drm,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *user_mode_cmd);
+};
+
+struct intel_display_dpt_interface {
+ struct intel_dpt *(*create)(struct drm_gem_object *obj, size_t size);
+ void (*destroy)(struct intel_dpt *dpt);
+ void (*suspend)(struct intel_dpt *dpt);
+ void (*resume)(struct intel_dpt *dpt);
+};
+
+struct intel_display_dsb_interface {
+ u32 (*ggtt_offset)(struct intel_dsb_buffer *dsb_buf);
+ void (*write)(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val);
+ u32 (*read)(struct intel_dsb_buffer *dsb_buf, u32 idx);
+ void (*fill)(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size);
+ struct intel_dsb_buffer *(*create)(struct drm_device *drm, size_t size);
+ void (*cleanup)(struct intel_dsb_buffer *dsb_buf);
+ void (*flush_map)(struct intel_dsb_buffer *dsb_buf);
+};
+
+struct intel_display_frontbuffer_interface {
+ struct intel_frontbuffer *(*get)(struct drm_gem_object *obj);
+ void (*ref)(struct intel_frontbuffer *front);
+ void (*put)(struct intel_frontbuffer *front);
+ void (*flush_for_display)(struct intel_frontbuffer *front);
+};
+
struct intel_display_hdcp_interface {
ssize_t (*gsc_msg_send)(struct intel_hdcp_gsc_context *gsc_context,
void *msg_in, size_t msg_in_len,
@@ -44,6 +91,35 @@ struct intel_display_irq_interface {
void (*synchronize)(struct drm_device *drm);
};
+struct intel_display_overlay_interface {
+ bool (*is_active)(struct drm_device *drm);
+
+ int (*overlay_on)(struct drm_device *drm,
+ u32 frontbuffer_bits);
+ int (*overlay_continue)(struct drm_device *drm,
+ struct i915_vma *vma,
+ bool load_polyphase_filter);
+ int (*overlay_off)(struct drm_device *drm);
+ int (*recover_from_interrupt)(struct drm_device *drm);
+ int (*release_old_vid)(struct drm_device *drm);
+
+ void (*reset)(struct drm_device *drm);
+
+ struct i915_vma *(*pin_fb)(struct drm_device *drm,
+ struct drm_gem_object *obj,
+ u32 *offset);
+ void (*unpin_fb)(struct drm_device *drm,
+ struct i915_vma *vma);
+
+ struct drm_gem_object *(*obj_lookup)(struct drm_device *drm,
+ struct drm_file *filp,
+ u32 handle);
+
+ void __iomem *(*setup)(struct drm_device *drm,
+ bool needs_physical);
+ void (*cleanup)(struct drm_device *drm);
+};
+
struct intel_display_panic_interface {
struct intel_panic *(*alloc)(void);
int (*setup)(struct intel_panic *panic, struct drm_scanout_buffer *sb);
@@ -55,6 +131,13 @@ struct intel_display_pc8_interface {
void (*unblock)(struct drm_device *drm);
};
+struct intel_display_pcode_interface {
+ int (*read)(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1);
+ int (*write)(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms);
+ int (*request)(struct drm_device *drm, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms);
+};
+
struct intel_display_rpm_interface {
struct ref_tracker *(*get)(const struct drm_device *drm);
struct ref_tracker *(*get_raw)(const struct drm_device *drm);
@@ -93,6 +176,10 @@ struct intel_display_stolen_interface {
void (*node_free)(const struct intel_stolen_node *node);
};
+struct intel_display_vma_interface {
+ int (*fence_id)(const struct i915_vma *vma);
+};
+
/**
* struct intel_display_parent_interface - services parent driver provides to display
*
@@ -106,6 +193,18 @@ struct intel_display_stolen_interface {
* check the optional pointers.
*/
struct intel_display_parent_interface {
+ /** @bo: BO interface */
+ const struct intel_display_bo_interface *bo;
+
+ /** @dpt: DPT interface. Optional. */
+ const struct intel_display_dpt_interface *dpt;
+
+ /** @dsb: DSB buffer interface */
+ const struct intel_display_dsb_interface *dsb;
+
+ /** @frontbuffer: Frontbuffer interface */
+ const struct intel_display_frontbuffer_interface *frontbuffer;
+
/** @hdcp: HDCP GSC interface */
const struct intel_display_hdcp_interface *hdcp;
@@ -118,9 +217,15 @@ struct intel_display_parent_interface {
/** @panic: Panic interface */
const struct intel_display_panic_interface *panic;
+ /** @overlay: Overlay. Optional. */
+ const struct intel_display_overlay_interface *overlay;
+
/** @pc8: PC8 interface. Optional. */
const struct intel_display_pc8_interface *pc8;
+ /** @pcode: Pcode interface */
+ const struct intel_display_pcode_interface *pcode;
+
/** @rpm: Runtime PM functions */
const struct intel_display_rpm_interface *rpm;
@@ -130,6 +235,9 @@ struct intel_display_parent_interface {
/** @stolen: Stolen memory. */
const struct intel_display_stolen_interface *stolen;
+ /** @vma: VMA interface. Optional. */
+ const struct intel_display_vma_interface *vma;
+
/* Generic independent functions */
struct {
/** @fence_priority_display: Set display priority. Optional. */
diff --git a/include/drm/intel/i915_drm.h b/include/drm/intel/i915_drm.h
index adff68538484..1fdaabed1470 100644
--- a/include/drm/intel/i915_drm.h
+++ b/include/drm/intel/i915_drm.h
@@ -39,46 +39,46 @@ bool i915_gpu_turbo_disable(void);
extern struct resource intel_graphics_stolen_res;
/*
- * The Bridge device's PCI config space has information about the
- * fb aperture size and the amount of pre-reserved memory.
- * This is all handled in the intel-gtt.ko module. i915.ko only
- * cares about the vga bit for the vga arbiter.
+ * The bridge device's (device 0) PCI config space has information
+ * about the fb aperture size and the amount of pre-reserved memory.
*/
-#define INTEL_GMCH_CTRL 0x52
-#define INTEL_GMCH_VGA_DISABLE (1 << 1)
+
+/* device 2 has a read-only mirror */
#define SNB_GMCH_CTRL 0x50
-#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
-#define SNB_GMCH_GGMS_MASK 0x3
-#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
-#define SNB_GMCH_GMS_MASK 0x1f
-#define BDW_GMCH_GGMS_SHIFT 6
-#define BDW_GMCH_GGMS_MASK 0x3
-#define BDW_GMCH_GMS_SHIFT 8
-#define BDW_GMCH_GMS_MASK 0xff
+#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
+#define SNB_GMCH_GGMS_MASK 0x3
+#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
+#define SNB_GMCH_GMS_MASK 0x1f
+#define BDW_GMCH_GGMS_SHIFT 6
+#define BDW_GMCH_GGMS_MASK 0x3
+#define BDW_GMCH_GMS_SHIFT 8
+#define BDW_GMCH_GMS_MASK 0xff
+/* device 2 has a read-only mirror from i85x/i865 onwards */
#define I830_GMCH_CTRL 0x52
+#define I830_GMCH_GMS_MASK (0x7 << 4)
+#define I830_GMCH_GMS_LOCAL (0x1 << 4)
+#define I830_GMCH_GMS_STOLEN_512 (0x2 << 4)
+#define I830_GMCH_GMS_STOLEN_1024 (0x3 << 4)
+#define I830_GMCH_GMS_STOLEN_8192 (0x4 << 4)
+#define I855_GMCH_GMS_MASK (0xF << 4)
+#define I855_GMCH_GMS_STOLEN_0M (0x0 << 4)
+#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
+#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
+#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
+#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
+#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
+#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
+#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
+#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
+#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
+#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
+#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
+#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
+#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
-#define I830_GMCH_GMS_MASK 0x70
-#define I830_GMCH_GMS_LOCAL 0x10
-#define I830_GMCH_GMS_STOLEN_512 0x20
-#define I830_GMCH_GMS_STOLEN_1024 0x30
-#define I830_GMCH_GMS_STOLEN_8192 0x40
-
-#define I855_GMCH_GMS_MASK 0xF0
-#define I855_GMCH_GMS_STOLEN_0M 0x0
-#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
-#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
-#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
-#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
-#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
-#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
-#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
-#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
-#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
-#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
-#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
-#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
-#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
+/* valid for both I830_GMCH_CTRL and SNB_GMCH_CTRL */
+#define INTEL_GMCH_VGA_DISABLE (1 << 1)
#define I830_DRB3 0x63
#define I85X_DRB3 0x43
@@ -87,12 +87,12 @@ extern struct resource intel_graphics_stolen_res;
#define I830_ESMRAMC 0x91
#define I845_ESMRAMC 0x9e
#define I85X_ESMRAMC 0x61
-#define TSEG_ENABLE (1 << 0)
-#define I830_TSEG_SIZE_512K (0 << 1)
-#define I830_TSEG_SIZE_1M (1 << 1)
-#define I845_TSEG_SIZE_MASK (3 << 1)
-#define I845_TSEG_SIZE_512K (2 << 1)
-#define I845_TSEG_SIZE_1M (3 << 1)
+#define TSEG_ENABLE (1 << 0)
+#define I830_TSEG_SIZE_512K (0 << 1)
+#define I830_TSEG_SIZE_1M (1 << 1)
+#define I845_TSEG_SIZE_MASK (3 << 1)
+#define I845_TSEG_SIZE_512K (2 << 1)
+#define I845_TSEG_SIZE_1M (3 << 1)
#define INTEL_BSM 0x5c
#define INTEL_GEN11_BSM_DW0 0xc0
diff --git a/include/drm/intel/intel_gmd_interrupt_regs.h b/include/drm/intel/intel_gmd_interrupt_regs.h
new file mode 100644
index 000000000000..ce66c4151e76
--- /dev/null
+++ b/include/drm/intel/intel_gmd_interrupt_regs.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef _INTEL_GMD_INTERRUPT_REGS_H_
+#define _INTEL_GMD_INTERRUPT_REGS_H_
+
+#define I915_PM_INTERRUPT (1 << 31)
+#define I915_ISP_INTERRUPT (1 << 22)
+#define I915_LPE_PIPE_B_INTERRUPT (1 << 21)
+#define I915_LPE_PIPE_A_INTERRUPT (1 << 20)
+#define I915_MIPIC_INTERRUPT (1 << 19)
+#define I915_MIPIA_INTERRUPT (1 << 18)
+#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 18)
+#define I915_DISPLAY_PORT_INTERRUPT (1 << 17)
+#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1 << 16)
+#define I915_MASTER_ERROR_INTERRUPT (1 << 15)
+#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1 << 14)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1 << 14) /* p-state */
+#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1 << 13)
+#define I915_HWB_OOM_INTERRUPT (1 << 13)
+#define I915_LPE_PIPE_C_INTERRUPT (1 << 12)
+#define I915_SYNC_STATUS_INTERRUPT (1 << 12)
+#define I915_MISC_INTERRUPT (1 << 11)
+#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1 << 11)
+#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT (1 << 10)
+#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1 << 10)
+#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT (1 << 9)
+#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1 << 9)
+#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT (1 << 8)
+#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1 << 8)
+#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1 << 7)
+#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1 << 6)
+#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1 << 5)
+#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1 << 4)
+#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT (1 << 3)
+#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT (1 << 2)
+#define I915_DEBUG_INTERRUPT (1 << 2)
+#define I915_WINVALID_INTERRUPT (1 << 1)
+#define I915_USER_INTERRUPT (1 << 1)
+#define I915_ASLE_INTERRUPT (1 << 0)
+#define I915_BSD_USER_INTERRUPT (1 << 25)
+
+#define GEN8_MASTER_IRQ _MMIO(0x44200)
+#define GEN8_MASTER_IRQ_CONTROL (1 << 31)
+#define GEN8_PCU_IRQ (1 << 30)
+#define GEN8_DE_PCH_IRQ (1 << 23)
+#define GEN8_DE_MISC_IRQ (1 << 22)
+#define GEN8_DE_PORT_IRQ (1 << 20)
+#define GEN8_DE_PIPE_C_IRQ (1 << 18)
+#define GEN8_DE_PIPE_B_IRQ (1 << 17)
+#define GEN8_DE_PIPE_A_IRQ (1 << 16)
+#define GEN8_DE_PIPE_IRQ(pipe) (1 << (16 + (pipe)))
+#define GEN8_GT_VECS_IRQ (1 << 6)
+#define GEN8_GT_GUC_IRQ (1 << 5)
+#define GEN8_GT_PM_IRQ (1 << 4)
+#define GEN8_GT_VCS1_IRQ (1 << 3) /* NB: VCS2 in bspec! */
+#define GEN8_GT_VCS0_IRQ (1 << 2) /* NB: VCS1 in bspec! */
+#define GEN8_GT_BCS_IRQ (1 << 1)
+#define GEN8_GT_RCS_IRQ (1 << 0)
+
+#define GEN11_GU_MISC_ISR _MMIO(0x444f0)
+#define GEN11_GU_MISC_IMR _MMIO(0x444f4)
+#define GEN11_GU_MISC_IIR _MMIO(0x444f8)
+#define GEN11_GU_MISC_IER _MMIO(0x444fc)
+#define GEN11_GU_MISC_GSE (1 << 27)
+
+#define GEN11_GU_MISC_IRQ_REGS I915_IRQ_REGS(GEN11_GU_MISC_IMR, \
+ GEN11_GU_MISC_IER, \
+ GEN11_GU_MISC_IIR)
+
+#define GEN11_GFX_MSTR_IRQ _MMIO(0x190010)
+#define GEN11_MASTER_IRQ (1 << 31)
+#define GEN11_PCU_IRQ (1 << 30)
+#define GEN11_GU_MISC_IRQ (1 << 29)
+#define GEN11_DISPLAY_IRQ (1 << 16)
+#define GEN11_GT_DW_IRQ(x) (1 << (x))
+#define GEN11_GT_DW1_IRQ (1 << 1)
+#define GEN11_GT_DW0_IRQ (1 << 0)
+
+#define SCPD0 _MMIO(0x209c) /* 915+ only */
+#define SCPD_FBC_IGNORE_3D (1 << 6)
+#define CSTATE_RENDER_CLOCK_GATE_DISABLE (1 << 5)
+
+#define VLV_IIR_RW _MMIO(VLV_DISPLAY_BASE + 0x2084)
+#define VLV_IER _MMIO(VLV_DISPLAY_BASE + 0x20a0)
+#define VLV_IIR _MMIO(VLV_DISPLAY_BASE + 0x20a4)
+#define VLV_IMR _MMIO(VLV_DISPLAY_BASE + 0x20a8)
+#define VLV_ISR _MMIO(VLV_DISPLAY_BASE + 0x20ac)
+#define VLV_PCBR _MMIO(VLV_DISPLAY_BASE + 0x2120)
+#define VLV_PCBR_ADDR_SHIFT 12
+
+#endif
diff --git a/include/drm/intel/intel_gmd_misc_regs.h b/include/drm/intel/intel_gmd_misc_regs.h
new file mode 100644
index 000000000000..763d7711f21c
--- /dev/null
+++ b/include/drm/intel/intel_gmd_misc_regs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef _INTEL_GMD_MISC_REGS_H_
+#define _INTEL_GMD_MISC_REGS_H_
+
+#define DISP_ARB_CTL _MMIO(0x45000)
+#define DISP_FBC_MEMORY_WAKE REG_BIT(31)
+#define DISP_TILE_SURFACE_SWIZZLING REG_BIT(13)
+#define DISP_FBC_WM_DIS REG_BIT(15)
+
+#define INSTPM _MMIO(0x20c0)
+#define INSTPM_SELF_EN (1 << 12) /* 915GM only */
+#define INSTPM_AGPBUSY_INT_EN (1 << 11) /* gen3: when disabled, pending interrupts
+ will not assert AGPBUSY# and will only
+ be delivered when out of C3. */
+#define INSTPM_FORCE_ORDERING (1 << 7) /* GEN6+ */
+#define INSTPM_TLB_INVALIDATE (1 << 9)
+#define INSTPM_SYNC_FLUSH (1 << 5)
+
+#endif
diff --git a/include/drm/intel/intel_pcode_regs.h b/include/drm/intel/intel_pcode_regs.h
new file mode 100644
index 000000000000..db989ee7c488
--- /dev/null
+++ b/include/drm/intel/intel_pcode_regs.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef _INTEL_PCODE_REGS_H_
+#define _INTEL_PCODE_REGS_H_
+
+#define GEN6_PCODE_MAILBOX _MMIO(0x138124)
+#define GEN6_PCODE_READY (1 << 31)
+#define GEN6_PCODE_MB_PARAM2 REG_GENMASK(23, 16)
+#define GEN6_PCODE_MB_PARAM1 REG_GENMASK(15, 8)
+#define GEN6_PCODE_MB_COMMAND REG_GENMASK(7, 0)
+#define GEN6_PCODE_ERROR_MASK 0xFF
+#define GEN6_PCODE_SUCCESS 0x0
+#define GEN6_PCODE_ILLEGAL_CMD 0x1
+#define GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x2
+#define GEN6_PCODE_TIMEOUT 0x3
+#define GEN6_PCODE_UNIMPLEMENTED_CMD 0xFF
+#define GEN7_PCODE_TIMEOUT 0x2
+#define GEN7_PCODE_ILLEGAL_DATA 0x3
+#define GEN11_PCODE_ILLEGAL_SUBCOMMAND 0x4
+#define GEN11_PCODE_LOCKED 0x6
+#define GEN11_PCODE_REJECTED 0x11
+#define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
+#define GEN6_PCODE_WRITE_RC6VIDS 0x4
+#define GEN6_PCODE_READ_RC6VIDS 0x5
+#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
+#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
+#define BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ 0x18
+#define GEN9_PCODE_READ_MEM_LATENCY 0x6
+#define GEN9_MEM_LATENCY_LEVEL_3_7_MASK REG_GENMASK(31, 24)
+#define GEN9_MEM_LATENCY_LEVEL_2_6_MASK REG_GENMASK(23, 16)
+#define GEN9_MEM_LATENCY_LEVEL_1_5_MASK REG_GENMASK(15, 8)
+#define GEN9_MEM_LATENCY_LEVEL_0_4_MASK REG_GENMASK(7, 0)
+#define SKL_PCODE_LOAD_HDCP_KEYS 0x5
+#define SKL_PCODE_CDCLK_CONTROL 0x7
+#define SKL_CDCLK_PREPARE_FOR_CHANGE 0x3
+#define SKL_CDCLK_READY_FOR_CHANGE 0x1
+#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
+#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
+#define GEN6_READ_OC_PARAMS 0xc
+#define ICL_PCODE_MEM_SUBSYSYSTEM_INFO 0xd
+#define ICL_PCODE_MEM_SS_READ_GLOBAL_INFO (0x0 << 8)
+#define ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point) (((point) << 16) | (0x1 << 8))
+#define ADL_PCODE_MEM_SS_READ_PSF_GV_INFO ((0) | (0x2 << 8))
+#define DISPLAY_TO_PCODE_CDCLK_MAX 0x28D
+#define DISPLAY_TO_PCODE_VOLTAGE_MASK REG_GENMASK(1, 0)
+#define DISPLAY_TO_PCODE_VOLTAGE_MAX DISPLAY_TO_PCODE_VOLTAGE_MASK
+#define DISPLAY_TO_PCODE_CDCLK_VALID REG_BIT(27)
+#define DISPLAY_TO_PCODE_PIPE_COUNT_VALID REG_BIT(31)
+#define DISPLAY_TO_PCODE_CDCLK_MASK REG_GENMASK(25, 16)
+#define DISPLAY_TO_PCODE_PIPE_COUNT_MASK REG_GENMASK(30, 28)
+#define DISPLAY_TO_PCODE_CDCLK(x) REG_FIELD_PREP(DISPLAY_TO_PCODE_CDCLK_MASK, (x))
+#define DISPLAY_TO_PCODE_PIPE_COUNT(x) REG_FIELD_PREP(DISPLAY_TO_PCODE_PIPE_COUNT_MASK, (x))
+#define DISPLAY_TO_PCODE_VOLTAGE(x) REG_FIELD_PREP(DISPLAY_TO_PCODE_VOLTAGE_MASK, (x))
+#define DISPLAY_TO_PCODE_UPDATE_MASK(cdclk, num_pipes, voltage_level) \
+ ((DISPLAY_TO_PCODE_CDCLK(cdclk)) | \
+ (DISPLAY_TO_PCODE_PIPE_COUNT(num_pipes)) | \
+ (DISPLAY_TO_PCODE_VOLTAGE(voltage_level)))
+#define ICL_PCODE_SAGV_DE_MEM_SS_CONFIG 0xe
+#define ICL_PCODE_REP_QGV_MASK REG_GENMASK(1, 0)
+#define ICL_PCODE_REP_QGV_SAFE REG_FIELD_PREP(ICL_PCODE_REP_QGV_MASK, 0)
+#define ICL_PCODE_REP_QGV_POLL REG_FIELD_PREP(ICL_PCODE_REP_QGV_MASK, 1)
+#define ICL_PCODE_REP_QGV_REJECTED REG_FIELD_PREP(ICL_PCODE_REP_QGV_MASK, 2)
+#define ADLS_PCODE_REP_PSF_MASK REG_GENMASK(3, 2)
+#define ADLS_PCODE_REP_PSF_SAFE REG_FIELD_PREP(ADLS_PCODE_REP_PSF_MASK, 0)
+#define ADLS_PCODE_REP_PSF_POLL REG_FIELD_PREP(ADLS_PCODE_REP_PSF_MASK, 1)
+#define ADLS_PCODE_REP_PSF_REJECTED REG_FIELD_PREP(ADLS_PCODE_REP_PSF_MASK, 2)
+#define ICL_PCODE_REQ_QGV_PT_MASK REG_GENMASK(7, 0)
+#define ICL_PCODE_REQ_QGV_PT(x) REG_FIELD_PREP(ICL_PCODE_REQ_QGV_PT_MASK, (x))
+#define ADLS_PCODE_REQ_PSF_PT_MASK REG_GENMASK(10, 8)
+#define ADLS_PCODE_REQ_PSF_PT(x) REG_FIELD_PREP(ADLS_PCODE_REQ_PSF_PT_MASK, (x))
+#define GEN6_PCODE_READ_D_COMP 0x10
+#define GEN6_PCODE_WRITE_D_COMP 0x11
+#define ICL_PCODE_EXIT_TCCOLD 0x12
+#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
+#define DISPLAY_IPS_CONTROL 0x19
+#define TGL_PCODE_TCCOLD 0x26
+#define TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED REG_BIT(0)
+#define TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ 0
+#define TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ REG_BIT(0)
+/* See also IPS_CTL */
+#define IPS_PCODE_CONTROL (1 << 30)
+#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
+#define GEN9_PCODE_SAGV_CONTROL 0x21
+#define GEN9_SAGV_DISABLE 0x0
+#define GEN9_SAGV_IS_DISABLED 0x1
+#define GEN9_SAGV_ENABLE 0x3
+#define DG1_PCODE_STATUS 0x7E
+#define DG1_UNCORE_GET_INIT_STATUS 0x0
+#define DG1_UNCORE_INIT_STATUS_COMPLETE 0x1
+#define PCODE_POWER_SETUP 0x7C
+#define POWER_SETUP_SUBCOMMAND_READ_I1 0x4
+#define POWER_SETUP_SUBCOMMAND_WRITE_I1 0x5
+#define POWER_SETUP_I1_WATTS REG_BIT(31)
+#define POWER_SETUP_I1_SHIFT 6 /* 10.6 fixed point format */
+#define POWER_SETUP_I1_DATA_MASK REG_GENMASK(15, 0)
+#define POWER_SETUP_SUBCOMMAND_G8_ENABLE 0x6
+#define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23
+#define XEHP_PCODE_FREQUENCY_CONFIG 0x6e /* pvc */
+/* XEHP_PCODE_FREQUENCY_CONFIG sub-commands (param1) */
+#define PCODE_MBOX_FC_SC_READ_FUSED_P0 0x0
+#define PCODE_MBOX_FC_SC_READ_FUSED_PN 0x1
+/* PCODE_MBOX_DOMAIN_* - mailbox domain IDs */
+/* XEHP_PCODE_FREQUENCY_CONFIG param2 */
+#define PCODE_MBOX_DOMAIN_NONE 0x0
+#define PCODE_MBOX_DOMAIN_MEDIAFF 0x3
+
+#endif
diff --git a/include/drm/intel/pick.h b/include/drm/intel/pick.h
new file mode 100644
index 000000000000..d976fab8f270
--- /dev/null
+++ b/include/drm/intel/pick.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef _PICK_H_
+#define _PICK_H_
+
+/*
+ * Given the first two numbers __a and __b of arbitrarily many evenly spaced
+ * numbers, pick the 0-based __index'th value.
+ *
+ * Always prefer this over _PICK() if the numbers are evenly spaced.
+ */
+#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))
+
+/*
+ * Like _PICK_EVEN(), but supports 2 ranges of evenly spaced address offsets.
+ * @__c_index corresponds to the index in which the second range starts to be
+ * used. Using math interval notation, the first range is used for indexes [ 0,
+ * @__c_index), while the second range is used for [ @__c_index, ... ). Example:
+ *
+ * #define _FOO_A 0xf000
+ * #define _FOO_B 0xf004
+ * #define _FOO_C 0xf008
+ * #define _SUPER_FOO_A 0xa000
+ * #define _SUPER_FOO_B 0xa100
+ * #define FOO(x) _MMIO(_PICK_EVEN_2RANGES(x, 3, \
+ * _FOO_A, _FOO_B, \
+ * _SUPER_FOO_A, _SUPER_FOO_B))
+ *
+ * This expands to:
+ * 0: 0xf000,
+ * 1: 0xf004,
+ * 2: 0xf008,
+ * 3: 0xa000,
+ * 4: 0xa100,
+ * 5: 0xa200,
+ * ...
+ */
+#define _PICK_EVEN_2RANGES(__index, __c_index, __a, __b, __c, __d) \
+ (BUILD_BUG_ON_ZERO(!__is_constexpr(__c_index)) + \
+ ((__index) < (__c_index) ? _PICK_EVEN(__index, __a, __b) : \
+ _PICK_EVEN((__index) - (__c_index), __c, __d)))
+
+/*
+ * Given the arbitrary numbers in varargs, pick the 0-based __index'th number.
+ *
+ * Always prefer _PICK_EVEN() over this if the numbers are evenly spaced.
+ */
+#define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])
+
+#endif
diff --git a/include/drm/intel/reg_bits.h b/include/drm/intel/reg_bits.h
new file mode 100644
index 000000000000..2a9066e1d808
--- /dev/null
+++ b/include/drm/intel/reg_bits.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef _REG_BITS_H_
+#define _REG_BITS_H_
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
+/*
+ * Wrappers over the generic fixed width BIT_U*() and GENMASK_U*()
+ * implementations, for compatibility reasons with previous implementation.
+ */
+#define REG_GENMASK(high, low) GENMASK_U32(high, low)
+#define REG_GENMASK64(high, low) GENMASK_U64(high, low)
+#define REG_GENMASK16(high, low) GENMASK_U16(high, low)
+#define REG_GENMASK8(high, low) GENMASK_U8(high, low)
+
+#define REG_BIT(n) BIT_U32(n)
+#define REG_BIT64(n) BIT_U64(n)
+#define REG_BIT16(n) BIT_U16(n)
+#define REG_BIT8(n) BIT_U8(n)
+
+/*
+ * Local integer constant expression version of is_power_of_2().
+ */
+#define IS_POWER_OF_2(__x) ((__x) && (((__x) & ((__x) - 1)) == 0))
+
+/**
+ * REG_FIELD_PREP8() - Prepare a u8 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to put in the field
+ *
+ * Local copy of FIELD_PREP() to generate an integer constant expression, force
+ * u8 and for consistency with REG_FIELD_GET8(), REG_BIT8() and REG_GENMASK8().
+ *
+ * @return: @__val masked and shifted into the field defined by @__mask.
+ */
+#define REG_FIELD_PREP8(__mask, __val) \
+ ((u8)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \
+ BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \
+ BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U8_MAX) + \
+ BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
+ BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
+
+/**
+ * REG_FIELD_PREP16() - Prepare a u16 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to put in the field
+ *
+ * Local copy of FIELD_PREP() to generate an integer constant
+ * expression, force u16 and for consistency with
+ * REG_FIELD_GET16(), REG_BIT16() and REG_GENMASK16().
+ *
+ * @return: @__val masked and shifted into the field defined by @__mask.
+ */
+#define REG_FIELD_PREP16(__mask, __val) \
+ ((u16)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \
+ BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \
+ BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U16_MAX) + \
+ BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
+ BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
+
+/**
+ * REG_FIELD_PREP() - Prepare a u32 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to put in the field
+ *
+ * Local copy of FIELD_PREP() to generate an integer constant expression, force
+ * u32 and for consistency with REG_FIELD_GET(), REG_BIT() and REG_GENMASK().
+ *
+ * @return: @__val masked and shifted into the field defined by @__mask.
+ */
+#define REG_FIELD_PREP(__mask, __val) \
+ ((u32)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \
+ BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \
+ BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U32_MAX) + \
+ BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
+ BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
+
+/**
+ * REG_FIELD_GET8() - Extract a u8 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to extract the bitfield value from
+ *
+ * Local wrapper for FIELD_GET() to force u8 and for consistency with
+ * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK().
+ *
+ * @return: Masked and shifted value of the field defined by @__mask in @__val.
+ */
+#define REG_FIELD_GET8(__mask, __val) ((u8)FIELD_GET(__mask, __val))
+
+/**
+ * REG_FIELD_GET() - Extract a u32 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to extract the bitfield value from
+ *
+ * Local wrapper for FIELD_GET() to force u32 and for consistency with
+ * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK().
+ *
+ * @return: Masked and shifted value of the field defined by @__mask in @__val.
+ */
+#define REG_FIELD_GET(__mask, __val) ((u32)FIELD_GET(__mask, __val))
+
+/**
+ * REG_FIELD_GET64() - Extract a u64 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to extract the bitfield value from
+ *
+ * Local wrapper for FIELD_GET() to force u64 and for consistency with
+ * REG_GENMASK64().
+ *
+ * @return: Masked and shifted value of the field defined by @__mask in @__val.
+ */
+#define REG_FIELD_GET64(__mask, __val) ((u64)FIELD_GET(__mask, __val))
+
+/**
+ * REG_FIELD_MAX() - produce the maximum value representable by a field
+ * @__mask: shifted mask defining the field's length and position
+ *
+ * Local wrapper for FIELD_MAX() to return the maximum bit value that can
+ * be held in the field specified by @__mask, cast to u32 for consistency
+ * with other macros.
+ */
+#define REG_FIELD_MAX(__mask) ((u32)FIELD_MAX(__mask))
+
+#define REG_MASKED_FIELD(mask, value) \
+ (BUILD_BUG_ON_ZERO(__builtin_choose_expr(__builtin_constant_p(mask), (mask) & 0xffff0000, 0)) + \
+ BUILD_BUG_ON_ZERO(__builtin_choose_expr(__builtin_constant_p(value), (value) & 0xffff0000, 0)) + \
+ BUILD_BUG_ON_ZERO(__builtin_choose_expr(__builtin_constant_p(mask) && __builtin_constant_p(value), (value) & ~(mask), 0)) + \
+ ((mask) << 16 | (value)))
+
+#define REG_MASKED_FIELD_ENABLE(a) \
+ (__builtin_choose_expr(__builtin_constant_p(a), REG_MASKED_FIELD((a), (a)), ({ typeof(a) _a = (a); REG_MASKED_FIELD(_a, _a); })))
+
+#define REG_MASKED_FIELD_DISABLE(a) \
+ (REG_MASKED_FIELD((a), 0))
+
+#endif
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
index bdd2e0652bc3..53edd69acb9b 100644
--- a/include/linux/iopoll.h
+++ b/include/linux/iopoll.h
@@ -159,7 +159,7 @@
*
* This macro does not rely on timekeeping. Hence it is safe to call even when
* timekeeping is suspended, at the expense of an underestimation of wall clock
- * time, which is rather minimal with a non-zero delay_us.
+ * time, which is rather minimal with a non-zero @delay_us.
*
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
@@ -167,9 +167,9 @@
* Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
* case, the last read value at @args is stored in @val.
*/
-#define read_poll_timeout_atomic(op, val, cond, sleep_us, timeout_us, \
- sleep_before_read, args...) \
- poll_timeout_us_atomic((val) = op(args), cond, sleep_us, timeout_us, sleep_before_read)
+#define read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, \
+ delay_before_read, args...) \
+ poll_timeout_us_atomic((val) = op(args), cond, delay_us, timeout_us, delay_before_read)
/**
* readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
diff --git a/include/video/vga.h b/include/video/vga.h
index 468764d6727a..2f13c371800b 100644
--- a/include/video/vga.h
+++ b/include/video/vga.h
@@ -46,6 +46,7 @@
#define VGA_MIS_R 0x3CC /* Misc Output Read Register */
#define VGA_MIS_W 0x3C2 /* Misc Output Write Register */
#define VGA_FTC_R 0x3CA /* Feature Control Read Register */
+#define VGA_IS0_R 0x3C2 /* Input Status Register 0 */
#define VGA_IS1_RC 0x3DA /* Input Status Register 1 - color emulation */
#define VGA_IS1_RM 0x3BA /* Input Status Register 1 - mono emulation */
#define VGA_PEL_D 0x3C9 /* PEL Data Register */