Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm')
9 files changed, 342 insertions, 177 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 74ad0d1240fe..bae83a129b5f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -155,6 +155,9 @@ MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB); #define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB); +#define FIRMWARE_DCN_36_DMUB "amdgpu/dcn_3_6_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DCN_36_DMUB); + #define FIRMWARE_DCN_401_DMUB "amdgpu/dcn_4_0_1_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB); @@ -179,6 +182,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev); static void amdgpu_dm_fini(struct amdgpu_device *adev); static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector); static void reset_freesync_config_for_crtc(struct dm_crtc_state *new_crtc_state); +static struct amdgpu_i2c_adapter * +create_i2c(struct ddc_service *ddc_service, bool oem); static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link) { @@ -320,7 +325,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, return 0; } -static bool dm_is_idle(void *handle) +static bool dm_is_idle(struct amdgpu_ip_block *ip_block) { /* XXX todo */ return true; @@ -1271,6 +1276,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) case IP_VERSION(3, 1, 4): case IP_VERSION(3, 5, 0): case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 6, 0): case IP_VERSION(4, 0, 1): hw_params.dpia_supported = true; hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia; @@ -1282,6 +1288,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(3, 5, 0): case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 6, 0): hw_params.ips_sequential_ono = adev->external_rev_id > 0x10; break; default: @@ -1745,7 +1752,7 @@ static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_ } if (quirk_entries.support_edp0_on_dp1) { init_data->flags.support_edp0_on_dp1 = true; - drm_info(dev, "aux_hpd_discon_quirk attached\n"); + drm_info(dev, "support_edp0_on_dp1 attached\n"); } } @@ -1891,6 +1898,7 @@ static enum dmub_ips_disable_type dm_get_default_ips_mode( switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(3, 5, 0): + case IP_VERSION(3, 6, 0): /* * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to * cause a hard hang. A fix exists for newer PMFW. 
@@ -2027,7 +2035,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_device_seamless_boot_supported(adev)) { init_data.flags.seamless_boot_edp_requested = true; init_data.flags.allow_seamless_boot_optimization = true; - DRM_INFO("Seamless boot condition check passed\n"); + drm_dbg(adev->dm.ddev, "Seamless boot requested\n"); } init_data.flags.enable_mipi_converter_optimization = true; @@ -2394,6 +2402,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev) case IP_VERSION(3, 2, 1): case IP_VERSION(3, 5, 0): case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 6, 0): case IP_VERSION(4, 0, 1): return 0; default: @@ -2519,6 +2528,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) case IP_VERSION(3, 5, 1): dmub_asic = DMUB_ASIC_DCN35; break; + case IP_VERSION(3, 6, 0): + dmub_asic = DMUB_ASIC_DCN36; + break; case IP_VERSION(4, 0, 1): dmub_asic = DMUB_ASIC_DCN401; break; @@ -2952,6 +2964,33 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) return 0; } +static int dm_oem_i2c_hw_init(struct amdgpu_device *adev) +{ + struct amdgpu_display_manager *dm = &adev->dm; + struct amdgpu_i2c_adapter *oem_i2c; + struct ddc_service *oem_ddc_service; + int r; + + oem_ddc_service = dc_get_oem_i2c_device(adev->dm.dc); + if (oem_ddc_service) { + oem_i2c = create_i2c(oem_ddc_service, true); + if (!oem_i2c) { + dev_info(adev->dev, "Failed to create oem i2c adapter data\n"); + return -ENOMEM; + } + + r = i2c_add_adapter(&oem_i2c->base); + if (r) { + dev_info(adev->dev, "Failed to register oem i2c\n"); + kfree(oem_i2c); + return r; + } + dm->oem_i2c = oem_i2c; + } + + return 0; +} + /** * dm_hw_init() - Initialize DC device * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. @@ -2983,6 +3022,10 @@ static int dm_hw_init(struct amdgpu_ip_block *ip_block) return r; amdgpu_dm_hpd_init(adev); + r = dm_oem_i2c_hw_init(adev); + if (r) + dev_info(adev->dev, "Failed to add OEM i2c bus\n"); + return 0; } @@ -2998,6 +3041,8 @@ static int dm_hw_fini(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; + kfree(adev->dm.oem_i2c); + amdgpu_dm_hpd_fini(adev); amdgpu_dm_irq_fini(adev); @@ -3045,10 +3090,11 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, } +DEFINE_FREE(state_release, struct dc_state *, if (_T) dc_state_release(_T)) + static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) { - struct dc_state *context = NULL; - enum dc_status res = DC_ERROR_UNEXPECTED; + struct dc_state *context __free(state_release) = NULL; int i; struct dc_stream_state *del_streams[MAX_PIPES]; int del_streams_count = 0; @@ -3058,7 +3104,7 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) context = dc_state_create_current_copy(dc); if (context == NULL) - goto context_alloc_fail; + return DC_ERROR_UNEXPECTED; /* First remove from context all streams */ for (i = 0; i < context->stream_count; i++) { @@ -3069,25 +3115,20 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) /* Remove all planes for removed streams and then remove the streams */ for (i = 0; i < del_streams_count; i++) { - if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) { - res = DC_FAIL_DETACH_SURFACES; - goto fail; - } + enum dc_status res; + + if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) + return DC_FAIL_DETACH_SURFACES; res = dc_state_remove_stream(dc, context, del_streams[i]); if (res != DC_OK) - goto fail; + return res; } params.streams = context->streams; params.stream_count 
= context->stream_count; - res = dc_commit_streams(dc, &params); - -fail: - dc_state_release(context); -context_alloc_fail: - return res; + return dc_commit_streams(dc, &params); } static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) @@ -3100,13 +3141,29 @@ static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) } } +static int dm_prepare_suspend(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + + if (amdgpu_in_reset(adev)) + return 0; + + WARN_ON(adev->dm.cached_state); + adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); + if (IS_ERR(adev->dm.cached_state)) + return PTR_ERR(adev->dm.cached_state); + + return 0; +} + static int dm_suspend(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; struct amdgpu_display_manager *dm = &adev->dm; - int ret = 0; if (amdgpu_in_reset(adev)) { + enum dc_status res; + mutex_lock(&dm->dc_lock); dc_allow_idle_optimizations(adev->dm.dc, false); @@ -3116,19 +3173,24 @@ static int dm_suspend(struct amdgpu_ip_block *ip_block) if (dm->cached_dc_state) dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); - amdgpu_dm_commit_zero_streams(dm->dc); + res = amdgpu_dm_commit_zero_streams(dm->dc); + if (res != DC_OK) { + drm_err(adev_to_drm(adev), "Failed to commit zero streams: %d\n", res); + return -EINVAL; + } amdgpu_dm_irq_suspend(adev); hpd_rx_irq_work_suspend(dm); - return ret; + return 0; } - WARN_ON(adev->dm.cached_state); - adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); - if (IS_ERR(adev->dm.cached_state)) - return PTR_ERR(adev->dm.cached_state); + if (!adev->dm.cached_state) { + adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); + if (IS_ERR(adev->dm.cached_state)) + return PTR_ERR(adev->dm.cached_state); + } s3_handle_hdmi_cec(adev_to_drm(adev), true); @@ -3259,14 +3321,14 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state, struct dc_scaling_info scaling_infos[MAX_SURFACES]; struct dc_flip_addrs flip_addrs[MAX_SURFACES]; struct dc_stream_update stream_update; - } *bundle; + } *bundle __free(kfree); int k, m; bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); if (!bundle) { drm_err(dm->ddev, "Failed to allocate update bundle\n"); - goto cleanup; + return; } for (k = 0; k < dc_state->stream_count; k++) { @@ -3286,9 +3348,24 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state, &bundle->stream_update, bundle->surface_updates); } +} -cleanup: - kfree(bundle); +static void apply_delay_after_dpcd_poweroff(struct amdgpu_device *adev, + struct dc_sink *sink) +{ + struct dc_panel_patch *ppatch = NULL; + + if (!sink) + return; + + ppatch = &sink->edid_caps.panel_patch; + if (ppatch->wait_after_dpcd_poweroff_ms) { + msleep(ppatch->wait_after_dpcd_poweroff_ms); + drm_dbg_driver(adev_to_drm(adev), + "%s: adding a %ds delay as w/a for panel\n", + __func__, + ppatch->wait_after_dpcd_poweroff_ms / 1000); + } } static int dm_resume(struct amdgpu_ip_block *ip_block) @@ -3337,7 +3414,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) r = dm_dmub_hw_init(adev); if (r) - DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); + drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r); dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); @@ -3423,6 +3500,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) /* Do detection*/ drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) { + bool ret; if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) continue; @@ -3439,17 +3517,20 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) if (aconnector->mst_root) continue; - mutex_lock(&aconnector->hpd_lock); + guard(mutex)(&aconnector->hpd_lock); if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) - DRM_ERROR("KMS: Failed to detect connector\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); if (aconnector->base.force && new_connection_type == dc_connection_none) { emulated_link_detect(aconnector->dc_link); } else { - mutex_lock(&dm->dc_lock); + guard(mutex)(&dm->dc_lock); dc_exit_ips_for_hw_access(dm->dc); - dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4); - mutex_unlock(&dm->dc_lock); + ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4); + if (ret) { + /* w/a delay for certain panels */ + apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink); + } } if (aconnector->fake_enable && aconnector->dc_link->local_sink) @@ -3459,7 +3540,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; amdgpu_dm_update_connector_after_detect(aconnector); - mutex_unlock(&aconnector->hpd_lock); } drm_connector_list_iter_end(&iter); @@ -3542,6 +3622,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = { .early_fini = amdgpu_dm_early_fini, .hw_init = dm_hw_init, .hw_fini = dm_hw_fini, + .prepare_suspend = dm_prepare_suspend, .suspend = dm_suspend, .resume = dm_resume, .is_idle = dm_is_idle, @@ -3628,12 +3709,14 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) caps->min_input_signal = min_input_signal_override; } +DEFINE_FREE(sink_release, struct dc_sink *, if (_T) dc_sink_release(_T)) + void amdgpu_dm_update_connector_after_detect( struct amdgpu_dm_connector *aconnector) { struct drm_connector *connector = &aconnector->base; + struct dc_sink *sink __free(sink_release) = NULL; struct drm_device *dev = connector->dev; - struct dc_sink *sink; /* MST handled by drm_mst framework */ if (aconnector->mst_mgr.mst_state == true) @@ -3655,7 +3738,7 @@ void amdgpu_dm_update_connector_after_detect( * For S3 resume with headless use eml_sink to fake stream * because on resume connector->sink is set to NULL */ - mutex_lock(&dev->mode_config.mutex); + guard(mutex)(&dev->mode_config.mutex); if (sink) { if (aconnector->dc_sink) { @@ -3680,10 +3763,6 @@ void amdgpu_dm_update_connector_after_detect( } } - mutex_unlock(&dev->mode_config.mutex); - - if (sink) - dc_sink_release(sink); return; } @@ -3691,10 +3770,8 @@ void amdgpu_dm_update_connector_after_detect( * TODO: temporary guard to look for proper fix * if this sink is MST sink, we should not do anything */ - if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - dc_sink_release(sink); + if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) return; - } if (aconnector->dc_sink == sink) { /* @@ -3703,15 +3780,13 @@ void amdgpu_dm_update_connector_after_detect( */ drm_dbg_kms(dev, "DCHPD: connector_id=%d: dc_sink didn't change.\n", aconnector->connector_id); - if (sink) - dc_sink_release(sink); return; } drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", aconnector->connector_id, aconnector->dc_sink, sink); - mutex_lock(&dev->mode_config.mutex); + guard(mutex)(&dev->mode_config.mutex); /* * 1. 
Update status of the drm connector @@ -3773,12 +3848,7 @@ void amdgpu_dm_update_connector_after_detect( connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; } - mutex_unlock(&dev->mode_config.mutex); - update_subconnector_property(aconnector); - - if (sink) - dc_sink_release(sink); } static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) @@ -3798,7 +3868,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) * In case of failure or MST no need to update connector status or notify the OS * since (for MST case) MST does this in its own context. */ - mutex_lock(&aconnector->hpd_lock); + guard(mutex)(&aconnector->hpd_lock); if (adev->dm.hdcp_workqueue) { hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); @@ -3810,7 +3880,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) aconnector->timing_changed = false; if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) - DRM_ERROR("KMS: Failed to detect connector\n"); + drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n"); if (aconnector->base.force && new_connection_type == dc_connection_none) { emulated_link_detect(aconnector->dc_link); @@ -3822,11 +3892,13 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) drm_kms_helper_connector_hotplug_event(connector); } else { - mutex_lock(&adev->dm.dc_lock); - dc_exit_ips_for_hw_access(dc); - ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); - mutex_unlock(&adev->dm.dc_lock); + scoped_guard(mutex, &adev->dm.dc_lock) { + dc_exit_ips_for_hw_access(dc); + ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); + } if (ret) { + /* w/a delay for certain panels */ + apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink); amdgpu_dm_update_connector_after_detect(aconnector); drm_modeset_lock_all(dev); @@ -3837,8 +3909,6 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) drm_kms_helper_connector_hotplug_event(connector); } } - mutex_unlock(&aconnector->hpd_lock); - } static void handle_hpd_irq(void *param) @@ -4678,48 +4748,40 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, int bl_idx) { -#if defined(CONFIG_ACPI) - struct amdgpu_dm_backlight_caps caps; + struct amdgpu_dm_backlight_caps *caps = &dm->backlight_caps[bl_idx]; - memset(&caps, 0, sizeof(caps)); - - if (dm->backlight_caps[bl_idx].caps_valid) + if (caps->caps_valid) return; - amdgpu_acpi_get_backlight_caps(&caps); +#if defined(CONFIG_ACPI) + amdgpu_acpi_get_backlight_caps(caps); /* validate the firmware value is sane */ - if (caps.caps_valid) { - int spread = caps.max_input_signal - caps.min_input_signal; + if (caps->caps_valid) { + int spread = caps->max_input_signal - caps->min_input_signal; - if (caps.max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || - caps.min_input_signal < 0 || + if (caps->max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || + caps->min_input_signal < 0 || spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || spread < AMDGPU_DM_MIN_SPREAD) { DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n", - caps.min_input_signal, caps.max_input_signal); - caps.caps_valid = false; + caps->min_input_signal, caps->max_input_signal); + caps->caps_valid = false; } } - if (caps.caps_valid) { - dm->backlight_caps[bl_idx].caps_valid = true; - if (caps.aux_support) - return; - 
dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal; - dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal; - } else { - dm->backlight_caps[bl_idx].min_input_signal = - AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; - dm->backlight_caps[bl_idx].max_input_signal = - AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; + if (!caps->caps_valid) { + caps->min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; + caps->max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; + caps->caps_valid = true; } #else - if (dm->backlight_caps[bl_idx].aux_support) + if (caps->aux_support) return; - dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; - dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; + caps->min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; + caps->max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; + caps->caps_valid = true; #endif } @@ -4745,10 +4807,38 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c uint32_t brightness) { unsigned int min, max; + u8 prev_signal = 0, prev_lum = 0; if (!get_brightness_range(caps, &min, &max)) return brightness; + for (int i = 0; i < caps->data_points; i++) { + u8 signal, lum; + + if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE) + break; + + signal = caps->luminance_data[i].input_signal; + lum = caps->luminance_data[i].luminance; + + /* + * brightness == signal: luminance is percent numerator + * brightness < signal: interpolate between previous and current luminance numerator + * brightness > signal: find next data point + */ + if (brightness < signal) + lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) * + (brightness - prev_signal), + signal - prev_signal); + else if (brightness > signal) { + prev_signal = signal; + prev_lum = lum; + continue; + } + brightness = DIV_ROUND_CLOSEST(lum * brightness, 101); + break; + } + // Rescale 0..255 to min..max return min + DIV_ROUND_CLOSEST((max - min) * brightness, AMDGPU_MAX_BL_LEVEL); @@ -4773,19 +4863,19 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, int bl_idx, u32 user_brightness) { - struct amdgpu_dm_backlight_caps caps; + struct amdgpu_dm_backlight_caps *caps; struct dc_link *link; u32 brightness; bool rc, reallow_idle = false; amdgpu_dm_update_backlight_caps(dm, bl_idx); - caps = dm->backlight_caps[bl_idx]; + caps = &dm->backlight_caps[bl_idx]; dm->brightness[bl_idx] = user_brightness; /* update scratch register */ if (bl_idx == 0) amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]); - brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]); + brightness = convert_brightness_from_user(caps, dm->brightness[bl_idx]); link = (struct dc_link *)dm->backlight_link[bl_idx]; /* Change brightness based on AUX property */ @@ -4795,7 +4885,7 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, reallow_idle = true; } - if (caps.aux_support) { + if (caps->aux_support) { rc = dc_link_set_backlight_level_nits(link, true, brightness, AUX_BL_DEFAULT_TRANSITION_TIME_MS); if (!rc) @@ -4912,6 +5002,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) } else props.brightness = AMDGPU_MAX_BL_LEVEL; + if (caps.data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) + drm_info(drm, "Using custom brightness curve\n"); props.max_brightness = AMDGPU_MAX_BL_LEVEL; props.type = BACKLIGHT_RAW; @@ -5104,6 +5196,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device 
*adev) case IP_VERSION(2, 1, 0): case IP_VERSION(3, 5, 0): case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 6, 0): case IP_VERSION(4, 0, 1): if (register_outbox_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); @@ -5127,6 +5220,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 2, 1): case IP_VERSION(3, 5, 0): case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 6, 0): case IP_VERSION(4, 0, 1): psr_feature_enabled = true; break; @@ -5144,6 +5238,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 2, 1): case IP_VERSION(3, 5, 0): case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 6, 0): replay_feature_enabled = true; break; @@ -5293,6 +5388,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case IP_VERSION(3, 2, 1): case IP_VERSION(3, 5, 0): case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 6, 0): case IP_VERSION(4, 0, 1): if (dcn10_register_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); @@ -5435,6 +5531,9 @@ static int dm_init_microcode(struct amdgpu_device *adev) case IP_VERSION(3, 5, 1): fw_name_dmub = FIRMWARE_DCN_351_DMUB; break; + case IP_VERSION(3, 6, 0): + fw_name_dmub = FIRMWARE_DCN_36_DMUB; + break; case IP_VERSION(4, 0, 1): fw_name_dmub = FIRMWARE_DCN_401_DMUB; break; @@ -5563,6 +5662,7 @@ static int dm_early_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(3, 2, 1): case IP_VERSION(3, 5, 0): case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 6, 0): case IP_VERSION(4, 0, 1): adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 4; @@ -5641,9 +5741,9 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state, case DRM_COLOR_YCBCR_BT2020: if (full_range) - *color_space = COLOR_SPACE_2020_YCBCR; + *color_space = COLOR_SPACE_2020_YCBCR_FULL; else - return -EINVAL; + *color_space = COLOR_SPACE_2020_YCBCR_LIMITED; break; default: @@ -6139,12 +6239,14 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing, if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) color_space = COLOR_SPACE_2020_RGB_FULLRANGE; else - color_space = COLOR_SPACE_2020_YCBCR; + color_space = COLOR_SPACE_2020_YCBCR_LIMITED; break; case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601 default: if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) { color_space = COLOR_SPACE_SRGB; + if (connector_state->hdmi.broadcast_rgb == DRM_HDMI_BROADCAST_RGB_LIMITED) + color_space = COLOR_SPACE_SRGB_LIMITED; /* * 27030khz is the separation point between HDTV and SDTV * according to HDMI spec, we use YCbCr709 and YCbCr601 @@ -7484,12 +7586,12 @@ cleanup: } struct dc_stream_state * -create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, +create_validate_stream_for_sink(struct drm_connector *connector, const struct drm_display_mode *drm_mode, const struct dm_connector_state *dm_state, const struct dc_stream_state *old_stream) { - struct drm_connector *connector = &aconnector->base; + struct amdgpu_dm_connector *aconnector = NULL; struct amdgpu_device *adev = drm_to_adev(connector->dev); struct dc_stream_state *stream; const struct drm_connector_state *drm_state = dm_state ? 
&dm_state->base : NULL; @@ -7500,8 +7602,12 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, if (!dm_state) return NULL; - if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A || - aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + aconnector = to_amdgpu_dm_connector(connector); + + if (aconnector && + (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A || + aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)) bpc_limit = 8; do { @@ -7513,10 +7619,11 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, break; } - if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + dc_result = dc_validate_stream(adev->dm.dc, stream); + + if (!aconnector) /* writeback connector */ return stream; - dc_result = dc_validate_stream(adev->dm.dc, stream); if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream); @@ -7546,7 +7653,7 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, __func__, __LINE__); aconnector->force_yuv420_output = true; - stream = create_validate_stream_for_sink(aconnector, drm_mode, + stream = create_validate_stream_for_sink(connector, drm_mode, dm_state, old_stream); aconnector->force_yuv420_output = false; } @@ -7555,12 +7662,16 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, } enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { int result = MODE_ERROR; struct dc_sink *dc_sink; + struct drm_display_mode *test_mode; /* TODO: Unhardcode stream count */ struct dc_stream_state *stream; + /* we always have an amdgpu_dm_connector here since we got + * here via the amdgpu_dm_connector_helper_funcs + */ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || @@ -7583,11 +7694,16 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec goto fail; } - drm_mode_set_crtcinfo(mode, 0); + test_mode = drm_mode_duplicate(connector->dev, mode); + if (!test_mode) + goto fail; - stream = create_validate_stream_for_sink(aconnector, mode, + drm_mode_set_crtcinfo(test_mode, 0); + + stream = create_validate_stream_for_sink(connector, test_mode, to_dm_connector_state(connector->state), NULL); + drm_mode_destroy(connector->dev, test_mode); if (stream) { dc_stream_release(stream); result = MODE_OK; @@ -8313,6 +8429,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, dm->ddev->mode_config.scaling_mode_property, DRM_MODE_SCALE_NONE); + if (connector_type == DRM_MODE_CONNECTOR_HDMIA + || (connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root)) + drm_connector_attach_broadcast_rgb_property(&aconnector->base); + drm_object_attach_property(&aconnector->base.base, adev->mode_info.underscan_property, UNDERSCAN_OFF); @@ -8365,7 +8485,7 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, int i; int result = -EIO; - if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported) + if (!ddc_service->ddc_pin) return result; cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); @@ -8384,11 +8504,18 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, cmd.payloads[i].data = msgs[i].buf; } - if 
(dc_submit_i2c( - ddc_service->ctx->dc, - ddc_service->link->link_index, - &cmd)) - result = num; + if (i2c->oem) { + if (dc_submit_i2c_oem( + ddc_service->ctx->dc, + &cmd)) + result = num; + } else { + if (dc_submit_i2c( + ddc_service->ctx->dc, + ddc_service->link->link_index, + &cmd)) + result = num; + } kfree(cmd.payloads); return result; @@ -8405,9 +8532,7 @@ static const struct i2c_algorithm amdgpu_dm_i2c_algo = { }; static struct amdgpu_i2c_adapter * -create_i2c(struct ddc_service *ddc_service, - int link_index, - int *res) +create_i2c(struct ddc_service *ddc_service, bool oem) { struct amdgpu_device *adev = ddc_service->ctx->driver_context; struct amdgpu_i2c_adapter *i2c; @@ -8418,9 +8543,14 @@ create_i2c(struct ddc_service *ddc_service, i2c->base.owner = THIS_MODULE; i2c->base.dev.parent = &adev->pdev->dev; i2c->base.algo = &amdgpu_dm_i2c_algo; - snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); + if (oem) + snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c OEM bus"); + else + snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", + ddc_service->link->link_index); i2c_set_adapdata(&i2c->base, i2c); i2c->ddc_service = ddc_service; + i2c->oem = oem; return i2c; } @@ -8466,7 +8596,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, link->priv = aconnector; - i2c = create_i2c(link->ddc, link->link_index, &res); + i2c = create_i2c(link->ddc, false); if (!i2c) { DRM_ERROR("Failed to create i2c adapter data\n"); return -ENOMEM; @@ -10073,7 +10203,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct dc_stream_update stream_update; struct dc_info_packet hdr_packet; struct dc_stream_status *status = NULL; - bool abm_changed, hdr_changed, scaling_changed; + bool abm_changed, hdr_changed, scaling_changed, output_color_space_changed = false; memset(&stream_update, 0, sizeof(stream_update)); @@ -10092,13 +10222,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) scaling_changed = is_scaling_state_different(dm_new_con_state, dm_old_con_state); + if ((new_con_state->hdmi.broadcast_rgb != old_con_state->hdmi.broadcast_rgb) && + (dm_old_crtc_state->stream->output_color_space != + get_output_color_space(&dm_new_crtc_state->stream->timing, new_con_state))) + output_color_space_changed = true; + abm_changed = dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level; hdr_changed = !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state); - if (!scaling_changed && !abm_changed && !hdr_changed) + if (!scaling_changed && !abm_changed && !hdr_changed && !output_color_space_changed) continue; stream_update.stream = dm_new_crtc_state->stream; @@ -10110,6 +10245,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) stream_update.dst = dm_new_crtc_state->stream->dst; } + if (output_color_space_changed) { + dm_new_crtc_state->stream->output_color_space + = get_output_color_space(&dm_new_crtc_state->stream->timing, new_con_state); + + stream_update.output_color_space = &dm_new_crtc_state->stream->output_color_space; + } + if (abm_changed) { dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level; @@ -10610,7 +10752,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) goto skip_modeset; - new_stream = create_validate_stream_for_sink(aconnector, + new_stream = create_validate_stream_for_sink(connector, &new_crtc_state->mode, 
dm_new_conn_state, dm_old_crtc_state->stream); @@ -12643,3 +12785,10 @@ bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, { return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type); } + +void dm_acpi_process_phy_transition_interlock( + const struct dc_context *ctx, + struct dm_process_phy_transition_init_params process_phy_transition_init_params) +{ + // Not yet implemented +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index d2703ca7dff3..385faaca6e26 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -151,6 +151,18 @@ struct idle_workqueue { bool running; }; +#define MAX_LUMINANCE_DATA_POINTS 99 + +/** + * struct amdgpu_dm_luminance_data - Custom luminance data + * @luminance: Luminance in percent + * @input_signal: Input signal in range 0-255 + */ +struct amdgpu_dm_luminance_data { + u8 luminance; + u8 input_signal; +} __packed; + /** * struct amdgpu_dm_backlight_caps - Information about backlight * @@ -195,6 +207,14 @@ struct amdgpu_dm_backlight_caps { * @dc_level: the default brightness if booted on DC */ u8 dc_level; + /** + * @data_points: the number of custom luminance data points + */ + u8 data_points; + /** + * @luminance_data: custom luminance data + */ + struct amdgpu_dm_luminance_data luminance_data[MAX_LUMINANCE_DATA_POINTS]; }; /** @@ -606,6 +626,13 @@ struct amdgpu_display_manager { * Bounding box data read from dmub during early initialization for DCN4+ */ struct dml2_soc_bb *bb_from_dmub; + + /** + * @oem_i2c: + * + * OEM i2c bus + */ + struct amdgpu_i2c_adapter *oem_i2c; }; enum dsc_clock_force_state { @@ -949,7 +976,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, int link_index); enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode); + const struct drm_display_mode *mode); void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector); @@ -989,7 +1016,7 @@ int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int struct set_config_cmd_payload *payload, enum set_config_status *operation_result); struct dc_stream_state * - create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, + create_validate_stream_for_sink(struct drm_connector *connector, const struct drm_display_mode *drm_mode, const struct dm_connector_state *dm_state, const struct dc_stream_state *old_stream); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 049046c60462..c7d13e743e6c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -1169,7 +1169,7 @@ static int amdgpu_current_colorspace_show(struct seq_file *m, void *data) case COLOR_SPACE_2020_RGB_FULLRANGE: seq_puts(m, "BT2020_RGB"); break; - case COLOR_SPACE_2020_YCBCR: + case COLOR_SPACE_2020_YCBCR_LIMITED: seq_puts(m, "BT2020_YCC"); break; default: diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index c0dc23244049..a3e93b2891f0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -172,7 +172,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, struct mod_hdcp_display_adjustment 
display_adjust; unsigned int conn_index = aconnector->base.index; - mutex_lock(&hdcp_w->mutex); + guard(mutex)(&hdcp_w->mutex); hdcp_w->aconnector[conn_index] = aconnector; memset(&link_adjust, 0, sizeof(link_adjust)); @@ -209,7 +209,6 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, mod_hdcp_update_display(&hdcp_w->hdcp, conn_index, &link_adjust, &display_adjust, &hdcp_w->output); process_output(hdcp_w); - mutex_unlock(&hdcp_w->mutex); } static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, @@ -220,7 +219,7 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, struct drm_connector_state *conn_state = aconnector->base.state; unsigned int conn_index = aconnector->base.index; - mutex_lock(&hdcp_w->mutex); + guard(mutex)(&hdcp_w->mutex); hdcp_w->aconnector[conn_index] = aconnector; /* the removal of display will invoke auth reset -> hdcp destroy and @@ -239,7 +238,6 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); process_output(hdcp_w); - mutex_unlock(&hdcp_w->mutex); } void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index) @@ -247,7 +245,7 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; unsigned int conn_index; - mutex_lock(&hdcp_w->mutex); + guard(mutex)(&hdcp_w->mutex); mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output); @@ -259,8 +257,6 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde } process_output(hdcp_w); - - mutex_unlock(&hdcp_w->mutex); } void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index) @@ -277,7 +273,7 @@ static void event_callback(struct work_struct *work) hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue, callback_dwork); - mutex_lock(&hdcp_work->mutex); + guard(mutex)(&hdcp_work->mutex); cancel_delayed_work(&hdcp_work->callback_dwork); @@ -285,8 +281,6 @@ static void event_callback(struct work_struct *work) &hdcp_work->output); process_output(hdcp_work); - - mutex_unlock(&hdcp_work->mutex); } static void event_property_update(struct work_struct *work) @@ -323,7 +317,7 @@ static void event_property_update(struct work_struct *work) continue; drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - mutex_lock(&hdcp_work->mutex); + guard(mutex)(&hdcp_work->mutex); if (conn_state->commit) { ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done, @@ -355,7 +349,6 @@ static void event_property_update(struct work_struct *work) drm_hdcp_update_content_protection(connector, DRM_MODE_CONTENT_PROTECTION_DESIRED); } - mutex_unlock(&hdcp_work->mutex); drm_modeset_unlock(&dev->mode_config.connection_mutex); } } @@ -368,7 +361,7 @@ static void event_property_validate(struct work_struct *work) struct amdgpu_dm_connector *aconnector; unsigned int conn_index; - mutex_lock(&hdcp_work->mutex); + guard(mutex)(&hdcp_work->mutex); for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) { @@ -408,8 +401,6 @@ static void event_property_validate(struct work_struct *work) schedule_work(&hdcp_work->property_update_work); } } - - mutex_unlock(&hdcp_work->mutex); } static void event_watchdog_timer(struct work_struct *work) @@ -420,7 +411,7 @@ static void event_watchdog_timer(struct work_struct *work) struct hdcp_workqueue, watchdog_timer_dwork); - mutex_lock(&hdcp_work->mutex); + 
guard(mutex)(&hdcp_work->mutex); cancel_delayed_work(&hdcp_work->watchdog_timer_dwork); @@ -429,8 +420,6 @@ static void event_watchdog_timer(struct work_struct *work) &hdcp_work->output); process_output(hdcp_work); - - mutex_unlock(&hdcp_work->mutex); } static void event_cpirq(struct work_struct *work) @@ -439,13 +428,11 @@ static void event_cpirq(struct work_struct *work) hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work); - mutex_lock(&hdcp_work->mutex); + guard(mutex)(&hdcp_work->mutex); mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output); process_output(hdcp_work); - - mutex_unlock(&hdcp_work->mutex); } void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work) @@ -470,7 +457,6 @@ static bool enable_assr(void *handle, struct dc_link *link) struct mod_hdcp hdcp = hdcp_work->hdcp; struct psp_context *psp = hdcp.config.psp.handle; struct ta_dtm_shared_memory *dtm_cmd; - bool res = true; if (!psp->dtm_context.context.initialized) { DRM_INFO("Failed to enable ASSR, DTM TA is not initialized."); @@ -479,7 +465,7 @@ static bool enable_assr(void *handle, struct dc_link *link) dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf; - mutex_lock(&psp->dtm_context.mutex); + guard(mutex)(&psp->dtm_context.mutex); memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE; @@ -491,12 +477,10 @@ static bool enable_assr(void *handle, struct dc_link *link) if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) { DRM_INFO("Failed to enable ASSR"); - res = false; + return false; } - mutex_unlock(&psp->dtm_context.mutex); - - return res; + return true; } static void update_config(void *handle, struct cp_psp_stream_config *config) @@ -557,13 +541,11 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) (!!aconnector->base.state) ? 
aconnector->base.state->hdcp_content_type : -1); - mutex_lock(&hdcp_w->mutex); + guard(mutex)(&hdcp_w->mutex); mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); process_output(hdcp_w); - mutex_unlock(&hdcp_w->mutex); - } /** @@ -743,6 +725,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, dc->ctx->dce_version == DCN_VERSION_3_15 || dc->ctx->dce_version == DCN_VERSION_3_5 || dc->ctx->dce_version == DCN_VERSION_3_51 || + dc->ctx->dce_version == DCN_VERSION_3_6 || dc->ctx->dce_version == DCN_VERSION_3_16) hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1; hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index fbd80d8545a8..2cd35392e2da 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -55,16 +55,21 @@ static u32 edid_extract_panel_id(struct edid *edid) (u32)EDID_PRODUCT_ID(edid); } -static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps) +static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct dc_edid_caps *edid_caps) { uint32_t panel_id = edid_extract_panel_id(edid); switch (panel_id) { + /* Workaround for monitors that need a delay after detecting the link */ + case drm_edid_encode_panel_id('G', 'B', 'T', 0x3215): + drm_dbg_driver(dev, "Add 10s delay for link detection for panel id %X\n", panel_id); + edid_caps->panel_patch.wait_after_dpcd_poweroff_ms = 10000; + break; /* Workaround for some monitors which does not work well with FAMS */ case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E): case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053): case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC): - DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id); + drm_dbg_driver(dev, "Disabling FAMS on monitor with panel id %X\n", panel_id); edid_caps->panel_patch.disable_fams = true; break; /* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */ @@ -73,11 +78,11 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps) case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A): case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1): case drm_edid_encode_panel_id('M', 'S', 'F', 0x1003): - DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id); + drm_dbg_driver(dev, "Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id); edid_caps->panel_patch.remove_sink_ext_caps = true; break; case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154): - DRM_DEBUG_DRIVER("Disabling VSC on monitor with panel id %X\n", panel_id); + drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id); edid_caps->panel_patch.disable_colorimetry = true; break; default: @@ -101,6 +106,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps( { struct amdgpu_dm_connector *aconnector = link->priv; struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; struct edid *edid_buf = edid ? 
(struct edid *) edid->raw_edid : NULL; struct cea_sad *sads; int sad_count = -1; @@ -130,7 +136,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps( edid_caps->edid_hdmi = connector->display_info.is_hdmi; - apply_edid_quirks(edid_buf, edid_caps); + apply_edid_quirks(dev, edid_buf, edid_caps); sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); if (sad_count <= 0) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index a215234151ac..b61e210f6246 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -473,7 +473,7 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev) unregister_all_irq_handlers(adev); } -int amdgpu_dm_irq_suspend(struct amdgpu_device *adev) +void amdgpu_dm_irq_suspend(struct amdgpu_device *adev) { int src; struct list_head *hnd_list_h; @@ -511,10 +511,9 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev) } DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); - return 0; } -int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev) +void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev) { int src; struct list_head *hnd_list_h, *hnd_list_l; @@ -522,7 +521,7 @@ int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev) DM_IRQ_TABLE_LOCK(adev, irq_table_flags); - DRM_DEBUG_KMS("DM_IRQ: early resume\n"); + drm_dbg(adev_to_drm(adev), "DM_IRQ: early resume\n"); /* re-enable short pulse interrupts HW interrupt */ for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) { @@ -533,11 +532,9 @@ int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev) } DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); - - return 0; } -int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev) +void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev) { int src; struct list_head *hnd_list_h, *hnd_list_l; @@ -545,7 +542,7 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev) DM_IRQ_TABLE_LOCK(adev, irq_table_flags); - DRM_DEBUG_KMS("DM_IRQ: resume\n"); + drm_dbg(adev_to_drm(adev), "DM_IRQ: resume\n"); /** * Renable HW interrupt for HPD and only since FLIP and VBLANK @@ -559,7 +556,6 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev) } DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); - return 0; } /* diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h index 2349238a626b..ba17c23b2706 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h @@ -90,14 +90,14 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev); * amdgpu_dm_irq_suspend - disable ASIC interrupt during suspend. * */ -int amdgpu_dm_irq_suspend(struct amdgpu_device *adev); +void amdgpu_dm_irq_suspend(struct amdgpu_device *adev); /** * amdgpu_dm_irq_resume_early - enable HPDRX ASIC interrupts during resume. * amdgpu_dm_irq_resume - enable ASIC interrupt during resume. 
* */ -int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev); -int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev); +void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev); +void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev); #endif /* __AMDGPU_DM_IRQ_H__ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 07e744da7bf4..7ceedf626d23 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -1625,7 +1625,6 @@ int pre_validate_dsc(struct drm_atomic_state *state, if (ind >= 0) { struct drm_connector *connector; - struct amdgpu_dm_connector *aconnector; struct drm_connector_state *drm_new_conn_state; struct dm_connector_state *dm_new_conn_state; struct dm_crtc_state *dm_old_crtc_state; @@ -1633,15 +1632,17 @@ int pre_validate_dsc(struct drm_atomic_state *state, connector = amdgpu_dm_find_first_crtc_matching_connector(state, state->crtcs[ind].ptr); - aconnector = to_amdgpu_dm_connector(connector); + if (!connector) + continue; + drm_new_conn_state = drm_atomic_get_new_connector_state(state, - &aconnector->base); + connector); dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); dm_old_crtc_state = to_dm_crtc_state(state->crtcs[ind].old_state); local_dc_state->streams[i] = - create_validate_stream_for_sink(aconnector, + create_validate_stream_for_sink(connector, &state->crtcs[ind].new_state->mode, dm_new_conn_state, dm_old_crtc_state->stream); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index 92472109f84a..3e0f45f1711c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -700,7 +700,7 @@ static void amdgpu_dm_plane_add_gfx12_modifiers(struct amdgpu_device *adev, uint64_t mod_4k = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_4K_2D); uint64_t mod_256b = ver | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256B_2D); uint64_t dcc = ver | AMD_FMT_MOD_SET(DCC, 1); - uint8_t max_comp_block[] = {1, 0}; + uint8_t max_comp_block[] = {2, 1, 0}; uint64_t max_comp_block_mod[ARRAY_SIZE(max_comp_block)] = {0}; uint8_t i = 0, j = 0; uint64_t gfx12_modifiers[] = {mod_256k, mod_64k, mod_4k, mod_256b, DRM_FORMAT_MOD_LINEAR}; @@ -1261,21 +1261,24 @@ static int amdgpu_dm_plane_atomic_check(struct drm_plane *plane, } static int amdgpu_dm_plane_atomic_async_check(struct drm_plane *plane, - struct drm_atomic_state *state) + struct drm_atomic_state *state, bool flip) { struct drm_crtc_state *new_crtc_state; struct drm_plane_state *new_plane_state; struct dm_crtc_state *dm_new_crtc_state; - /* Only support async updates on cursor planes. 
*/ - if (plane->type != DRM_PLANE_TYPE_CURSOR) + if (flip) { + if (plane->type != DRM_PLANE_TYPE_OVERLAY) + return -EINVAL; + } else if (plane->type != DRM_PLANE_TYPE_CURSOR) { return -EINVAL; + } new_plane_state = drm_atomic_get_new_plane_state(state, plane); new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc); dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); /* Reject overlay cursors for now*/ - if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) + if (!flip && dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) return -EINVAL; return 0; @@ -1433,7 +1436,7 @@ static void amdgpu_dm_plane_panic_flush(struct drm_plane *plane) dc_plane_state = dm_plane_state->dc_state; - dc_plane_force_update_for_panic(dc_plane_state, fb->modifier ? true : false); + dc_plane_force_dcc_and_tiling_disable(dc_plane_state, fb->modifier ? true : false); } static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
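
Note on the locking and allocation changes above: a recurring pattern in this patch is replacing manual mutex_unlock()/kfree()/dc_state_release() error paths with the scope-based cleanup helpers from <linux/cleanup.h> (DEFINE_FREE()/__free(), guard() and scoped_guard()), which is why amdgpu_dm_commit_zero_streams(), dm_gpureset_commit_state() and the HDCP workqueue handlers can now simply return early. The sketch below is a minimal userspace analog built on the compiler __attribute__((cleanup)) that those kernel macros wrap; the names auto_free, auto_unlock, free_buf, unlock_mutex and do_work are made up for this example and are not kernel symbols.

/*
 * Userspace sketch only: mimics DEFINE_FREE()/__free() and guard(mutex)
 * with the plain GCC/Clang cleanup attribute. Build with: cc -pthread.
 */
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

static void free_buf(char **p)
{
        free(*p);                       /* free(NULL) is a no-op, like the if (_T) check */
}
#define auto_free __attribute__((cleanup(free_buf)))

static void unlock_mutex(pthread_mutex_t **m)
{
        pthread_mutex_unlock(*m);       /* runs automatically when the guard leaves scope */
}
#define auto_unlock __attribute__((cleanup(unlock_mutex)))

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_work(int fail_early)
{
        auto_free char *buf = malloc(64);
        auto_unlock pthread_mutex_t *guard = (pthread_mutex_lock(&lock), &lock);

        if (!buf)
                return -1;              /* early return: lock released, buf freed */
        if (fail_early)
                return -2;              /* no goto/unlock/free boilerplate needed */

        snprintf(buf, 64, "work done");
        printf("%s\n", buf);
        return 0;                       /* cleanups run in reverse declaration order */
}

int main(void)
{
        do_work(1);
        return do_work(0);
}

As with the kernel helpers, the cleanups fire in reverse declaration order whenever the scope is left, so every return path releases the lock and frees the buffer without goto labels.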
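Note on the backlight change: convert_brightness_from_user() now consults the panel-supplied (input_signal, luminance) data points (amdgpu_dm_backlight_caps.luminance_data, up to MAX_LUMINANCE_DATA_POINTS of them) and linearly interpolates between the two neighbouring points before the existing min..max rescale; the curve can be bypassed with the DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE debug mask. The standalone program below mirrors that arithmetic with invented curve values; it is a sketch of the mapping, not the driver code.

/*
 * Standalone illustration of the interpolation done in
 * convert_brightness_from_user(); the curve[] values are invented for
 * this example and do not come from any real panel.
 */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

struct lum_point {
        unsigned char input_signal;     /* 0..255 user brightness */
        unsigned char luminance;        /* percent numerator at that signal */
};

static const struct lum_point curve[] = {
        {   0,   1 }, {  64,  10 }, { 128,  35 }, { 192,  70 }, { 255, 100 },
};

static unsigned int apply_curve(unsigned int brightness)
{
        unsigned int prev_signal = 0, prev_lum = 0;

        for (unsigned int i = 0; i < sizeof(curve) / sizeof(curve[0]); i++) {
                unsigned int signal = curve[i].input_signal;
                unsigned int lum = curve[i].luminance;

                if (brightness > signal) {      /* not reached yet, remember point */
                        prev_signal = signal;
                        prev_lum = lum;
                        continue;
                }
                if (brightness < signal)        /* interpolate between neighbours */
                        lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) *
                                                           (brightness - prev_signal),
                                                           signal - prev_signal);
                /* scale the requested level by the interpolated percentage */
                return DIV_ROUND_CLOSEST(lum * brightness, 101);
        }
        return brightness;              /* empty curve: leave the level untouched */
}

int main(void)
{
        for (unsigned int level = 0; level <= 255; level += 51)
                printf("user %3u -> effective %3u\n", level, apply_curve(level));
        return 0;
}

With the example curve, low and mid user levels are pulled down noticeably while the top of the range stays close to linear, which is the intended effect for panels whose luminance response is non-linear in the input signal.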