author | Maarten Lankhorst <maarten.lankhorst@linux.intel.com> | 2019-05-09 10:18:57 +0200 |
---|---|---|
committer | Maarten Lankhorst <maarten.lankhorst@linux.intel.com> | 2019-05-09 10:19:03 +0200 |
commit | 752c4f3c1d19708578257ecc041672b6d8afb9bf (patch) | |
tree | 75bf7773e3dcddb495540dbd3fd135702de738eb /drivers/gpu/drm/amd | |
parent | 3be2071004bf748ed39e54ccda85caaa774c7ccd (diff) | |
parent | eb85d03e01c3e9f3b0ba7282b2e3515a635decb2 (diff) | |
Merge remote-tracking branch 'drm/drm-next' into drm-misc-next
Requested for backmerging airlied's drm-legacy cleanup.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Diffstat (limited to 'drivers/gpu/drm/amd')
103 files changed, 2950 insertions, 1362 deletions
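The headline uAPI change for amdgpu in this range is timeline-syncobj support in the CS ioctl (the new AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT/SIGNAL chunks; the DRM minor version is bumped to 3.32 accordingly). As a reading aid for the amdgpu_cs.c hunks below, here is a minimal C sketch of the signaling half; `signal_post_dep` is a hypothetical helper name that condenses the loop body of amdgpu_cs_post_dependencies() as it appears in this diff, not a function that exists in the tree.

```c
#include <linux/dma-fence.h>
#include <linux/dma-fence-chain.h>
#include <drm/drm_syncobj.h>

struct amdgpu_cs_post_dep {
	struct drm_syncobj *syncobj;
	struct dma_fence_chain *chain;	/* pre-allocated; NULL for binary syncobjs */
	u64 point;			/* timeline point; 0 for binary syncobjs */
};

/*
 * Signal one post-dependency once the job's fence exists. For a timeline
 * point, the pre-allocated chain node is linked into the syncobj;
 * drm_syncobj_add_point() takes ownership of the chain, so the pointer is
 * cleared to keep the parser's cleanup path from freeing it again. Binary
 * syncobjs fall back to the old replace-fence behavior.
 */
static void signal_post_dep(struct amdgpu_cs_post_dep *dep,
			    struct dma_fence *fence)
{
	if (dep->chain && dep->point) {
		drm_syncobj_add_point(dep->syncobj, dep->chain,
				      fence, dep->point);
		dep->chain = NULL;
	} else {
		drm_syncobj_replace_fence(dep->syncobj, fence);
	}
}
```

The chain node is allocated up front in amdgpu_cs_process_syncobj_timeline_out_dep() (visible below) because signaling happens in a context where allocation failure could no longer be reported to userspace.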
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index f8c58c425eb9..fdd0ca4b0f0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -23,7 +23,7 @@ # Makefile for the drm device driver. This driver provides support for the # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. -FULL_AMD_PATH=$(src)/.. +FULL_AMD_PATH=$(srctree)/$(src)/.. DISPLAY_FOLDER_NAME=display FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 6e71749cb3bb..14398f55f602 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -436,6 +436,12 @@ struct amdgpu_cs_chunk { void *kdata; }; +struct amdgpu_cs_post_dep { + struct drm_syncobj *syncobj; + struct dma_fence_chain *chain; + u64 point; +}; + struct amdgpu_cs_parser { struct amdgpu_device *adev; struct drm_file *filp; @@ -465,8 +471,8 @@ struct amdgpu_cs_parser { /* user fence */ struct amdgpu_bo_list_entry uf_entry; - unsigned num_post_dep_syncobjs; - struct drm_syncobj **post_dep_syncobjs; + unsigned num_post_deps; + struct amdgpu_cs_post_dep *post_deps; }; static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, @@ -827,6 +833,7 @@ struct amdgpu_device { /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */ struct work_struct hotplug_work; struct amdgpu_irq_src crtc_irq; + struct amdgpu_irq_src vupdate_irq; struct amdgpu_irq_src pageflip_irq; struct amdgpu_irq_src hpd_irq; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index acf8ae0cee9a..aeead072fa79 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -335,6 +335,43 @@ void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj) amdgpu_bo_unref(&(bo)); } +uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd, + enum kgd_engine_type type) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + + switch (type) { + case KGD_ENGINE_PFP: + return adev->gfx.pfp_fw_version; + + case KGD_ENGINE_ME: + return adev->gfx.me_fw_version; + + case KGD_ENGINE_CE: + return adev->gfx.ce_fw_version; + + case KGD_ENGINE_MEC1: + return adev->gfx.mec_fw_version; + + case KGD_ENGINE_MEC2: + return adev->gfx.mec2_fw_version; + + case KGD_ENGINE_RLC: + return adev->gfx.rlc_fw_version; + + case KGD_ENGINE_SDMA1: + return adev->sdma.instance[0].fw_version; + + case KGD_ENGINE_SDMA2: + return adev->sdma.instance[1].fw_version; + + default: + return 0; + } + + return 0; +} + void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd, struct kfd_local_mem_info *mem_info) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 775f815f9521..4e37fa7e85b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -81,6 +81,18 @@ struct amdgpu_kfd_dev { uint64_t vram_used; }; +enum kgd_engine_type { + KGD_ENGINE_PFP = 1, + KGD_ENGINE_ME, + KGD_ENGINE_CE, + KGD_ENGINE_MEC1, + KGD_ENGINE_MEC2, + KGD_ENGINE_RLC, + KGD_ENGINE_SDMA1, + KGD_ENGINE_SDMA2, + KGD_ENGINE_MAX +}; + struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context, struct mm_struct *mm); bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm); @@ -142,6 +154,8 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size, void **mem_obj, uint64_t *gpu_addr, void 
**cpu_ptr, bool mqd_gfx9); void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj); +uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd, + enum kgd_engine_type type); void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd, struct kfd_local_mem_info *mem_info); uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index ff7fac7df34b..fa09e11a600c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -22,14 +22,12 @@ #include <linux/fdtable.h> #include <linux/uaccess.h> -#include <linux/firmware.h> #include <linux/mmu_context.h> #include <drm/drmP.h> #include "amdgpu.h" #include "amdgpu_amdkfd.h" #include "cikd.h" #include "cik_sdma.h" -#include "amdgpu_ucode.h" #include "gfx_v7_0.h" #include "gca/gfx_7_2_d.h" #include "gca/gfx_7_2_enum.h" @@ -139,7 +137,6 @@ static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid); static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, uint8_t vmid); -static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type); static void set_scratch_backing_va(struct kgd_dev *kgd, uint64_t va, uint32_t vmid); static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, @@ -191,7 +188,6 @@ static const struct kfd2kgd_calls kfd2kgd = { .address_watch_get_offset = kgd_address_watch_get_offset, .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid, .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid, - .get_fw_version = get_fw_version, .set_scratch_backing_va = set_scratch_backing_va, .get_tile_config = get_tile_config, .set_vm_context_page_table_base = set_vm_context_page_table_base, @@ -792,63 +788,6 @@ static void set_scratch_backing_va(struct kgd_dev *kgd, unlock_srbm(kgd); } -static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type) -{ - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - const union amdgpu_firmware_header *hdr; - - switch (type) { - case KGD_ENGINE_PFP: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.pfp_fw->data; - break; - - case KGD_ENGINE_ME: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.me_fw->data; - break; - - case KGD_ENGINE_CE: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.ce_fw->data; - break; - - case KGD_ENGINE_MEC1: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.mec_fw->data; - break; - - case KGD_ENGINE_MEC2: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.mec2_fw->data; - break; - - case KGD_ENGINE_RLC: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.rlc_fw->data; - break; - - case KGD_ENGINE_SDMA1: - hdr = (const union amdgpu_firmware_header *) - adev->sdma.instance[0].fw->data; - break; - - case KGD_ENGINE_SDMA2: - hdr = (const union amdgpu_firmware_header *) - adev->sdma.instance[1].fw->data; - break; - - default: - return 0; - } - - if (hdr == NULL) - return 0; - - /* Only 12 bit in use*/ - return hdr->common.ucode_version; -} - static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 56ea929f524b..fec3a6aa1de6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -23,12 +23,10 @@ 
#include <linux/module.h> #include <linux/fdtable.h> #include <linux/uaccess.h> -#include <linux/firmware.h> #include <linux/mmu_context.h> #include <drm/drmP.h> #include "amdgpu.h" #include "amdgpu_amdkfd.h" -#include "amdgpu_ucode.h" #include "gfx_v8_0.h" #include "gca/gfx_8_0_sh_mask.h" #include "gca/gfx_8_0_d.h" @@ -95,7 +93,6 @@ static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid); static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, uint8_t vmid); -static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type); static void set_scratch_backing_va(struct kgd_dev *kgd, uint64_t va, uint32_t vmid); static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, @@ -148,7 +145,6 @@ static const struct kfd2kgd_calls kfd2kgd = { get_atc_vmid_pasid_mapping_pasid, .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid, - .get_fw_version = get_fw_version, .set_scratch_backing_va = set_scratch_backing_va, .get_tile_config = get_tile_config, .set_vm_context_page_table_base = set_vm_context_page_table_base, @@ -751,63 +747,6 @@ static void set_scratch_backing_va(struct kgd_dev *kgd, unlock_srbm(kgd); } -static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type) -{ - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - const union amdgpu_firmware_header *hdr; - - switch (type) { - case KGD_ENGINE_PFP: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.pfp_fw->data; - break; - - case KGD_ENGINE_ME: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.me_fw->data; - break; - - case KGD_ENGINE_CE: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.ce_fw->data; - break; - - case KGD_ENGINE_MEC1: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.mec_fw->data; - break; - - case KGD_ENGINE_MEC2: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.mec2_fw->data; - break; - - case KGD_ENGINE_RLC: - hdr = (const union amdgpu_firmware_header *) - adev->gfx.rlc_fw->data; - break; - - case KGD_ENGINE_SDMA1: - hdr = (const union amdgpu_firmware_header *) - adev->sdma.instance[0].fw->data; - break; - - case KGD_ENGINE_SDMA2: - hdr = (const union amdgpu_firmware_header *) - adev->sdma.instance[1].fw->data; - break; - - default: - return 0; - } - - if (hdr == NULL) - return 0; - - /* Only 12 bit in use*/ - return hdr->common.ucode_version; -} - static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 5c51d4910650..ef3d93b995b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -25,12 +25,10 @@ #include <linux/module.h> #include <linux/fdtable.h> #include <linux/uaccess.h> -#include <linux/firmware.h> #include <linux/mmu_context.h> #include <drm/drmP.h> #include "amdgpu.h" #include "amdgpu_amdkfd.h" -#include "amdgpu_ucode.h" #include "soc15_hw_ip.h" #include "gc/gc_9_0_offset.h" #include "gc/gc_9_0_sh_mask.h" @@ -111,7 +109,6 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, uint8_t vmid); static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base); -static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type); static void set_scratch_backing_va(struct kgd_dev *kgd, uint64_t va, uint32_t vmid); static int invalidate_tlbs(struct kgd_dev 
*kgd, uint16_t pasid); @@ -158,7 +155,6 @@ static const struct kfd2kgd_calls kfd2kgd = { get_atc_vmid_pasid_mapping_pasid, .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid, - .get_fw_version = get_fw_version, .set_scratch_backing_va = set_scratch_backing_va, .get_tile_config = amdgpu_amdkfd_get_tile_config, .set_vm_context_page_table_base = set_vm_context_page_table_base, @@ -874,56 +870,6 @@ static void set_scratch_backing_va(struct kgd_dev *kgd, */ } -/* FIXME: Does this need to be ASIC-specific code? */ -static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type) -{ - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - const union amdgpu_firmware_header *hdr; - - switch (type) { - case KGD_ENGINE_PFP: - hdr = (const union amdgpu_firmware_header *)adev->gfx.pfp_fw->data; - break; - - case KGD_ENGINE_ME: - hdr = (const union amdgpu_firmware_header *)adev->gfx.me_fw->data; - break; - - case KGD_ENGINE_CE: - hdr = (const union amdgpu_firmware_header *)adev->gfx.ce_fw->data; - break; - - case KGD_ENGINE_MEC1: - hdr = (const union amdgpu_firmware_header *)adev->gfx.mec_fw->data; - break; - - case KGD_ENGINE_MEC2: - hdr = (const union amdgpu_firmware_header *)adev->gfx.mec2_fw->data; - break; - - case KGD_ENGINE_RLC: - hdr = (const union amdgpu_firmware_header *)adev->gfx.rlc_fw->data; - break; - - case KGD_ENGINE_SDMA1: - hdr = (const union amdgpu_firmware_header *)adev->sdma.instance[0].fw->data; - break; - - case KGD_ENGINE_SDMA2: - hdr = (const union amdgpu_firmware_header *)adev->sdma.instance[1].fw->data; - break; - - default: - return 0; - } - - if (hdr == NULL) - return 0; - - /* Only 12 bit in use*/ - return hdr->common.ucode_version; -} - static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 52a5e4fdc95b..2f6239b6be6f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -215,6 +215,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs case AMDGPU_CHUNK_ID_SYNCOBJ_IN: case AMDGPU_CHUNK_ID_SYNCOBJ_OUT: case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES: + case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT: + case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL: break; default: @@ -804,9 +806,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, ttm_eu_backoff_reservation(&parser->ticket, &parser->validated); - for (i = 0; i < parser->num_post_dep_syncobjs; i++) - drm_syncobj_put(parser->post_dep_syncobjs[i]); - kfree(parser->post_dep_syncobjs); + for (i = 0; i < parser->num_post_deps; i++) { + drm_syncobj_put(parser->post_deps[i].syncobj); + kfree(parser->post_deps[i].chain); + } + kfree(parser->post_deps); dma_fence_put(parser->fence); @@ -1117,13 +1121,18 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, } static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p, - uint32_t handle) + uint32_t handle, u64 point, + u64 flags) { - int r; struct dma_fence *fence; - r = drm_syncobj_find_fence(p->filp, handle, 0, 0, &fence); - if (r) + int r; + + r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence); + if (r) { + DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n", + handle, point, r); return r; + } r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true); dma_fence_put(fence); @@ -1134,46 +1143,118 @@ static int 
amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p, static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p, struct amdgpu_cs_chunk *chunk) { + struct drm_amdgpu_cs_chunk_sem *deps; unsigned num_deps; int i, r; - struct drm_amdgpu_cs_chunk_sem *deps; deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata; num_deps = chunk->length_dw * 4 / sizeof(struct drm_amdgpu_cs_chunk_sem); + for (i = 0; i < num_deps; ++i) { + r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle, + 0, 0); + if (r) + return r; + } + + return 0; +} + +static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p, + struct amdgpu_cs_chunk *chunk) +{ + struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps; + unsigned num_deps; + int i, r; + + syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata; + num_deps = chunk->length_dw * 4 / + sizeof(struct drm_amdgpu_cs_chunk_syncobj); for (i = 0; i < num_deps; ++i) { - r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle); + r = amdgpu_syncobj_lookup_and_add_to_sync(p, + syncobj_deps[i].handle, + syncobj_deps[i].point, + syncobj_deps[i].flags); if (r) return r; } + return 0; } static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p, struct amdgpu_cs_chunk *chunk) { + struct drm_amdgpu_cs_chunk_sem *deps; unsigned num_deps; int i; - struct drm_amdgpu_cs_chunk_sem *deps; + deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata; num_deps = chunk->length_dw * 4 / sizeof(struct drm_amdgpu_cs_chunk_sem); - p->post_dep_syncobjs = kmalloc_array(num_deps, - sizeof(struct drm_syncobj *), - GFP_KERNEL); - p->num_post_dep_syncobjs = 0; + p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), + GFP_KERNEL); + p->num_post_deps = 0; - if (!p->post_dep_syncobjs) + if (!p->post_deps) return -ENOMEM; + for (i = 0; i < num_deps; ++i) { - p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle); - if (!p->post_dep_syncobjs[i]) + p->post_deps[i].syncobj = + drm_syncobj_find(p->filp, deps[i].handle); + if (!p->post_deps[i].syncobj) return -EINVAL; - p->num_post_dep_syncobjs++; + p->post_deps[i].chain = NULL; + p->post_deps[i].point = 0; + p->num_post_deps++; } + + return 0; +} + + +static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p, + struct amdgpu_cs_chunk + *chunk) +{ + struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps; + unsigned num_deps; + int i; + + syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata; + num_deps = chunk->length_dw * 4 / + sizeof(struct drm_amdgpu_cs_chunk_syncobj); + + p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), + GFP_KERNEL); + p->num_post_deps = 0; + + if (!p->post_deps) + return -ENOMEM; + + for (i = 0; i < num_deps; ++i) { + struct amdgpu_cs_post_dep *dep = &p->post_deps[i]; + + dep->chain = NULL; + if (syncobj_deps[i].point) { + dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL); + if (!dep->chain) + return -ENOMEM; + } + + dep->syncobj = drm_syncobj_find(p->filp, + syncobj_deps[i].handle); + if (!dep->syncobj) { + kfree(dep->chain); + return -EINVAL; + } + dep->point = syncobj_deps[i].point; + p->num_post_deps++; + } + return 0; } @@ -1187,19 +1268,33 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, chunk = &p->chunks[i]; - if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES || - chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) { + switch (chunk->chunk_id) { + case AMDGPU_CHUNK_ID_DEPENDENCIES: + case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES: r = amdgpu_cs_process_fence_dep(p, chunk); if 
(r) return r; - } else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) { + break; + case AMDGPU_CHUNK_ID_SYNCOBJ_IN: r = amdgpu_cs_process_syncobj_in_dep(p, chunk); if (r) return r; - } else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) { + break; + case AMDGPU_CHUNK_ID_SYNCOBJ_OUT: r = amdgpu_cs_process_syncobj_out_dep(p, chunk); if (r) return r; + break; + case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT: + r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk); + if (r) + return r; + break; + case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL: + r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk); + if (r) + return r; + break; } } @@ -1210,8 +1305,17 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p) { int i; - for (i = 0; i < p->num_post_dep_syncobjs; ++i) - drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence); + for (i = 0; i < p->num_post_deps; ++i) { + if (p->post_deps[i].chain && p->post_deps[i].point) { + drm_syncobj_add_point(p->post_deps[i].syncobj, + p->post_deps[i].chain, + p->fence, p->post_deps[i].point); + p->post_deps[i].chain = NULL; + } else { + drm_syncobj_replace_fence(p->post_deps[i].syncobj, + p->fence); + } + } } static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 85f8792c2a5f..b9371ec5e04f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2471,6 +2471,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->virt.vf_errors.lock); hash_init(adev->mn_hash); mutex_init(&adev->lock_reset); + mutex_init(&adev->virt.dpm_mutex); amdgpu_device_check_arguments(adev); @@ -3191,11 +3192,16 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) break; if (fence) { - r = dma_fence_wait_timeout(fence, false, tmo); + tmo = dma_fence_wait_timeout(fence, false, tmo); dma_fence_put(fence); fence = next; - if (r <= 0) + if (tmo == 0) { + r = -ETIMEDOUT; break; + } else if (tmo < 0) { + r = tmo; + break; + } } else { fence = next; } @@ -3206,8 +3212,8 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) tmo = dma_fence_wait_timeout(fence, false, tmo); dma_fence_put(fence); - if (r <= 0 || tmo <= 0) { - DRM_ERROR("recover vram bo from shadow failed\n"); + if (r < 0 || tmo <= 0) { + DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo); return -EIO; } @@ -3428,7 +3434,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, vram_lost = amdgpu_device_check_vram_lost(tmp_adev); if (vram_lost) { - DRM_ERROR("VRAM is lost!\n"); + DRM_INFO("VRAM is lost due to GPU reset!\n"); atomic_inc(&tmp_adev->vram_lost_counter); } @@ -3695,6 +3701,7 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev, struct pci_dev *pdev = adev->pdev; enum pci_bus_speed cur_speed; enum pcie_link_width cur_width; + u32 ret = 1; *speed = PCI_SPEED_UNKNOWN; *width = PCIE_LNK_WIDTH_UNKNOWN; @@ -3702,6 +3709,10 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev, while (pdev) { cur_speed = pcie_get_speed_cap(pdev); cur_width = pcie_get_width_cap(pdev); + ret = pcie_bandwidth_available(adev->pdev, NULL, + NULL, &cur_width); + if (!ret) + cur_width = PCIE_LNK_WIDTH_RESRV; if (cur_speed != PCI_SPEED_UNKNOWN) { if (*speed == PCI_SPEED_UNKNOWN) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f34e3ab5a9f3..1e2cc9d68a05 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -75,9 +75,10 @@ * - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID * - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE. * - 3.31.0 - Add support for per-flip tiling attribute changes with DC + * - 3.32.0 - Add syncobj timeline support to AMDGPU_CS. */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 31 +#define KMS_DRIVER_MINOR 32 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index ee47c11e92ce..4dee2326b29c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -136,8 +136,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, { struct amdgpu_device *adev = ring->adev; struct amdgpu_fence *fence; - struct dma_fence *old, **ptr; + struct dma_fence __rcu **ptr; uint32_t seq; + int r; fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); if (fence == NULL) @@ -153,15 +154,24 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, seq, flags | AMDGPU_FENCE_FLAG_INT); ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; + if (unlikely(rcu_dereference_protected(*ptr, 1))) { + struct dma_fence *old; + + rcu_read_lock(); + old = dma_fence_get_rcu_safe(ptr); + rcu_read_unlock(); + + if (old) { + r = dma_fence_wait(old, false); + dma_fence_put(old); + if (r) + return r; + } + } + /* This function can't be called concurrently anyway, otherwise * emitting the fence would mess up the hardware ring buffer. */ - old = rcu_dereference_protected(*ptr, 1); - if (old && !dma_fence_is_signaled(old)) { - DRM_INFO("rcu slot is busy\n"); - dma_fence_wait(old, false); - } - rcu_assign_pointer(*ptr, dma_fence_get(&fence->base)); *f = &fence->base; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 61107cfc9af6..d4fcf5475464 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -31,6 +31,7 @@ #include <drm/amdgpu_drm.h> #include "amdgpu.h" #include "amdgpu_display.h" +#include "amdgpu_xgmi.h" void amdgpu_gem_object_free(struct drm_gem_object *gobj) { @@ -668,6 +669,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, struct amdgpu_device *adev = dev->dev_private; struct drm_amdgpu_gem_op *args = data; struct drm_gem_object *gobj; + struct amdgpu_vm_bo_base *base; struct amdgpu_bo *robj; int r; @@ -706,6 +708,15 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, amdgpu_bo_unreserve(robj); break; } + for (base = robj->vm_bo; base; base = base->next) + if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev), + amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) { + r = -EINVAL; + amdgpu_bo_unreserve(robj); + goto out; + } + + robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_CPU); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 0b8ef2d27d6b..fe393a46f881 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -35,6 +35,7 @@ #include "amdgpu_trace.h" #define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000) +#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT msecs_to_jiffies(2000) /* * IB @@ -344,6 +345,8 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) * cost waiting for it coming back under RUNTIME only */ tmo_gfx = 8 * 
AMDGPU_IB_TEST_TIMEOUT; + } else if (adev->gmc.xgmi.hive_id) { + tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT; } for (i = 0; i < adev->num_rings; ++i) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 2e376064bad8..b17d0545728e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -696,6 +696,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file if (adev->pm.dpm_enabled) { dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10; dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10; + } else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && + adev->virt.ops->get_pp_clk) { + dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10; + dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10; } else { dev_info.max_engine_clock = adev->clock.default_sclk * 10; dev_info.max_memory_clock = adev->clock.default_mclk * 10; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 889e443eeee7..2e9e3db778c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -58,7 +58,7 @@ struct amdgpu_hpd; #define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base) #define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base) -#define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base); +#define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base) #define AMDGPU_MAX_HPD_PINS 6 #define AMDGPU_MAX_CRTCS 6 @@ -406,7 +406,7 @@ struct amdgpu_crtc { struct amdgpu_flip_work *pflip_works; enum amdgpu_flip_status pflip_status; int deferred_flip_completion; - u64 last_flip_vblank; + u32 last_flip_vblank; /* pll sharing */ struct amdgpu_atom_ss ss; bool ss_enabled; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index ec9e45004bff..93b2c5a48a71 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -88,12 +88,14 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo) if (bo->gem_base.import_attach) drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg); drm_gem_object_release(&bo->gem_base); - amdgpu_bo_unref(&bo->parent); + /* in case amdgpu_device_recover_vram got NULL of bo->parent */ if (!list_empty(&bo->shadow_list)) { mutex_lock(&adev->shadow_list_lock); list_del_init(&bo->shadow_list); mutex_unlock(&adev->shadow_list_lock); } + amdgpu_bo_unref(&bo->parent); + kfree(bo->metadata); kfree(bo); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 88362019d1dd..95144e49c7f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -144,7 +144,7 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; enum amd_pm_state_type pm; - if (adev->smu.ppt_funcs->get_current_power_state) + if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state) pm = amdgpu_smu_get_current_power_state(adev); else if (adev->powerplay.pp_funcs->get_current_power_state) pm = amdgpu_dpm_get_current_power_state(adev); @@ -327,6 +327,18 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, goto fail; } + if (amdgpu_sriov_vf(adev)) { + if (amdgim_is_hwperf(adev) && + adev->virt.ops->force_dpm_level) { + mutex_lock(&adev->pm.mutex); + 
adev->virt.ops->force_dpm_level(adev, level); + mutex_unlock(&adev->pm.mutex); + return count; + } else { + return -EINVAL; + } + } + if (current_level == level) return count; @@ -790,6 +802,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && + adev->virt.ops->get_pp_clk) + return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf); + if (is_support_sw_smu(adev)) return smu_print_clk_levels(&adev->smu, PP_SCLK, buf); else if (adev->powerplay.pp_funcs->print_clock_levels) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 2206bb4b0903..905cce1814f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -187,13 +187,13 @@ static int psp_tmr_init(struct psp_context *psp) int ret; /* - * Allocate 3M memory aligned to 1M from Frame Buffer (local - * physical). + * According to HW engineer, they prefer the TMR address be "naturally + * aligned" , e.g. the start address be an integer divide of TMR size. * * Note: this memory need be reserved till the driver * uninitializes. */ - ret = amdgpu_bo_create_kernel(psp->adev, PSP_TMR_SIZE, 0x100000, + ret = amdgpu_bo_create_kernel(psp->adev, PSP_TMR_SIZE, PSP_TMR_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 469cb6477b8e..22bd21efe6b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -530,6 +530,33 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev, return 0; } +/* Only used in device probe stage and called only once. */ +int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev, + struct ras_common_if *head, bool enable) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + int ret; + + if (!con) + return -EINVAL; + + if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { + /* If ras is enabled by vbios, we set up ras object first in + * both case. For enable, that is all what we need do. For + * disable, we need perform a ras TA disable cmd after that. 
+ */ + ret = __amdgpu_ras_feature_enable(adev, head, 1); + if (ret) + return ret; + + if (!enable) + ret = amdgpu_ras_feature_enable(adev, head, 0); + } else + ret = amdgpu_ras_feature_enable(adev, head, enable); + + return ret; +} + static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev, bool bypass) { @@ -558,11 +585,13 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, struct amdgpu_ras *con = amdgpu_ras_get_context(adev); int ras_block_count = AMDGPU_RAS_BLOCK_COUNT; int i; + const enum amdgpu_ras_error_type default_ras_type = + AMDGPU_RAS_ERROR__NONE; for (i = 0; i < ras_block_count; i++) { struct ras_common_if head = { .block = i, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, + .type = default_ras_type, .sub_block_index = 0, }; strcpy(head.name, ras_block_str(i)); @@ -1368,9 +1397,6 @@ int amdgpu_ras_init(struct amdgpu_device *adev) amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK; - if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) - amdgpu_ras_enable_all_features(adev, 1); - if (amdgpu_ras_fs_init(adev)) goto fs_out; @@ -1398,18 +1424,25 @@ void amdgpu_ras_post_init(struct amdgpu_device *adev) if (!con) return; - /* We enable ras on all hw_supported block, but as boot parameter might - * disable some of them and one or more IP has not implemented yet. - * So we disable them on behalf. - */ if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { + /* Set up all other IPs which are not implemented. There is a + * tricky thing that IP's actual ras error type should be + * MULTI_UNCORRECTABLE, but as driver does not handle it, so + * ERROR_NONE make sense anyway. + */ + amdgpu_ras_enable_all_features(adev, 1); + + /* We enable ras on all hw_supported block, but as boot + * parameter might disable some of them and one or more IP has + * not implemented yet. So we disable them on behalf. + */ list_for_each_entry_safe(obj, tmp, &con->head, node) { if (!amdgpu_ras_is_supported(adev, obj->head.block)) { amdgpu_ras_feature_enable(adev, &obj->head, 0); /* there should be no any reference. 
*/ WARN_ON(alive_obj(obj)); } - }; + } } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 682f2be0d68c..eaef5edefc34 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -262,6 +262,9 @@ int amdgpu_ras_pre_fini(struct amdgpu_device *adev); int amdgpu_ras_feature_enable(struct amdgpu_device *adev, struct ras_common_if *head, bool enable); +int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev, + struct ras_common_if *head, bool enable); + int amdgpu_ras_sysfs_create(struct amdgpu_device *adev, struct ras_fs_if *head); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index c17af30e758d..1ba9ba3b54f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -28,11 +28,8 @@ #define AMDGPU_MAX_SDMA_INSTANCES 2 enum amdgpu_sdma_irq { - AMDGPU_SDMA_IRQ_TRAP0 = 0, - AMDGPU_SDMA_IRQ_TRAP1, - AMDGPU_SDMA_IRQ_ECC0, - AMDGPU_SDMA_IRQ_ECC1, - + AMDGPU_SDMA_IRQ_INSTANCE0 = 0, + AMDGPU_SDMA_IRQ_INSTANCE1, AMDGPU_SDMA_IRQ_LAST }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 462a04e0f5e6..7d484fad3909 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -36,6 +36,7 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev) /* enable virtual display */ adev->mode_info.num_crtc = 1; adev->enable_virtual_display = true; + adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC; adev->cg_flags = 0; adev->pg_flags = 0; } @@ -375,4 +376,53 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) } } +static uint32_t parse_clk(char *buf, bool min) +{ + char *ptr = buf; + uint32_t clk = 0; + + do { + ptr = strchr(ptr, ':'); + if (!ptr) + break; + ptr+=2; + clk = simple_strtoul(ptr, NULL, 10); + } while (!min); + + return clk * 100; +} + +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest) +{ + char *buf = NULL; + uint32_t clk = 0; + + buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf); + clk = parse_clk(buf, lowest); + + kfree(buf); + + return clk; +} + +uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest) +{ + char *buf = NULL; + uint32_t clk = 0; + + buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf); + clk = parse_clk(buf, lowest); + + kfree(buf); + + return clk; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 722deefc0a7e..584947b7ccf3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -57,6 +57,8 @@ struct amdgpu_virt_ops { int (*reset_gpu)(struct amdgpu_device *adev); int (*wait_reset)(struct amdgpu_device *adev); void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3); + int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf); + int (*force_dpm_level)(struct amdgpu_device *adev, u32 level); }; /* @@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG { AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2, /* VRAM LOST by GIM */ AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4, + /* HW PERF SIM in GIM */ + AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3), }; struct amd_sriov_msg_pf2vf_info_header { @@ -252,6 +256,8 @@ struct amdgpu_virt { struct amdgpu_vf_error_buffer vf_errors; struct amdgpu_virt_fw_reserve 
fw_reserve; uint32_t gim_feature; + /* protect DPM events to GIM */ + struct mutex dpm_mutex; }; #define amdgpu_sriov_enabled(adev) \ @@ -278,6 +284,9 @@ static inline bool is_virtual_machine(void) #endif } +#define amdgim_is_hwperf(adev) \ + ((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION) + bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); @@ -295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size, unsigned int key, unsigned int chksum); void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev); +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest); +uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 21c712e34148..a07c85815b7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -769,14 +769,17 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, addr = 0; if (ats_entries) { - uint64_t ats_value; + uint64_t value = 0, flags; - ats_value = AMDGPU_PTE_DEFAULT_ATC; - if (level != AMDGPU_VM_PTB) - ats_value |= AMDGPU_PDE_PTE; + flags = AMDGPU_PTE_DEFAULT_ATC; + if (level != AMDGPU_VM_PTB) { + /* Handle leaf PDEs as PTEs */ + flags |= AMDGPU_PDE_PTE; + amdgpu_gmc_get_vm_pde(adev, level, &value, &flags); + } r = vm->update_funcs->update(¶ms, bo, addr, 0, ats_entries, - 0, ats_value); + value, flags); if (r) return r; @@ -784,15 +787,22 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, } if (entries) { - uint64_t value = 0; - - /* Workaround for fault priority problem on GMC9 */ - if (level == AMDGPU_VM_PTB && - adev->asic_type >= CHIP_VEGA10) - value = AMDGPU_PTE_EXECUTABLE; + uint64_t value = 0, flags = 0; + + if (adev->asic_type >= CHIP_VEGA10) { + if (level != AMDGPU_VM_PTB) { + /* Handle leaf PDEs as PTEs */ + flags |= AMDGPU_PDE_PTE; + amdgpu_gmc_get_vm_pde(adev, level, + &value, &flags); + } else { + /* Workaround for fault priority problem on GMC9 */ + flags = AMDGPU_PTE_EXECUTABLE; + } + } r = vm->update_funcs->update(¶ms, bo, addr, 0, entries, - 0, value); + value, flags); if (r) return r; } @@ -2027,7 +2037,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, INIT_LIST_HEAD(&bo_va->valids); INIT_LIST_HEAD(&bo_va->invalids); - if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev))) { + if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) && + (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) { bo_va->is_xgmi = true; mutex_lock(&adev->vm_manager.lock_pstate); /* Power up XGMI if it can be potentially used */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index beac15bca526..91baf95212a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -303,6 +303,7 @@ struct amdgpu_vm_manager { const struct amdgpu_vm_pte_funcs *vm_pte_funcs; struct drm_sched_rq *vm_pte_rqs[AMDGPU_MAX_RINGS]; unsigned vm_pte_num_rqs; + struct amdgpu_ring *page_fault; /* partial resident texture handling */ spinlock_t prt_lock; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 336834797af3..a48c84c51775 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -24,6 +24,7 @@ 
#include <linux/list.h> #include "amdgpu.h" #include "amdgpu_xgmi.h" +#include "amdgpu_smu.h" static DEFINE_MUTEX(xgmi_mutex); @@ -216,7 +217,17 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate) if (hive->pstate == pstate) return 0; - /* Todo : sent the message to SMU for pstate change */ + + dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate); + + if (is_support_sw_smu(adev)) + ret = smu_set_xgmi_pstate(&adev->smu, pstate); + if (ret) + dev_err(adev->dev, + "XGMI: Set pstate failure on device %llx, hive %llx, ret %d", + adev->gmc.xgmi.node_id, + adev->gmc.xgmi.hive_id, ret); + return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 189599b694e8..d42808b05971 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -977,8 +977,8 @@ static int cik_sdma_sw_init(void *handle) r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, (i == 0) ? - AMDGPU_SDMA_IRQ_TRAP0 : - AMDGPU_SDMA_IRQ_TRAP1); + AMDGPU_SDMA_IRQ_INSTANCE0 : + AMDGPU_SDMA_IRQ_INSTANCE1); if (r) return r; } @@ -1114,7 +1114,7 @@ static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev, u32 sdma_cntl; switch (type) { - case AMDGPU_SDMA_IRQ_TRAP0: + case AMDGPU_SDMA_IRQ_INSTANCE0: switch (state) { case AMDGPU_IRQ_STATE_DISABLE: sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET); @@ -1130,7 +1130,7 @@ static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev, break; } break; - case AMDGPU_SDMA_IRQ_TRAP1: + case AMDGPU_SDMA_IRQ_INSTANCE1: switch (state) { case AMDGPU_IRQ_STATE_DISABLE: sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 3765d97b8512..ba67d1023264 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2455,8 +2455,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) /* disable CG */ WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0); - adev->gfx.rlc.funcs->reset(adev); - gfx_v9_0_init_pg(adev); if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { @@ -3568,7 +3566,7 @@ static int gfx_v9_0_ecc_late_init(void *handle) int r; if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) { - amdgpu_ras_feature_enable(adev, &ras_block, 0); + amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0); return 0; } @@ -3581,7 +3579,7 @@ static int gfx_v9_0_ecc_late_init(void *handle) **ras_if = ras_block; - r = amdgpu_ras_feature_enable(adev, *ras_if, 1); + r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); if (r) goto feature; @@ -4840,10 +4838,16 @@ static int gfx_v9_0_cp_ecc_error_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { + struct ras_common_if *ras_if = adev->gfx.ras_if; struct ras_dispatch_if ih_data = { - .head = *adev->gfx.ras_if, .entry = entry, }; + + if (!ras_if) + return 0; + + ih_data.head = *ras_if; + DRM_ERROR("CP ECC ERROR IRQ\n"); amdgpu_ras_interrupt_dispatch(adev, &ih_data); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 404875147ec3..3fd79e07944d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -248,10 +248,16 @@ static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { + struct ras_common_if *ras_if = adev->gmc.ras_if; struct ras_dispatch_if ih_data = { - .head = *adev->gmc.ras_if, .entry 
= entry, }; + + if (!ras_if) + return 0; + + ih_data.head = *ras_if; + amdgpu_ras_interrupt_dispatch(adev, &ih_data); return 0; } @@ -676,7 +682,7 @@ static int gmc_v9_0_ecc_late_init(void *handle) int r; if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) { - amdgpu_ras_feature_enable(adev, &ras_block, 0); + amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0); return 0; } /* handle resume path. */ @@ -689,7 +695,7 @@ static int gmc_v9_0_ecc_late_init(void *handle) **ras_if = ras_block; - r = amdgpu_ras_feature_enable(adev, *ras_if, 1); + r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); if (r) goto feature; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 73851ebb3833..8dbad496b29f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev, xgpu_ai_mailbox_set_valid(adev, false); } +static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf) +{ + int r = 0; + u32 req, val, size; + + if (!amdgim_is_hwperf(adev) || buf == NULL) + return -EBADRQC; + + switch(type) { + case PP_SCLK: + req = IDH_IRQ_GET_PP_SCLK; + break; + case PP_MCLK: + req = IDH_IRQ_GET_PP_MCLK; + break; + default: + return -EBADRQC; + } + + mutex_lock(&adev->virt.dpm_mutex); + + xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0); + + r = xgpu_ai_poll_msg(adev, IDH_SUCCESS); + if (!r && adev->fw_vram_usage.va != NULL) { + val = RREG32_NO_KIQ( + SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1)); + size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) + + val), PAGE_SIZE); + + if (size < PAGE_SIZE) + strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val)); + else + size = 0; + + r = size; + goto out; + } + + r = xgpu_ai_poll_msg(adev, IDH_FAIL); + if(r) + pr_info("%s DPM request failed", + (type == PP_SCLK)? 
"SCLK" : "MCLK"); + +out: + mutex_unlock(&adev->virt.dpm_mutex); + return r; +} + +static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level) +{ + int r = 0; + u32 req = IDH_IRQ_FORCE_DPM_LEVEL; + + if (!amdgim_is_hwperf(adev)) + return -EBADRQC; + + mutex_lock(&adev->virt.dpm_mutex); + xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0); + + r = xgpu_ai_poll_msg(adev, IDH_SUCCESS); + if (!r) + goto out; + + r = xgpu_ai_poll_msg(adev, IDH_FAIL); + if (!r) + pr_info("DPM request failed"); + else + pr_info("Mailbox is broken"); + +out: + mutex_unlock(&adev->virt.dpm_mutex); + return r; +} + static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, enum idh_request req) { @@ -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = { .reset_gpu = xgpu_ai_request_reset, .wait_reset = NULL, .trans_msg = xgpu_ai_mailbox_trans_msg, + .get_pp_clk = xgpu_ai_get_pp_clk, + .force_dpm_level = xgpu_ai_force_dpm_level, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h index b4a9ceea334b..39d151b79153 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h @@ -35,6 +35,10 @@ enum idh_request { IDH_REL_GPU_FINI_ACCESS, IDH_REQ_GPU_RESET_ACCESS, + IDH_IRQ_FORCE_DPM_LEVEL = 10, + IDH_IRQ_GET_PP_SCLK, + IDH_IRQ_GET_PP_MCLK, + IDH_LOG_VF_ERROR = 200, }; @@ -43,6 +47,8 @@ enum idh_event { IDH_READY_TO_ACCESS_GPU, IDH_FLR_NOTIFICATION, IDH_FLR_NOTIFICATION_CMPL, + IDH_SUCCESS, + IDH_FAIL, IDH_EVENT_MAX }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index 6a0fcd67662a..aef9d059ae52 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -515,7 +515,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work) /* wait until RCV_MSG become 3 */ if (xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) { - pr_err("failed to recieve FLR_CMPL\n"); + pr_err("failed to receive FLR_CMPL\n"); return; } diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 2b3429d90690..b91df7bd1d98 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -674,7 +674,7 @@ static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp, for (i = 0; i < topology_info_input->num_nodes; i++) { topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; - topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled; + topology_info_input->nodes[i].is_sharing_enabled = 1; topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index cca3552b36ed..36196372e8db 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -870,8 +870,8 @@ static int sdma_v2_4_sw_init(void *handle) r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, (i == 0) ? 
- AMDGPU_SDMA_IRQ_TRAP0 : - AMDGPU_SDMA_IRQ_TRAP1); + AMDGPU_SDMA_IRQ_INSTANCE0 : + AMDGPU_SDMA_IRQ_INSTANCE1); if (r) return r; } @@ -1006,7 +1006,7 @@ static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev, u32 sdma_cntl; switch (type) { - case AMDGPU_SDMA_IRQ_TRAP0: + case AMDGPU_SDMA_IRQ_INSTANCE0: switch (state) { case AMDGPU_IRQ_STATE_DISABLE: sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET); @@ -1022,7 +1022,7 @@ static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev, break; } break; - case AMDGPU_SDMA_IRQ_TRAP1: + case AMDGPU_SDMA_IRQ_INSTANCE1: switch (state) { case AMDGPU_IRQ_STATE_DISABLE: sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 0ce8331baeb2..6d39544e7829 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -1154,8 +1154,8 @@ static int sdma_v3_0_sw_init(void *handle) r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, (i == 0) ? - AMDGPU_SDMA_IRQ_TRAP0 : - AMDGPU_SDMA_IRQ_TRAP1); + AMDGPU_SDMA_IRQ_INSTANCE0 : + AMDGPU_SDMA_IRQ_INSTANCE1); if (r) return r; } @@ -1340,7 +1340,7 @@ static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev, u32 sdma_cntl; switch (type) { - case AMDGPU_SDMA_IRQ_TRAP0: + case AMDGPU_SDMA_IRQ_INSTANCE0: switch (state) { case AMDGPU_IRQ_STATE_DISABLE: sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET); @@ -1356,7 +1356,7 @@ static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev, break; } break; - case AMDGPU_SDMA_IRQ_TRAP1: + case AMDGPU_SDMA_IRQ_INSTANCE1: switch (state) { case AMDGPU_IRQ_STATE_DISABLE: sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 8691b621148e..9c88ce513d78 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -156,7 +156,6 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] = SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), - SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xFE000000, 0x00000000), }; static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = { @@ -186,7 +185,6 @@ static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = { SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0), - SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xFE000000, 0x00000000), }; static const struct soc15_reg_golden golden_settings_sdma_rv1[] = @@ -851,7 +849,7 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i) wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL); wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, - F32_POLL_ENABLE, amdgpu_sriov_vf(adev)); + F32_POLL_ENABLE, amdgpu_sriov_vf(adev)? 
1 : 0); WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl); /* enable DMA RB */ @@ -942,7 +940,7 @@ static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i) wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL); wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_PAGE_RB_WPTR_POLL_CNTL, - F32_POLL_ENABLE, amdgpu_sriov_vf(adev)); + F32_POLL_ENABLE, amdgpu_sriov_vf(adev)? 1 : 0); WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl); /* enable DMA RB */ @@ -1518,7 +1516,7 @@ static int sdma_v4_0_late_init(void *handle) int r; if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) { - amdgpu_ras_feature_enable(adev, &ras_block, 0); + amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0); return 0; } @@ -1532,7 +1530,7 @@ static int sdma_v4_0_late_init(void *handle) **ras_if = ras_block; - r = amdgpu_ras_feature_enable(adev, *ras_if, 1); + r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); if (r) goto feature; @@ -1551,13 +1549,13 @@ static int sdma_v4_0_late_init(void *handle) if (r) goto sysfs; resume: - r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_ECC0); + r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0); if (r) goto irq; - r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_ECC1); + r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE1); if (r) { - amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_ECC0); + amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0); goto irq; } @@ -1621,8 +1619,8 @@ static int sdma_v4_0_sw_init(void *handle) r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, (i == 0) ? - AMDGPU_SDMA_IRQ_TRAP0 : - AMDGPU_SDMA_IRQ_TRAP1); + AMDGPU_SDMA_IRQ_INSTANCE0 : + AMDGPU_SDMA_IRQ_INSTANCE1); if (r) return r; @@ -1641,8 +1639,8 @@ static int sdma_v4_0_sw_init(void *handle) r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, (i == 0) ? - AMDGPU_SDMA_IRQ_TRAP0 : - AMDGPU_SDMA_IRQ_TRAP1); + AMDGPU_SDMA_IRQ_INSTANCE0 : + AMDGPU_SDMA_IRQ_INSTANCE1); if (r) return r; } @@ -1709,8 +1707,8 @@ static int sdma_v4_0_hw_fini(void *handle) if (amdgpu_sriov_vf(adev)) return 0; - amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_ECC0); - amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_ECC1); + amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0); + amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE1); sdma_v4_0_ctx_switch_enable(adev, false); sdma_v4_0_enable(adev, false); @@ -1780,13 +1778,12 @@ static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - unsigned int instance = (type == AMDGPU_SDMA_IRQ_TRAP0) ? 0 : 1; u32 sdma_cntl; - sdma_cntl = RREG32_SDMA(instance, mmSDMA0_CNTL); + sdma_cntl = RREG32_SDMA(type, mmSDMA0_CNTL); sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, state == AMDGPU_IRQ_STATE_ENABLE ? 
1 : 0); - WREG32_SDMA(instance, mmSDMA0_CNTL, sdma_cntl); + WREG32_SDMA(type, mmSDMA0_CNTL, sdma_cntl); return 0; } @@ -1866,10 +1863,16 @@ static int sdma_v4_0_process_ecc_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { + struct ras_common_if *ras_if = adev->sdma.ras_if; struct ras_dispatch_if ih_data = { - .head = *adev->sdma.ras_if, .entry = entry, }; + + if (!ras_if) + return 0; + + ih_data.head = *ras_if; + amdgpu_ras_interrupt_dispatch(adev, &ih_data); return 0; } @@ -1908,7 +1911,7 @@ static int sdma_v4_0_set_ecc_irq_state(struct amdgpu_device *adev, { u32 sdma_edc_config; - u32 reg_offset = (type == AMDGPU_SDMA_IRQ_ECC0) ? + u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ? sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_EDC_CONFIG) : sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_EDC_CONFIG); @@ -2292,6 +2295,7 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev) &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; } adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances - 1; + adev->vm_manager.page_fault = &adev->sdma.instance[0].page; } else { for (i = 0; i < adev->sdma.num_instances; i++) { sched = &adev->sdma.instance[i].ring.sched; diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index f15f196684ba..3eeefd40dae0 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -503,8 +503,8 @@ static int si_dma_sw_init(void *handle) r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, (i == 0) ? - AMDGPU_SDMA_IRQ_TRAP0 : - AMDGPU_SDMA_IRQ_TRAP1); + AMDGPU_SDMA_IRQ_INSTANCE0 : + AMDGPU_SDMA_IRQ_INSTANCE1); if (r) return r; } @@ -591,7 +591,7 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev, u32 sdma_cntl; switch (type) { - case AMDGPU_SDMA_IRQ_TRAP0: + case AMDGPU_SDMA_IRQ_INSTANCE0: switch (state) { case AMDGPU_IRQ_STATE_DISABLE: sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET); @@ -607,7 +607,7 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev, break; } break; - case AMDGPU_SDMA_IRQ_TRAP1: + case AMDGPU_SDMA_IRQ_INSTANCE1: switch (state) { case AMDGPU_IRQ_STATE_DISABLE: sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index bdb5ad93990d..4900e4958dec 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -470,6 +470,12 @@ static int soc15_asic_reset(struct amdgpu_device *adev) case CHIP_VEGA12: soc15_asic_get_baco_capability(adev, &baco_reset); break; + case CHIP_VEGA20: + if (adev->psp.sos_fw_version >= 0x80067) + soc15_asic_get_baco_capability(adev, &baco_reset); + else + baco_reset = false; + break; default: baco_reset = false; break; @@ -895,7 +901,8 @@ static int soc15_common_early_init(void *handle) adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; } else if (adev->pdev->device == 0x15d8) { - adev->cg_flags = AMD_CG_SUPPORT_GFX_MGLS | + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS | diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index bed78a778e3f..40363ca6c5f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -283,7 +283,7 @@ static int vce_v2_0_stop(struct amdgpu_device *adev) } if (vce_v2_0_wait_for_idle(adev)) { - DRM_INFO("VCE is busy, Can't set clock gateing"); + 
DRM_INFO("VCE is busy, Can't set clock gating"); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index aadc3e66ebd7..f3f5938430d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -382,6 +382,7 @@ static int vce_v4_0_start(struct amdgpu_device *adev) static int vce_v4_0_stop(struct amdgpu_device *adev) { + /* Disable VCPU */ WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 0, ~0x200001); /* hold on ECPU */ @@ -389,8 +390,8 @@ static int vce_v4_0_stop(struct amdgpu_device *adev) VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - /* clear BUSY flag */ - WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0, ~VCE_STATUS__JOB_BUSY_MASK); + /* clear VCE_STATUS */ + WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0); /* Set Clock-Gating off */ /* if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) @@ -922,6 +923,7 @@ static int vce_v4_0_set_clockgating_state(void *handle, return 0; } +#endif static int vce_v4_0_set_powergating_state(void *handle, enum amd_powergating_state state) @@ -935,16 +937,11 @@ static int vce_v4_0_set_powergating_state(void *handle, */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE)) - return 0; - if (state == AMD_PG_STATE_GATE) - /* XXX do we need a vce_v4_0_stop()? */ - return 0; + return vce_v4_0_stop(adev); else return vce_v4_0_start(adev); } -#endif static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, uint32_t flags) @@ -1059,7 +1056,7 @@ const struct amd_ip_funcs vce_v4_0_ip_funcs = { .soft_reset = NULL /* vce_v4_0_soft_reset */, .post_soft_reset = NULL /* vce_v4_0_post_soft_reset */, .set_clockgating_state = vce_v4_0_set_clockgating_state, - .set_powergating_state = NULL /* vce_v4_0_set_powergating_state */, + .set_powergating_state = vce_v4_0_set_powergating_state, }; static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index b3cdbf79f47b..c1e4d44d6137 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -320,6 +320,7 @@ static const struct kfd_deviceid supported_devices[] = { { 0x9876, &carrizo_device_info }, /* Carrizo */ { 0x9877, &carrizo_device_info }, /* Carrizo */ { 0x15DD, &raven_device_info }, /* Raven */ + { 0x15D8, &raven_device_info }, /* Raven */ #endif { 0x67A0, &hawaii_device_info }, /* Hawaii */ { 0x67A1, &hawaii_device_info }, /* Hawaii */ @@ -493,9 +494,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, { unsigned int size; - kfd->mec_fw_version = kfd->kfd2kgd->get_fw_version(kfd->kgd, + kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd, KGD_ENGINE_MEC1); - kfd->sdma_fw_version = kfd->kfd2kgd->get_fw_version(kfd->kgd, + kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd, KGD_ENGINE_SDMA1); kfd->shared_resources = *gpu_resources; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 380a7f9bd55d..1854506e3e8f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -111,7 +111,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector); static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, struct drm_plane *plane, - unsigned long possible_crtcs); + unsigned long 
possible_crtcs, + const struct dc_plane_cap *plane_cap); static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, struct drm_plane *plane, uint32_t link_index); @@ -251,12 +252,22 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev, return NULL; } +static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state) +{ + return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE || + dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; +} + static void dm_pflip_high_irq(void *interrupt_params) { struct amdgpu_crtc *amdgpu_crtc; struct common_irq_params *irq_params = interrupt_params; struct amdgpu_device *adev = irq_params->adev; unsigned long flags; + struct drm_pending_vblank_event *e; + struct dm_crtc_state *acrtc_state; + uint32_t vpos, hpos, v_blank_start, v_blank_end; + bool vrr_active; amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP); @@ -279,26 +290,116 @@ static void dm_pflip_high_irq(void *interrupt_params) return; } - /* Update to correct count(s) if racing with vblank irq */ - amdgpu_crtc->last_flip_vblank = drm_crtc_accurate_vblank_count(&amdgpu_crtc->base); + /* page flip completed. */ + e = amdgpu_crtc->event; + amdgpu_crtc->event = NULL; - /* wake up userspace */ - if (amdgpu_crtc->event) { - drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event); + if (!e) + WARN_ON(1); - /* page flip completed. clean up */ - amdgpu_crtc->event = NULL; + acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state); + vrr_active = amdgpu_dm_vrr_active(acrtc_state); + + /* Fixed refresh rate, or VRR scanout position outside front-porch? */ + if (!vrr_active || + !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start, + &v_blank_end, &hpos, &vpos) || + (vpos < v_blank_start)) { + /* Update to correct count and vblank timestamp if racing with + * vblank irq. This also updates to the correct vblank timestamp + * even in VRR mode, as scanout is past the front-porch atm. + */ + drm_crtc_accurate_vblank_count(&amdgpu_crtc->base); - } else - WARN_ON(1); + /* Wake up userspace by sending the pageflip event with proper + * count and timestamp of vblank of flip completion. + */ + if (e) { + drm_crtc_send_vblank_event(&amdgpu_crtc->base, e); + + /* Event sent, so done with vblank for this flip */ + drm_crtc_vblank_put(&amdgpu_crtc->base); + } + } else if (e) { + /* VRR active and inside front-porch: vblank count and + * timestamp for pageflip event will only be up to date after + * drm_crtc_handle_vblank() has been executed from late vblank + * irq handler after start of back-porch (vline 0). We queue the + * pageflip event for send-out by drm_crtc_handle_vblank() with + * updated timestamp and count, once it runs after us. + * + * We need to open-code this instead of using the helper + * drm_crtc_arm_vblank_event(), as that helper would + * call drm_crtc_accurate_vblank_count(), which we must + * not call in VRR mode while we are in front-porch! + */ + + /* sequence will be replaced by real count during send-out. */ + e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base); + e->pipe = amdgpu_crtc->crtc_id; + + list_add_tail(&e->base.link, &adev->ddev->vblank_event_list); + e = NULL; + } + + /* Keep track of vblank of this flip for flip throttling. We use the + * cooked hw counter, as that one incremented at start of this vblank + * of pageflip completion, so last_flip_vblank is the forbidden count + * for queueing new pageflips if vsync + VRR is enabled. 
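
The heart of the reworked dm_pflip_high_irq() is the three-way test in this hunk: send the completion event immediately when the vblank count and timestamp are already valid, or queue it for the vupdate handler when VRR has scanout inside the stretched front-porch. A compilable sketch of just that decision, with simplified stand-in types rather than the driver's structures:

    #include <stdbool.h>
    #include <stdio.h>

    struct flip_disposition {
        bool send_now;          /* count/timestamp valid, send event now */
        bool queue_for_vupdate; /* defer until after front-porch ends */
    };

    static struct flip_disposition classify_flip(bool vrr_active,
                                                 bool have_scanout_pos,
                                                 int vpos, int v_blank_start)
    {
        struct flip_disposition d = { false, false };

        /* Mirrors the driver's test: fixed refresh, unknown scanout
         * position, or scanout not yet in the front-porch => send now. */
        if (!vrr_active || !have_scanout_pos || vpos < v_blank_start)
            d.send_now = true;
        else
            d.queue_for_vupdate = true;

        return d;
    }

    int main(void)
    {
        /* VRR active and scanout inside the front-porch: event is queued. */
        struct flip_disposition d = classify_flip(true, true, 1105, 1084);

        printf("send_now=%d queued=%d\n", d.send_now, d.queue_for_vupdate);
        return 0;
    }
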
+ */ + amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev, + amdgpu_crtc->crtc_id); amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; spin_unlock_irqrestore(&adev->ddev->event_lock, flags); - DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n", - __func__, amdgpu_crtc->crtc_id, amdgpu_crtc); + DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n", + amdgpu_crtc->crtc_id, amdgpu_crtc, + vrr_active, (int) !e); +} - drm_crtc_vblank_put(&amdgpu_crtc->base); +static void dm_vupdate_high_irq(void *interrupt_params) +{ + struct common_irq_params *irq_params = interrupt_params; + struct amdgpu_device *adev = irq_params->adev; + struct amdgpu_crtc *acrtc; + struct dm_crtc_state *acrtc_state; + unsigned long flags; + + acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE); + + if (acrtc) { + acrtc_state = to_dm_crtc_state(acrtc->base.state); + + DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id, + amdgpu_dm_vrr_active(acrtc_state)); + + /* Core vblank handling is done here after end of front-porch in + * vrr mode, as vblank timestamping will give valid results + * while now done after front-porch. This will also deliver + * page-flip completion events that have been queued to us + * if a pageflip happened inside front-porch. + */ + if (amdgpu_dm_vrr_active(acrtc_state)) { + drm_crtc_handle_vblank(&acrtc->base); + + /* BTR processing for pre-DCE12 ASICs */ + if (acrtc_state->stream && + adev->family < AMDGPU_FAMILY_AI) { + spin_lock_irqsave(&adev->ddev->event_lock, flags); + mod_freesync_handle_v_update( + adev->dm.freesync_module, + acrtc_state->stream, + &acrtc_state->vrr_params); + + dc_stream_adjust_vmin_vmax( + adev->dm.dc, + acrtc_state->stream, + &acrtc_state->vrr_params.adjust); + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + } + } + } } static void dm_crtc_high_irq(void *interrupt_params) @@ -307,18 +408,33 @@ static void dm_crtc_high_irq(void *interrupt_params) struct amdgpu_device *adev = irq_params->adev; struct amdgpu_crtc *acrtc; struct dm_crtc_state *acrtc_state; + unsigned long flags; acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK); if (acrtc) { - drm_crtc_handle_vblank(&acrtc->base); - amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); - acrtc_state = to_dm_crtc_state(acrtc->base.state); - if (acrtc_state->stream && + DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id, + amdgpu_dm_vrr_active(acrtc_state)); + + /* Core vblank handling at start of front-porch is only possible + * in non-vrr mode, as only there vblank timestamping will give + * valid results while done in front-porch. Otherwise defer it + * to dm_vupdate_high_irq after end of front-porch. + */ + if (!amdgpu_dm_vrr_active(acrtc_state)) + drm_crtc_handle_vblank(&acrtc->base); + + /* Following stuff must happen at start of vblank, for crc + * computation and below-the-range btr support in vrr mode. 
+ */ + amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); + + if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI && acrtc_state->vrr_params.supported && acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) { + spin_lock_irqsave(&adev->ddev->event_lock, flags); mod_freesync_handle_v_update( adev->dm.freesync_module, acrtc_state->stream, @@ -328,6 +444,7 @@ static void dm_crtc_high_irq(void *interrupt_params) adev->dm.dc, acrtc_state->stream, &acrtc_state->vrr_params.adjust); + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); } } } @@ -438,6 +555,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_feature_mask & DC_FBC_MASK) init_data.flags.fbc_support = true; + init_data.flags.power_down_display_on_boot = true; + /* Display Core create. */ adev->dm.dc = dc_create(&init_data); @@ -888,9 +1007,16 @@ static int dm_resume(void *handle) struct drm_plane *plane; struct drm_plane_state *new_plane_state; struct dm_plane_state *dm_new_plane_state; + struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); enum dc_connection_type new_connection_type = dc_connection_none; int i; + /* Recreate dc_state - DC invalidates it when setting power state to S3. */ + dc_release_state(dm_state->context); + dm_state->context = dc_create_state(dm->dc); + /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ + dc_resource_state_construct(dm->dc, dm_state->context); + /* power on hardware */ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); @@ -1433,6 +1559,27 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) dm_crtc_high_irq, c_irq_params); } + /* Use VUPDATE interrupt */ + for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) { + r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq); + if (r) { + DRM_ERROR("Failed to add vupdate irq id!\n"); + return r; + } + + int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, i, 0); + + c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_vupdate_high_irq, c_irq_params); + } + /* Use GRPH_PFLIP interrupt */ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { @@ -1518,6 +1665,34 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) dm_crtc_high_irq, c_irq_params); } + /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to + * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx + * to trigger at end of each vblank, regardless of state of the lock, + * matching DCE behaviour. 
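
Between them, dm_crtc_high_irq() and dm_vupdate_high_irq() split the core vblank work by refresh mode: fixed refresh handles it in the VBLANK irq at the start of the front-porch, while VRR defers it to VUPDATE once the porch has ended and timestamps are valid again. A minimal restatement of that dispatch rule:

    #include <stdbool.h>
    #include <stdio.h>

    enum dm_irq { DM_IRQ_VBLANK, DM_IRQ_VUPDATE };

    /* Returns true when this irq is the one that should run
     * drm_crtc_handle_vblank() for the current refresh mode. */
    static bool handles_core_vblank(enum dm_irq irq, bool vrr_active)
    {
        return (irq == DM_IRQ_VBLANK) ? !vrr_active : vrr_active;
    }

    int main(void)
    {
        printf("VRR: vblank irq handles=%d, vupdate irq handles=%d\n",
               handles_core_vblank(DM_IRQ_VBLANK, true),
               handles_core_vblank(DM_IRQ_VUPDATE, true));
        return 0;
    }
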
+ */ + for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT; + i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1; + i++) { + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq); + + if (r) { + DRM_ERROR("Failed to add vupdate irq id!\n"); + return r; + } + + int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, i, 0); + + c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_vupdate_high_irq, c_irq_params); + } + /* Use GRPH_PFLIP interrupt */ for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1; @@ -1629,17 +1804,16 @@ dm_atomic_duplicate_state(struct drm_private_obj *obj) __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base); - new_state->context = dc_create_state(); + old_state = to_dm_atomic_state(obj->state); + + if (old_state && old_state->context) + new_state->context = dc_copy_state(old_state->context); + if (!new_state->context) { kfree(new_state); return NULL; } - old_state = to_dm_atomic_state(obj->state); - if (old_state && old_state->context) - dc_resource_state_copy_construct(old_state->context, - new_state->context); - return &new_state->base; } @@ -1683,7 +1857,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) if (!state) return -ENOMEM; - state->context = dc_create_state(); + state->context = dc_create_state(adev->dm.dc); if (!state->context) { kfree(state); return -ENOMEM; @@ -1811,7 +1985,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm) static int initialize_plane(struct amdgpu_display_manager *dm, struct amdgpu_mode_info *mode_info, int plane_id, - enum drm_plane_type plane_type) + enum drm_plane_type plane_type, + const struct dc_plane_cap *plane_cap) { struct drm_plane *plane; unsigned long possible_crtcs; @@ -1834,7 +2009,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm, if (plane_id >= dm->dc->caps.max_streams) possible_crtcs = 0xff; - ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs); + ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap); if (ret) { DRM_ERROR("KMS: Failed to initialize plane\n"); @@ -1887,8 +2062,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) struct amdgpu_encoder *aencoder = NULL; struct amdgpu_mode_info *mode_info = &adev->mode_info; uint32_t link_cnt; - int32_t overlay_planes, primary_planes; + int32_t primary_planes; enum dc_connection_type new_connection_type = dc_connection_none; + const struct dc_plane_cap *plane; link_cnt = dm->dc->caps.max_links; if (amdgpu_dm_mode_config_init(dm->adev)) { @@ -1896,24 +2072,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) return -EINVAL; } - /* - * Determine the number of overlay planes supported. - * Only support DCN for now, and cap so we don't encourage - * userspace to use up all the planes. 
- */ - overlay_planes = 0; - - for (i = 0; i < dm->dc->caps.max_planes; ++i) { - struct dc_plane_cap *plane = &dm->dc->caps.planes[i]; - - if (plane->type == DC_PLANE_TYPE_DCN_UNIVERSAL && - plane->blends_with_above && plane->blends_with_below && - plane->supports_argb8888) - overlay_planes += 1; - } - - overlay_planes = min(overlay_planes, 1); - /* There is one primary plane per CRTC */ primary_planes = dm->dc->caps.max_streams; ASSERT(primary_planes <= AMDGPU_MAX_PLANES); @@ -1923,8 +2081,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) * Order is reversed to match iteration order in atomic check. */ for (i = (primary_planes - 1); i >= 0; i--) { + plane = &dm->dc->caps.planes[i]; + if (initialize_plane(dm, mode_info, i, - DRM_PLANE_TYPE_PRIMARY)) { + DRM_PLANE_TYPE_PRIMARY, plane)) { DRM_ERROR("KMS: Failed to initialize primary plane\n"); goto fail; } @@ -1935,13 +2095,30 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) * These planes have a higher DRM index than the primary planes since * they should be considered as having a higher z-order. * Order is reversed to match iteration order in atomic check. + * + * Only support DCN for now, and only expose one so we don't encourage + * userspace to use up all the pipes. */ - for (i = (overlay_planes - 1); i >= 0; i--) { + for (i = 0; i < dm->dc->caps.max_planes; ++i) { + struct dc_plane_cap *plane = &dm->dc->caps.planes[i]; + + if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL) + continue; + + if (!plane->blends_with_above || !plane->blends_with_below) + continue; + + if (!plane->pixel_format_support.argb8888) + continue; + if (initialize_plane(dm, NULL, primary_planes + i, - DRM_PLANE_TYPE_OVERLAY)) { + DRM_PLANE_TYPE_OVERLAY, plane)) { DRM_ERROR("KMS: Failed to initialize overlay plane\n"); goto fail; } + + /* Only create one overlay plane. */ + break; } for (i = 0; i < dm->dc->caps.max_streams; i++) @@ -2231,56 +2408,63 @@ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { .destroy = amdgpu_dm_encoder_destroy, }; -static bool fill_rects_from_plane_state(const struct drm_plane_state *state, - struct dc_plane_state *plane_state) + +static int fill_dc_scaling_info(const struct drm_plane_state *state, + struct dc_scaling_info *scaling_info) { - plane_state->src_rect.x = state->src_x >> 16; - plane_state->src_rect.y = state->src_y >> 16; - /* we ignore the mantissa for now and do not deal with floating pixels :( */ - plane_state->src_rect.width = state->src_w >> 16; + int scale_w, scale_h; - if (plane_state->src_rect.width == 0) - return false; + memset(scaling_info, 0, sizeof(*scaling_info)); - plane_state->src_rect.height = state->src_h >> 16; - if (plane_state->src_rect.height == 0) - return false; + /* Source is fixed 16.16 but we ignore mantissa for now... 
*/ + scaling_info->src_rect.x = state->src_x >> 16; + scaling_info->src_rect.y = state->src_y >> 16; - plane_state->dst_rect.x = state->crtc_x; - plane_state->dst_rect.y = state->crtc_y; + scaling_info->src_rect.width = state->src_w >> 16; + if (scaling_info->src_rect.width == 0) + return -EINVAL; + + scaling_info->src_rect.height = state->src_h >> 16; + if (scaling_info->src_rect.height == 0) + return -EINVAL; + + scaling_info->dst_rect.x = state->crtc_x; + scaling_info->dst_rect.y = state->crtc_y; if (state->crtc_w == 0) - return false; + return -EINVAL; - plane_state->dst_rect.width = state->crtc_w; + scaling_info->dst_rect.width = state->crtc_w; if (state->crtc_h == 0) - return false; + return -EINVAL; - plane_state->dst_rect.height = state->crtc_h; + scaling_info->dst_rect.height = state->crtc_h; - plane_state->clip_rect = plane_state->dst_rect; + /* DRM doesn't specify clipping on destination output. */ + scaling_info->clip_rect = scaling_info->dst_rect; - switch (state->rotation & DRM_MODE_ROTATE_MASK) { - case DRM_MODE_ROTATE_0: - plane_state->rotation = ROTATION_ANGLE_0; - break; - case DRM_MODE_ROTATE_90: - plane_state->rotation = ROTATION_ANGLE_90; - break; - case DRM_MODE_ROTATE_180: - plane_state->rotation = ROTATION_ANGLE_180; - break; - case DRM_MODE_ROTATE_270: - plane_state->rotation = ROTATION_ANGLE_270; - break; - default: - plane_state->rotation = ROTATION_ANGLE_0; - break; - } + /* TODO: Validate scaling per-format with DC plane caps */ + scale_w = scaling_info->dst_rect.width * 1000 / + scaling_info->src_rect.width; - return true; + if (scale_w < 250 || scale_w > 16000) + return -EINVAL; + + scale_h = scaling_info->dst_rect.height * 1000 / + scaling_info->src_rect.height; + + if (scale_h < 250 || scale_h > 16000) + return -EINVAL; + + /* + * The "scaling_quality" can be ignored for now, quality = 0 has DC + * assume reasonable defaults based on the format. + */ + + return 0; } + static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb, uint64_t *tiling_flags) { @@ -2309,12 +2493,16 @@ static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags) return offset ? 
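
fill_dc_scaling_info() becomes the one place both atomic check and the commit path derive DC scaling state from a DRM plane state: it truncates the 16.16 fixed-point source rectangle and rejects per-axis scaling ratios outside roughly 0.25x-16x, computed in thousandths. The same arithmetic as a standalone sketch:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Standalone restatement of the ratio check; not the driver's types. */
    static int check_scaling(uint32_t src_w_1616, uint32_t src_h_1616,
                             int dst_w, int dst_h)
    {
        int src_w = src_w_1616 >> 16;   /* drop the 16-bit mantissa */
        int src_h = src_h_1616 >> 16;
        int scale_w, scale_h;

        if (!src_w || !src_h || !dst_w || !dst_h)
            return -EINVAL;

        /* 1000 == 1.0x; 250 == 0.25x shrink, 16000 == 16x zoom. */
        scale_w = dst_w * 1000 / src_w;
        scale_h = dst_h * 1000 / src_h;
        if (scale_w < 250 || scale_w > 16000 ||
            scale_h < 250 || scale_h > 16000)
            return -EINVAL;

        return 0;
    }

    int main(void)
    {
        /* 1920x1080 source (16.16) to 640x360: 333/1000 each axis, ok. */
        printf("%d\n", check_scaling(1920u << 16, 1080u << 16, 640, 360));
        return 0;
    }
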
(address + offset * 256) : 0; } -static int fill_plane_dcc_attributes(struct amdgpu_device *adev, - const struct amdgpu_framebuffer *afb, - const struct dc_plane_state *plane_state, - struct dc_plane_dcc_param *dcc, - struct dc_plane_address *address, - uint64_t info) +static int +fill_plane_dcc_attributes(struct amdgpu_device *adev, + const struct amdgpu_framebuffer *afb, + const enum surface_pixel_format format, + const enum dc_rotation_angle rotation, + const union plane_size *plane_size, + const union dc_tiling_info *tiling_info, + const uint64_t info, + struct dc_plane_dcc_param *dcc, + struct dc_plane_address *address) { struct dc *dc = adev->dm.dc; struct dc_dcc_surface_param input; @@ -2329,24 +2517,20 @@ static int fill_plane_dcc_attributes(struct amdgpu_device *adev, if (!offset) return 0; - if (plane_state->address.type != PLN_ADDR_TYPE_GRAPHICS) + if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) return 0; if (!dc->cap_funcs.get_dcc_compression_cap) return -EINVAL; - input.format = plane_state->format; - input.surface_size.width = - plane_state->plane_size.grph.surface_size.width; - input.surface_size.height = - plane_state->plane_size.grph.surface_size.height; - input.swizzle_mode = plane_state->tiling_info.gfx9.swizzle; + input.format = format; + input.surface_size.width = plane_size->grph.surface_size.width; + input.surface_size.height = plane_size->grph.surface_size.height; + input.swizzle_mode = tiling_info->gfx9.swizzle; - if (plane_state->rotation == ROTATION_ANGLE_0 || - plane_state->rotation == ROTATION_ANGLE_180) + if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180) input.scan = SCAN_DIRECTION_HORIZONTAL; - else if (plane_state->rotation == ROTATION_ANGLE_90 || - plane_state->rotation == ROTATION_ANGLE_270) + else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) input.scan = SCAN_DIRECTION_VERTICAL; if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output)) @@ -2371,28 +2555,54 @@ static int fill_plane_dcc_attributes(struct amdgpu_device *adev, } static int -fill_plane_tiling_attributes(struct amdgpu_device *adev, +fill_plane_buffer_attributes(struct amdgpu_device *adev, const struct amdgpu_framebuffer *afb, - const struct dc_plane_state *plane_state, + const enum surface_pixel_format format, + const enum dc_rotation_angle rotation, + const uint64_t tiling_flags, union dc_tiling_info *tiling_info, + union plane_size *plane_size, struct dc_plane_dcc_param *dcc, - struct dc_plane_address *address, - uint64_t tiling_flags) + struct dc_plane_address *address) { + const struct drm_framebuffer *fb = &afb->base; int ret; memset(tiling_info, 0, sizeof(*tiling_info)); + memset(plane_size, 0, sizeof(*plane_size)); memset(dcc, 0, sizeof(*dcc)); memset(address, 0, sizeof(*address)); - if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { + if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { + plane_size->grph.surface_size.x = 0; + plane_size->grph.surface_size.y = 0; + plane_size->grph.surface_size.width = fb->width; + plane_size->grph.surface_size.height = fb->height; + plane_size->grph.surface_pitch = + fb->pitches[0] / fb->format->cpp[0]; + address->type = PLN_ADDR_TYPE_GRAPHICS; address->grph.addr.low_part = lower_32_bits(afb->address); address->grph.addr.high_part = upper_32_bits(afb->address); } else { - const struct drm_framebuffer *fb = &afb->base; uint64_t chroma_addr = afb->address + fb->offsets[1]; + plane_size->video.luma_size.x = 0; + plane_size->video.luma_size.y = 0; + plane_size->video.luma_size.width = fb->width; + 
plane_size->video.luma_size.height = fb->height; + plane_size->video.luma_pitch = + fb->pitches[0] / fb->format->cpp[0]; + + plane_size->video.chroma_size.x = 0; + plane_size->video.chroma_size.y = 0; + /* TODO: set these based on surface format */ + plane_size->video.chroma_size.width = fb->width / 2; + plane_size->video.chroma_size.height = fb->height / 2; + + plane_size->video.chroma_pitch = + fb->pitches[1] / fb->format->cpp[1]; + address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; address->video_progressive.luma_addr.low_part = lower_32_bits(afb->address); @@ -2453,8 +2663,9 @@ fill_plane_tiling_attributes(struct amdgpu_device *adev, AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE); tiling_info->gfx9.shaderEnable = 1; - ret = fill_plane_dcc_attributes(adev, afb, plane_state, dcc, - address, tiling_flags); + ret = fill_plane_dcc_attributes(adev, afb, format, rotation, + plane_size, tiling_info, + tiling_flags, dcc, address); if (ret) return ret; } @@ -2462,166 +2673,223 @@ fill_plane_tiling_attributes(struct amdgpu_device *adev, return 0; } -static int fill_plane_attributes_from_fb(struct amdgpu_device *adev, - struct dc_plane_state *plane_state, - const struct amdgpu_framebuffer *amdgpu_fb) +static void +fill_blending_from_plane_state(const struct drm_plane_state *plane_state, + bool *per_pixel_alpha, bool *global_alpha, + int *global_alpha_value) { - uint64_t tiling_flags; - const struct drm_framebuffer *fb = &amdgpu_fb->base; - int ret = 0; - struct drm_format_name_buf format_name; + *per_pixel_alpha = false; + *global_alpha = false; + *global_alpha_value = 0xff; - ret = get_fb_info( - amdgpu_fb, - &tiling_flags); + if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY) + return; - if (ret) - return ret; + if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) { + static const uint32_t alpha_formats[] = { + DRM_FORMAT_ARGB8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_ABGR8888, + }; + uint32_t format = plane_state->fb->format->format; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) { + if (format == alpha_formats[i]) { + *per_pixel_alpha = true; + break; + } + } + } + + if (plane_state->alpha < 0xffff) { + *global_alpha = true; + *global_alpha_value = plane_state->alpha >> 8; + } +} + +static int +fill_plane_color_attributes(const struct drm_plane_state *plane_state, + const enum surface_pixel_format format, + enum dc_color_space *color_space) +{ + bool full_range; + + *color_space = COLOR_SPACE_SRGB; + + /* DRM color properties only affect non-RGB formats. 
*/ + if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) + return 0; + + full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE); + + switch (plane_state->color_encoding) { + case DRM_COLOR_YCBCR_BT601: + if (full_range) + *color_space = COLOR_SPACE_YCBCR601; + else + *color_space = COLOR_SPACE_YCBCR601_LIMITED; + break; + + case DRM_COLOR_YCBCR_BT709: + if (full_range) + *color_space = COLOR_SPACE_YCBCR709; + else + *color_space = COLOR_SPACE_YCBCR709_LIMITED; + break; + + case DRM_COLOR_YCBCR_BT2020: + if (full_range) + *color_space = COLOR_SPACE_2020_YCBCR; + else + return -EINVAL; + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int +fill_dc_plane_info_and_addr(struct amdgpu_device *adev, + const struct drm_plane_state *plane_state, + const uint64_t tiling_flags, + struct dc_plane_info *plane_info, + struct dc_plane_address *address) +{ + const struct drm_framebuffer *fb = plane_state->fb; + const struct amdgpu_framebuffer *afb = + to_amdgpu_framebuffer(plane_state->fb); + struct drm_format_name_buf format_name; + int ret; + + memset(plane_info, 0, sizeof(*plane_info)); switch (fb->format->format) { case DRM_FORMAT_C8: - plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; + plane_info->format = + SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; break; case DRM_FORMAT_RGB565: - plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; + plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; break; case DRM_FORMAT_XRGB8888: case DRM_FORMAT_ARGB8888: - plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; + plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; break; case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_ARGB2101010: - plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; + plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; break; case DRM_FORMAT_XBGR2101010: case DRM_FORMAT_ABGR2101010: - plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; + plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; break; case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ABGR8888: - plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888; + plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888; break; case DRM_FORMAT_NV21: - plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; + plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; break; case DRM_FORMAT_NV12: - plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; + plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; break; default: - DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(fb->format->format, &format_name)); + DRM_ERROR( + "Unsupported screen format %s\n", + drm_get_format_name(fb->format->format, &format_name)); return -EINVAL; } - memset(&plane_state->address, 0, sizeof(plane_state->address)); - - if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { - plane_state->plane_size.grph.surface_size.x = 0; - plane_state->plane_size.grph.surface_size.y = 0; - plane_state->plane_size.grph.surface_size.width = fb->width; - plane_state->plane_size.grph.surface_size.height = fb->height; - plane_state->plane_size.grph.surface_pitch = - fb->pitches[0] / fb->format->cpp[0]; - /* TODO: unhardcode */ - plane_state->color_space = COLOR_SPACE_SRGB; - - } else { - plane_state->plane_size.video.luma_size.x = 0; - plane_state->plane_size.video.luma_size.y = 0; - plane_state->plane_size.video.luma_size.width = fb->width; - plane_state->plane_size.video.luma_size.height = fb->height; - plane_state->plane_size.video.luma_pitch = - 
fb->pitches[0] / fb->format->cpp[0]; - - plane_state->plane_size.video.chroma_size.x = 0; - plane_state->plane_size.video.chroma_size.y = 0; - /* TODO: set these based on surface format */ - plane_state->plane_size.video.chroma_size.width = fb->width / 2; - plane_state->plane_size.video.chroma_size.height = fb->height / 2; - - plane_state->plane_size.video.chroma_pitch = - fb->pitches[1] / fb->format->cpp[1]; - - /* TODO: unhardcode */ - plane_state->color_space = COLOR_SPACE_YCBCR709; + switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { + case DRM_MODE_ROTATE_0: + plane_info->rotation = ROTATION_ANGLE_0; + break; + case DRM_MODE_ROTATE_90: + plane_info->rotation = ROTATION_ANGLE_90; + break; + case DRM_MODE_ROTATE_180: + plane_info->rotation = ROTATION_ANGLE_180; + break; + case DRM_MODE_ROTATE_270: + plane_info->rotation = ROTATION_ANGLE_270; + break; + default: + plane_info->rotation = ROTATION_ANGLE_0; + break; } - fill_plane_tiling_attributes(adev, amdgpu_fb, plane_state, - &plane_state->tiling_info, - &plane_state->dcc, - &plane_state->address, - tiling_flags); - - plane_state->visible = true; - plane_state->scaling_quality.h_taps_c = 0; - plane_state->scaling_quality.v_taps_c = 0; - - /* is this needed? is plane_state zeroed at allocation? */ - plane_state->scaling_quality.h_taps = 0; - plane_state->scaling_quality.v_taps = 0; - plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE; - - return ret; - -} + plane_info->visible = true; + plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE; -static void -fill_blending_from_plane_state(struct drm_plane_state *plane_state, - const struct dc_plane_state *dc_plane_state, - bool *per_pixel_alpha, bool *global_alpha, - int *global_alpha_value) -{ - *per_pixel_alpha = false; - *global_alpha = false; - *global_alpha_value = 0xff; - - if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY) - return; + ret = fill_plane_color_attributes(plane_state, plane_info->format, + &plane_info->color_space); + if (ret) + return ret; - if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) { - static const uint32_t alpha_formats[] = { - DRM_FORMAT_ARGB8888, - DRM_FORMAT_RGBA8888, - DRM_FORMAT_ABGR8888, - }; - uint32_t format = plane_state->fb->format->format; - unsigned int i; + ret = fill_plane_buffer_attributes(adev, afb, plane_info->format, + plane_info->rotation, tiling_flags, + &plane_info->tiling_info, + &plane_info->plane_size, + &plane_info->dcc, address); + if (ret) + return ret; - for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) { - if (format == alpha_formats[i]) { - *per_pixel_alpha = true; - break; - } - } - } + fill_blending_from_plane_state( + plane_state, &plane_info->per_pixel_alpha, + &plane_info->global_alpha, &plane_info->global_alpha_value); - if (plane_state->alpha < 0xffff) { - *global_alpha = true; - *global_alpha_value = plane_state->alpha >> 8; - } + return 0; } -static int fill_plane_attributes(struct amdgpu_device *adev, - struct dc_plane_state *dc_plane_state, - struct drm_plane_state *plane_state, - struct drm_crtc_state *crtc_state) +static int fill_dc_plane_attributes(struct amdgpu_device *adev, + struct dc_plane_state *dc_plane_state, + struct drm_plane_state *plane_state, + struct drm_crtc_state *crtc_state) { const struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(plane_state->fb); - const struct drm_crtc *crtc = plane_state->crtc; - int ret = 0; + struct dc_scaling_info scaling_info; + struct dc_plane_info plane_info; + uint64_t tiling_flags; + int ret; - if (!fill_rects_from_plane_state(plane_state, 
dc_plane_state)) - return -EINVAL; + ret = fill_dc_scaling_info(plane_state, &scaling_info); + if (ret) + return ret; - ret = fill_plane_attributes_from_fb( - crtc->dev->dev_private, - dc_plane_state, - amdgpu_fb); + dc_plane_state->src_rect = scaling_info.src_rect; + dc_plane_state->dst_rect = scaling_info.dst_rect; + dc_plane_state->clip_rect = scaling_info.clip_rect; + dc_plane_state->scaling_quality = scaling_info.scaling_quality; + ret = get_fb_info(amdgpu_fb, &tiling_flags); if (ret) return ret; + ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags, + &plane_info, + &dc_plane_state->address); + if (ret) + return ret; + + dc_plane_state->format = plane_info.format; + dc_plane_state->color_space = plane_info.color_space; + dc_plane_state->plane_size = plane_info.plane_size; + dc_plane_state->rotation = plane_info.rotation; + dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror; + dc_plane_state->stereo_format = plane_info.stereo_format; + dc_plane_state->tiling_info = plane_info.tiling_info; + dc_plane_state->visible = plane_info.visible; + dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha; + dc_plane_state->global_alpha = plane_info.global_alpha; + dc_plane_state->global_alpha_value = plane_info.global_alpha_value; + dc_plane_state->dcc = plane_info.dcc; + /* * Always set input transfer function, since plane state is refreshed * every time. @@ -2632,11 +2900,6 @@ static int fill_plane_attributes(struct amdgpu_device *adev, dc_plane_state->in_transfer_func = NULL; } - fill_blending_from_plane_state(plane_state, dc_plane_state, - &dc_plane_state->per_pixel_alpha, - &dc_plane_state->global_alpha, - &dc_plane_state->global_alpha_value); - return ret; } @@ -3198,6 +3461,8 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc) dc_stream_retain(state->stream); } + state->active_planes = cur->active_planes; + state->interrupts_enabled = cur->interrupts_enabled; state->vrr_params = cur->vrr_params; state->vrr_infopacket = cur->vrr_infopacket; state->abm_level = cur->abm_level; @@ -3210,12 +3475,41 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc) return &state->base; } +static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable) +{ + enum dc_irq_source irq_source; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + struct amdgpu_device *adev = crtc->dev->dev_private; + int rc; + + irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst; + + rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; + + DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n", + acrtc->crtc_id, enable ? "en" : "dis", rc); + return rc; +} static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) { enum dc_irq_source irq_source; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); struct amdgpu_device *adev = crtc->dev->dev_private; + struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); + int rc = 0; + + if (enable) { + /* vblank irq on -> Only need vupdate irq in vrr mode */ + if (amdgpu_dm_vrr_active(acrtc_state)) + rc = dm_set_vupdate_irq(crtc, true); + } else { + /* vblank irq off -> vupdate irq off */ + rc = dm_set_vupdate_irq(crtc, false); + } + + if (rc) + return rc; irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; return dc_interrupt_set(adev->dm.dc, irq_source, enable) ?
0 : -EBUSY; @@ -3593,6 +3887,76 @@ static void dm_crtc_helper_disable(struct drm_crtc *crtc) { } +static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state) +{ + struct drm_device *dev = new_crtc_state->crtc->dev; + struct drm_plane *plane; + + drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) { + if (plane->type == DRM_PLANE_TYPE_CURSOR) + return true; + } + + return false; +} + +static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) +{ + struct drm_atomic_state *state = new_crtc_state->state; + struct drm_plane *plane; + int num_active = 0; + + drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) { + struct drm_plane_state *new_plane_state; + + /* Cursor planes are "fake". */ + if (plane->type == DRM_PLANE_TYPE_CURSOR) + continue; + + new_plane_state = drm_atomic_get_new_plane_state(state, plane); + + if (!new_plane_state) { + /* + * The plane is enabled on the CRTC and hasn't changed + * state. This means that it previously passed + * validation and is therefore enabled. + */ + num_active += 1; + continue; + } + + /* We need a framebuffer to be considered enabled. */ + num_active += (new_plane_state->fb != NULL); + } + + return num_active; +} + +/* + * Sets whether interrupts should be enabled on a specific CRTC. + * We require that the stream be enabled and that there exist active + * DC planes on the stream. + */ +static void +dm_update_crtc_interrupt_state(struct drm_crtc *crtc, + struct drm_crtc_state *new_crtc_state) +{ + struct dm_crtc_state *dm_new_crtc_state = + to_dm_crtc_state(new_crtc_state); + + dm_new_crtc_state->active_planes = 0; + dm_new_crtc_state->interrupts_enabled = false; + + if (!dm_new_crtc_state->stream) + return; + + dm_new_crtc_state->active_planes = + count_crtc_active_planes(new_crtc_state); + + dm_new_crtc_state->interrupts_enabled = + dm_new_crtc_state->active_planes > 0; +} + static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) { @@ -3601,6 +3965,14 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state); int ret = -EINVAL; + /* + * Update interrupt state for the CRTC. This needs to happen whenever + * the CRTC has changed or whenever any of its planes have changed. + * Atomic check satisfies both of these requirements since the CRTC + * is added to the state by DRM during drm_atomic_helper_check_planes. + */ + dm_update_crtc_interrupt_state(crtc, state); + if (unlikely(!dm_crtc_state->stream && modeset_required(state, NULL, dm_crtc_state->stream))) { WARN_ON(1); @@ -3611,6 +3983,15 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, if (!dm_crtc_state->stream) return 0; + /* + * We want at least one hardware plane enabled to use + * the stream with a cursor enabled.
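
That check encodes a hardware constraint: DC composites the cursor using pipe resources borrowed from a real plane, so an enabled, active CRTC whose only visible plane is the cursor has nothing to draw it with. A standalone restatement of the rejection rule:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch of the new atomic_check rule; parameter names invented. */
    static int check_cursor(bool enable, bool active, bool has_cursor,
                            int active_planes)
    {
        if (enable && active && has_cursor && active_planes == 0)
            return -EINVAL; /* cursor alone cannot be displayed */
        return 0;
    }

    int main(void)
    {
        printf("cursor-only commit: %d\n", check_cursor(true, true, true, 0));
        return 0;
    }
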
+ */ + if (state->enable && state->active && + does_crtc_have_active_cursor(state) && + dm_crtc_state->active_planes == 0) + return -EINVAL; + if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK) return 0; @@ -3762,9 +4143,11 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { struct dc_plane_state *plane_state = dm_plane_state_new->dc_state; - fill_plane_tiling_attributes( - adev, afb, plane_state, &plane_state->tiling_info, - &plane_state->dcc, &plane_state->address, tiling_flags); + fill_plane_buffer_attributes( + adev, afb, plane_state->format, plane_state->rotation, + tiling_flags, &plane_state->tiling_info, + &plane_state->plane_size, &plane_state->dcc, + &plane_state->address); } return 0; @@ -3796,13 +4179,18 @@ static int dm_plane_atomic_check(struct drm_plane *plane, { struct amdgpu_device *adev = plane->dev->dev_private; struct dc *dc = adev->dm.dc; - struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); + struct dm_plane_state *dm_plane_state; + struct dc_scaling_info scaling_info; + int ret; + + dm_plane_state = to_dm_plane_state(state); if (!dm_plane_state->dc_state) return 0; - if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state)) - return -EINVAL; + ret = fill_dc_scaling_info(state, &scaling_info); + if (ret) + return ret; if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) return 0; @@ -3875,6 +4263,7 @@ static const uint32_t rgb_formats[] = { DRM_FORMAT_ABGR2101010, DRM_FORMAT_XBGR8888, DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGB565, }; static const uint32_t overlay_formats[] = { @@ -3883,53 +4272,80 @@ static const uint32_t overlay_formats[] = { DRM_FORMAT_RGBA8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGB565 }; static const u32 cursor_formats[] = { DRM_FORMAT_ARGB8888 }; -static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, - struct drm_plane *plane, - unsigned long possible_crtcs) +static int get_plane_formats(const struct drm_plane *plane, + const struct dc_plane_cap *plane_cap, + uint32_t *formats, int max_formats) { - int res = -EPERM; + int i, num_formats = 0; + + /* + * TODO: Query support for each group of formats directly from + * DC plane caps. This will require adding more formats to the + * caps list. 
+ */ switch (plane->type) { case DRM_PLANE_TYPE_PRIMARY: - res = drm_universal_plane_init( - dm->adev->ddev, - plane, - possible_crtcs, - &dm_plane_funcs, - rgb_formats, - ARRAY_SIZE(rgb_formats), - NULL, plane->type, NULL); + for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) { + if (num_formats >= max_formats) + break; + + formats[num_formats++] = rgb_formats[i]; + } + + if (plane_cap && plane_cap->pixel_format_support.nv12) + formats[num_formats++] = DRM_FORMAT_NV12; break; + case DRM_PLANE_TYPE_OVERLAY: - res = drm_universal_plane_init( - dm->adev->ddev, - plane, - possible_crtcs, - &dm_plane_funcs, - overlay_formats, - ARRAY_SIZE(overlay_formats), - NULL, plane->type, NULL); + for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { + if (num_formats >= max_formats) + break; + + formats[num_formats++] = overlay_formats[i]; + } break; + case DRM_PLANE_TYPE_CURSOR: - res = drm_universal_plane_init( - dm->adev->ddev, - plane, - possible_crtcs, - &dm_plane_funcs, - cursor_formats, - ARRAY_SIZE(cursor_formats), - NULL, plane->type, NULL); + for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { + if (num_formats >= max_formats) + break; + + formats[num_formats++] = cursor_formats[i]; + } break; } - /* TODO: Check DC plane caps explicitly here for adding propertes */ - if (plane->type == DRM_PLANE_TYPE_OVERLAY) { + return num_formats; +} + +static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, + struct drm_plane *plane, + unsigned long possible_crtcs, + const struct dc_plane_cap *plane_cap) +{ + uint32_t formats[32]; + int num_formats; + int res = -EPERM; + + num_formats = get_plane_formats(plane, plane_cap, formats, + ARRAY_SIZE(formats)); + + res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs, + &dm_plane_funcs, formats, num_formats, + NULL, plane->type, NULL); + if (res) + return res; + + if (plane->type == DRM_PLANE_TYPE_OVERLAY && + plane_cap && plane_cap->per_pixel_alpha) { unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) | BIT(DRM_MODE_BLEND_PREMULTI); @@ -3937,14 +4353,25 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, drm_plane_create_blend_mode_property(plane, blend_caps); } + if (plane->type == DRM_PLANE_TYPE_PRIMARY && + plane_cap && plane_cap->pixel_format_support.nv12) { + /* This only affects YUV formats. 
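
The color properties created just below are only registered on NV12-capable primary planes, so userspace has to probe for them by name at runtime rather than assume they exist. A hedged libdrm sketch of doing that (error handling trimmed; the enum values 1 and 0 assume the kernel's declaration order for BT.709 and limited range, so robust code would match prop->enums[] by name instead):

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    /* Find a plane property id by name; returns 0 if the plane lacks it. */
    static uint32_t plane_prop(int fd, uint32_t plane_id, const char *name)
    {
        drmModeObjectPropertiesPtr props =
            drmModeObjectGetProperties(fd, plane_id, DRM_MODE_OBJECT_PLANE);
        uint32_t id = 0, i;

        for (i = 0; props && i < props->count_props; i++) {
            drmModePropertyPtr p = drmModeGetProperty(fd, props->props[i]);

            if (p && !strcmp(p->name, name))
                id = p->prop_id;
            drmModeFreeProperty(p);
        }
        drmModeFreeObjectProperties(props);
        return id;
    }

    /* Ask for BT.709 limited range on an NV12-capable plane. */
    static int request_bt709_limited(int fd, uint32_t plane_id)
    {
        uint32_t enc = plane_prop(fd, plane_id, "COLOR_ENCODING");
        uint32_t rng = plane_prop(fd, plane_id, "COLOR_RANGE");
        drmModeAtomicReqPtr req;
        int ret;

        if (!enc || !rng)
            return -1; /* plane doesn't expose the properties */

        req = drmModeAtomicAlloc();
        drmModeAtomicAddProperty(req, plane_id, enc, 1); /* assumed BT.709 */
        drmModeAtomicAddProperty(req, plane_id, rng, 0); /* assumed limited */
        ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_TEST_ONLY, NULL);
        drmModeAtomicFree(req);
        return ret;
    }
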
*/ + drm_plane_create_color_properties( + plane, + BIT(DRM_COLOR_YCBCR_BT601) | + BIT(DRM_COLOR_YCBCR_BT709), + BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | + BIT(DRM_COLOR_YCBCR_FULL_RANGE), + DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE); + } + drm_plane_helper_add(plane, &dm_plane_helper_funcs); /* Create (reset) the plane state */ if (plane->funcs->reset) plane->funcs->reset(plane); - - return res; + return 0; } static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, @@ -3961,7 +4388,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, goto fail; cursor_plane->type = DRM_PLANE_TYPE_CURSOR; - res = amdgpu_dm_plane_init(dm, cursor_plane, 0); + res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL); acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL); if (!acrtc) @@ -4536,9 +4963,13 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, x = plane->state->crtc_x; y = plane->state->crtc_y; - /* avivo cursor are offset into the total surface */ - x += crtc->primary->state->src_x >> 16; - y += crtc->primary->state->src_y >> 16; + + if (crtc->primary->state) { + /* avivo cursor are offset into the total surface */ + x += crtc->primary->state->src_x >> 16; + y += crtc->primary->state->src_y >> 16; + } + if (x < 0) { xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); x = 0; @@ -4596,6 +5027,7 @@ static void handle_cursor_update(struct drm_plane *plane, amdgpu_crtc->cursor_width = plane->state->crtc_w; amdgpu_crtc->cursor_height = plane->state->crtc_h; + memset(&attributes, 0, sizeof(attributes)); attributes.address.high_part = upper_32_bits(address); attributes.address.low_part = lower_32_bits(address); attributes.width = plane->state->crtc_w; @@ -4644,9 +5076,10 @@ static void update_freesync_state_on_stream( struct dc_plane_state *surface, u32 flip_timestamp_in_us) { - struct mod_vrr_params vrr_params = new_crtc_state->vrr_params; + struct mod_vrr_params vrr_params; struct dc_info_packet vrr_infopacket = {0}; - struct mod_freesync_config config = new_crtc_state->freesync_config; + struct amdgpu_device *adev = dm->adev; + unsigned long flags; if (!new_stream) return; @@ -4659,19 +5092,8 @@ static void update_freesync_state_on_stream( if (!new_stream->timing.h_total || !new_stream->timing.v_total) return; - if (new_crtc_state->vrr_supported && - config.min_refresh_in_uhz && - config.max_refresh_in_uhz) { - config.state = new_crtc_state->base.vrr_enabled ? 
- VRR_STATE_ACTIVE_VARIABLE : - VRR_STATE_INACTIVE; - } else { - config.state = VRR_STATE_UNSUPPORTED; - } - - mod_freesync_build_vrr_params(dm->freesync_module, - new_stream, - &config, &vrr_params); + spin_lock_irqsave(&adev->ddev->event_lock, flags); + vrr_params = new_crtc_state->vrr_params; if (surface) { mod_freesync_handle_preflip( @@ -4680,6 +5102,12 @@ static void update_freesync_state_on_stream( new_stream, flip_timestamp_in_us, &vrr_params); + + if (adev->family < AMDGPU_FAMILY_AI && + amdgpu_dm_vrr_active(new_crtc_state)) { + mod_freesync_handle_v_update(dm->freesync_module, + new_stream, &vrr_params); + } } mod_freesync_build_vrr_infopacket( @@ -4711,6 +5139,100 @@ static void update_freesync_state_on_stream( new_crtc_state->base.crtc->base.id, (int)new_crtc_state->base.vrr_enabled, (int)vrr_params.state); + + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); +} + +static void pre_update_freesync_state_on_stream( + struct amdgpu_display_manager *dm, + struct dm_crtc_state *new_crtc_state) +{ + struct dc_stream_state *new_stream = new_crtc_state->stream; + struct mod_vrr_params vrr_params; + struct mod_freesync_config config = new_crtc_state->freesync_config; + struct amdgpu_device *adev = dm->adev; + unsigned long flags; + + if (!new_stream) + return; + + /* + * TODO: Determine why min/max totals and vrefresh can be 0 here. + * For now it's sufficient to just guard against these conditions. + */ + if (!new_stream->timing.h_total || !new_stream->timing.v_total) + return; + + spin_lock_irqsave(&adev->ddev->event_lock, flags); + vrr_params = new_crtc_state->vrr_params; + + if (new_crtc_state->vrr_supported && + config.min_refresh_in_uhz && + config.max_refresh_in_uhz) { + config.state = new_crtc_state->base.vrr_enabled ? + VRR_STATE_ACTIVE_VARIABLE : + VRR_STATE_INACTIVE; + } else { + config.state = VRR_STATE_UNSUPPORTED; + } + + mod_freesync_build_vrr_params(dm->freesync_module, + new_stream, + &config, &vrr_params); + + new_crtc_state->freesync_timing_changed |= + (memcmp(&new_crtc_state->vrr_params.adjust, + &vrr_params.adjust, + sizeof(vrr_params.adjust)) != 0); + + new_crtc_state->vrr_params = vrr_params; + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); +} + +static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, + struct dm_crtc_state *new_state) +{ + bool old_vrr_active = amdgpu_dm_vrr_active(old_state); + bool new_vrr_active = amdgpu_dm_vrr_active(new_state); + + if (!old_vrr_active && new_vrr_active) { + /* Transition VRR inactive -> active: + * While VRR is active, we must not disable vblank irq, as a + * reenable after disable would compute bogus vblank/pflip + * timestamps if it likely happened inside display front-porch. + * + * We also need vupdate irq for the actual core vblank handling + * at end of vblank. + */ + dm_set_vupdate_irq(new_state->base.crtc, true); + drm_crtc_vblank_get(new_state->base.crtc); + DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n", + __func__, new_state->base.crtc->base.id); + } else if (old_vrr_active && !new_vrr_active) { + /* Transition VRR active -> inactive: + * Allow vblank irq disable again for fixed refresh rate. 
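
The transition handler pins resources while VRR is on: a vblank reference, so the irq can never be disabled and re-enabled mid-front-porch, plus the vupdate irq; the inactive branch just below releases both. A toy model of the reference dance, not the DRM structures:

    #include <stdbool.h>
    #include <stdio.h>

    struct crtc_model { int vblank_refs; bool vupdate_on; };

    static void vrr_transition(struct crtc_model *c, bool was_vrr, bool is_vrr)
    {
        if (!was_vrr && is_vrr) {
            /* off -> on: pin the vblank irq and enable vupdate */
            c->vupdate_on = true;
            c->vblank_refs++;
        } else if (was_vrr && !is_vrr) {
            /* on -> off: vblank irq may be disabled again */
            c->vupdate_on = false;
            c->vblank_refs--;
        }
    }

    int main(void)
    {
        struct crtc_model c = { 0, false };

        vrr_transition(&c, false, true);
        printf("refs=%d vupdate=%d\n", c.vblank_refs, c.vupdate_on);
        return 0;
    }
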
+ */ + dm_set_vupdate_irq(new_state->base.crtc, false); + drm_crtc_vblank_put(new_state->base.crtc); + DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", + __func__, new_state->base.crtc->base.id); + } +} + +static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state) +{ + struct drm_plane *plane; + struct drm_plane_state *old_plane_state, *new_plane_state; + int i; + + /* + * TODO: Make this per-stream so we don't issue redundant updates for + * commits with multiple streams. + */ + for_each_oldnew_plane_in_state(state, plane, old_plane_state, + new_plane_state, i) + if (plane->type == DRM_PLANE_TYPE_CURSOR) + handle_cursor_update(plane, old_plane_state); } static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, @@ -4734,11 +5256,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, unsigned long flags; struct amdgpu_bo *abo; uint64_t tiling_flags; - uint32_t target, target_vblank; - uint64_t last_flip_vblank; - bool vrr_active = acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE; + uint32_t target_vblank, last_flip_vblank; + bool vrr_active = amdgpu_dm_vrr_active(acrtc_state); bool pflip_present = false; - struct { struct dc_surface_update surface_updates[MAX_SURFACES]; struct dc_plane_info plane_infos[MAX_SURFACES]; @@ -4754,12 +5274,19 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, goto cleanup; } + /* + * Disable the cursor first if we're disabling all the planes. + * It'll remain on the screen after the planes are re-enabled + * if we don't. + */ + if (acrtc_state->active_planes == 0) + amdgpu_dm_commit_cursors(state); + /* update planes when needed */ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { struct drm_crtc *crtc = new_plane_state->crtc; struct drm_crtc_state *new_crtc_state; struct drm_framebuffer *fb = new_plane_state->fb; - struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb); bool plane_needs_flip; struct dc_plane_state *dc_plane; struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); @@ -4783,27 +5310,11 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; } + fill_dc_scaling_info(new_plane_state, + &bundle->scaling_infos[planes_count]); - bundle->scaling_infos[planes_count].scaling_quality = dc_plane->scaling_quality; - bundle->scaling_infos[planes_count].src_rect = dc_plane->src_rect; - bundle->scaling_infos[planes_count].dst_rect = dc_plane->dst_rect; - bundle->scaling_infos[planes_count].clip_rect = dc_plane->clip_rect; - bundle->surface_updates[planes_count].scaling_info = &bundle->scaling_infos[planes_count]; - - - bundle->plane_infos[planes_count].color_space = dc_plane->color_space; - bundle->plane_infos[planes_count].format = dc_plane->format; - bundle->plane_infos[planes_count].plane_size = dc_plane->plane_size; - bundle->plane_infos[planes_count].rotation = dc_plane->rotation; - bundle->plane_infos[planes_count].horizontal_mirror = dc_plane->horizontal_mirror; - bundle->plane_infos[planes_count].stereo_format = dc_plane->stereo_format; - bundle->plane_infos[planes_count].tiling_info = dc_plane->tiling_info; - bundle->plane_infos[planes_count].visible = dc_plane->visible; - bundle->plane_infos[planes_count].global_alpha = dc_plane->global_alpha; - bundle->plane_infos[planes_count].global_alpha_value = dc_plane->global_alpha_value; - bundle->plane_infos[planes_count].per_pixel_alpha = 
dc_plane->per_pixel_alpha; - bundle->plane_infos[planes_count].dcc = dc_plane->dcc; - bundle->surface_updates[planes_count].plane_info = &bundle->plane_infos[planes_count]; + bundle->surface_updates[planes_count].scaling_info = + &bundle->scaling_infos[planes_count]; plane_needs_flip = old_plane_state->fb && new_plane_state->fb; @@ -4814,32 +5325,40 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, continue; } + abo = gem_to_amdgpu_bo(fb->obj[0]); + + /* + * Wait for all fences on this FB. Do limited wait to avoid + * deadlock during GPU reset when this fence will not signal + * but we hold reservation lock for the BO. + */ + r = reservation_object_wait_timeout_rcu(abo->tbo.resv, true, + false, + msecs_to_jiffies(5000)); + if (unlikely(r <= 0)) + DRM_ERROR("Waiting for fences timed out or interrupted!"); + /* * TODO This might fail and hence better not used, wait * explicitly on fences instead * and in general should be called for * blocking commit to as per framework helpers */ - abo = gem_to_amdgpu_bo(fb->obj[0]); r = amdgpu_bo_reserve(abo, true); - if (unlikely(r != 0)) { + if (unlikely(r != 0)) DRM_ERROR("failed to reserve buffer before flip\n"); - WARN_ON(1); - } - - /* Wait for all fences on this FB */ - WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false, - MAX_SCHEDULE_TIMEOUT) < 0); amdgpu_bo_get_tiling_flags(abo, &tiling_flags); amdgpu_bo_unreserve(abo); - fill_plane_tiling_attributes(dm->adev, afb, dc_plane, - &bundle->plane_infos[planes_count].tiling_info, - &bundle->plane_infos[planes_count].dcc, - &bundle->flip_addrs[planes_count].address, - tiling_flags); + fill_dc_plane_info_and_addr( + dm->adev, new_plane_state, tiling_flags, + &bundle->plane_infos[planes_count], + &bundle->flip_addrs[planes_count].address); + + bundle->surface_updates[planes_count].plane_info = + &bundle->plane_infos[planes_count]; bundle->flip_addrs[planes_count].flip_immediate = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0; @@ -4880,7 +5399,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * clients using the GLX_OML_sync_control extension or * DRI3/Present extension with defined target_msc. */ - last_flip_vblank = drm_crtc_vblank_count(pcrtc); + last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id); } else { /* For variable refresh rate mode only: @@ -4896,11 +5415,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); } - target = (uint32_t)last_flip_vblank + wait_for_vblank; - - /* Prepare wait for target vblank early - before the fence-waits */ - target_vblank = target - (uint32_t)drm_crtc_vblank_count(pcrtc) + - amdgpu_get_vblank_counter_kms(pcrtc->dev, acrtc_attach->crtc_id); + target_vblank = last_flip_vblank + wait_for_vblank; /* * Wait until we're out of the vertical blank period before the one @@ -4940,7 +5455,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, } } - if (planes_count) { + /* Update the planes if changed or disable if we don't have any. 
*/ + if (planes_count || acrtc_state->active_planes == 0) { if (new_pcrtc_state->mode_changed) { bundle->stream_update.src = acrtc_state->stream->src; bundle->stream_update.dst = acrtc_state->stream->dst; } @@ -4963,15 +5479,72 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, mutex_unlock(&dm->dc_lock); } - for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) - if (plane->type == DRM_PLANE_TYPE_CURSOR) - handle_cursor_update(plane, old_plane_state); + /* + * Update cursor state *after* programming all the planes. + * This avoids redundant programming in the case where we're going + * to be disabling a single plane - those pipes are being disabled. + */ + if (acrtc_state->active_planes) + amdgpu_dm_commit_cursors(state); cleanup: kfree(bundle); } /* + * Enable interrupts on CRTCs that are newly active, undergone + * a modeset, or have active planes again. + * + * Done in two passes, based on the for_modeset flag: + * Pass 1: For CRTCs going through modeset + * Pass 2: For CRTCs going from 0 to n active planes + * + * Interrupts can only be enabled after the planes are programmed, + * so this requires a two-pass approach since we don't want to + * just defer the interrupts until after commit planes every time. + */ +static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev, + struct drm_atomic_state *state, + bool for_modeset) +{ + struct amdgpu_device *adev = dev->dev_private; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + int i; + + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + struct dm_crtc_state *dm_new_crtc_state = + to_dm_crtc_state(new_crtc_state); + struct dm_crtc_state *dm_old_crtc_state = + to_dm_crtc_state(old_crtc_state); + bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state); + bool run_pass; + + run_pass = (for_modeset && modeset) || + (!for_modeset && !modeset && + !dm_old_crtc_state->interrupts_enabled); + + if (!run_pass) + continue; + + if (!dm_new_crtc_state->interrupts_enabled) + continue; + + manage_dm_interrupts(adev, acrtc, true); + +#ifdef CONFIG_DEBUG_FS + /* The stream has changed so CRC capture needs to be re-enabled. */ + if (dm_new_crtc_state->crc_enabled) { + dm_new_crtc_state->crc_enabled = false; + amdgpu_dm_crtc_set_crc_source(crtc, "auto"); + } +#endif + } +} + +/* * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC * @crtc_state: the DRM CRTC state * @stream_state: the DC stream state. @@ -4995,30 +5568,41 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev, int i; /* - * We evade vblanks and pflips on crtc that - * should be changed. We do it here to flush & disable - * interrupts before drm_swap_state is called in drm_atomic_helper_commit - * it will update crtc->dm_crtc_state->stream pointer which is used in - * the ISRs. + * We evade vblank and pflip interrupts on CRTCs that are undergoing + * a modeset, being disabled, or have no active planes. + * + * It's done in atomic commit rather than commit tail for now since + * some of these interrupt handlers access the current CRTC state and + * potentially the stream pointer itself. + * + * Since the atomic state is swapped within atomic commit and not within + * commit tail this would lead to the new state (that hasn't been committed yet) + * being accessed from within the handlers. + * + * TODO: Fix this so we can do this in commit tail and not have to block + * in atomic check. 
*/ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); - if (drm_atomic_crtc_needs_modeset(new_crtc_state) - && dm_old_crtc_state->stream) { + if (dm_old_crtc_state->interrupts_enabled && + (!dm_new_crtc_state->interrupts_enabled || + drm_atomic_crtc_needs_modeset(new_crtc_state))) { /* - * If the stream is removed and CRC capture was - * enabled on the CRTC the extra vblank reference - * needs to be dropped since CRC capture will be - * disabled. + * Drop the extra vblank reference added by CRC + * capture if applicable. */ - if (!dm_new_crtc_state->stream - && dm_new_crtc_state->crc_enabled) { + if (dm_new_crtc_state->crc_enabled) drm_crtc_vblank_put(crtc); + + /* + * Only keep CRC capture enabled if there's + * still a stream for the CRTC. + */ + if (!dm_new_crtc_state->stream) dm_new_crtc_state->crc_enabled = false; - } manage_dm_interrupts(adev, acrtc, false); } @@ -5065,7 +5649,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) dc_state = dm_state->context; } else { /* No state changes, retain current state. */ - dc_state_temp = dc_create_state(); + dc_state_temp = dc_create_state(dm->dc); ASSERT(dc_state_temp); dc_state = dc_state_temp; dc_resource_state_copy_construct_current(dm->dc, dc_state); @@ -5234,36 +5818,26 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) mutex_unlock(&dm->dc_lock); } + /* Count number of newly disabled CRTCs for dropping PM refs later. */ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { - /* - * loop to enable interrupts on newly arrived crtc - */ - struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); - bool modeset_needed; - + new_crtc_state, i) { if (old_crtc_state->active && !new_crtc_state->active) crtc_disable_count++; dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); - modeset_needed = modeset_required( - new_crtc_state, - dm_new_crtc_state->stream, - dm_old_crtc_state->stream); - if (dm_new_crtc_state->stream == NULL || !modeset_needed) - continue; - - manage_dm_interrupts(adev, acrtc, true); + /* Update freesync active state. */ + pre_update_freesync_state_on_stream(dm, dm_new_crtc_state); -#ifdef CONFIG_DEBUG_FS - /* The stream has changed so CRC capture needs to re-enabled. */ - if (dm_new_crtc_state->crc_enabled) - amdgpu_dm_crtc_set_crc_source(crtc, "auto"); -#endif + /* Handle vrr on->off / off->on transitions */ + amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, + dm_new_crtc_state); } + /* Enable interrupts for CRTCs going through a modeset. */ + amdgpu_dm_enable_crtc_interrupts(dev, state, true); + for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) if (new_crtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) wait_for_vblank = false; @@ -5277,6 +5851,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) dm, crtc, wait_for_vblank); } + /* Enable interrupts for CRTCs going from 0 to n active planes. 
*/ + amdgpu_dm_enable_crtc_interrupts(dev, state, false); /* * send vblank event on all events not handled in flip and @@ -5462,9 +6038,11 @@ static void get_freesync_config_for_crtc( struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(new_con_state->base.connector); struct drm_display_mode *mode = &new_crtc_state->base.mode; + int vrefresh = drm_mode_vrefresh(mode); new_crtc_state->vrr_supported = new_con_state->freesync_capable && - aconnector->min_vfreq <= drm_mode_vrefresh(mode); + vrefresh >= aconnector->min_vfreq && + vrefresh <= aconnector->max_vfreq; if (new_crtc_state->vrr_supported) { new_crtc_state->stream->ignore_msa_timing_param = true; @@ -5514,21 +6092,12 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector = NULL; struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; - struct drm_plane_state *new_plane_state = NULL; new_stream = NULL; dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); acrtc = to_amdgpu_crtc(crtc); - - new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary); - - if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) { - ret = -EINVAL; - goto fail; - } - aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); /* TODO This hack should go away */ @@ -5718,6 +6287,69 @@ fail: return ret; } +static bool should_reset_plane(struct drm_atomic_state *state, + struct drm_plane *plane, + struct drm_plane_state *old_plane_state, + struct drm_plane_state *new_plane_state) +{ + struct drm_plane *other; + struct drm_plane_state *old_other_state, *new_other_state; + struct drm_crtc_state *new_crtc_state; + int i; + + /* + * TODO: Remove this hack once the checks below are sufficient + * to determine when we need to reset all the planes on + * the stream. + */ + if (state->allow_modeset) + return true; + + /* Exit early if we know that we're adding or removing the plane. */ + if (old_plane_state->crtc != new_plane_state->crtc) + return true; + + /* old crtc == new_crtc == NULL, plane not in context. */ + if (!new_plane_state->crtc) + return false; + + new_crtc_state = + drm_atomic_get_new_crtc_state(state, new_plane_state->crtc); + + if (!new_crtc_state) + return true; + + if (drm_atomic_crtc_needs_modeset(new_crtc_state)) + return true; + + /* + * If there are any new primary or overlay planes being added or + * removed then the z-order can potentially change. To ensure + * correct z-order and pipe acquisition the current DC architecture + * requires us to remove and recreate all existing planes. + * + * TODO: Come up with a more elegant solution for this. + */ + for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) { + if (other->type == DRM_PLANE_TYPE_CURSOR) + continue; + + if (old_other_state->crtc != new_plane_state->crtc && + new_other_state->crtc != new_plane_state->crtc) + continue; + + if (old_other_state->crtc != new_other_state->crtc) + return true; + + /* TODO: Remove this once we can handle fast format changes. 
*/ + if (old_other_state->fb && new_other_state->fb && + old_other_state->fb->format != new_other_state->fb->format) + return true; + } + + return false; +} + static int dm_update_plane_state(struct dc *dc, struct drm_atomic_state *state, struct drm_plane *plane, @@ -5732,8 +6364,7 @@ static int dm_update_plane_state(struct dc *dc, struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; - /* TODO return page_flip_needed() function */ - bool pflip_needed = !state->allow_modeset; + bool needs_reset; int ret = 0; @@ -5746,10 +6377,12 @@ static int dm_update_plane_state(struct dc *dc, if (plane->type == DRM_PLANE_TYPE_CURSOR) return 0; + needs_reset = should_reset_plane(state, plane, old_plane_state, + new_plane_state); + /* Remove any changed/removed planes */ if (!enable) { - if (pflip_needed && - plane->type != DRM_PLANE_TYPE_OVERLAY) + if (!needs_reset) return 0; if (!old_plane_crtc) @@ -5800,7 +6433,7 @@ static int dm_update_plane_state(struct dc *dc, if (!dm_new_crtc_state->stream) return 0; - if (pflip_needed && plane->type != DRM_PLANE_TYPE_OVERLAY) + if (!needs_reset) return 0; WARN_ON(dm_new_plane_state->dc_state); @@ -5812,7 +6445,7 @@ static int dm_update_plane_state(struct dc *dc, DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n", plane->base.id, new_plane_crtc->base.id); - ret = fill_plane_attributes( + ret = fill_dc_plane_attributes( new_plane_crtc->dev->dev_private, dc_new_plane_state, new_plane_state, @@ -5860,10 +6493,11 @@ static int dm_update_plane_state(struct dc *dc, } static int -dm_determine_update_type_for_commit(struct dc *dc, +dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, struct drm_atomic_state *state, enum surface_update_type *out_type) { + struct dc *dc = dm->dc; struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL; int i, j, num_plane, ret = 0; struct drm_plane_state *old_plane_state, *new_plane_state; @@ -5877,21 +6511,22 @@ dm_determine_update_type_for_commit(struct dc *dc, struct dc_stream_status *status = NULL; struct dc_surface_update *updates; - struct dc_plane_state *surface; enum surface_update_type update_type = UPDATE_TYPE_FAST; updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL); - surface = kcalloc(MAX_SURFACES, sizeof(*surface), GFP_KERNEL); - if (!updates || !surface) { - DRM_ERROR("Plane or surface update failed to allocate"); + if (!updates) { + DRM_ERROR("Failed to allocate plane updates\n"); /* Set type to FULL to avoid crashing in DC*/ update_type = UPDATE_TYPE_FULL; goto cleanup; } for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - struct dc_stream_update stream_update = { 0 }; + struct dc_scaling_info scaling_info; + struct dc_stream_update stream_update; + + memset(&stream_update, 0, sizeof(stream_update)); new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); old_dm_crtc_state = to_dm_crtc_state(old_crtc_state); @@ -5919,23 +6554,12 @@ dm_determine_update_type_for_commit(struct dc *dc, goto cleanup; } - if (!state->allow_modeset) - continue; - if (crtc != new_plane_crtc) continue; - updates[num_plane].surface = &surface[num_plane]; + updates[num_plane].surface = new_dm_plane_state->dc_state; if (new_crtc_state->mode_changed) { - updates[num_plane].surface->src_rect = - new_dm_plane_state->dc_state->src_rect; - updates[num_plane].surface->dst_rect = - new_dm_plane_state->dc_state->dst_rect; - updates[num_plane].surface->rotation = - 
new_dm_plane_state->dc_state->rotation; - updates[num_plane].surface->in_transfer_func = - new_dm_plane_state->dc_state->in_transfer_func; stream_update.dst = new_dm_crtc_state->stream->dst; stream_update.src = new_dm_crtc_state->stream->src; } @@ -5951,6 +6575,13 @@ dm_determine_update_type_for_commit(struct dc *dc, new_dm_crtc_state->stream->out_transfer_func; } + ret = fill_dc_scaling_info(new_plane_state, + &scaling_info); + if (ret) + goto cleanup; + + updates[num_plane].scaling_info = &scaling_info; + num_plane++; } @@ -5970,8 +6601,14 @@ dm_determine_update_type_for_commit(struct dc *dc, status = dc_stream_get_status_from_state(old_dm_state->context, new_dm_crtc_state->stream); + /* + * TODO: DC modifies the surface during this call so we need + * to lock here - find a way to do this without locking. + */ + mutex_lock(&dm->dc_lock); update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane, &stream_update, status); + mutex_unlock(&dm->dc_lock); if (update_type > UPDATE_TYPE_MED) { update_type = UPDATE_TYPE_FULL; @@ -5981,7 +6618,6 @@ dm_determine_update_type_for_commit(struct dc *dc, cleanup: kfree(updates); - kfree(surface); *out_type = update_type; return ret; @@ -6165,7 +6801,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, lock_and_validation_needed = true; } - ret = dm_determine_update_type_for_commit(dc, state, &update_type); + ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type); if (ret) goto fail; @@ -6180,9 +6816,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, */ if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST) WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL"); - else if (!lock_and_validation_needed && overall_update_type > UPDATE_TYPE_FAST) - WARN(1, "Global lock should NOT be set, overall_update_type should be UPDATE_TYPE_FAST"); - if (overall_update_type > UPDATE_TYPE_FAST) { ret = dm_atomic_get_state(state, &dm_state); @@ -6193,7 +6826,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (ret) goto fail; - if (dc_validate_global_state(dc, dm_state->context) != DC_OK) { + if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) { ret = -EINVAL; goto fail; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 773ef5ca8441..978ff14a7d45 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -182,6 +182,15 @@ struct amdgpu_display_manager { struct common_irq_params vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1]; + /** + * @vupdate_params: + * + * Vertical update IRQ parameters, passed to registered handlers when + * triggered. 
+ */ + struct common_irq_params + vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1]; + spinlock_t irq_handler_list_table_lock; struct backlight_device *backlight_dev; @@ -262,6 +271,9 @@ struct dm_crtc_state { struct drm_crtc_state base; struct dc_stream_state *stream; + int active_planes; + bool interrupts_enabled; + int crc_skip_count; bool crc_enabled; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 1a9e3d3dfa38..1d5fc5ad3bee 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -995,6 +995,35 @@ static const struct drm_info_list amdgpu_dm_debugfs_list[] = { {"amdgpu_target_backlight_pwm", &target_backlight_read}, }; +/* + * Sets the DC visual confirm debug option from the given string. + * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm + */ +static int visual_confirm_set(void *data, u64 val) +{ + struct amdgpu_device *adev = data; + + adev->dm.dc->debug.visual_confirm = (enum visual_confirm)val; + + return 0; +} + +/* + * Reads the DC visual confirm debug option value into the given buffer. + * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm + */ +static int visual_confirm_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = data; + + *val = adev->dm.dc->debug.visual_confirm; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(visual_confirm_fops, visual_confirm_get, + visual_confirm_set, "%llu\n"); + int dtn_debugfs_init(struct amdgpu_device *adev) { static const struct file_operations dtn_log_fops = { @@ -1020,5 +1049,13 @@ int dtn_debugfs_init(struct amdgpu_device *adev) adev, &dtn_log_fops); - return PTR_ERR_OR_ZERO(ent); + if (IS_ERR(ent)) + return PTR_ERR(ent); + + ent = debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, + adev, &visual_confirm_fops); + if (IS_ERR(ent)) + return PTR_ERR(ent); + + return 0; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index cd10f77cdeb0..fd22b4474dbf 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -674,11 +674,30 @@ static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev, __func__); } +static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int crtc_id, + enum amdgpu_interrupt_state state) +{ + return dm_irq_state( + adev, + source, + crtc_id, + state, + IRQ_TYPE_VUPDATE, + __func__); +} + static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = { .set = amdgpu_dm_set_crtc_irq_state, .process = amdgpu_dm_irq_handler, }; +static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = { + .set = amdgpu_dm_set_vupdate_irq_state, + .process = amdgpu_dm_irq_handler, +}; + static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = { .set = amdgpu_dm_set_pflip_irq_state, .process = amdgpu_dm_irq_handler, @@ -695,6 +714,9 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev) adev->crtc_irq.num_types = adev->mode_info.num_crtc; adev->crtc_irq.funcs = &dm_crtc_irq_funcs; + adev->vupdate_irq.num_types = adev->mode_info.num_crtc; + adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs; + adev->pageflip_irq.num_types = adev->mode_info.num_crtc; adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs; diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c 
b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 1e23ddc7d088..1b4b51657f5e 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -544,28 +544,28 @@ static void calc_wm_sets_and_perf_params( v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vnom0p8; dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v); - context->bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = v->stutter_exit_watermark * 1000; - context->bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = v->stutter_enter_plus_exit_watermark * 1000; - context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = v->dram_clock_change_watermark * 1000; - context->bw.dcn.watermarks.b.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; - context->bw.dcn.watermarks.b.urgent_ns = v->urgent_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = v->urgent_watermark * 1000; v->dcfclk_per_state[1] = v->dcfclkv_nom0p8; v->dcfclk_per_state[0] = v->dcfclkv_nom0p8; v->dcfclk = v->dcfclkv_nom0p8; dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v); - context->bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = v->stutter_exit_watermark * 1000; - context->bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = v->stutter_enter_plus_exit_watermark * 1000; - context->bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = v->dram_clock_change_watermark * 1000; - context->bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; - context->bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 1000; } if (v->voltage_level < 3) { @@ -579,14 +579,14 @@ static void calc_wm_sets_and_perf_params( v->dcfclk = v->dcfclkv_max0p9; dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v); - context->bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = v->stutter_exit_watermark * 1000; - context->bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = v->stutter_enter_plus_exit_watermark * 1000; - context->bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = v->dram_clock_change_watermark * 1000; - context->bw.dcn.watermarks.d.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; - context->bw.dcn.watermarks.d.urgent_ns = v->urgent_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = v->urgent_watermark * 1000; } v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8; @@ 
-599,20 +599,20 @@ static void calc_wm_sets_and_perf_params( v->dcfclk = v->dcfclk_per_state[v->voltage_level]; dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v); - context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = v->stutter_exit_watermark * 1000; - context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = v->stutter_enter_plus_exit_watermark * 1000; - context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = v->dram_clock_change_watermark * 1000; - context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; - context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000; if (v->voltage_level >= 2) { - context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a; - context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a; + context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a; + context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a; } if (v->voltage_level >= 3) - context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a; + context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a; } #endif @@ -701,8 +701,15 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v, bool dcn_validate_bandwidth( struct dc *dc, - struct dc_state *context) + struct dc_state *context, + bool fast_validate) { + /* + * we want a breakdown of the various stages of validation, which the + * perf_trace macro doesn't support + */ + BW_VAL_TRACE_SETUP(); + const struct resource_pool *pool = dc->res_pool; struct dcn_bw_internal_vars *v = &context->dcn_bw_vars; int i, input_idx; @@ -711,6 +718,9 @@ bool dcn_validate_bandwidth( float bw_limit; PERFORMANCE_TRACE_START(); + + BW_VAL_TRACE_COUNT(); + if (dcn_bw_apply_registry_override(dc)) dcn_bw_sync_calcs_and_dml(dc); @@ -1008,13 +1018,16 @@ bool dcn_validate_bandwidth( dc->debug.sr_enter_plus_exit_time_dpm0_ns / 1000.0f; if (dc->debug.sr_exit_time_dpm0_ns) v->sr_exit_time = dc->debug.sr_exit_time_dpm0_ns / 1000.0f; - dc->dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time; - dc->dml.soc.sr_exit_time_us = v->sr_exit_time; + context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time; + context->bw_ctx.dml.soc.sr_exit_time_us = v->sr_exit_time; mode_support_and_system_configuration(v); } - if (v->voltage_level != 5) { + BW_VAL_TRACE_END_VOLTAGE_LEVEL(); + + if (v->voltage_level != number_of_states_plus_one && !fast_validate) { float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second; + if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65) bw_consumed = v->fabric_and_dram_bandwidth_vmin0p65; else if (bw_consumed < v->fabric_and_dram_bandwidth_vmid0p72) @@ -1035,58 +1048,60 @@ bool dcn_validate_bandwidth( */ dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v); - context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = v->stutter_exit_watermark * 1000; - context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = + 
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = v->stutter_enter_plus_exit_watermark * 1000; - context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = v->dram_clock_change_watermark * 1000; - context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; - context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000; - context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a; - context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a; - context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a; + context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a; + context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a; + context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a; - context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / + context->bw_ctx.bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / (ddr4_dram_factor_single_Channel * v->number_of_channels)); if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) { - context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32); + context->bw_ctx.bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32); } - context->bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000); - context->bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000); + context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000); + context->bw_ctx.bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000); - context->bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000); + context->bw_ctx.bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000); if (dc->debug.max_disp_clk == true) - context->bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000); + context->bw_ctx.bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000); - if (context->bw.dcn.clk.dispclk_khz < + if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz) { - context->bw.dcn.clk.dispclk_khz = + context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz; } - context->bw.dcn.clk.dppclk_khz = context->bw.dcn.clk.dispclk_khz / v->dispclk_dppclk_ratio; - context->bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level]; + context->bw_ctx.bw.dcn.clk.dppclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz / v->dispclk_dppclk_ratio; + context->bw_ctx.bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level]; switch (v->voltage_level) { case 0: - context->bw.dcn.clk.max_supported_dppclk_khz = + context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = (int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000); break; case 1: - context->bw.dcn.clk.max_supported_dppclk_khz = + context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = (int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000); break; case 2: - context->bw.dcn.clk.max_supported_dppclk_khz = + context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = (int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000); break; default: - context->bw.dcn.clk.max_supported_dppclk_khz = + context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = (int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000); break; } + BW_VAL_TRACE_END_WATERMARKS(); + for (i = 0, input_idx = 0; i < pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -1177,13 +1192,17 @@ bool 
dcn_validate_bandwidth( input_idx++; } + } else if (v->voltage_level == number_of_states_plus_one) { + BW_VAL_TRACE_SKIP(fail); + } else if (fast_validate) { + BW_VAL_TRACE_SKIP(fast); } if (v->voltage_level == 0) { - dc->dml.soc.sr_enter_plus_exit_time_us = + context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->dcn_soc->sr_enter_plus_exit_time; - dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time; + context->bw_ctx.dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time; } /* @@ -1196,6 +1215,7 @@ bool dcn_validate_bandwidth( kernel_fpu_end(); PERFORMANCE_TRACE_END(); + BW_VAL_TRACE_FINISH(); if (bw_limit_pass && v->voltage_level != 5) return true; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index c7415772e280..dda10b1f8574 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -584,6 +584,28 @@ void dc_link_set_test_pattern(struct dc_link *link, cust_pattern_size); } +uint32_t dc_link_bandwidth_kbps( + const struct dc_link *link, + const struct dc_link_settings *link_setting) +{ + uint32_t link_bw_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* bytes per sec */ + + link_bw_kbps *= 8; /* 8 bits per byte*/ + link_bw_kbps *= link_setting->lane_count; + + return link_bw_kbps; + +} + +const struct dc_link_settings *dc_link_get_link_cap( + const struct dc_link *link) +{ + if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN && + link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) + return &link->preferred_link_setting; + return &link->verified_link_cap; +} + static void destruct(struct dc *dc) { dc_release_state(dc->current_state); @@ -632,6 +654,8 @@ static bool construct(struct dc *dc, #endif enum dce_version dc_version = DCE_VERSION_UNKNOWN; + dc->config = init_params->flags; + memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides)); dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL); @@ -681,13 +705,6 @@ static bool construct(struct dc *dc, dc_ctx->dc_stream_id_count = 0; dc->ctx = dc_ctx; - dc->current_state = dc_create_state(); - - if (!dc->current_state) { - dm_error("%s: failed to create validate ctx\n", __func__); - goto fail; - } - /* Create logger */ dc_ctx->dce_environment = init_params->dce_environment; @@ -739,6 +756,18 @@ static bool construct(struct dc *dc, if (!dc->res_pool) goto fail; + /* Creation of current_state must occur after dc->dml + * is initialized in dc_create_resource_pool because + * on creation it copies the contents of dc->dml + */ + + dc->current_state = dc_create_state(dc); + + if (!dc->current_state) { + dm_error("%s: failed to create validate ctx\n", __func__); + goto fail; + } + dc_resource_state_construct(dc, dc->current_state); if (!create_links(dc, init_params->num_virtual_links)) @@ -755,7 +784,7 @@ fail: static void disable_dangling_plane(struct dc *dc, struct dc_state *context) { int i, j; - struct dc_state *dangling_context = dc_create_state(); + struct dc_state *dangling_context = dc_create_state(dc); struct dc_state *current_ctx; if (dangling_context == NULL) @@ -820,8 +849,6 @@ struct dc *dc_create(const struct dc_init_data *init_params) if (dc->res_pool->dmcu != NULL) dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; - dc->config = init_params->flags; - dc->build_id = DC_BUILD_ID; DC_LOG_DC("Display Core initialized\n"); @@ -1213,18 +1240,60 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc) return true; } -struct dc_state *dc_create_state(void) +struct dc_state 
*dc_create_state(struct dc *dc) { struct dc_state *context = kzalloc(sizeof(struct dc_state), GFP_KERNEL); if (!context) return NULL; + /* Each context must have its own instance of VBA, and in order to + * initialize and obtain IP and SOC the base DML instance from DC is + * initially copied into every context + */ +#ifdef CONFIG_DRM_AMD_DC_DCN1_0 + memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); +#endif kref_init(&context->refcount); + return context; } +struct dc_state *dc_copy_state(struct dc_state *src_ctx) +{ + int i, j; + struct dc_state *new_ctx = kzalloc(sizeof(struct dc_state), + GFP_KERNEL); + + if (!new_ctx) + return NULL; + + memcpy(new_ctx, src_ctx, sizeof(struct dc_state)); + + for (i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i]; + + if (cur_pipe->top_pipe) + cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx]; + + if (cur_pipe->bottom_pipe) + cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; + + } + + for (i = 0; i < new_ctx->stream_count; i++) { + dc_stream_retain(new_ctx->streams[i]); + for (j = 0; j < new_ctx->stream_status[i].plane_count; j++) + dc_plane_state_retain( + new_ctx->stream_status[i].plane_states[j]); + } + + kref_init(&new_ctx->refcount); + + return new_ctx; +} + void dc_retain_state(struct dc_state *context) { kref_get(&context->refcount); @@ -1824,7 +1893,7 @@ void dc_commit_updates_for_stream(struct dc *dc, if (update_type >= UPDATE_TYPE_FULL) { /* initialize scratch memory for building context */ - context = dc_create_state(); + context = dc_create_state(dc); if (context == NULL) { DC_ERROR("Failed to allocate new validate context!\n"); return; @@ -2109,13 +2178,13 @@ void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink) void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info) { - info->displayClock = (unsigned int)state->bw.dcn.clk.dispclk_khz; - info->engineClock = (unsigned int)state->bw.dcn.clk.dcfclk_khz; - info->memoryClock = (unsigned int)state->bw.dcn.clk.dramclk_khz; - info->maxSupportedDppClock = (unsigned int)state->bw.dcn.clk.max_supported_dppclk_khz; - info->dppClock = (unsigned int)state->bw.dcn.clk.dppclk_khz; - info->socClock = (unsigned int)state->bw.dcn.clk.socclk_khz; - info->dcfClockDeepSleep = (unsigned int)state->bw.dcn.clk.dcfclk_deep_sleep_khz; - info->fClock = (unsigned int)state->bw.dcn.clk.fclk_khz; - info->phyClock = (unsigned int)state->bw.dcn.clk.phyclk_khz; + info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz; + info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz; + info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz; + info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz; + info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz; + info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz; + info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz; + info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz; + info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index 73d049506618..5903e7822f98 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c @@ -351,19 +351,19 @@ void context_clock_trace( 
DC_LOGGER_INIT(dc->ctx->logger); CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n" "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n", - context->bw.dcn.clk.dispclk_khz, - context->bw.dcn.clk.dppclk_khz, - context->bw.dcn.clk.dcfclk_khz, - context->bw.dcn.clk.dcfclk_deep_sleep_khz, - context->bw.dcn.clk.fclk_khz, - context->bw.dcn.clk.socclk_khz); + context->bw_ctx.bw.dcn.clk.dispclk_khz, + context->bw_ctx.bw.dcn.clk.dppclk_khz, + context->bw_ctx.bw.dcn.clk.dcfclk_khz, + context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz, + context->bw_ctx.bw.dcn.clk.fclk_khz, + context->bw_ctx.bw.dcn.clk.socclk_khz); CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n" "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n", - context->bw.dcn.clk.dispclk_khz, - context->bw.dcn.clk.dppclk_khz, - context->bw.dcn.clk.dcfclk_khz, - context->bw.dcn.clk.dcfclk_deep_sleep_khz, - context->bw.dcn.clk.fclk_khz, - context->bw.dcn.clk.socclk_khz); + context->bw_ctx.bw.dcn.clk.dispclk_khz, + context->bw_ctx.bw.dcn.clk.dppclk_khz, + context->bw_ctx.bw.dcn.clk.dcfclk_khz, + context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz, + context->bw_ctx.bw.dcn.clk.fclk_khz, + context->bw_ctx.bw.dcn.clk.socclk_khz); #endif } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index b39f76e61039..b37ecc3ede61 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -58,7 +58,6 @@ ******************************************************************************/ enum { - LINK_RATE_REF_FREQ_IN_MHZ = 27, PEAK_FACTOR_X1000 = 1006, /* * Some receivers fail to train on first try and are good @@ -515,6 +514,40 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin } +static void read_edp_current_link_settings_on_detect(struct dc_link *link) +{ + union lane_count_set lane_count_set = { {0} }; + uint8_t link_bw_set; + uint8_t link_rate_set; + + // Read DPCD 00101h to find out the number of lanes currently set + core_link_read_dpcd(link, DP_LANE_COUNT_SET, + &lane_count_set.raw, sizeof(lane_count_set)); + link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET; + + // Read DPCD 00100h to find if standard link rates are set + core_link_read_dpcd(link, DP_LINK_BW_SET, + &link_bw_set, sizeof(link_bw_set)); + + if (link_bw_set == 0) { + /* If standard link rates are not being used, + * Read DPCD 00115h to find the link rate set used + */ + core_link_read_dpcd(link, DP_LINK_RATE_SET, + &link_rate_set, sizeof(link_rate_set)); + + if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { + link->cur_link_settings.link_rate = + link->dpcd_caps.edp_supported_link_rates[link_rate_set]; + link->cur_link_settings.link_rate_set = link_rate_set; + link->cur_link_settings.use_link_rate_set = true; + } + } else { + link->cur_link_settings.link_rate = link_bw_set; + link->cur_link_settings.use_link_rate_set = false; + } +} + static bool detect_dp( struct dc_link *link, struct display_sink_capability *sink_caps, @@ -649,9 +682,14 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) return false; } - if (link->connector_signal == SIGNAL_TYPE_EDP && - link->local_sink) - return true; + if (link->connector_signal == SIGNAL_TYPE_EDP) { + /* On detect, we want to make sure current link settings are + * up to date, especially if link was powered on by GOP. 
+ */ + read_edp_current_link_settings_on_detect(link); + if (link->local_sink) + return true; + } if (link->connector_signal == SIGNAL_TYPE_LVDS && link->local_sink) @@ -1397,9 +1435,24 @@ static enum dc_status enable_link_dp( /* get link settings for video mode timing */ decide_link_settings(stream, &link_settings); + if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) { + /* If link settings are different than current and link already enabled + * then need to disable before programming to new rate. + */ + if (link->link_status.link_active && + (link->cur_link_settings.lane_count != link_settings.lane_count || + link->cur_link_settings.link_rate != link_settings.link_rate)) { + dp_disable_link_phy(link, pipe_ctx->stream->signal); + } + + /*in case it is not on*/ + link->dc->hwss.edp_power_control(link, true); + link->dc->hwss.edp_wait_for_hpd_ready(link, true); + } + pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; - state->dccg->funcs->update_clocks(state->dccg, state, false); + state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false); dp_enable_link_phy( link, @@ -1440,15 +1493,9 @@ static enum dc_status enable_link_edp( struct pipe_ctx *pipe_ctx) { enum dc_status status; - struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->link; - /*in case it is not on*/ - link->dc->hwss.edp_power_control(link, true); - link->dc->hwss.edp_wait_for_hpd_ready(link, true); status = enable_link_dp(state, pipe_ctx); - return status; } @@ -2150,7 +2197,7 @@ static bool dp_active_dongle_validate_timing( return false; } - if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk * 10)) + if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) return false; return true; @@ -2289,14 +2336,13 @@ void core_link_resume(struct dc_link *link) static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream) { - struct dc_link_settings *link_settings = - &stream->link->cur_link_settings; - uint32_t link_rate_in_mbps = - link_settings->link_rate * LINK_RATE_REF_FREQ_IN_MHZ; - struct fixed31_32 mbps = dc_fixpt_from_int( - link_rate_in_mbps * link_settings->lane_count); - - return dc_fixpt_div_int(mbps, 54); + struct fixed31_32 mbytes_per_sec; + uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link, &stream->link->cur_link_settings); + link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */ + + mbytes_per_sec = dc_fixpt_from_int(link_rate_in_mbytes_per_sec); + + return dc_fixpt_div_int(mbytes_per_sec, 54); } static int get_color_depth(enum dc_color_depth color_depth) @@ -2321,7 +2367,7 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx) uint32_t denominator; bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth); - kbps = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10 * bpc * 3; + kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing); /* * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 @@ -2666,12 +2712,18 @@ void core_link_enable_stream( void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option) { struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc_stream_state *stream = pipe_ctx->stream; core_dc->hwss.blank_stream(pipe_ctx); if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) deallocate_mst_payload(pipe_ctx); + if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) + dal_ddc_service_write_scdc_data( + stream->link->ddc, 0, + 
stream->timing.flags.LTE_340MCSC_SCRAMBLE); + core_dc->hwss.disable_stream(pipe_ctx, option); disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); @@ -2736,3 +2788,49 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable) } } +uint32_t dc_bandwidth_in_kbps_from_timing( + const struct dc_crtc_timing *timing) +{ + uint32_t bits_per_channel = 0; + uint32_t kbps; + + switch (timing->display_color_depth) { + case COLOR_DEPTH_666: + bits_per_channel = 6; + break; + case COLOR_DEPTH_888: + bits_per_channel = 8; + break; + case COLOR_DEPTH_101010: + bits_per_channel = 10; + break; + case COLOR_DEPTH_121212: + bits_per_channel = 12; + break; + case COLOR_DEPTH_141414: + bits_per_channel = 14; + break; + case COLOR_DEPTH_161616: + bits_per_channel = 16; + break; + default: + break; + } + + ASSERT(bits_per_channel != 0); + + kbps = timing->pix_clk_100hz / 10; + kbps *= bits_per_channel; + + if (timing->flags.Y_ONLY != 1) { + /* Only Y-only uses 1/3 the bandwidth of RGB */ + kbps *= 3; + if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) + kbps /= 2; + else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) + kbps = kbps * 2 / 3; + } + + return kbps; + +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 063d019a3f6f..1ee544a32ebb 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1533,69 +1533,6 @@ static bool decide_fallback_link_setting( return true; } -static uint32_t bandwidth_in_kbps_from_timing( - const struct dc_crtc_timing *timing) -{ - uint32_t bits_per_channel = 0; - uint32_t kbps; - - switch (timing->display_color_depth) { - case COLOR_DEPTH_666: - bits_per_channel = 6; - break; - case COLOR_DEPTH_888: - bits_per_channel = 8; - break; - case COLOR_DEPTH_101010: - bits_per_channel = 10; - break; - case COLOR_DEPTH_121212: - bits_per_channel = 12; - break; - case COLOR_DEPTH_141414: - bits_per_channel = 14; - break; - case COLOR_DEPTH_161616: - bits_per_channel = 16; - break; - default: - break; - } - - ASSERT(bits_per_channel != 0); - - kbps = timing->pix_clk_100hz / 10; - kbps *= bits_per_channel; - - if (timing->flags.Y_ONLY != 1) { - /*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/ - kbps *= 3; - if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) - kbps /= 2; - else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) - kbps = kbps * 2 / 3; - } - - return kbps; - -} - -static uint32_t bandwidth_in_kbps_from_link_settings( - const struct dc_link_settings *link_setting) -{ - uint32_t link_rate_in_kbps = link_setting->link_rate * - LINK_RATE_REF_FREQ_IN_KHZ; - - uint32_t lane_count = link_setting->lane_count; - uint32_t kbps = link_rate_in_kbps; - - kbps *= lane_count; - kbps *= 8; /* 8 bits per byte*/ - - return kbps; - -} - bool dp_validate_mode_timing( struct dc_link *link, const struct dc_crtc_timing *timing) @@ -1611,8 +1548,7 @@ bool dp_validate_mode_timing( timing->v_addressable == (uint32_t) 480) return true; - /* We always use verified link settings */ - link_setting = &link->verified_link_cap; + link_setting = dc_link_get_link_cap(link); /* TODO: DYNAMIC_VALIDATION needs to be implemented */ /*if (flags.DYNAMIC_VALIDATION == 1 && @@ -1620,8 +1556,8 @@ link_setting = &link->verified_link_cap; */ - req_bw = bandwidth_in_kbps_from_timing(timing); - max_bw = bandwidth_in_kbps_from_link_settings(link_setting); + req_bw = 
dc_bandwidth_in_kbps_from_timing(timing); + max_bw = dc_link_bandwidth_kbps(link, link_setting); if (req_bw <= max_bw) { /* remember the biggest mode here, during @@ -1656,7 +1592,8 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting */ while (current_link_setting.link_rate <= link->verified_link_cap.link_rate) { - link_bw = bandwidth_in_kbps_from_link_settings( + link_bw = dc_link_bandwidth_kbps( + link, ¤t_link_setting); if (req_bw <= link_bw) { *link_setting = current_link_setting; @@ -1707,7 +1644,8 @@ static bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settin */ while (current_link_setting.link_rate <= link->verified_link_cap.link_rate) { - link_bw = bandwidth_in_kbps_from_link_settings( + link_bw = dc_link_bandwidth_kbps( + link, ¤t_link_setting); if (req_bw <= link_bw) { *link_setting = current_link_setting; @@ -1739,7 +1677,7 @@ void decide_link_settings(struct dc_stream_state *stream, struct dc_link *link; uint32_t req_bw; - req_bw = bandwidth_in_kbps_from_timing(&stream->timing); + req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); link = stream->link; @@ -2247,6 +2185,30 @@ static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc) return -1; } +static void read_dp_device_vendor_id(struct dc_link *link) +{ + struct dp_device_vendor_id dp_id; + + /* read IEEE branch device id */ + core_link_read_dpcd( + link, + DP_BRANCH_OUI, + (uint8_t *)&dp_id, + sizeof(dp_id)); + + link->dpcd_caps.branch_dev_id = + (dp_id.ieee_oui[0] << 16) + + (dp_id.ieee_oui[1] << 8) + + dp_id.ieee_oui[2]; + + memmove( + link->dpcd_caps.branch_dev_name, + dp_id.ieee_device_id, + sizeof(dp_id.ieee_device_id)); +} + + + static void get_active_converter_info( uint8_t data, struct dc_link *link) { @@ -2304,8 +2266,8 @@ static void get_active_converter_info( hdmi_caps = {.raw = det_caps[3] }; union dwnstream_port_caps_byte2 hdmi_color_caps = {.raw = det_caps[2] }; - link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk = - det_caps[1] * 25000; + link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz = + det_caps[1] * 2500; link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter = hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK; @@ -2322,7 +2284,7 @@ static void get_active_converter_info( translate_dpcd_max_bpc( hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT); - if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk != 0) + if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0) link->dpcd_caps.dongle_caps.extendedCapValid = true; } @@ -2333,27 +2295,6 @@ static void get_active_converter_info( ddc_service_set_dongle_type(link->ddc, link->dpcd_caps.dongle_type); { - struct dp_device_vendor_id dp_id; - - /* read IEEE branch device id */ - core_link_read_dpcd( - link, - DP_BRANCH_OUI, - (uint8_t *)&dp_id, - sizeof(dp_id)); - - link->dpcd_caps.branch_dev_id = - (dp_id.ieee_oui[0] << 16) + - (dp_id.ieee_oui[1] << 8) + - dp_id.ieee_oui[2]; - - memmove( - link->dpcd_caps.branch_dev_name, - dp_id.ieee_device_id, - sizeof(dp_id.ieee_device_id)); - } - - { struct dp_sink_hw_fw_revision dp_hw_fw_revision; core_link_read_dpcd( @@ -2517,6 +2458,8 @@ static bool retrieve_link_cap(struct dc_link *link) ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - DP_DPCD_REV]; + read_dp_device_vendor_id(link); + get_active_converter_info(ds_port.byte, link); dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data)); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index f7f7515f65f4..b0dea759cd86 
100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -58,6 +58,8 @@ void dp_enable_link_phy( const struct dc_link_settings *link_settings) { struct link_encoder *link_enc = link->link_enc; + struct dc *core_dc = link->ctx->dc; + struct dmcu *dmcu = core_dc->res_pool->dmcu; struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; @@ -84,6 +86,9 @@ void dp_enable_link_phy( } } + if (dmcu != NULL && dmcu->funcs->lock_phy) + dmcu->funcs->lock_phy(dmcu); + if (dc_is_dp_sst_signal(signal)) { link_enc->funcs->enable_dp_output( link_enc, @@ -95,6 +100,10 @@ void dp_enable_link_phy( link_settings, clock_source); } + + if (dmcu != NULL && dmcu->funcs->unlock_phy) + dmcu->funcs->unlock_phy(dmcu); + link->cur_link_settings = *link_settings; dp_receiver_power_ctrl(link, true); @@ -150,15 +159,25 @@ bool edp_receiver_ready_T7(struct dc_link *link) void dp_disable_link_phy(struct dc_link *link, enum signal_type signal) { + struct dc *core_dc = link->ctx->dc; + struct dmcu *dmcu = core_dc->res_pool->dmcu; + if (!link->wa_flags.dp_keep_receiver_powered) dp_receiver_power_ctrl(link, false); if (signal == SIGNAL_TYPE_EDP) { link->link_enc->funcs->disable_output(link->link_enc, signal); link->dc->hwss.edp_power_control(link, false); - } else + } else { + if (dmcu != NULL && dmcu->funcs->lock_phy) + dmcu->funcs->lock_phy(dmcu); + link->link_enc->funcs->disable_output(link->link_enc, signal); + if (dmcu != NULL && dmcu->funcs->unlock_phy) + dmcu->funcs->unlock_phy(dmcu); + } + /* Clear current link setting.*/ memset(&link->cur_link_settings, 0, sizeof(link->cur_link_settings)); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index d0ed95eda508..eac7186e4f08 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1305,18 +1305,13 @@ struct pipe_ctx *dc_res_get_odm_bottom_pipe(struct pipe_ctx *pipe_ctx) bool dc_res_is_odm_head_pipe(struct pipe_ctx *pipe_ctx) { struct pipe_ctx *top_pipe = pipe_ctx->top_pipe; - bool result = false; + if (!top_pipe) + return false; if (top_pipe && top_pipe->stream_res.opp == pipe_ctx->stream_res.opp) return false; - while (top_pipe) { - if (!top_pipe->top_pipe && top_pipe->stream_res.opp != pipe_ctx->stream_res.opp) - result = true; - top_pipe = top_pipe->top_pipe; - } - - return result; + return true; } bool dc_remove_plane_from_context( @@ -2064,7 +2059,7 @@ void dc_resource_state_construct( const struct dc *dc, struct dc_state *dst_ctx) { - dst_ctx->dccg = dc->res_pool->clk_mgr; + dst_ctx->clk_mgr = dc->res_pool->clk_mgr; } /** @@ -2072,12 +2067,14 @@ void dc_resource_state_construct( * Checks HW resource availability and bandwidth requirement. * @dc: dc struct for this driver * @new_ctx: state to be validated + * @fast_validate: set to true if only yes/no to support matters * * Return: DC_OK if the result can be programmed. Otherwise, an error code. 
*/ enum dc_status dc_validate_global_state( struct dc *dc, - struct dc_state *new_ctx) + struct dc_state *new_ctx, + bool fast_validate) { enum dc_status result = DC_ERROR_UNEXPECTED; int i, j; @@ -2132,7 +2129,7 @@ enum dc_status dc_validate_global_state( result = resource_build_scaling_params_for_context(dc, new_ctx); if (result == DC_OK) - if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx)) + if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate)) result = DC_FAIL_BANDWIDTH_VALIDATE; return result; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index f7a293902234..96e97d25d639 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -163,6 +163,27 @@ struct dc_stream_state *dc_create_stream_for_sink( return stream; } +struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream) +{ + struct dc_stream_state *new_stream; + + new_stream = kzalloc(sizeof(struct dc_stream_state), GFP_KERNEL); + if (!new_stream) + return NULL; + + memcpy(new_stream, stream, sizeof(struct dc_stream_state)); + + if (new_stream->sink) + dc_sink_retain(new_stream->sink); + + if (new_stream->out_transfer_func) + dc_transfer_func_retain(new_stream->out_transfer_func); + + kref_init(&new_stream->refcount); + + return new_stream; +} + /** * dc_stream_get_status_from_state - Get stream status from given dc state * @state: DC state to find the stream status in @@ -211,7 +232,8 @@ static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc) ASIC_REV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) { vupdate_line = get_vupdate_offset_from_vsync(pipe_ctx); - dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos); + if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos)) + return; if (vpos >= vupdate_line) return; @@ -311,7 +333,7 @@ bool dc_stream_set_cursor_position( (!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) || !pipe_ctx->plane_state || (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) || - !pipe_ctx->plane_res.ipp) + (!pipe_ctx->plane_res.ipp && !pipe_ctx->plane_res.dpp)) continue; if (!pipe_to_program) { diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index c09a19046fd8..70edd9ea5afe 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.24" +#define DC_VER "3.2.27" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -66,8 +66,27 @@ struct dc_plane_cap { uint32_t blends_with_above : 1; uint32_t blends_with_below : 1; uint32_t per_pixel_alpha : 1; - uint32_t supports_argb8888 : 1; - uint32_t supports_nv12 : 1; + struct { + uint32_t argb8888 : 1; + uint32_t nv12 : 1; + uint32_t fp16 : 1; + } pixel_format_support; + // max upscaling factor x1000 + // upscaling factors are always >= 1 + // for example, 1080p -> 8K is 4.0, or 4000 raw value + struct { + uint32_t argb8888; + uint32_t nv12; + uint32_t fp16; + } max_upscale_factor; + // max downscale factor x1000 + // downscale factors are always <= 1 + // for example, 8K -> 1080p is 0.25, or 250 raw value + struct { + uint32_t argb8888; + uint32_t nv12; + uint32_t fp16; + } max_downscale_factor; }; struct dc_caps { @@ -183,7 +202,9 @@ struct dc_config { bool disable_disp_pll_sharing; bool fbc_support; bool optimize_edp_link_rate; + bool disable_fractional_pwm; bool allow_seamless_boot_optimization; + 
bool power_down_display_on_boot; }; enum visual_confirm { @@ -226,6 +247,57 @@ struct dc_clocks { bool p_state_change_support; }; +struct dc_bw_validation_profile { + bool enable; + + unsigned long long total_ticks; + unsigned long long voltage_level_ticks; + unsigned long long watermark_ticks; + unsigned long long rq_dlg_ticks; + + unsigned long long total_count; + unsigned long long skip_fast_count; + unsigned long long skip_pass_count; + unsigned long long skip_fail_count; +}; + +#define BW_VAL_TRACE_SETUP() \ + unsigned long long end_tick = 0; \ + unsigned long long voltage_level_tick = 0; \ + unsigned long long watermark_tick = 0; \ + unsigned long long start_tick = dc->debug.bw_val_profile.enable ? \ + dm_get_timestamp(dc->ctx) : 0 + +#define BW_VAL_TRACE_COUNT() \ + if (dc->debug.bw_val_profile.enable) \ + dc->debug.bw_val_profile.total_count++ + +#define BW_VAL_TRACE_SKIP(status) \ + if (dc->debug.bw_val_profile.enable) { \ + if (!voltage_level_tick) \ + voltage_level_tick = dm_get_timestamp(dc->ctx); \ + dc->debug.bw_val_profile.skip_ ## status ## _count++; \ + } + +#define BW_VAL_TRACE_END_VOLTAGE_LEVEL() \ + if (dc->debug.bw_val_profile.enable) \ + voltage_level_tick = dm_get_timestamp(dc->ctx) + +#define BW_VAL_TRACE_END_WATERMARKS() \ + if (dc->debug.bw_val_profile.enable) \ + watermark_tick = dm_get_timestamp(dc->ctx) + +#define BW_VAL_TRACE_FINISH() \ + if (dc->debug.bw_val_profile.enable) { \ + end_tick = dm_get_timestamp(dc->ctx); \ + dc->debug.bw_val_profile.total_ticks += end_tick - start_tick; \ + dc->debug.bw_val_profile.voltage_level_ticks += voltage_level_tick - start_tick; \ + if (watermark_tick) { \ + dc->debug.bw_val_profile.watermark_ticks += watermark_tick - voltage_level_tick; \ + dc->debug.bw_val_profile.rq_dlg_ticks += end_tick - watermark_tick; \ + } \ + } + struct dc_debug_options { enum visual_confirm visual_confirm; bool sanity_checks; @@ -279,6 +351,7 @@ struct dc_debug_options { unsigned int force_odm_combine; //bit vector based on otg inst unsigned int force_fclk_khz; bool disable_tri_buf; + struct dc_bw_validation_profile bw_val_profile; }; struct dc_debug_data { @@ -638,9 +711,14 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info); +/* + * fast_validate: we return after determining if we can support the new state, + * but before we populate the programming info + */ enum dc_status dc_validate_global_state( struct dc *dc, - struct dc_state *new_ctx); + struct dc_state *new_ctx, + bool fast_validate); void dc_resource_state_construct( @@ -669,7 +747,8 @@ void dc_resource_state_destruct(struct dc_state *context); bool dc_commit_state(struct dc *dc, struct dc_state *context); -struct dc_state *dc_create_state(void); +struct dc_state *dc_create_state(struct dc *dc); +struct dc_state *dc_copy_state(struct dc_state *src_ctx); void dc_retain_state(struct dc_state *context); void dc_release_state(struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 8fc223defed4..7b9429e30d82 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -120,6 +120,7 @@ struct dc_link { /* MST record stream using this link */ struct link_flags { bool dp_keep_receiver_powered; + bool dp_skip_DID2; } wa_flags; struct link_mst_stream_allocation_table mst_stream_alloc_table; @@ -246,10 +247,18 @@ void dc_link_set_test_pattern(struct dc_link 
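/*
 * Note on the BW_VAL_TRACE_* macros above: they are meant to bracket the
 * stages of a bandwidth-validation call. A sketch of the intended call
 * shape, assuming a validate function of this form -- the helpers
 * determine_voltage_level() and calculate_watermarks_and_rq_dlg() are
 * hypothetical stand-ins for the real stages, not code from this patch:
 */
static bool example_validate_bandwidth(struct dc *dc,
				       struct dc_state *context,
				       bool fast_validate)
{
	BW_VAL_TRACE_SETUP();		/* declares and starts the tick counters */
	bool ok;

	BW_VAL_TRACE_COUNT();		/* one more validation attempt */

	ok = determine_voltage_level(dc, context);
	BW_VAL_TRACE_END_VOLTAGE_LEVEL();

	if (!ok) {
		BW_VAL_TRACE_SKIP(fail);	/* bumps skip_fail_count */
		goto done;
	}
	if (fast_validate) {
		BW_VAL_TRACE_SKIP(fast);	/* bumps skip_fast_count */
		goto done;
	}

	calculate_watermarks_and_rq_dlg(dc, context);
	BW_VAL_TRACE_END_WATERMARKS();
done:
	BW_VAL_TRACE_FINISH();		/* accumulate into dc->debug.bw_val_profile */
	return ok;
}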
*link, const struct link_training_settings *p_link_settings, const unsigned char *p_custom_pattern, unsigned int cust_pattern_size); +uint32_t dc_link_bandwidth_kbps( + const struct dc_link *link, + const struct dc_link_settings *link_setting); + +const struct dc_link_settings *dc_link_get_link_cap( + const struct dc_link *link); bool dc_submit_i2c( struct dc *dc, uint32_t link_index, struct i2c_command *cmd); +uint32_t dc_bandwidth_in_kbps_from_timing( + const struct dc_crtc_timing *timing); #endif /* DC_LINK_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 17fa3bf6cf7b..189bdab929a5 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -307,6 +307,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream( */ struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink); +struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream); + void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink); void dc_stream_retain(struct dc_stream_state *dc_stream); diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index c8e2dc5ec62a..6c2a3d9a4c2e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -395,7 +395,7 @@ struct dc_dongle_caps { bool is_dp_hdmi_ycbcr422_converter; bool is_dp_hdmi_ycbcr420_converter; uint32_t dp_hdmi_max_bpc; - uint32_t dp_hdmi_max_pixel_clk; + uint32_t dp_hdmi_max_pixel_clk_in_khz; }; /* Scaling format */ enum scaling_transformation { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c index 6e142c2db986..963686380738 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c @@ -222,7 +222,7 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state( * all required clocks */ for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--) - if (context->bw.dce.dispclk_khz > + if (context->bw_ctx.bw.dce.dispclk_khz > clk_mgr_dce->max_clks_by_state[i].display_clk_khz || max_pix_clk > clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz) @@ -232,7 +232,7 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state( if (low_req_clk > clk_mgr_dce->max_clks_state) { /* set max clock state for high phyclock, invalid on exceeding display clock */ if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz - < context->bw.dce.dispclk_khz) + < context->bw_ctx.bw.dce.dispclk_khz) low_req_clk = DM_PP_CLOCKS_STATE_INVALID; else low_req_clk = clk_mgr_dce->max_clks_state; @@ -610,22 +610,22 @@ static void dce11_pplib_apply_display_requirements( struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; pp_display_cfg->all_displays_in_sync = - context->bw.dce.all_displays_in_sync; + context->bw_ctx.bw.dce.all_displays_in_sync; pp_display_cfg->nb_pstate_switch_disable = - context->bw.dce.nbp_state_change_enable == false; + context->bw_ctx.bw.dce.nbp_state_change_enable == false; pp_display_cfg->cpu_cc6_disable = - context->bw.dce.cpuc_state_change_enable == false; + context->bw_ctx.bw.dce.cpuc_state_change_enable == false; pp_display_cfg->cpu_pstate_disable = - context->bw.dce.cpup_state_change_enable == false; + context->bw_ctx.bw.dce.cpup_state_change_enable == false; pp_display_cfg->cpu_pstate_separation_time = - 
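/*
 * Note on the two helpers just added to dc_link.h: they pair naturally --
 * a timing fits on a link when the bandwidth the timing requires does not
 * exceed what the link can carry at the given settings. The wrapper below
 * is an illustrative sketch of that check, not code from this patch:
 */
static bool timing_fits_on_link(const struct dc_link *link,
				const struct dc_link_settings *setting,
				const struct dc_crtc_timing *timing)
{
	uint32_t req_kbps  = dc_bandwidth_in_kbps_from_timing(timing);
	uint32_t link_kbps = dc_link_bandwidth_kbps(link, setting);

	return req_kbps <= link_kbps;
}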
context->bw.dce.blackout_recovery_time_us; + context->bw_ctx.bw.dce.blackout_recovery_time_us; - pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz + pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz / MEMORY_TYPE_MULTIPLIER_CZ; pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box( dc, - context->bw.dce.sclk_khz); + context->bw_ctx.bw.dce.sclk_khz); /* * As workaround for >4x4K lightup set dcfclock to min_engine_clock value. @@ -638,7 +638,7 @@ static void dce11_pplib_apply_display_requirements( pp_display_cfg->min_engine_clock_khz : 0; pp_display_cfg->min_engine_clock_deep_sleep_khz - = context->bw.dce.sclk_deep_sleep_khz; + = context->bw_ctx.bw.dce.sclk_deep_sleep_khz; pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); @@ -669,7 +669,7 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr, { struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); struct dm_pp_power_level_change_request level_change_req; - int patched_disp_clk = context->bw.dce.dispclk_khz; + int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz; /*TODO: W/A for dal3 linux, investigate why this works */ if (!clk_mgr_dce->dfs_bypass_active) @@ -696,7 +696,7 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr, { struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); struct dm_pp_power_level_change_request level_change_req; - int patched_disp_clk = context->bw.dce.dispclk_khz; + int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz; /*TODO: W/A for dal3 linux, investigate why this works */ if (!clk_mgr_dce->dfs_bypass_active) @@ -711,7 +711,7 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr, } if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) { - context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk); + context->bw_ctx.bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk); clk_mgr->clks.dispclk_khz = patched_disp_clk; } dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); @@ -723,7 +723,7 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr, { struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); struct dm_pp_power_level_change_request level_change_req; - int patched_disp_clk = context->bw.dce.dispclk_khz; + int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz; /*TODO: W/A for dal3 linux, investigate why this works */ if (!clk_mgr_dce->dfs_bypass_active) @@ -751,7 +751,7 @@ static void dce12_update_clocks(struct clk_mgr *clk_mgr, struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; int max_pix_clk = get_max_pixel_clock_for_all_paths(context); - int patched_disp_clk = context->bw.dce.dispclk_khz; + int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz; /*TODO: W/A for dal3 linux, investigate why this works */ if (!clk_mgr_dce->dfs_bypass_active) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index aa586672e8cd..818536eea00a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -51,6 +51,9 @@ #define PSR_SET_WAITLOOP 0x31 #define MCP_INIT_DMCU 0x88 #define MCP_INIT_IRAM 0x89 +#define MCP_SYNC_PHY_LOCK 0x90 +#define MCP_SYNC_PHY_UNLOCK 0x91 +#define MCP_BL_SET_PWM_FRAC 0x6A /* Enable or disable Fractional PWM */ #define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L static bool dce_dmcu_init(struct dmcu *dmcu) @@ -339,9 
+342,32 @@ static void dcn10_get_dmcu_version(struct dmcu *dmcu) IRAM_RD_ADDR_AUTO_INC, 0); } +static void dcn10_dmcu_enable_fractional_pwm(struct dmcu *dmcu, + uint32_t fractional_pwm) +{ + struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); + + /* Wait until microcontroller is ready to process interrupt */ + REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); + + /* Set PWM fractional enable/disable */ + REG_WRITE(MASTER_COMM_DATA_REG1, fractional_pwm); + + /* Set command to enable or disable fractional PWM microcontroller */ + REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, + MCP_BL_SET_PWM_FRAC); + + /* Notify microcontroller of new command */ + REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); + + /* Ensure command has been executed before continuing */ + REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); +} + static bool dcn10_dmcu_init(struct dmcu *dmcu) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); + const struct dc_config *config = &dmcu->ctx->dc->config; bool status = false; /* Definition of DC_DMCU_SCRATCH @@ -379,9 +405,14 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu) if (dmcu->dmcu_state == DMCU_RUNNING) { /* Retrieve and cache the DMCU firmware version. */ dcn10_get_dmcu_version(dmcu); + + /* Initialize DMCU to use fractional PWM or not */ + dcn10_dmcu_enable_fractional_pwm(dmcu, + (config->disable_fractional_pwm == false) ? 1 : 0); status = true; - } else + } else { status = false; + } break; case DMCU_RUNNING: @@ -690,7 +721,7 @@ static bool dcn10_is_dmcu_initialized(struct dmcu *dmcu) return true; } -#endif +#endif //(CONFIG_DRM_AMD_DC_DCN1_0) static const struct dmcu_funcs dce_funcs = { .dmcu_init = dce_dmcu_init, diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index b733dc17db87..e938bf9986d3 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -380,7 +380,24 @@ static const struct resource_caps res_cap = { static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCE_RGB, - .supports_argb8888 = true, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = false, + .fp16 = false + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 1, + .fp16 = 1 + }, + + .max_downscale_factor = { + .argb8888 = 250, + .nv12 = 1, + .fp16 = 1 + } }; #define CTX ctx @@ -761,7 +778,8 @@ static enum dc_status build_mapped_resource( bool dce100_validate_bandwidth( struct dc *dc, - struct dc_state *context) + struct dc_state *context, + bool fast_validate) { int i; bool at_least_one_pipe = false; @@ -773,11 +791,11 @@ bool dce100_validate_bandwidth( if (at_least_one_pipe) { /* TODO implement when needed but for now hardcode max value*/ - context->bw.dce.dispclk_khz = 681000; - context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; + context->bw_ctx.bw.dce.dispclk_khz = 681000; + context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; } else { - context->bw.dce.dispclk_khz = 0; - context->bw.dce.yclk_khz = 0; + context->bw_ctx.bw.dce.dispclk_khz = 0; + context->bw_ctx.bw.dce.yclk_khz = 0; } return true; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 1019c59c2e3b..7ac50ab1b762 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1166,8 +1166,8 @@ static void 
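/*
 * Note on dcn10_dmcu_enable_fractional_pwm() above: it follows the
 * generic DMCU mailbox handshake -- wait for the previous command to
 * drain, stage the payload, stage the opcode, raise the interrupt, then
 * wait for the acknowledge. Generalized sketch; dcn10_send_mcp_command()
 * is a name invented here for illustration, not a function this patch
 * adds:
 */
static void dcn10_send_mcp_command(struct dmcu *dmcu, uint32_t cmd,
				   uint32_t data)
{
	struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);

	/* 1. Wait until the microcontroller has consumed any prior command. */
	REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);

	/* 2. Stage the command payload. */
	REG_WRITE(MASTER_COMM_DATA_REG1, data);

	/* 3. Stage the command opcode. */
	REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, cmd);

	/* 4. Hand the command to the DMCU, then wait for the acknowledge
	 *    (the DMCU clears the interrupt bit when it is done). */
	REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
	REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
}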
build_audio_output( if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT || pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { audio_output->pll_info.dp_dto_source_clock_in_khz = - state->dccg->funcs->get_dp_ref_clk_frequency( - state->dccg); + state->clk_mgr->funcs->get_dp_ref_clk_frequency( + state->clk_mgr); } audio_output->pll_info.feed_back_divider = @@ -1630,18 +1630,18 @@ static void dce110_set_displaymarks( dc->bw_vbios->blackout_duration, pipe_ctx->stream); pipe_ctx->plane_res.mi->funcs->mem_input_program_display_marks( pipe_ctx->plane_res.mi, - context->bw.dce.nbp_state_change_wm_ns[num_pipes], - context->bw.dce.stutter_exit_wm_ns[num_pipes], - context->bw.dce.stutter_entry_wm_ns[num_pipes], - context->bw.dce.urgent_wm_ns[num_pipes], + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[num_pipes], + context->bw_ctx.bw.dce.stutter_exit_wm_ns[num_pipes], + context->bw_ctx.bw.dce.stutter_entry_wm_ns[num_pipes], + context->bw_ctx.bw.dce.urgent_wm_ns[num_pipes], total_dest_line_time_ns); if (i == underlay_idx) { num_pipes++; pipe_ctx->plane_res.mi->funcs->mem_input_program_chroma_display_marks( pipe_ctx->plane_res.mi, - context->bw.dce.nbp_state_change_wm_ns[num_pipes], - context->bw.dce.stutter_exit_wm_ns[num_pipes], - context->bw.dce.urgent_wm_ns[num_pipes], + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[num_pipes], + context->bw_ctx.bw.dce.stutter_exit_wm_ns[num_pipes], + context->bw_ctx.bw.dce.urgent_wm_ns[num_pipes], total_dest_line_time_ns); } num_pipes++; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index 50af7e17db3b..dcd04e9ea76b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -397,14 +397,48 @@ static const struct dc_plane_cap plane_cap = { .blends_with_below = true, .blends_with_above = true, .per_pixel_alpha = 1, - .supports_argb8888 = true, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = false, + .fp16 = false + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 1, + .fp16 = 1 + }, + + .max_downscale_factor = { + .argb8888 = 250, + .nv12 = 1, + .fp16 = 1 + } }; static const struct dc_plane_cap underlay_plane_cap = { .type = DC_PLANE_TYPE_DCE_UNDERLAY, .blends_with_above = true, .per_pixel_alpha = 1, - .supports_nv12 = true + + .pixel_format_support = { + .argb8888 = false, + .nv12 = true, + .fp16 = false + }, + + .max_upscale_factor = { + .argb8888 = 1, + .nv12 = 16000, + .fp16 = 1 + }, + + .max_downscale_factor = { + .argb8888 = 1, + .nv12 = 250, + .fp16 = 1 + } }; #define CTX ctx @@ -869,7 +903,8 @@ static enum dc_status build_mapped_resource( static bool dce110_validate_bandwidth( struct dc *dc, - struct dc_state *context) + struct dc_state *context, + bool fast_validate) { bool result = false; @@ -883,7 +918,7 @@ static bool dce110_validate_bandwidth( dc->bw_vbios, context->res_ctx.pipe_ctx, dc->res_pool->pipe_count, - &context->bw.dce)) + &context->bw_ctx.bw.dce)) result = true; if (!result) @@ -893,8 +928,8 @@ static bool dce110_validate_bandwidth( context->streams[0]->timing.v_addressable, context->streams[0]->timing.pix_clk_100hz / 10); - if (memcmp(&dc->current_state->bw.dce, - &context->bw.dce, sizeof(context->bw.dce))) { + if (memcmp(&dc->current_state->bw_ctx.bw.dce, + &context->bw_ctx.bw.dce, sizeof(context->bw_ctx.bw.dce))) { DC_LOG_BANDWIDTH_CALCS( "%s: finish,\n" @@ -908,34 +943,34 @@ static bool dce110_validate_bandwidth( "sclk: %d sclk_sleep: %d yclk: %d 
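/*
 * Note on the plane-cap scale factors being added throughout these
 * resource files: they are fixed-point ratios scaled by 1000 (see the
 * comments in dc.h), so 16000 means up to 16.0x upscale and 250 means
 * down to 0.25x. A hedged helper showing how such caps can be checked
 * without floating point; the function is illustrative, not from the
 * patch:
 */
static bool scale_within_caps(uint32_t src_w, uint32_t dst_w,
			      uint32_t max_upscale_x1000,
			      uint32_t max_downscale_x1000)
{
	if (dst_w >= src_w)	/* upscale: dst/src must not exceed the cap */
		return (uint64_t)dst_w * 1000 <=
		       (uint64_t)src_w * max_upscale_x1000;

	/* downscale: dst/src must not fall below the cap */
	return (uint64_t)dst_w * 1000 >=
	       (uint64_t)src_w * max_downscale_x1000;
}
/*
 * e.g. 1920 -> 7680 (4.0x) passes an ARGB8888 upscale cap of 16000, and
 * 7680 -> 1920 (0.25x) sits exactly at the 250 downscale cap.
 */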
blackout_recovery_time_us: %d\n" , __func__, - context->bw.dce.nbp_state_change_wm_ns[0].b_mark, - context->bw.dce.nbp_state_change_wm_ns[0].a_mark, - context->bw.dce.urgent_wm_ns[0].b_mark, - context->bw.dce.urgent_wm_ns[0].a_mark, - context->bw.dce.stutter_exit_wm_ns[0].b_mark, - context->bw.dce.stutter_exit_wm_ns[0].a_mark, - context->bw.dce.nbp_state_change_wm_ns[1].b_mark, - context->bw.dce.nbp_state_change_wm_ns[1].a_mark, - context->bw.dce.urgent_wm_ns[1].b_mark, - context->bw.dce.urgent_wm_ns[1].a_mark, - context->bw.dce.stutter_exit_wm_ns[1].b_mark, - context->bw.dce.stutter_exit_wm_ns[1].a_mark, - context->bw.dce.nbp_state_change_wm_ns[2].b_mark, - context->bw.dce.nbp_state_change_wm_ns[2].a_mark, - context->bw.dce.urgent_wm_ns[2].b_mark, - context->bw.dce.urgent_wm_ns[2].a_mark, - context->bw.dce.stutter_exit_wm_ns[2].b_mark, - context->bw.dce.stutter_exit_wm_ns[2].a_mark, - context->bw.dce.stutter_mode_enable, - context->bw.dce.cpuc_state_change_enable, - context->bw.dce.cpup_state_change_enable, - context->bw.dce.nbp_state_change_enable, - context->bw.dce.all_displays_in_sync, - context->bw.dce.dispclk_khz, - context->bw.dce.sclk_khz, - context->bw.dce.sclk_deep_sleep_khz, - context->bw.dce.yclk_khz, - context->bw.dce.blackout_recovery_time_us); + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].b_mark, + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].a_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[0].b_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[0].a_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].b_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].a_mark, + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].b_mark, + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].a_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[1].b_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[1].a_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].b_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].a_mark, + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].b_mark, + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].a_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[2].b_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[2].a_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].b_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].a_mark, + context->bw_ctx.bw.dce.stutter_mode_enable, + context->bw_ctx.bw.dce.cpuc_state_change_enable, + context->bw_ctx.bw.dce.cpup_state_change_enable, + context->bw_ctx.bw.dce.nbp_state_change_enable, + context->bw_ctx.bw.dce.all_displays_in_sync, + context->bw_ctx.bw.dce.dispclk_khz, + context->bw_ctx.bw.dce.sclk_khz, + context->bw_ctx.bw.dce.sclk_deep_sleep_khz, + context->bw_ctx.bw.dce.yclk_khz, + context->bw_ctx.bw.dce.blackout_recovery_time_us); } return result; } diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 3d31c273f41f..a480b15f6885 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -399,7 +399,24 @@ static const struct resource_caps polaris_11_resource_cap = { static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCE_RGB, - .supports_argb8888 = true, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = false, + .fp16 = false + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 1, + .fp16 = 1 + }, + + .max_downscale_factor = { + .argb8888 = 250, + .nv12 = 1, + .fp16 = 1 + } }; #define CTX ctx @@ -809,7 +826,8 @@ static enum dc_status build_mapped_resource( bool 
dce112_validate_bandwidth( struct dc *dc, - struct dc_state *context) + struct dc_state *context, + bool fast_validate) { bool result = false; @@ -823,7 +841,7 @@ bool dce112_validate_bandwidth( dc->bw_vbios, context->res_ctx.pipe_ctx, dc->res_pool->pipe_count, - &context->bw.dce)) + &context->bw_ctx.bw.dce)) result = true; if (!result) @@ -831,8 +849,8 @@ bool dce112_validate_bandwidth( "%s: Bandwidth validation failed!", __func__); - if (memcmp(&dc->current_state->bw.dce, - &context->bw.dce, sizeof(context->bw.dce))) { + if (memcmp(&dc->current_state->bw_ctx.bw.dce, + &context->bw_ctx.bw.dce, sizeof(context->bw_ctx.bw.dce))) { DC_LOG_BANDWIDTH_CALCS( "%s: finish,\n" @@ -846,34 +864,34 @@ bool dce112_validate_bandwidth( "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n" , __func__, - context->bw.dce.nbp_state_change_wm_ns[0].b_mark, - context->bw.dce.nbp_state_change_wm_ns[0].a_mark, - context->bw.dce.urgent_wm_ns[0].b_mark, - context->bw.dce.urgent_wm_ns[0].a_mark, - context->bw.dce.stutter_exit_wm_ns[0].b_mark, - context->bw.dce.stutter_exit_wm_ns[0].a_mark, - context->bw.dce.nbp_state_change_wm_ns[1].b_mark, - context->bw.dce.nbp_state_change_wm_ns[1].a_mark, - context->bw.dce.urgent_wm_ns[1].b_mark, - context->bw.dce.urgent_wm_ns[1].a_mark, - context->bw.dce.stutter_exit_wm_ns[1].b_mark, - context->bw.dce.stutter_exit_wm_ns[1].a_mark, - context->bw.dce.nbp_state_change_wm_ns[2].b_mark, - context->bw.dce.nbp_state_change_wm_ns[2].a_mark, - context->bw.dce.urgent_wm_ns[2].b_mark, - context->bw.dce.urgent_wm_ns[2].a_mark, - context->bw.dce.stutter_exit_wm_ns[2].b_mark, - context->bw.dce.stutter_exit_wm_ns[2].a_mark, - context->bw.dce.stutter_mode_enable, - context->bw.dce.cpuc_state_change_enable, - context->bw.dce.cpup_state_change_enable, - context->bw.dce.nbp_state_change_enable, - context->bw.dce.all_displays_in_sync, - context->bw.dce.dispclk_khz, - context->bw.dce.sclk_khz, - context->bw.dce.sclk_deep_sleep_khz, - context->bw.dce.yclk_khz, - context->bw.dce.blackout_recovery_time_us); + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].b_mark, + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].a_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[0].b_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[0].a_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].b_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].a_mark, + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].b_mark, + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].a_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[1].b_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[1].a_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].b_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].a_mark, + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].b_mark, + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].a_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[2].b_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[2].a_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].b_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].a_mark, + context->bw_ctx.bw.dce.stutter_mode_enable, + context->bw_ctx.bw.dce.cpuc_state_change_enable, + context->bw_ctx.bw.dce.cpup_state_change_enable, + context->bw_ctx.bw.dce.nbp_state_change_enable, + context->bw_ctx.bw.dce.all_displays_in_sync, + context->bw_ctx.bw.dce.dispclk_khz, + context->bw_ctx.bw.dce.sclk_khz, + context->bw_ctx.bw.dce.sclk_deep_sleep_khz, + context->bw_ctx.bw.dce.yclk_khz, + context->bw_ctx.bw.dce.blackout_recovery_time_us); } return result; } diff --git 
a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h index 95a403396219..1f57ebc6f9b4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h @@ -44,7 +44,8 @@ enum dc_status dce112_validate_with_context( bool dce112_validate_bandwidth( struct dc *dc, - struct dc_state *context); + struct dc_state *context, + bool fast_validate); enum dc_status dce112_add_stream_to_ctx( struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 01ea503faa12..6d49c7143c67 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -456,7 +456,24 @@ static const struct resource_caps res_cap = { static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCE_RGB, - .supports_argb8888 = true, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = false, + .fp16 = false + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 1, + .fp16 = 1 + }, + + .max_downscale_factor = { + .argb8888 = 250, + .nv12 = 1, + .fp16 = 1 + } }; static const struct dc_debug_options debug_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 066fd89747c2..27d0cc394963 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -389,7 +389,24 @@ static const struct resource_caps res_cap_83 = { static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCE_RGB, - .supports_argb8888 = true, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = false, + .fp16 = false + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 1, + .fp16 = 1 + }, + + .max_downscale_factor = { + .argb8888 = 250, + .nv12 = 1, + .fp16 = 1 + } }; static const struct dce_dmcu_registers dmcu_regs = { @@ -795,7 +812,8 @@ static void destruct(struct dce110_resource_pool *pool) bool dce80_validate_bandwidth( struct dc *dc, - struct dc_state *context) + struct dc_state *context, + bool fast_validate) { int i; bool at_least_one_pipe = false; @@ -807,11 +825,11 @@ bool dce80_validate_bandwidth( if (at_least_one_pipe) { /* TODO implement when needed but for now hardcode max value*/ - context->bw.dce.dispclk_khz = 681000; - context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; + context->bw_ctx.bw.dce.dispclk_khz = 681000; + context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; } else { - context->bw.dce.dispclk_khz = 0; - context->bw.dce.yclk_khz = 0; + context->bw_ctx.bw.dce.dispclk_khz = 0; + context->bw_ctx.bw.dce.yclk_khz = 0; } return true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c index 78b28c9b498b..2b2de1d913c9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c @@ -150,10 +150,7 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, { struct dc *dc = clk_mgr->ctx->dc; struct dc_debug_options *debug = &dc->debug; - struct dc_clocks *new_clocks = &context->bw.dcn.clk; - struct pp_smu_display_requirement_rv *smu_req_cur = - &dc->res_pool->pp_smu_req; - struct pp_smu_display_requirement_rv smu_req = *smu_req_cur; + struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; struct 
pp_smu_funcs_rv *pp_smu = NULL; bool send_request_to_increase = false; bool send_request_to_lower = false; @@ -175,8 +172,6 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, */ if (pp_smu && pp_smu->set_display_count) pp_smu->set_display_count(&pp_smu->pp_smu, display_count); - - smu_req.display_count = display_count; } if (new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz @@ -187,7 +182,6 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) { clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz; - send_request_to_lower = true; } @@ -197,24 +191,18 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) { clk_mgr->clks.fclk_khz = new_clocks->fclk_khz; - smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000; - send_request_to_lower = true; } //DCF Clock if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) { clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz; - smu_req.hard_min_dcefclk_mhz = new_clocks->dcfclk_khz / 1000; - send_request_to_lower = true; } if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) { clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz; - smu_req.min_deep_sleep_dcefclk_mhz = (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000; - send_request_to_lower = true; } @@ -227,9 +215,9 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, pp_smu->set_hard_min_dcfclk_by_freq && pp_smu->set_min_deep_sleep_dcfclk) { - pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_fclk_mhz); - pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_dcefclk_mhz); - pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, smu_req.min_deep_sleep_dcefclk_mhz); + pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000); + pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000); + pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000); } } @@ -239,7 +227,6 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, || new_clocks->dispclk_khz == clk_mgr->clks.dispclk_khz) { dcn1_ramp_up_dispclk_with_dpp(clk_mgr, new_clocks); clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz; - send_request_to_lower = true; } @@ -249,13 +236,11 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, pp_smu->set_hard_min_dcfclk_by_freq && pp_smu->set_min_deep_sleep_dcfclk) { - pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_fclk_mhz); - pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_dcefclk_mhz); - pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, smu_req.min_deep_sleep_dcefclk_mhz); + pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000); + pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000); + pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000); } } - - *smu_req_cur = smu_req; } static const struct clk_mgr_funcs dcn1_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index f91e4b49d211..6f4b24756323 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -385,6 
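/*
 * Note on the kHz -> MHz conversions in dcn1_update_clocks() above: the
 * deep-sleep DCEF clock uses (khz + 999) / 1000, a round-up (ceiling)
 * division, so the floor handed to the SMU never lands below the
 * requested rate; plain division would silently truncate. Standalone
 * worked example:
 */
#include <assert.h>

static unsigned int khz_to_mhz_ceil(unsigned int khz)
{
	return (khz + 999) / 1000;
}

int main(void)
{
	assert(khz_to_mhz_ceil(300000) == 300);	/* exact multiples unchanged */
	assert(khz_to_mhz_ceil(300001) == 301);	/* any remainder rounds up */
	assert(300001 / 1000 == 300);		/* truncation would undershoot */
	return 0;
}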
+385,10 @@ void dpp1_cnv_setup ( default: break; } + + /* Set default color space based on format if none is given. */ + color_space = input_color_space ? input_color_space : color_space; + REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, CNVC_SURFACE_PIXEL_FORMAT, pixel_format); REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en); @@ -396,7 +400,7 @@ void dpp1_cnv_setup ( for (i = 0; i < 12; i++) tbl_entry.regval[i] = input_csc_color_matrix.matrix[i]; - tbl_entry.color_space = input_color_space; + tbl_entry.color_space = color_space; if (color_space >= COLOR_SPACE_YCBCR601) select = INPUT_CSC_SELECT_ICSC; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index 295cbd5b843f..0db2a6e96fc0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -283,7 +283,8 @@ void hubbub1_program_watermarks( hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n", @@ -310,7 +311,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n", watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); @@ -323,7 +325,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_exit_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n", watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value); @@ -337,7 +340,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.pstate_change_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value); @@ -348,7 +352,8 @@ void hubbub1_program_watermarks( hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value); + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n", @@ -375,7 +380,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, refclk_mhz, 0x1fffff); - 
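/*
 * Note on the REG_WRITE -> REG_SET conversions above: REG_WRITE stores a
 * raw 32-bit value, while REG_SET starts from an initial value and
 * shifts/masks the argument into the named field, driven by the
 * shift/mask tables this patch adds to the header. Hand-expanded, a
 * single-field REG_SET amounts to roughly this (simplified sketch, not
 * the real macro expansion):
 */
static inline uint32_t set_reg_field(uint32_t init, uint32_t mask,
				     uint32_t shift, uint32_t value)
{
	return (init & ~mask) | ((value << shift) & mask);
}
/*
 * reg_write(ctx, mmWATERMARK_REG,
 *	     set_reg_field(0, FIELD_MASK, FIELD_SHIFT, prog_wm_value));
 */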
REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n", watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); @@ -388,7 +394,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_exit_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n", watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value); @@ -402,7 +409,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.pstate_change_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value); @@ -413,7 +421,8 @@ void hubbub1_program_watermarks( hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value); + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n", @@ -440,7 +449,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n", watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); @@ -453,7 +463,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_exit_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n", watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value); @@ -467,7 +478,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.pstate_change_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value); @@ -478,7 +490,8 @@ void hubbub1_program_watermarks( hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns, refclk_mhz, 0x1fffff); - 
REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value); + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n", @@ -505,7 +518,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n", watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); @@ -518,7 +532,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_exit_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n", watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value); @@ -532,7 +547,8 @@ void hubbub1_program_watermarks( prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.pstate_change_ns, refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value); + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value); DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n" "HW register value = 0x%x\n\n", watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value); @@ -867,6 +883,7 @@ static const struct hubbub_funcs hubbub1_funcs = { .dcc_support_pixel_format = hubbub1_dcc_support_pixel_format, .get_dcc_compression_cap = hubbub1_get_dcc_compression_cap, .wm_read_state = hubbub1_wm_read_state, + .program_watermarks = hubbub1_program_watermarks, }; void hubbub1_construct(struct hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h index 9cd4a5194154..85811b24a497 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h @@ -32,18 +32,14 @@ #define TO_DCN10_HUBBUB(hubbub)\ container_of(hubbub, struct dcn10_hubbub, base) -#define HUBHUB_REG_LIST_DCN()\ +#define HUBBUB_REG_LIST_DCN_COMMON()\ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\ - SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A),\ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B),\ - SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B),\ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B),\ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C),\ - SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C),\ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C),\ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D),\ - SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D),\ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D),\ SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL),\ SR(DCHUBBUB_ARB_DRAM_STATE_CNTL),\ @@ -54,6 +50,12 @@ SR(DCHUBBUB_TEST_DEBUG_DATA),\ SR(DCHUBBUB_SOFT_RESET) +#define HUBBUB_VM_REG_LIST() \ + SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\ + SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B),\ + SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C),\ + SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D) + #define 
HUBBUB_SR_WATERMARK_REG_LIST()\ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A),\ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A),\ @@ -65,7 +67,8 @@ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D) #define HUBBUB_REG_LIST_DCN10(id)\ - HUBHUB_REG_LIST_DCN(), \ + HUBBUB_REG_LIST_DCN_COMMON(), \ + HUBBUB_VM_REG_LIST(), \ HUBBUB_SR_WATERMARK_REG_LIST(), \ SR(DCHUBBUB_SDPIF_FB_TOP),\ SR(DCHUBBUB_SDPIF_FB_BASE),\ @@ -122,8 +125,7 @@ struct dcn_hubbub_registers { #define HUBBUB_SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix - -#define HUBBUB_MASK_SH_LIST_DCN(mask_sh)\ +#define HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh)\ HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \ HUBBUB_SF(DCHUBBUB_SOFT_RESET, DCHUBBUB_GLOBAL_SOFT_RESET, mask_sh), \ HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \ @@ -133,10 +135,29 @@ struct dcn_hubbub_registers { HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, mask_sh), \ HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \ HUBBUB_SF(DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh) + HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, mask_sh) + +#define HUBBUB_MASK_SH_LIST_STUTTER(mask_sh) \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, mask_sh) #define HUBBUB_MASK_SH_LIST_DCN10(mask_sh)\ - HUBBUB_MASK_SH_LIST_DCN(mask_sh), \ + HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \ + HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \ HUBBUB_SF(DCHUBBUB_SDPIF_FB_TOP, SDPIF_FB_TOP, mask_sh), \ HUBBUB_SF(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, mask_sh), \ HUBBUB_SF(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, mask_sh), \ @@ -167,15 +188,35 @@ struct 
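/*
 * Note on the COMMON/VM/STUTTER split above: the register and mask/shift
 * lists are being decomposed so an ASIC variant that lacks, say, the
 * stutter watermarks can compose a list from only the pieces it has.
 * The pattern in miniature (names illustrative):
 */
#define EX_REG_LIST_COMMON()	SR(REG_A), SR(REG_B)
#define EX_REG_LIST_STUTTER()	SR(REG_SR_ENTER), SR(REG_SR_EXIT)

#define EX_REG_LIST_FULL()	EX_REG_LIST_COMMON(), EX_REG_LIST_STUTTER()
#define EX_REG_LIST_MINIMAL()	EX_REG_LIST_COMMON()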
dcn_hubbub_registers { type FB_OFFSET;\ type AGP_BOT;\ type AGP_TOP;\ - type AGP_BASE + type AGP_BASE;\ + type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A;\ + type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B;\ + type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C;\ + type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D;\ + type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A;\ + type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B;\ + type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C;\ + type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D + +#define HUBBUB_STUTTER_REG_FIELD_LIST(type) \ + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A;\ + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B;\ + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C;\ + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D;\ + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A;\ + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B;\ + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;\ + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D struct dcn_hubbub_shift { DCN_HUBBUB_REG_FIELD_LIST(uint8_t); + HUBBUB_STUTTER_REG_FIELD_LIST(uint8_t); }; struct dcn_hubbub_mask { DCN_HUBBUB_REG_FIELD_LIST(uint32_t); + HUBBUB_STUTTER_REG_FIELD_LIST(uint32_t); }; struct dc; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 683829466a44..54b219a710d8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -1150,28 +1150,9 @@ void hubp1_cursor_set_position( REG_UPDATE(CURSOR_CONTROL, CURSOR_ENABLE, cur_en); - //account for cases where we see negative offset relative to overlay plane - if (src_x_offset < 0 && src_y_offset < 0) { - REG_SET_2(CURSOR_POSITION, 0, - CURSOR_X_POSITION, 0, - CURSOR_Y_POSITION, 0); - x_hotspot -= src_x_offset; - y_hotspot -= src_y_offset; - } else if (src_x_offset < 0) { - REG_SET_2(CURSOR_POSITION, 0, - CURSOR_X_POSITION, 0, - CURSOR_Y_POSITION, pos->y); - x_hotspot -= src_x_offset; - } else if (src_y_offset < 0) { - REG_SET_2(CURSOR_POSITION, 0, + REG_SET_2(CURSOR_POSITION, 0, CURSOR_X_POSITION, pos->x, - CURSOR_Y_POSITION, 0); - y_hotspot -= src_y_offset; - } else { - REG_SET_2(CURSOR_POSITION, 0, - CURSOR_X_POSITION, pos->x, - CURSOR_Y_POSITION, pos->y); - } + CURSOR_Y_POSITION, pos->y); REG_SET_2(CURSOR_HOT_SPOT, 0, CURSOR_HOT_SPOT_X, x_hotspot, @@ -1197,6 +1178,10 @@ void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst) REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst); } +void hubp1_init(struct hubp *hubp) +{ + //do nothing +} static const struct hubp_funcs dcn10_hubp_funcs = { .hubp_program_surface_flip_and_addr = hubp1_program_surface_flip_and_addr, @@ -1220,7 +1205,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = { .hubp_clear_underflow = hubp1_clear_underflow, .hubp_disable_control = hubp1_disable_control, .hubp_get_underflow_status = hubp1_get_underflow_status, - + .hubp_init = hubp1_init, }; /*****************************************/ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index 3268ab089363..99d2b7e2a578 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -34,6 +34,7 @@ #define HUBP_REG_LIST_DCN(id)\ SRI(DCHUBP_CNTL, HUBP, id),\ SRI(HUBPREQ_DEBUG_DB, HUBP, id),\ + SRI(HUBPREQ_DEBUG, HUBP, id),\ SRI(DCSURF_ADDR_CONFIG, HUBP, id),\ SRI(DCSURF_TILING_CONFIG, HUBP, id),\ SRI(DCSURF_SURFACE_PITCH, HUBPREQ, id),\ @@ -138,6 +139,7 @@ #define HUBP_COMMON_REG_VARIABLE_LIST \ uint32_t 
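/*
 * Two notes on the hubp changes above. The deleted per-quadrant cursor
 * clamping is not lost: dcn10_set_cursor_position (later in this patch)
 * now translates the position and hotspot before they reach
 * hubp1_cursor_set_position, so the HUBP level can program pos->x/pos->y
 * directly. And hubp1_init() is an intentionally empty hook: wiring it
 * into hubp_funcs lets init_hw call ->hubp_init() uniformly while later
 * HUBP revisions override it with real work. Vtable sketch with
 * illustrative names:
 */
struct hw;

struct hw_funcs {
	void (*init)(struct hw *hw);	/* may be a no-op on some revisions */
};

static void hw_rev1_init(struct hw *hw)
{
	/* nothing to do on this revision */
}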
DCHUBP_CNTL; \ uint32_t HUBPREQ_DEBUG_DB; \ + uint32_t HUBPREQ_DEBUG; \ uint32_t DCSURF_ADDR_CONFIG; \ uint32_t DCSURF_TILING_CONFIG; \ uint32_t DCSURF_SURFACE_PITCH; \ @@ -247,7 +249,7 @@ .field_name = reg_name ## __ ## field_name ## post_fix /* Mask/shift struct generation macro for all ASICs (including those with reduced functionality) */ -#define HUBP_MASK_SH_LIST_DCN(mask_sh)\ +#define HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh)\ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\ @@ -331,7 +333,6 @@ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, META_CHUNK_SIZE, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_META_CHUNK_SIZE, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, DPTE_GROUP_SIZE, mask_sh),\ - HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, PTE_ROW_HEIGHT_LINEAR, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, CHUNK_SIZE_C, mask_sh),\ @@ -339,7 +340,6 @@ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, META_CHUNK_SIZE_C, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_META_CHUNK_SIZE_C, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, DPTE_GROUP_SIZE_C, mask_sh),\ - HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, SWATH_HEIGHT_C, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, PTE_ROW_HEIGHT_LINEAR_C, mask_sh),\ HUBP_SF(HUBPREQ0_BLANK_OFFSET_0, REFCYC_H_BLANK_END, mask_sh),\ @@ -373,6 +373,11 @@ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\ HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh) +#define HUBP_MASK_SH_LIST_DCN(mask_sh)\ + HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh),\ + HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\ + HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh) + /* Mask/shift struct generation macro for ASICs with VM */ #define HUBP_MASK_SH_LIST_DCN_VM(mask_sh)\ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\ @@ -746,4 +751,6 @@ enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch); void hubp1_vready_workaround(struct hubp *hubp, struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest); +void hubp1_init(struct hubp *hubp); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 47653fe0bb2d..33d311cea28c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -345,13 +345,13 @@ void dcn10_log_hw_state(struct dc *dc, DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n" "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n", - dc->current_state->bw.dcn.clk.dcfclk_khz, - dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz, - dc->current_state->bw.dcn.clk.dispclk_khz, - dc->current_state->bw.dcn.clk.dppclk_khz, - dc->current_state->bw.dcn.clk.max_supported_dppclk_khz, - dc->current_state->bw.dcn.clk.fclk_khz, - dc->current_state->bw.dcn.clk.socclk_khz); + dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz, + dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz, + dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz, + dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz, + dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz, + 
dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz, + dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz); log_mpc_crc(dc, log_ctx); @@ -979,16 +979,14 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context) * to non-preferred front end. If pipe_ctx->stream is not NULL, * we will use the pipe, so don't disable */ - if (pipe_ctx->stream != NULL) + if (pipe_ctx->stream != NULL && can_apply_seamless_boot) continue; - if (tg->funcs->is_tg_enabled(tg)) - tg->funcs->lock(tg); - /* Blank controller using driver code instead of * command table. */ if (tg->funcs->is_tg_enabled(tg)) { + tg->funcs->lock(tg); tg->funcs->set_blank(tg, true); hwss_wait_for_blank_complete(tg); } @@ -1120,14 +1118,17 @@ static void dcn10_init_hw(struct dc *dc) * Otherwise, if taking control is not possible, we need to power * everything down. */ - if (dcb->funcs->is_accelerated_mode(dcb)) { + if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { for (i = 0; i < dc->res_pool->pipe_count; i++) { struct hubp *hubp = dc->res_pool->hubps[i]; struct dpp *dpp = dc->res_pool->dpps[i]; + hubp->funcs->hubp_init(hubp); dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst; plane_atomic_power_down(dc, dpp, hubp); } + + apply_DEGVIDCN10_253_wa(dc); } for (i = 0; i < dc->res_pool->audio_count; i++) { @@ -1854,7 +1855,7 @@ void dcn10_get_hdr_visual_confirm_color( switch (top_pipe_ctx->plane_res.scl_data.format) { case PIXEL_FORMAT_ARGB2101010: - if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_UNITY) { + if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) { /* HDR10, ARGB2101010 - set boarder color to red */ color->color_r_cr = color_value; } @@ -1949,7 +1950,7 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state) plane_state->format, EXPANSION_MODE_ZERO, plane_state->input_csc_color_matrix, - COLOR_SPACE_YCBCR601_LIMITED); + plane_state->color_space); //set scale and bias registers build_prescale_params(&bns_params, plane_state); @@ -2069,7 +2070,7 @@ void update_dchubp_dpp( * divided by 2 */ if (plane_state->update_flags.bits.full_update) { - bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <= + bool should_divided_by_2 = context->bw_ctx.bw.dcn.clk.dppclk_khz <= dc->res_pool->clk_mgr->clks.dispclk_khz / 2; dpp->funcs->dpp_dppclk_control( @@ -2138,6 +2139,9 @@ void update_dchubp_dpp( if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { dc->hwss.set_cursor_position(pipe_ctx); dc->hwss.set_cursor_attribute(pipe_ctx); + + if (dc->hwss.set_cursor_sdr_white_level) + dc->hwss.set_cursor_sdr_white_level(pipe_ctx); } if (plane_state->update_flags.bits.full_update) { @@ -2328,6 +2332,7 @@ static void dcn10_apply_ctx_for_surface( int i; struct timing_generator *tg; bool removed_pipe[4] = { false }; + bool interdependent_update = false; struct pipe_ctx *top_pipe_to_program = find_top_pipe_for_stream(dc, context, stream); DC_LOGGER_INIT(dc->ctx->logger); @@ -2337,7 +2342,13 @@ static void dcn10_apply_ctx_for_surface( tg = top_pipe_to_program->stream_res.tg; - dcn10_pipe_control_lock(dc, top_pipe_to_program, true); + interdependent_update = top_pipe_to_program->plane_state && + top_pipe_to_program->plane_state->update_flags.bits.full_update; + + if (interdependent_update) + lock_all_pipes(dc, context, true); + else + dcn10_pipe_control_lock(dc, top_pipe_to_program, true); if (num_planes == 0) { /* OTG blank before remove all front end */ @@ -2357,15 +2368,9 @@ static void dcn10_apply_ctx_for_surface( 
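/*
 * Note on the TRANSFER_FUNCTION_UNITY -> TRANSFER_FUNCTION_PQ change
 * above: it fixes the HDR visual-confirm border ("boarder" in the source
 * comment is a pre-existing typo for "border"). HDR10 signalling uses
 * the SMPTE ST 2084 (PQ) transfer function, so an ARGB2101010 surface
 * should be flagged as HDR10 (red border) when its output transfer
 * function is PQ, not when it is unity/linear. Illustrative wrapper,
 * not code from the patch:
 */
static void hdr10_confirm_example(enum pixel_format fmt,
				  enum dc_transfer_func_predefined tf,
				  struct tg_color *color,
				  uint32_t color_value)
{
	/* HDR10 = ARGB2101010 surface carrying a PQ (ST 2084) curve */
	if (fmt == PIXEL_FORMAT_ARGB2101010 && tf == TRANSFER_FUNCTION_PQ)
		color->color_r_cr = color_value;	/* red border */
}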
*/ if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) { if (old_pipe_ctx->stream_res.tg == tg && - old_pipe_ctx->plane_res.hubp && - old_pipe_ctx->plane_res.hubp->opp_id != 0xf) { + old_pipe_ctx->plane_res.hubp && + old_pipe_ctx->plane_res.hubp->opp_id != 0xf) dcn10_disable_plane(dc, old_pipe_ctx); - /* - * power down fe will unlock when calling reset, need - * to lock it back here. Messy, need rework. - */ - pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); - } } if ((!pipe_ctx->plane_state || @@ -2384,29 +2389,25 @@ static void dcn10_apply_ctx_for_surface( if (num_planes > 0) program_all_pipe_in_tree(dc, top_pipe_to_program, context); - dcn10_pipe_control_lock(dc, top_pipe_to_program, false); - - if (top_pipe_to_program->plane_state && - top_pipe_to_program->plane_state->update_flags.bits.full_update) + if (interdependent_update) for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - tg = pipe_ctx->stream_res.tg; /* Skip inactive pipes and ones already updated */ - if (!pipe_ctx->stream || pipe_ctx->stream == stream - || !pipe_ctx->plane_state - || !tg->funcs->is_tg_enabled(tg)) + if (!pipe_ctx->stream || pipe_ctx->stream == stream || + !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg)) continue; - tg->funcs->lock(tg); - pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( pipe_ctx->plane_res.hubp, &pipe_ctx->dlg_regs, &pipe_ctx->ttu_regs); - - tg->funcs->unlock(tg); } + if (interdependent_update) + lock_all_pipes(dc, context, false); + else + dcn10_pipe_control_lock(dc, top_pipe_to_program, false); + if (num_planes == 0) false_optc_underflow_wa(dc, stream, tg); @@ -2438,12 +2439,14 @@ static void dcn10_prepare_bandwidth( struct dc *dc, struct dc_state *context) { + struct hubbub *hubbub = dc->res_pool->hubbub; + if (dc->debug.sanity_checks) dcn10_verify_allow_pstate_change_high(dc); if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (context->stream_count == 0) - context->bw.dcn.clk.phyclk_khz = 0; + context->bw_ctx.bw.dcn.clk.phyclk_khz = 0; dc->res_pool->clk_mgr->funcs->update_clocks( dc->res_pool->clk_mgr, @@ -2451,8 +2454,8 @@ static void dcn10_prepare_bandwidth( false); } - hubbub1_program_watermarks(dc->res_pool->hubbub, - &context->bw.dcn.watermarks, + hubbub->funcs->program_watermarks(hubbub, + &context->bw_ctx.bw.dcn.watermarks, dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, true); dcn10_stereo_hw_frame_pack_wa(dc, context); @@ -2468,12 +2471,14 @@ static void dcn10_optimize_bandwidth( struct dc *dc, struct dc_state *context) { + struct hubbub *hubbub = dc->res_pool->hubbub; + if (dc->debug.sanity_checks) dcn10_verify_allow_pstate_change_high(dc); if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { if (context->stream_count == 0) - context->bw.dcn.clk.phyclk_khz = 0; + context->bw_ctx.bw.dcn.clk.phyclk_khz = 0; dc->res_pool->clk_mgr->funcs->update_clocks( dc->res_pool->clk_mgr, @@ -2481,8 +2486,8 @@ static void dcn10_optimize_bandwidth( true); } - hubbub1_program_watermarks(dc->res_pool->hubbub, - &context->bw.dcn.watermarks, + hubbub->funcs->program_watermarks(hubbub, + &context->bw_ctx.bw.dcn.watermarks, dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, true); dcn10_stereo_hw_frame_pack_wa(dc, context); @@ -2710,9 +2715,15 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) .rotation = pipe_ctx->plane_state->rotation, .mirror = pipe_ctx->plane_state->horizontal_mirror }; + uint32_t x_plane = pipe_ctx->plane_state->dst_rect.x; + uint32_t y_plane = 
pipe_ctx->plane_state->dst_rect.y; + uint32_t x_offset = min(x_plane, pos_cpy.x); + uint32_t y_offset = min(y_plane, pos_cpy.y); - pos_cpy.x_hotspot += pipe_ctx->plane_state->dst_rect.x; - pos_cpy.y_hotspot += pipe_ctx->plane_state->dst_rect.y; + pos_cpy.x -= x_offset; + pos_cpy.y -= y_offset; + pos_cpy.x_hotspot += (x_plane - x_offset); + pos_cpy.y_hotspot += (y_plane - y_offset); if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) @@ -2807,6 +2818,33 @@ int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx) return vertical_line_start; } +void lock_all_pipes(struct dc *dc, + struct dc_state *context, + bool lock) +{ + struct pipe_ctx *pipe_ctx; + struct timing_generator *tg; + int i; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe_ctx = &context->res_ctx.pipe_ctx[i]; + tg = pipe_ctx->stream_res.tg; + /* + * Only lock the top pipe's tg to prevent redundant + * (un)locking. Also skip if pipe is disabled. + */ + if (pipe_ctx->top_pipe || + !pipe_ctx->stream || !pipe_ctx->plane_state || + !tg->funcs->is_tg_enabled(tg)) + continue; + + if (lock) + tg->funcs->lock(tg); + else + tg->funcs->unlock(tg); + } +} + static void calc_vupdate_position( struct pipe_ctx *pipe_ctx, uint32_t *start_line, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index 6d66084df55f..4b3b27a5d23b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -83,4 +83,8 @@ struct pipe_ctx *find_top_pipe_for_stream( int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx); +void lock_all_pipes(struct dc *dc, + struct dc_state *context, + bool lock); + #endif /* __DC_HWSS_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c index aa7a5163c40a..991622da9ed5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c @@ -472,12 +472,12 @@ static unsigned int dcn10_get_clock_states(struct dc *dc, char *pBuf, unsigned i chars_printed = snprintf_count(pBuf, bufSize, "dcfclk,dcfclk_deep_sleep,dispclk," "dppclk,fclk,socclk\n" "%d,%d,%d,%d,%d,%d\n", - dc->current_state->bw.dcn.clk.dcfclk_khz, - dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz, - dc->current_state->bw.dcn.clk.dispclk_khz, - dc->current_state->bw.dcn.clk.dppclk_khz, - dc->current_state->bw.dcn.clk.fclk_khz, - dc->current_state->bw.dcn.clk.socclk_khz); + dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz, + dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz, + dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz, + dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz, + dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz, + dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz); remaining_buffer -= chars_printed; pBuf += chars_printed; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 7c37836bb9cc..7eccb54c421d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -521,8 +521,24 @@ static const struct dc_plane_cap plane_cap = { .blends_with_above = true, .blends_with_below = true, .per_pixel_alpha = true, - .supports_argb8888 = true, - .supports_nv12 = true + + .pixel_format_support = { + .argb8888 = true, + .nv12 = true, + .fp16 = true + 
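/*
 * Illustrative sketch (not part of the patch): the set_cursor_position hunk
 * above subtracts as much of the plane's dst_rect offset from the cursor
 * position as possible and folds the remainder into the hotspot, so the
 * on-screen cursor location is preserved without the position going
 * negative.  Standalone model with made-up numbers.
 */
#include <stdio.h>

static unsigned int umin(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int x_plane = 100;          /* plane dst_rect.x */
	unsigned int pos_x = 60, hot_x = 4;  /* cursor position / hotspot */
	unsigned int x_offset = umin(x_plane, pos_x);

	pos_x -= x_offset;                   /* 60 - 60 -> 0  */
	hot_x += x_plane - x_offset;         /* 4 + 40  -> 44 */
	printf("pos=%u hotspot=%u\n", pos_x, hot_x);
	return 0;
}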
}, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 16000, + .fp16 = 1 + }, + + .max_downscale_factor = { + .argb8888 = 250, + .nv12 = 250, + .fp16 = 1 + } }; static const struct dc_debug_options debug_defaults_drv = { @@ -1146,7 +1162,7 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont continue; if (context->stream_status[i].plane_count > 2) - return false; + return DC_FAIL_UNSUPPORTED_1; for (j = 0; j < context->stream_status[i].plane_count; j++) { struct dc_plane_state *plane = diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h index cc6891b8ea69..4fc4208d1472 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h +++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h @@ -74,29 +74,6 @@ struct pp_smu_wm_range_sets { struct pp_smu_wm_set_range writer_wm_sets[MAX_WATERMARK_SETS]; }; -struct pp_smu_display_requirement_rv { - /* PPSMC_MSG_SetDisplayCount: count - * 0 triggers S0i2 optimization - */ - unsigned int display_count; - - /* PPSMC_MSG_SetHardMinFclkByFreq: mhz - * FCLK will vary with DPM, but never below requested hard min - */ - unsigned int hard_min_fclk_mhz; - - /* PPSMC_MSG_SetHardMinDcefclkByFreq: mhz - * fixed clock at requested freq, either from FCH bypass or DFS - */ - unsigned int hard_min_dcefclk_mhz; - - /* PPSMC_MSG_SetMinDeepSleepDcefclk: mhz - * when DF is in cstate, dcf clock is further divided down - * to just above given frequency - */ - unsigned int min_deep_sleep_dcefclk_mhz; -}; - struct pp_smu_funcs_rv { struct pp_smu pp_smu; diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h index 2e61a22ef4b2..8dca3b7700e5 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h @@ -43,7 +43,7 @@ enum dc_status { DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */ DC_FAIL_SCALING = 14, DC_FAIL_DP_LINK_TRAINING = 15, - + DC_FAIL_UNSUPPORTED_1 = 18, DC_ERROR_UNEXPECTED = -1 }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index b986c67d5b4b..6f5ab05d6467 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -97,7 +97,8 @@ struct resource_funcs { const struct encoder_init_data *init); bool (*validate_bandwidth)( struct dc *dc, - struct dc_state *context); + struct dc_state *context, + bool fast_validate); enum dc_status (*validate_global)( struct dc *dc, @@ -144,7 +145,6 @@ struct resource_pool { struct hubbub *hubbub; struct mpc *mpc; struct pp_smu_funcs *pp_smu; - struct pp_smu_display_requirement_rv pp_smu_req; struct dce_aux *engines[MAX_PIPES]; struct dce_i2c_hw *hw_i2cs[MAX_PIPES]; struct dce_i2c_sw *sw_i2cs[MAX_PIPES]; @@ -266,18 +266,22 @@ struct dcn_bw_output { struct dcn_watermark_set watermarks; }; -union bw_context { +union bw_output { struct dcn_bw_output dcn; struct dce_bw_output dce; }; +struct bw_context { + union bw_output bw; + struct display_mode_lib dml; +}; /** * struct dc_state - The full description of a state requested by a user * * @streams: Stream properties * @stream_status: The planes on a given stream * @res_ctx: Persistent state of resources - * @bw: The output from bandwidth and watermark calculations + * @bw_ctx: The output from bandwidth and watermark calculations and the DML * @pp_display_cfg: PowerPlay clocks and settings * @dcn_bw_vars: non-stack memory to support bandwidth calculations * @@ 
-289,7 +293,7 @@ struct dc_state { struct resource_context res_ctx; - union bw_context bw; + struct bw_context bw_ctx; /* Note: these are big structures, do *not* put on stack! */ struct dm_pp_display_configuration pp_display_cfg; @@ -297,7 +301,7 @@ struct dc_state { struct dcn_bw_internal_vars dcn_bw_vars; #endif - struct clk_mgr *dccg; + struct clk_mgr *clk_mgr; struct { bool full_update_needed : 1; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h index 86ec3f69c765..263c09630c06 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h @@ -621,7 +621,8 @@ extern const struct dcn_ip_params dcn10_ip_defaults; bool dcn_validate_bandwidth( struct dc *dc, - struct dc_state *context); + struct dc_state *context, + bool fast_validate); unsigned int dcn_find_dcfclk_suits_all( const struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 5e8fead3c09a..93667e8b23b3 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -77,6 +77,12 @@ struct hubbub_funcs { void (*get_dchub_ref_freq)(struct hubbub *hubbub, unsigned int dccg_ref_freq_inKhz, unsigned int *dchub_ref_freq_inKhz); + + void (*program_watermarks)( + struct hubbub *hubbub, + struct dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); }; struct hubbub { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h index cbaa43853611..c68f0ce346c7 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h @@ -70,6 +70,8 @@ struct dmcu_funcs { void (*get_psr_wait_loop)(struct dmcu *dmcu, unsigned int *psr_wait_loop_number); bool (*is_dmcu_initialized)(struct dmcu *dmcu); + bool (*lock_phy)(struct dmcu *dmcu); + bool (*unlock_phy)(struct dmcu *dmcu); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 1cd07e94ee63..455df4999797 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -130,6 +130,7 @@ struct hubp_funcs { void (*hubp_clear_underflow)(struct hubp *hubp); void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp); unsigned int (*hubp_get_underflow_status)(struct hubp *hubp); + void (*hubp_init)(struct hubp *hubp); }; diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c index afe0876fe6f8..86987f5e8bd5 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c @@ -81,6 +81,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = { .ack = NULL }; +static const struct irq_source_info_funcs vupdate_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + #define hpd_int_entry(reg_num)\ [DC_IRQ_SOURCE_HPD1 + reg_num] = {\ .enable_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\ @@ -137,7 +142,7 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = { CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\ .ack_value =\ CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\ - .funcs = &vblank_irq_info_funcs\ + .funcs = &vupdate_irq_info_funcs\ } #define vblank_int_entry(reg_num)\ diff --git 
a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c index 1ea7256ec89b..750ba0ab4106 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c @@ -84,6 +84,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = { .ack = NULL }; +static const struct irq_source_info_funcs vupdate_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + #define BASE_INNER(seg) \ DCE_BASE__INST0_SEG ## seg @@ -140,7 +145,7 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = { IRQ_REG_ENTRY(CRTC, reg_num,\ CRTC_INTERRUPT_CONTROL, CRTC_V_UPDATE_INT_MSK,\ CRTC_V_UPDATE_INT_STATUS, CRTC_V_UPDATE_INT_CLEAR),\ - .funcs = &vblank_irq_info_funcs\ + .funcs = &vupdate_irq_info_funcs\ } #define vblank_int_entry(reg_num)\ diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c index 8a2066c313fe..de218fe84a43 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c @@ -84,6 +84,10 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = { .ack = NULL }; +static const struct irq_source_info_funcs vupdate_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; #define hpd_int_entry(reg_num)\ [DC_IRQ_SOURCE_INVALID + reg_num] = {\ @@ -142,7 +146,7 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = { CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\ .ack_value =\ CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\ - .funcs = &vblank_irq_info_funcs\ + .funcs = &vupdate_irq_info_funcs\ } #define vblank_int_entry(reg_num)\ diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c index e04ae49243f6..10ac6deff5ff 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c @@ -56,6 +56,18 @@ enum dc_irq_source to_dal_irq_source_dcn10( return DC_IRQ_SOURCE_VBLANK5; case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP: return DC_IRQ_SOURCE_VBLANK6; + case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT: + return DC_IRQ_SOURCE_VUPDATE1; + case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT: + return DC_IRQ_SOURCE_VUPDATE2; + case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT: + return DC_IRQ_SOURCE_VUPDATE3; + case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT: + return DC_IRQ_SOURCE_VUPDATE4; + case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT: + return DC_IRQ_SOURCE_VUPDATE5; + case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT: + return DC_IRQ_SOURCE_VUPDATE6; case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT: return DC_IRQ_SOURCE_PFLIP1; case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT: @@ -153,6 +165,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = { .ack = NULL }; +static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + #define BASE_INNER(seg) \ DCE_BASE__INST0_SEG ## seg @@ -203,12 +220,15 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = { .funcs = &pflip_irq_info_funcs\ } -#define vupdate_int_entry(reg_num)\ +/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic + * of DCE's DC_IRQ_SOURCE_VUPDATEx. 
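/*
 * Illustrative sketch (not part of the patch): the vupdate entries in the
 * irq service tables above use C99 designated initializers indexed off a
 * base enum value, so OTG N's register entry lands at
 * DC_IRQ_SOURCE_VUPDATE1 + N.  Minimal standalone model of that table
 * layout; the enum names and register values here are simplified stand-ins.
 */
#include <stdio.h>

enum irq_source { SRC_INVALID, SRC_VUPDATE1, SRC_VUPDATE2, SRC_VUPDATE3, SRC_MAX };

#define vupdate_entry(n) [SRC_VUPDATE1 + (n)] = 0x100 + (n)

static const int irq_table[SRC_MAX] = {
	vupdate_entry(0),
	vupdate_entry(1),
	vupdate_entry(2),
};

int main(void)
{
	printf("OTG1 vupdate entry = 0x%x\n", irq_table[SRC_VUPDATE2]); /* 0x101 */
	return 0;
}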
+ */ +#define vupdate_no_lock_int_entry(reg_num)\ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\ IRQ_REG_ENTRY(OTG, reg_num,\ - OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\ - OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\ - .funcs = &vblank_irq_info_funcs\ + OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\ + OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\ + .funcs = &vupdate_no_lock_irq_info_funcs\ } #define vblank_int_entry(reg_num)\ @@ -315,12 +335,12 @@ irq_source_info_dcn10[DAL_IRQ_SOURCES_NUMBER] = { dc_underflow_int_entry(6), [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(), [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(), - vupdate_int_entry(0), - vupdate_int_entry(1), - vupdate_int_entry(2), - vupdate_int_entry(3), - vupdate_int_entry(4), - vupdate_int_entry(5), + vupdate_no_lock_int_entry(0), + vupdate_no_lock_int_entry(1), + vupdate_no_lock_int_entry(2), + vupdate_no_lock_int_entry(3), + vupdate_no_lock_int_entry(4), + vupdate_no_lock_int_entry(5), vblank_int_entry(0), vblank_int_entry(1), vblank_int_entry(2), diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 3d867e34f8b3..19b1eaebe484 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -437,10 +437,8 @@ static void apply_below_the_range(struct core_freesync *core_freesync, inserted_frame_duration_in_us = last_render_time_in_us / frames_to_insert; - if (inserted_frame_duration_in_us < - (1000000 / in_out_vrr->max_refresh_in_uhz)) - inserted_frame_duration_in_us = - (1000000 / in_out_vrr->max_refresh_in_uhz); + if (inserted_frame_duration_in_us < in_out_vrr->min_duration_in_us) + inserted_frame_duration_in_us = in_out_vrr->min_duration_in_us; /* Cache the calculated variables */ in_out_vrr->btr.inserted_duration_in_us = diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index efd386f3ca53..b3810b864676 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -43,10 +43,10 @@ static const unsigned char max_reduction_table[13] = { /* Possible ABM 2.2 Min Reduction configs from least aggressive to most aggressive * 0 1 2 3 4 5 6 7 8 9 10 11 12 - * 100 100 100 100 100 100 100 90.2 85.1 80.0 80.0 75.3 75.3 % + * 100 100 100 100 100 100 100 100 100 92.2 83.1 75.3 75.3 % */ static const unsigned char min_reduction_table_v_2_2[13] = { -0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xd9, 0xcc, 0xcc, 0xc0, 0xc0}; +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xd4, 0xc0, 0xc0}; /* Possible ABM 2.2 Max Reduction configs from least aggressive to most aggressive * 0 1 2 3 4 5 6 7 8 9 10 11 12 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h index 721c61171045..5a44e614ab7e 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h @@ -2347,6 +2347,8 @@ #define mmHUBP0_DCHUBP_VMPG_CONFIG_BASE_IDX 2 #define mmHUBP0_HUBPREQ_DEBUG_DB 0x0569 #define mmHUBP0_HUBPREQ_DEBUG_DB_BASE_IDX 2 +#define mmHUBP0_HUBPREQ_DEBUG 0x056a +#define mmHUBP0_HUBPREQ_DEBUG_BASE_IDX 2 #define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x056e #define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2 #define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x056f @@ -2631,6 +2633,8 @@ #define 
mmHUBP1_DCHUBP_VMPG_CONFIG_BASE_IDX 2 #define mmHUBP1_HUBPREQ_DEBUG_DB 0x062d #define mmHUBP1_HUBPREQ_DEBUG_DB_BASE_IDX 2 +#define mmHUBP1_HUBPREQ_DEBUG 0x062e +#define mmHUBP1_HUBPREQ_DEBUG_BASE_IDX 2 #define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x0632 #define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2 #define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x0633 @@ -2915,6 +2919,8 @@ #define mmHUBP2_DCHUBP_VMPG_CONFIG_BASE_IDX 2 #define mmHUBP2_HUBPREQ_DEBUG_DB 0x06f1 #define mmHUBP2_HUBPREQ_DEBUG_DB_BASE_IDX 2 +#define mmHUBP2_HUBPREQ_DEBUG 0x06f2 +#define mmHUBP2_HUBPREQ_DEBUG_BASE_IDX 2 #define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x06f6 #define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2 #define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x06f7 @@ -3199,6 +3205,8 @@ #define mmHUBP3_DCHUBP_VMPG_CONFIG_BASE_IDX 2 #define mmHUBP3_HUBPREQ_DEBUG_DB 0x07b5 #define mmHUBP3_HUBPREQ_DEBUG_DB_BASE_IDX 2 +#define mmHUBP3_HUBPREQ_DEBUG 0x07b6 +#define mmHUBP3_HUBPREQ_DEBUG_BASE_IDX 2 #define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x07ba #define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2 #define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x07bb diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 08769b4b7a74..d3075adb3297 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -718,6 +718,7 @@ enum atom_encoder_caps_def ATOM_ENCODER_CAP_RECORD_HBR2_EN =0x02, // DP1.2 HBR2 setting is qualified and HBR2 can be enabled ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN =0x04, // HDMI2.0 6Gbps enable or not. ATOM_ENCODER_CAP_RECORD_HBR3_EN =0x08, // DP1.3 HBR3 is supported by board. + ATOM_ENCODER_CAP_RECORD_USB_C_TYPE =0x100, // the DP connector is a USB-C type. }; struct atom_encoder_caps_record diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 5f3c10ebff08..b897aca9b4c9 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -85,18 +85,6 @@ enum kgd_memory_pool { KGD_POOL_FRAMEBUFFER = 3, }; -enum kgd_engine_type { - KGD_ENGINE_PFP = 1, - KGD_ENGINE_ME, - KGD_ENGINE_CE, - KGD_ENGINE_MEC1, - KGD_ENGINE_MEC2, - KGD_ENGINE_RLC, - KGD_ENGINE_SDMA1, - KGD_ENGINE_SDMA2, - KGD_ENGINE_MAX -}; - /** * enum kfd_sched_policy * @@ -230,8 +218,6 @@ struct tile_config { * @hqd_sdma_destroy: Destructs and preempts the SDMA queue assigned to that * SDMA hqd slot. * - * @get_fw_version: Returns FW versions from the header - * * @set_scratch_backing_va: Sets VA for scratch backing memory of a VMID. 
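/*
 * Illustrative sketch (not part of the patch): the apply_below_the_range()
 * hunk in modules/freesync/freesync.c above stops deriving the clamp from
 * max_refresh_in_uhz.  With the rate in micro-hertz, 1000000 / rate
 * truncates to 0 for any real refresh rate, so the old lower bound never
 * engaged; the frame period in microseconds is 10^12 / rate_uhz, which is
 * what the cached min_duration_in_us holds.  Standalone arithmetic check.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t max_refresh_uhz = 144000000ULL;  /* 144 Hz */

	uint64_t old_clamp_us = 1000000 / max_refresh_uhz;             /* 0    */
	uint64_t min_duration_us = 1000000000000ULL / max_refresh_uhz; /* 6944 */

	printf("old=%llu new=%llu\n",
	       (unsigned long long)old_clamp_us,
	       (unsigned long long)min_duration_us);
	return 0;
}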
* Only used for no cp scheduling mode * @@ -311,8 +297,6 @@ struct kfd2kgd_calls { struct kgd_dev *kgd, uint8_t vmid); - uint16_t (*get_fw_version)(struct kgd_dev *kgd, - enum kgd_engine_type type); void (*set_scratch_backing_va)(struct kgd_dev *kgd, uint64_t va, uint32_t vmid); int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config); diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 7e8c74da6a74..c058c784180e 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -97,16 +97,19 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, return ret; } -int smu_update_table(struct smu_context *smu, uint32_t table_id, +int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16_t exarg, void *table_data, bool drv2smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *table = NULL; int ret = 0; + uint32_t table_index; if (!table_data || table_id >= smu_table->table_count) return -EINVAL; + table_index = (exarg << 16) | table_id; + table = &smu_table->tables[table_id]; if (drv2smu) @@ -123,7 +126,7 @@ int smu_update_table(struct smu_context *smu, uint32_t table_id, ret = smu_send_smc_msg_with_param(smu, drv2smu ? SMU_MSG_TransferTableDram2Smu : SMU_MSG_TransferTableSmu2Dram, - table_id); + table_index); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index f32e3d0aaea6..9a595f7525e6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -35,6 +35,7 @@ #include "smu10_hwmgr.h" #include "power_state.h" #include "soc15_common.h" +#include "smu10.h" #define SMU10_MAX_DEEPSLEEP_DIVIDER_ID 5 #define SMU10_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */ @@ -204,18 +205,13 @@ static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input) return 0; } -static inline uint32_t convert_10k_to_mhz(uint32_t clock) -{ - return (clock + 99) / 100; -} - static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock) { struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); if (smu10_data->need_min_deep_sleep_dcefclk && - smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) { - smu10_data->deep_sleep_dcefclk = convert_10k_to_mhz(clock); + smu10_data->deep_sleep_dcefclk != clock) { + smu10_data->deep_sleep_dcefclk = clock; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, smu10_data->deep_sleep_dcefclk); @@ -228,8 +224,8 @@ static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t c struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); if (smu10_data->dcf_actual_hard_min_freq && - smu10_data->dcf_actual_hard_min_freq != convert_10k_to_mhz(clock)) { - smu10_data->dcf_actual_hard_min_freq = convert_10k_to_mhz(clock); + smu10_data->dcf_actual_hard_min_freq != clock) { + smu10_data->dcf_actual_hard_min_freq = clock; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinDcefclkByFreq, smu10_data->dcf_actual_hard_min_freq); @@ -242,8 +238,8 @@ static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cloc struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); if (smu10_data->f_actual_hard_min_freq && - smu10_data->f_actual_hard_min_freq != convert_10k_to_mhz(clock)) { 
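/*
 * Illustrative sketch (not part of the patch): smu_update_table_with_arg()
 * above packs an extra 16-bit argument (e.g. a workload slot) into the high
 * half of the transfer-message parameter, with the table id in the low
 * half; the smu_update_table() wrapper (see the macro in amdgpu_smu.h
 * further down) becomes the exarg==0 case.  Standalone model of that
 * packing with hypothetical id values.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_table_index(uint16_t table_id, uint16_t exarg)
{
	return ((uint32_t)exarg << 16) | table_id;
}

int main(void)
{
	/* Table 6 with workload slot 5 in the high half. */
	printf("0x%08x\n", pack_table_index(6, 5));  /* 0x00050006 */
	/* Plain smu_update_table(): exarg is 0, parameter == table id. */
	printf("0x%08x\n", pack_table_index(6, 0));  /* 0x00000006 */
	return 0;
}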
- smu10_data->f_actual_hard_min_freq = convert_10k_to_mhz(clock); + smu10_data->f_actual_hard_min_freq != clock) { + smu10_data->f_actual_hard_min_freq = clock; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, smu10_data->f_actual_hard_min_freq); @@ -572,7 +568,6 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) { struct smu10_hwmgr *data = hwmgr->backend; - struct amdgpu_device *adev = hwmgr->adev; uint32_t min_sclk = hwmgr->display_config->min_core_set_clock; uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100; @@ -581,11 +576,6 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, return 0; } - /* Disable UMDPSTATE support on rv2 temporarily */ - if ((adev->asic_type == CHIP_RAVEN) && - (adev->rev_id >= 8)) - return 0; - if (min_sclk < data->gfx_min_freq_limit) min_sclk = data->gfx_min_freq_limit; @@ -1200,6 +1190,94 @@ static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate) } } +static int conv_power_profile_to_pplib_workload(int power_profile) +{ + int pplib_workload = 0; + + switch (power_profile) { + case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT: + pplib_workload = WORKLOAD_DEFAULT_BIT; + break; + case PP_SMC_POWER_PROFILE_FULLSCREEN3D: + pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT; + break; + case PP_SMC_POWER_PROFILE_POWERSAVING: + pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT; + break; + case PP_SMC_POWER_PROFILE_VIDEO: + pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT; + break; + case PP_SMC_POWER_PROFILE_VR: + pplib_workload = WORKLOAD_PPLIB_VR_BIT; + break; + case PP_SMC_POWER_PROFILE_COMPUTE: + pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT; + break; + } + + return pplib_workload; +} + +static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) +{ + uint32_t i, size = 0; + static const uint8_t + profile_mode_setting[6][4] = {{70, 60, 0, 0,}, + {70, 60, 1, 3,}, + {90, 60, 0, 0,}, + {70, 60, 0, 0,}, + {70, 90, 0, 0,}, + {30, 60, 0, 6,}, + }; + static const char *profile_name[6] = { + "BOOTUP_DEFAULT", + "3D_FULL_SCREEN", + "POWER_SAVING", + "VIDEO", + "VR", + "COMPUTE"}; + static const char *title[6] = {"NUM", + "MODE_NAME", + "BUSY_SET_POINT", + "FPS", + "USE_RLC_BUSY", + "MIN_ACTIVE_LEVEL"}; + + if (!buf) + return -EINVAL; + + size += sprintf(buf + size, "%s %16s %s %s %s %s\n",title[0], + title[1], title[2], title[3], title[4], title[5]); + + for (i = 0; i <= PP_SMC_POWER_PROFILE_COMPUTE; i++) + size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n", + i, profile_name[i], (i == hwmgr->power_profile_mode) ? 
"*" : " ", + profile_mode_setting[i][0], profile_mode_setting[i][1], + profile_mode_setting[i][2], profile_mode_setting[i][3]); + + return size; +} + +static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size) +{ + int workload_type = 0; + + if (input[size] > PP_SMC_POWER_PROFILE_COMPUTE) { + pr_err("Invalid power profile mode %ld\n", input[size]); + return -EINVAL; + } + hwmgr->power_profile_mode = input[size]; + + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = + conv_power_profile_to_pplib_workload(hwmgr->power_profile_mode); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify, + 1 << workload_type); + + return 0; +} + + static const struct pp_hwmgr_func smu10_hwmgr_funcs = { .backend_init = smu10_hwmgr_backend_init, .backend_fini = smu10_hwmgr_backend_fini, @@ -1241,6 +1319,8 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = { .powergate_sdma = smu10_powergate_sdma, .set_hard_min_dcefclk_by_freq = smu10_set_hard_min_dcefclk_by_freq, .set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq, + .get_power_profile_mode = smu10_get_power_profile_mode, + .set_power_profile_mode = smu10_set_power_profile_mode, }; int smu10_init_function_pointers(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 83d3d935f3ac..048757e8f494 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -77,7 +77,7 @@ #define PCIE_BUS_CLK 10000 #define TCLK (PCIE_BUS_CLK / 10) -static const struct profile_mode_setting smu7_profiling[7] = +static struct profile_mode_setting smu7_profiling[7] = {{0, 0, 0, 0, 0, 0, 0, 0}, {1, 0, 100, 30, 1, 0, 100, 10}, {1, 10, 0, 30, 0, 0, 0, 0}, @@ -4984,17 +4984,27 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint mode = input[size]; switch (mode) { case PP_SMC_POWER_PROFILE_CUSTOM: - if (size < 8) + if (size < 8 && size != 0) return -EINVAL; - - tmp.bupdate_sclk = input[0]; - tmp.sclk_up_hyst = input[1]; - tmp.sclk_down_hyst = input[2]; - tmp.sclk_activity = input[3]; - tmp.bupdate_mclk = input[4]; - tmp.mclk_up_hyst = input[5]; - tmp.mclk_down_hyst = input[6]; - tmp.mclk_activity = input[7]; + /* If only CUSTOM is passed in, use the saved values. 
Check + * that we actually have a CUSTOM profile by ensuring that + * the "use sclk" or the "use mclk" bits are set + */ + tmp = smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM]; + if (size == 0) { + if (tmp.bupdate_sclk == 0 && tmp.bupdate_mclk == 0) + return -EINVAL; + } else { + tmp.bupdate_sclk = input[0]; + tmp.sclk_up_hyst = input[1]; + tmp.sclk_down_hyst = input[2]; + tmp.sclk_activity = input[3]; + tmp.bupdate_mclk = input[4]; + tmp.mclk_up_hyst = input[5]; + tmp.mclk_down_hyst = input[6]; + tmp.mclk_activity = input[7]; + smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM] = tmp; + } if (!smum_update_dpm_settings(hwmgr, &tmp)) { memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting)); hwmgr->power_profile_mode = mode; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 85a536924571..384c37875cd0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -1427,6 +1427,15 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) vega10_setup_default_pcie_table(hwmgr); + /* Zero out the saved copy of the CUSTOM profile + * This will be checked when trying to set the profile + * and will require that new values be passed in + */ + data->custom_profile_mode[0] = 0; + data->custom_profile_mode[1] = 0; + data->custom_profile_mode[2] = 0; + data->custom_profile_mode[3] = 0; + /* save a copy of the default DPM table */ memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct vega10_dpm_table)); @@ -4906,13 +4915,21 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui uint8_t min_active_level; uint32_t power_profile_mode = input[size]; - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask, - 1 << power_profile_mode); - if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { - if (size == 0 || size > 4) + if (size != 0 && size != 4) return -EINVAL; + /* If size = 0 and the CUSTOM profile has been set already + * then just apply the profile. 
The copy stored in the hwmgr + * is zeroed out on init + */ + if (size == 0) { + if (data->custom_profile_mode[0] != 0) + goto out; + else + return -EINVAL; + } + data->custom_profile_mode[0] = busy_set_point = input[0]; data->custom_profile_mode[1] = FPS = input[1]; data->custom_profile_mode[2] = use_rlc_busy = input[2]; @@ -4923,6 +4940,9 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui use_rlc_busy << 16 | min_active_level<<24); } +out: + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask, + 1 << power_profile_mode); hwmgr->power_profile_mode = power_profile_mode; return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 3f349ada8de0..9b9f87b84910 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -91,6 +91,12 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr) * MP0CLK DS */ data->registry_data.disallowed_features = 0xE0041C00; + /* ECC feature should be disabled on old SMUs */ + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion); + hwmgr->smu_version = smum_get_argument(hwmgr); + if (hwmgr->smu_version < 0x282100) + data->registry_data.disallowed_features |= FEATURE_ECC_MASK; + data->registry_data.od_state_in_dc_support = 0; data->registry_data.thermal_support = 1; data->registry_data.skip_baco_hardware = 0; @@ -357,6 +363,7 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr) data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT; data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT; data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT; + data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT; for (i = 0; i < GNLD_FEATURES_MAX; i++) { data->smu_features[i].smu_feature_bitmap = @@ -427,6 +434,7 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr) hwmgr->platform_descriptor.clockStep.memoryClock = 500; data->total_active_cus = adev->gfx.cu_info.number; + data->is_custom_profile_set = false; return 0; } @@ -3029,7 +3037,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) "FCLK_DS", "MP1CLK_DS", "MP0CLK_DS", - "XGMI"}; + "XGMI", + "ECC"}; static const char *output_title[] = { "FEATURES", "BITMASK", @@ -3451,7 +3460,18 @@ static void vega20_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate) return ; data->vce_power_gated = bgate; - vega20_enable_disable_vce_dpm(hwmgr, !bgate); + if (bgate) { + vega20_enable_disable_vce_dpm(hwmgr, !bgate); + amdgpu_device_ip_set_powergating_state(hwmgr->adev, + AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + } else { + amdgpu_device_ip_set_powergating_state(hwmgr->adev, + AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_UNGATE); + vega20_enable_disable_vce_dpm(hwmgr, !bgate); + } + } static void vega20_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate) @@ -3471,6 +3491,7 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) struct vega20_single_dpm_table *dpm_table; bool vblank_too_short = false; bool disable_mclk_switching; + bool disable_fclk_switching; uint32_t i, latency; disable_mclk_switching = ((1 < hwmgr->display_config->num_display) && @@ -3546,13 +3567,20 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) if (hwmgr->display_config->nb_pstate_switch_disable) dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + if ((disable_mclk_switching && + 
(dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) || + hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value) + disable_fclk_switching = true; + else + disable_fclk_switching = false; + /* fclk */ dpm_table = &(data->dpm_table.fclk_table); dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; - if (hwmgr->display_config->nb_pstate_switch_disable) + if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching) dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; /* vclk */ @@ -3827,7 +3855,11 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui } if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { - if (size < 10) + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + if (size == 0 && !data->is_custom_profile_set) + return -EINVAL; + if (size < 10 && size != 0) return -EINVAL; result = vega20_get_activity_monitor_coeff(hwmgr, @@ -3837,6 +3869,13 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui "[SetPowerProfile] Failed to get activity monitor!", return result); + /* If size==0, then we want to apply the already-configured + * CUSTOM profile again. Just apply it, since we checked its + * validity above + */ + if (size == 0) + goto out; + switch (input[0]) { case 0: /* Gfxclk */ activity_monitor.Gfx_FPS = input[1]; @@ -3887,11 +3926,13 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui result = vega20_set_activity_monitor_coeff(hwmgr, (uint8_t *)(&activity_monitor), WORKLOAD_PPLIB_CUSTOM_BIT); + data->is_custom_profile_set = true; PP_ASSERT_WITH_CODE(!result, "[SetPowerProfile] Failed to set activity monitor!", return result); } +out: /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ workload_type = conv_power_profile_to_pplib_workload(power_profile_mode); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h index a5bc758ae097..2c3125f82b24 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h @@ -80,6 +80,7 @@ enum { GNLD_DS_MP1CLK, GNLD_DS_MP0CLK, GNLD_XGMI, + GNLD_ECC, GNLD_FEATURES_MAX }; @@ -530,6 +531,8 @@ struct vega20_hwmgr { bool pcie_parameters_override; uint32_t pcie_gen_level1; uint32_t pcie_width_level1; + + bool is_custom_profile_set; }; #define VEGA20_DPM2_NEAR_TDP_DEC 10 diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 2083139533e9..c8b168b3413b 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -524,12 +524,6 @@ struct smu_funcs struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); int (*set_od8_default_settings)(struct smu_context *smu, bool initialize); - int (*get_activity_monitor_coeff)(struct smu_context *smu, - uint8_t *table, - uint16_t workload_type); - int (*set_activity_monitor_coeff)(struct smu_context *smu, - uint8_t *table, - uint16_t workload_type); int (*conv_power_profile_to_pplib_workload)(int power_profile); int (*get_power_profile_mode)(struct smu_context *smu, char *buf); int (*set_power_profile_mode)(struct smu_context 
*smu, long *input, uint32_t size); @@ -546,6 +540,8 @@ struct smu_funcs int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed); int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed); int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed); + int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate); + }; #define smu_init_microcode(smu) \ @@ -729,6 +725,8 @@ struct smu_funcs ((smu)->funcs->get_sclk ? (smu)->funcs->get_sclk((smu), (low)) : 0) #define smu_get_mclk(smu, low) \ ((smu)->funcs->get_mclk ? (smu)->funcs->get_mclk((smu), (low)) : 0) +#define smu_set_xgmi_pstate(smu, pstate) \ + ((smu)->funcs->set_xgmi_pstate ? (smu)->funcs->set_xgmi_pstate((smu), (pstate)) : 0) extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table, @@ -745,8 +743,11 @@ extern int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool extern int smu_feature_is_supported(struct smu_context *smu, int feature_id); extern int smu_feature_set_supported(struct smu_context *smu, int feature_id, bool enable); -int smu_update_table(struct smu_context *smu, uint32_t table_id, +int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16_t exarg, void *table_data, bool drv2smu); +#define smu_update_table(smu, table_id, table_data, drv2smu) \ + smu_update_table_with_arg((smu), (table_id), 0, (table_data), (drv2smu)) + bool is_support_sw_smu(struct amdgpu_device *adev); int smu_reset(struct smu_context *smu); int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h index a2991fa2e6f8..90879e4092a3 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h @@ -85,7 +85,6 @@ #define PPSMC_MSG_SetRccPfcPmeRestoreRegister 0x36 #define PPSMC_Message_Count 0x37 - typedef uint16_t PPSMC_Result; typedef int PPSMC_Msg; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu10.h b/drivers/gpu/drm/amd/powerplay/inc/smu10.h index 9e837a5014c5..b96520528240 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu10.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu10.h @@ -136,12 +136,14 @@ #define FEATURE_CORE_CSTATES_MASK (1 << FEATURE_CORE_CSTATES_BIT) /* Workload bits */ -#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0 -#define WORKLOAD_PPLIB_VIDEO_BIT 2 -#define WORKLOAD_PPLIB_VR_BIT 3 -#define WORKLOAD_PPLIB_COMPUTE_BIT 4 -#define WORKLOAD_PPLIB_CUSTOM_BIT 5 -#define WORKLOAD_PPLIB_COUNT 6 +#define WORKLOAD_DEFAULT_BIT 0 +#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1 +#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2 +#define WORKLOAD_PPLIB_VIDEO_BIT 3 +#define WORKLOAD_PPLIB_VR_BIT 4 +#define WORKLOAD_PPLIB_COMPUTE_BIT 5 +#define WORKLOAD_PPLIB_CUSTOM_BIT 6 +#define WORKLOAD_PPLIB_COUNT 7 typedef struct { /* MP1_EXT_SCRATCH0 */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h index 63d5cf691549..195c4ae67058 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h @@ -99,7 +99,7 @@ #define FEATURE_DS_MP1CLK_BIT 30 #define FEATURE_DS_MP0CLK_BIT 31 #define FEATURE_XGMI_BIT 32 -#define FEATURE_SPARE_33_BIT 33 +#define FEATURE_ECC_BIT 33 #define FEATURE_SPARE_34_BIT 34 #define FEATURE_SPARE_35_BIT 35 #define FEATURE_SPARE_36_BIT 36 @@ -165,7 +165,8 @@ #define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT ) #define FEATURE_DS_MP1CLK_MASK (1 << 
FEATURE_DS_MP1CLK_BIT ) #define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT ) -#define FEATURE_XGMI_MASK (1 << FEATURE_XGMI_BIT ) +#define FEATURE_XGMI_MASK (1ULL << FEATURE_XGMI_BIT ) +#define FEATURE_ECC_MASK (1ULL << FEATURE_ECC_BIT ) #define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001 #define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002 diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 0e4b4b88af24..92903a4cc4d8 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1460,62 +1460,6 @@ static int smu_v11_0_set_od8_default_settings(struct smu_context *smu, return 0; } -static int smu_v11_0_set_activity_monitor_coeff(struct smu_context *smu, - uint8_t *table, uint16_t workload_type) -{ - int ret = 0; - memcpy(smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].cpu_addr, - table, smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].size); - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh, - upper_32_bits(smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].mc_address)); - if (ret) { - pr_err("[%s] Attempt to Set Dram Addr High Failed!", __func__); - return ret; - } - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow, - lower_32_bits(smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].mc_address)); - if (ret) { - pr_err("[%s] Attempt to Set Dram Addr Low Failed!", __func__); - return ret; - } - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_TransferTableSmu2Dram, - TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16)); - if (ret) { - pr_err("[%s] Attempt to Transfer Table From SMU Failed!", __func__); - return ret; - } - - return ret; -} - -static int smu_v11_0_get_activity_monitor_coeff(struct smu_context *smu, - uint8_t *table, uint16_t workload_type) -{ - int ret = 0; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh, - upper_32_bits(smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].mc_address)); - if (ret) { - pr_err("[%s] Attempt to Set Dram Addr High Failed!", __func__); - return ret; - } - - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow, - lower_32_bits(smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].mc_address)); - if (ret) { - pr_err("[%s] Attempt to Set Dram Addr Low Failed!", __func__); - return ret; - } - - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_TransferTableSmu2Dram, - TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16)); - if (ret) { - pr_err("[%s] Attempt to Transfer Table From SMU Failed!", __func__); - return ret; - } - - return ret; -} - static int smu_v11_0_conv_power_profile_to_pplib_workload(int power_profile) { int pplib_workload = 0; @@ -1584,9 +1528,8 @@ static int smu_v11_0_get_power_profile_mode(struct smu_context *smu, char *buf) for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ workload_type = smu_v11_0_conv_power_profile_to_pplib_workload(i); - result = smu_v11_0_get_activity_monitor_coeff(smu, - (uint8_t *)(&activity_monitor), - workload_type); + result = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF, + workload_type, &activity_monitor, false); if (result) { pr_err("[%s] Failed to get activity monitor!", __func__); return result; @@ -1658,7 +1601,7 @@ static int smu_v11_0_get_power_profile_mode(struct smu_context *smu, char *buf) static int smu_v11_0_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) { DpmActivityMonitorCoeffInt_t activity_monitor; - int 
workload_type, ret = 0; + int workload_type = 0, ret = 0; smu->power_profile_mode = input[size]; @@ -1668,9 +1611,8 @@ static int smu_v11_0_set_power_profile_mode(struct smu_context *smu, long *input } if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { - ret = smu_v11_0_get_activity_monitor_coeff(smu, - (uint8_t *)(&activity_monitor), - WORKLOAD_PPLIB_CUSTOM_BIT); + ret = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, &activity_monitor, false); if (ret) { pr_err("[%s] Failed to get activity monitor!", __func__); return ret; @@ -1723,9 +1665,8 @@ static int smu_v11_0_set_power_profile_mode(struct smu_context *smu, long *input break; } - ret = smu_v11_0_set_activity_monitor_coeff(smu, - (uint8_t *)(&activity_monitor), - WORKLOAD_PPLIB_CUSTOM_BIT); + ret = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_COMPUTE_BIT, &activity_monitor, true); if (ret) { pr_err("[%s] Failed to set activity monitor!", __func__); return ret; @@ -1952,6 +1893,18 @@ set_fan_speed_rpm_failed: return ret; } +static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, + uint32_t pstate) +{ + int ret = 0; + mutex_lock(&(smu->mutex)); + ret = smu_send_smc_msg_with_param(smu, + SMU_MSG_SetXgmiMode, + pstate ? XGMI_STATE_D0 : XGMI_STATE_D3); + mutex_unlock(&(smu->mutex)); + return ret; +} + static const struct smu_funcs smu_v11_0_funcs = { .init_microcode = smu_v11_0_init_microcode, .load_microcode = smu_v11_0_load_microcode, @@ -1994,8 +1947,6 @@ static const struct smu_funcs smu_v11_0_funcs = { .get_sclk = smu_v11_0_dpm_get_sclk, .get_mclk = smu_v11_0_dpm_get_mclk, .set_od8_default_settings = smu_v11_0_set_od8_default_settings, - .get_activity_monitor_coeff = smu_v11_0_get_activity_monitor_coeff, - .set_activity_monitor_coeff = smu_v11_0_set_activity_monitor_coeff, .conv_power_profile_to_pplib_workload = smu_v11_0_conv_power_profile_to_pplib_workload, .get_power_profile_mode = smu_v11_0_get_power_profile_mode, .set_power_profile_mode = smu_v11_0_set_power_profile_mode, @@ -2008,6 +1959,7 @@ static const struct smu_funcs smu_v11_0_funcs = { .get_fan_speed_percent = smu_v11_0_get_fan_speed_percent, .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, + .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, }; void smu_v11_0_set_smu_funcs(struct smu_context *smu) @@ -2015,7 +1967,6 @@ void smu_v11_0_set_smu_funcs(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; smu->funcs = &smu_v11_0_funcs; - switch (adev->asic_type) { case CHIP_VEGA20: vega20_set_ppt_funcs(smu); |
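/*
 * Illustrative sketch (not part of the patch): the smu11_driver_if.h hunk
 * above switches the bit-32/33 feature masks to 1ULL shifts.  (1 << 33)
 * shifts a 32-bit int past its width, which is undefined behaviour and
 * yields 0 or garbage in practice, so the old FEATURE_XGMI_MASK could
 * never reliably match a 64-bit feature word.  Standalone demonstration.
 */
#include <stdint.h>
#include <stdio.h>

#define FEATURE_ECC_BIT   33
#define FEATURE_ECC_MASK  (1ULL << FEATURE_ECC_BIT)  /* correct: 64-bit shift */
/* (1 << FEATURE_ECC_BIT) would overflow int: undefined behaviour. */

int main(void)
{
	uint64_t enabled_features = FEATURE_ECC_MASK;

	printf("mask=0x%016llx set=%d\n",
	       (unsigned long long)FEATURE_ECC_MASK,
	       (enabled_features & FEATURE_ECC_MASK) != 0);
	return 0;
}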