From 6df442a03d1a839242397259fa13168858c52413 Mon Sep 17 00:00:00 2001 From: Le Ma <le.ma@amd.com> Date: Mon, 25 Apr 2022 22:19:58 +0800 Subject: drm/amdgpu: add new doorbell assignment table for aqua_vanjaram Four basic reasons as below to do the change: 1. number of ring expand a lot on aqua_vanjaram, and adjustment on old assignment cannot make each ring in a continuous doorbell space. 2. the SDMA doorbell index should not exceed 0x1FF on aqua_vanjaram due to regDOORBELLx_CTRL_ENTRY.BIF_DOORBELLx_RANGE_OFFSET_ENTRY field width. 3. re-design the doorbell assignment and unify the calculation as "start + ring/inst id" will make the code much concise. 4. only defining the START/END makes the table look simple v2: (Lijo) 1. replace name 2. use num_inst_per_aid/sdma_doorbell_range instead of hardcoding Signed-off-by: Le Ma <le.ma@amd.com> Reviewed-by: Lijo Lazar <lijo.lazar@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- .../gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 52 ++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c new file mode 100644 index 000000000000..3b97bc922d4a --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -0,0 +1,52 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "soc15.h" + +#include "soc15_common.h" + +void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev) +{ + int i; + + adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START; + + adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START; + + adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START; + adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END; + + adev->doorbell_index.sdma_doorbell_range = 20; + for (i = 0; i < adev->num_aid * adev->sdma.num_inst_per_aid; i++) + adev->doorbell_index.sdma_engine[i] = + AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START + + i * (adev->doorbell_index.sdma_doorbell_range >> 1); + + adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH; + adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START; + + adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP; + adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP; + + adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1; +} -- cgit v1.2.3 From cab7d478da112e66f2ad8eec7dcfc0aa2a5babe1 Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Wed, 29 Jun 2022 15:45:06 +0530 Subject: drm/amdgpu: Add IP instance map for aqua vanjaram Add XCC logical to physical instance map for aqua vanjaram v2: Keep look up table only for required IPs, for others return default mapping (Felix). Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Reviewed-by: Le Ma <Le.Ma@amd.com> Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 13 ++++++++ .../gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 39 ++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/soc15.h | 1 + 3 files changed, 53 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index b58d94dc1924..915ae0bcdab2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1951,6 +1951,17 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev) return 0; } +static void amdgpu_discovery_ip_map_init(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 4, 3): + aqua_vanjaram_ip_map_init(adev); + break; + default: + break; + } +} + int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) { int r; @@ -2128,6 +2139,8 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) break; } + amdgpu_discovery_ip_map_init(adev); + switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 0, 1): case IP_VERSION(9, 2, 1): diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 3b97bc922d4a..dbff8220fc19 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -50,3 +50,42 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev) adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1; } + +static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev, + enum amd_hw_ip_block_type block, + int8_t inst) +{ + int8_t dev_inst; + + switch (block) { + case GC_HWIP: + dev_inst = adev->ip_map.dev_inst[block][inst]; 
+ break; + default: + /* For rest of the IPs, no look up required. + * Assume 'logical instance == physical instance' for all configs. */ + dev_inst = inst; + break; + } + + return dev_inst; +} + +void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev) +{ + int xcc_mask; + int l, i; + + /* Map GC instances */ + l = 0; + xcc_mask = adev->gfx.xcc_mask; + while (xcc_mask) { + i = ffs(xcc_mask) - 1; + adev->ip_map.dev_inst[GC_HWIP][l++] = i; + xcc_mask &= ~(1 << i); + } + for (; l < HWIP_MAX_INSTANCE; l++) + adev->ip_map.dev_inst[GC_HWIP][l] = -1; + + adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst; +} diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index 2b41ee968dd1..d8a2a6c2c6e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -111,6 +111,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev); int vega20_reg_base_init(struct amdgpu_device *adev); int arct_reg_base_init(struct amdgpu_device *adev); int aldebaran_reg_base_init(struct amdgpu_device *adev); +void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev); void vega10_doorbell_index_init(struct amdgpu_device *adev); void vega20_doorbell_index_init(struct amdgpu_device *adev); -- cgit v1.2.3 From cf24f6a0d7137bc703a23187ffa4a65ed3f17820 Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Wed, 29 Jun 2022 16:26:49 +0530 Subject: drm/amdgpu: Add mask for SDMA instances Add a mask of SDMA instances available for use. On certain ASIC configs, not all SDMA instances are available for software use. v2: Change sdma mask type to uint32_t (Le) Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Reviewed-by: Le Ma <Le.Ma@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index dbff8220fc19..5a1511a22367 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -37,7 +37,7 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev) adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END; adev->doorbell_index.sdma_doorbell_range = 20; - for (i = 0; i < adev->num_aid * adev->sdma.num_inst_per_aid; i++) + for (i = 0; i < adev->sdma.num_instances; i++) adev->doorbell_index.sdma_engine[i] = AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START + i * (adev->doorbell_index.sdma_doorbell_range >> 1); -- cgit v1.2.3 From 5c606836eff823ea14c481ad6374bc6d87121182 Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Wed, 29 Jun 2022 21:04:39 +0530 Subject: drm/amdgpu: Use SDMA instance table for aqua vanjaram For aqua vanjaram, add mapping for logical to physical instances. v2: Register accesses on bare metal should be based on physical instance. Use GET_INST() to get physical instance. 
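
Worked out in isolation, the table this series builds is just the set bits of the instance
mask in ascending order. Below is a standalone userspace sketch, not driver code: only the
ffs() loop mirrors aqua_vanjaram_ip_map_init(), the mask value and printing are made up.

#include <stdint.h>
#include <stdio.h>
#include <strings.h>		/* ffs() */

#define HWIP_MAX_INSTANCE 8	/* enough for this sketch */

/* Build a logical->physical table from a sparse instance mask:
 * logical id l maps to the l-th set bit; unused slots are -1.
 */
static void build_dev_inst(uint32_t mask, int8_t dev_inst[HWIP_MAX_INSTANCE])
{
	int l = 0, i;

	while (mask && l < HWIP_MAX_INSTANCE) {
		i = ffs(mask) - 1;
		dev_inst[l++] = i;
		mask &= ~(1u << i);
	}
	for (; l < HWIP_MAX_INSTANCE; l++)
		dev_inst[l] = -1;
}

int main(void)
{
	int8_t sdma[HWIP_MAX_INSTANCE];
	int l;

	/* e.g. only SDMA instances 0, 1, 4 and 5 are usable */
	build_dev_inst(0x33, sdma);
	for (l = 0; l < HWIP_MAX_INSTANCE && sdma[l] >= 0; l++)
		printf("logical SDMA %d -> physical %d\n", l, sdma[l]);
	return 0;
}

Ring and bookkeeping code keeps using the logical id; only register access resolves the
physical id through this table (GET_INST() in the driver).
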
Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Reviewed-by: Le Ma <Le.Ma@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 5a1511a22367..12379c374457 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -59,6 +59,7 @@ static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev, switch (block) { case GC_HWIP: + case SDMA0_HWIP: dev_inst = adev->ip_map.dev_inst[block][inst]; break; default: @@ -73,7 +74,7 @@ static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev, void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev) { - int xcc_mask; + int xcc_mask, sdma_mask; int l, i; /* Map GC instances */ @@ -87,5 +88,15 @@ void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev) for (; l < HWIP_MAX_INSTANCE; l++) adev->ip_map.dev_inst[GC_HWIP][l] = -1; + l = 0; + sdma_mask = adev->sdma.sdma_mask; + while (sdma_mask) { + i = ffs(sdma_mask) - 1; + adev->ip_map.dev_inst[SDMA0_HWIP][l++] = i; + sdma_mask &= ~(1 << i); + } + for (; l < HWIP_MAX_INSTANCE; l++) + adev->ip_map.dev_inst[SDMA0_HWIP][l] = -1; + adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst; } -- cgit v1.2.3 From 2fa480d36eb302712e48dce4d2f6564b24426be3 Mon Sep 17 00:00:00 2001 From: Le Ma <le.ma@amd.com> Date: Tue, 27 Sep 2022 17:51:33 +0800 Subject: drm/amdgpu: add helpers to access registers on different AIDs SMN address which is larger than 32bit has different indications through bit[34:32] on different AIDs. 
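
As a standalone illustration of the encoding (only encode_ext_smn() mirrors the
aqua_vanjaram_encode_ext_smn_addressing() helper below; the loop and printing are
illustrative):

#include <stdint.h>
#include <stdio.h>

/* bit[34]    - cross-AID access
 * bit[33:32] - target AID id (0..3)
 * AID0 is local routing, so it encodes to 0.
 */
static uint64_t encode_ext_smn(int ext_id)
{
	if (ext_id == 0)
		return 0;
	return ((uint64_t)(ext_id & 0x3) << 32) | (1ULL << 34);
}

int main(void)
{
	for (int id = 0; id < 4; id++)
		printf("AID%d -> SMN offset 0x%010llx\n", id,
		       (unsigned long long)encode_ext_smn(id));
	return 0;
}

The SOC15 _EXT register macros then add this offset to the register's byte address before
handing it to RREG32/WREG32_PCIE_EXT.
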
v2: put smn addressing of different AIDs into asic specific place v3: change to ext_id/ext_offset naming Signed-off-by: Le Ma <le.ma@amd.com> Reviewed-by: Lijo Lazar <lijo.lazar@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++ drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 19 +++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/soc15.c | 1 + drivers/gpu/drm/amd/amdgpu/soc15.h | 1 + drivers/gpu/drm/amd/amdgpu/soc15_common.h | 10 ++++++++++ 5 files changed, 33 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index e17a3697a23f..4c4ce33c8359 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -574,6 +574,8 @@ struct amdgpu_asic_funcs { /* query video codecs */ int (*query_video_codecs)(struct amdgpu_device *adev, bool encode, const struct amdgpu_video_codecs **codecs); + /* encode "> 32bits" smn addressing */ + u64 (*encode_ext_smn_addressing)(int ext_id); }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 12379c374457..2616bdb40418 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -100,3 +100,22 @@ void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev) adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst; } + +/* Fixed pattern for smn addressing on different AIDs: + * bit[34]: indicate cross AID access + * bit[33:32]: indicate target AID id + * AID id range is 0 ~ 3 as maximum AID number is 4. + */ +u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id) +{ + u64 ext_offset; + + /* local routing and bit[34:32] will be zeros */ + if (ext_id == 0) + return 0; + + /* Initiated from host, accessing to all non-zero aids are cross traffic */ + ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34); + + return ext_offset; +} diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 29d2e08834c7..206013d276c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -873,6 +873,7 @@ static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs = .supports_baco = &soc15_supports_baco, .pre_asic_init = &soc15_pre_asic_init, .query_video_codecs = &soc15_query_video_codecs, + .encode_ext_smn_addressing = &aqua_vanjaram_encode_ext_smn_addressing, }; static int soc15_common_early_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index 9cc2dda087c4..dd48db09aa51 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -112,6 +112,7 @@ int vega20_reg_base_init(struct amdgpu_device *adev); int arct_reg_base_init(struct amdgpu_device *adev); int aldebaran_reg_base_init(struct amdgpu_device *adev); void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev); +u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id); void vega10_doorbell_index_init(struct amdgpu_device *adev); void vega20_doorbell_index_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index 39e4406da4ae..1c9e924b5f8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -188,4 +188,14 @@ #define RREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset) \ 
__RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, AMDGPU_REGS_RLC, ip##_HWIP) +/* inst equals to ext for some IPs */ +#define RREG32_SOC15_EXT(ip, inst, reg, ext) \ + RREG32_PCIE_EXT((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) * 4 \ + + adev->asic_funcs->encode_ext_smn_addressing(ext)) \ + +#define WREG32_SOC15_EXT(ip, inst, reg, ext, value) \ + WREG32_PCIE_EXT((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) * 4 \ + + adev->asic_funcs->encode_ext_smn_addressing(ext), \ + value) \ + #endif -- cgit v1.2.3 From 9cb18287d8f1c4549d95280e2cf60f4d1bab64f8 Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Mon, 19 Sep 2022 17:34:02 +0530 Subject: drm/amdgpu: Add SOC partition funcs for GC v9.4.3 Switching the partition mode configuration of ASIC is SOC level function rather than something at GFX core level. Add partition mode switch functions as SOC specific callbacks. Implement the XCP manager callbacks needed for partition switch for GC 9.4.3 based ASICs. Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 4 + .../gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 232 +++++++++++++++++++++ 2 files changed, 236 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 5adfe4277641..8be4ab50b171 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -278,8 +278,12 @@ struct amdgpu_gfx_funcs { (*query_partition_mode)(struct amdgpu_device *adev); enum amdgpu_memory_partition (*query_mem_partition_mode)(struct amdgpu_device *adev); + int (*switch_partition_mode)(struct amdgpu_device *adev, enum amdgpu_gfx_partition mode); + + int (*switch_gfx_partition_mode)(struct amdgpu_device *adev, + int num_xccs_per_xcp); }; struct sq_work { diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 2616bdb40418..4264fbd267ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -24,6 +24,12 @@ #include "soc15.h" #include "soc15_common.h" +#include "amdgpu_xcp.h" + +#define XCP_INST_MASK(num_inst, xcp_id) \ + (num_inst ? 
GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0) + +#define AMDGPU_XCP_OPS_KFD (1 << 0) void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev) { @@ -119,3 +125,229 @@ u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id) return ext_offset; } + +static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) +{ + enum amdgpu_gfx_partition mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; + struct amdgpu_device *adev = xcp_mgr->adev; + + if (adev->nbio.funcs->get_compute_partition_mode) + mode = adev->nbio.funcs->get_compute_partition_mode(adev); + + return mode; +} + +int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode) +{ + int num_xcc, num_xcc_per_xcp = 0; + + num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask); + + switch (mode) { + case AMDGPU_SPX_PARTITION_MODE: + num_xcc_per_xcp = num_xcc; + break; + case AMDGPU_DPX_PARTITION_MODE: + num_xcc_per_xcp = num_xcc / 2; + break; + case AMDGPU_TPX_PARTITION_MODE: + num_xcc_per_xcp = num_xcc / 3; + break; + case AMDGPU_QPX_PARTITION_MODE: + num_xcc_per_xcp = num_xcc / 4; + break; + case AMDGPU_CPX_PARTITION_MODE: + num_xcc_per_xcp = 1; + break; + } + + return num_xcc_per_xcp; +} + +int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, + enum AMDGPU_XCP_IP_BLOCK ip_id, + struct amdgpu_xcp_ip *ip) +{ + struct amdgpu_device *adev = xcp_mgr->adev; + int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp; + int num_sdma, num_vcn; + + num_sdma = adev->sdma.num_instances; + num_vcn = adev->vcn.num_vcn_inst; + + switch (xcp_mgr->mode) { + case AMDGPU_SPX_PARTITION_MODE: + num_sdma_xcp = num_sdma; + num_vcn_xcp = num_vcn; + break; + case AMDGPU_DPX_PARTITION_MODE: + num_sdma_xcp = num_sdma / 2; + num_vcn_xcp = num_vcn / 2; + break; + case AMDGPU_TPX_PARTITION_MODE: + num_sdma_xcp = num_sdma / 3; + num_vcn_xcp = num_vcn / 3; + break; + case AMDGPU_QPX_PARTITION_MODE: + num_sdma_xcp = num_sdma / 4; + num_vcn_xcp = num_vcn / 4; + break; + case AMDGPU_CPX_PARTITION_MODE: + num_sdma_xcp = 2; + num_vcn_xcp = num_vcn ? 1 : 0; + break; + default: + return -EINVAL; + } + + num_xcc_xcp = adev->gfx.num_xcc_per_xcp; + + switch (ip_id) { + case AMDGPU_XCP_GFXHUB: + ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id); + /* TODO : Assign IP funcs */ + break; + case AMDGPU_XCP_GFX: + ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id); + /* TODO : Assign IP funcs */ + break; + case AMDGPU_XCP_SDMA: + ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id); + /* TODO : Assign IP funcs */ + break; + case AMDGPU_XCP_VCN: + ip->inst_mask = XCP_INST_MASK(num_vcn_xcp, xcp_id); + /* TODO : Assign IP funcs */ + break; + default: + return -EINVAL; + } + + ip->ip_id = ip_id; + + return 0; +} + +static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr, + enum amdgpu_gfx_partition mode) +{ + int num_xcc, num_xccs_per_xcp; + + num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask); + switch (mode) { + case AMDGPU_SPX_PARTITION_MODE: + return num_xcc > 0; + case AMDGPU_DPX_PARTITION_MODE: + return (num_xcc % 4) == 0; + case AMDGPU_TPX_PARTITION_MODE: + return (num_xcc % 3) == 0; + case AMDGPU_QPX_PARTITION_MODE: + num_xccs_per_xcp = num_xcc / 4; + return (num_xccs_per_xcp >= 2); + case AMDGPU_CPX_PARTITION_MODE: + return (num_xcc > 1); + default: + return false; + } + + return false; +} + +static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) +{ + /* TODO: + * Stop user queues and threads, and make sure GPU is empty of work. 
+ */ + + if (flags & AMDGPU_XCP_OPS_KFD) + amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev); + + return 0; +} + +static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) +{ + int ret = 0; + + if (flags & AMDGPU_XCP_OPS_KFD) { + amdgpu_amdkfd_device_probe(xcp_mgr->adev); + amdgpu_amdkfd_device_init(xcp_mgr->adev); + /* If KFD init failed, return failure */ + if (!xcp_mgr->adev->kfd.init_complete) + ret = -EIO; + } + + return ret; +} + +static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, + int mode, int *num_xcps) +{ + int num_xcc_per_xcp, num_xcc, ret; + struct amdgpu_device *adev; + u32 flags = 0; + + adev = xcp_mgr->adev; + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + + if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) + return -EINVAL; + + if (adev->kfd.init_complete) + flags |= AMDGPU_XCP_OPS_KFD; + + if (flags & AMDGPU_XCP_OPS_KFD) { + ret = amdgpu_amdkfd_check_and_lock_kfd(adev); + if (ret) + goto out; + } + + ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags); + if (ret) + goto unlock; + + num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode); + if (adev->gfx.funcs->switch_gfx_partition_mode) + adev->gfx.funcs->switch_gfx_partition_mode(xcp_mgr->adev, + num_xcc_per_xcp); + + if (adev->nbio.funcs->set_compute_partition_mode) + adev->nbio.funcs->set_compute_partition_mode(adev, mode); + + ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags); +unlock: + if (flags & AMDGPU_XCP_OPS_KFD) + amdgpu_amdkfd_unlock_kfd(adev); +out: + return ret; +} + +int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, + enum AMDGPU_XCP_IP_BLOCK ip_id, + struct amdgpu_xcp_ip *ip) +{ + if (!ip) + return -EINVAL; + + return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip); +} + +struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = { + .switch_partition_mode = &aqua_vanjaram_switch_partition_mode, + .query_partition_mode = &aqua_vanjaram_query_partition_mode, + .get_ip_details = &aqua_vanjaram_get_xcp_ip_details +}; + +static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) +{ + int ret; + + ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1, + &aqua_vanjaram_xcp_funcs); + if (ret) + return ret; + + /* TODO: Default memory node affinity init */ + + return ret; +} -- cgit v1.2.3 From e56c9ef6cb35f33dc83f635419ae55adf69db9fc Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Fri, 23 Sep 2022 14:43:17 +0530 Subject: drm/amdgpu: Add soc config init for GC9.4.3 ASICs Add function to initialize soc configuration information for GC 9.4.3 ASICs. Use it to map IPs and other SOC related information once IP configuration information is available through discovery. For GC9.4.3 compute partition related callbacks are initialized as part of configuration init. 
Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 13 +++++++++++++ drivers/gpu/drm/amd/amdgpu/soc15.h | 1 + 3 files changed, 17 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index b602df4b445a..e6d10a3f1753 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1983,11 +1983,11 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev) return 0; } -static void amdgpu_discovery_ip_map_init(struct amdgpu_device *adev) +static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev) { switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 4, 3): - aqua_vanjaram_ip_map_init(adev); + aqua_vanjaram_init_soc_config(adev); break; default: break; @@ -2171,7 +2171,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) break; } - amdgpu_discovery_ip_map_init(adev); + amdgpu_discovery_init_soc_config(adev); switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 0, 1): diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 4264fbd267ae..a9de229a2828 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -351,3 +351,16 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) return ret; } + +int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) +{ + int ret; + + ret = aqua_vanjaram_xcp_mgr_init(adev); + if (ret) + return ret; + + aqua_vanjaram_ip_map_init(adev); + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index dd48db09aa51..eac54042c6c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -113,6 +113,7 @@ int arct_reg_base_init(struct amdgpu_device *adev); int aldebaran_reg_base_init(struct amdgpu_device *adev); void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev); u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id); +int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev); void vega10_doorbell_index_init(struct amdgpu_device *adev); void vega20_doorbell_index_init(struct amdgpu_device *adev); -- cgit v1.2.3 From 8e7fd19380f9187dae3ad18a61793b1752dfa097 Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Wed, 16 Nov 2022 17:15:47 +0530 Subject: drm/amdgpu: Switch to SOC partition funcs For GFXv9.4.3, use SOC level partition switch implementation rather than keeping them at GFX IP level. Change the exisiting implementation in GFX IP for keeping partition mode and restrict it to only GFX related switch. 
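
The resulting split can be sketched in isolation: the SOC layer turns a partition mode into
"XCCs per partition" and the GFX IP only programs that count. In the standalone sketch below,
only the two helpers mirror __aqua_vanjaram_get_xcc_per_xcp() and
__aqua_vanjaram_is_valid_mode() from earlier in this series; the mode names, XCC count and
printing are illustrative.

#include <stdbool.h>
#include <stdio.h>

enum mode { SPX, DPX, TPX, QPX, CPX };

/* SOC layer: partition mode -> number of XCCs per partition */
static int xcc_per_xcp(enum mode m, int num_xcc)
{
	switch (m) {
	case SPX: return num_xcc;
	case DPX: return num_xcc / 2;
	case TPX: return num_xcc / 3;
	case QPX: return num_xcc / 4;
	case CPX: return 1;
	}
	return 0;
}

/* SOC layer: which modes the available XCC count allows */
static bool mode_valid(enum mode m, int num_xcc)
{
	switch (m) {
	case SPX: return num_xcc > 0;
	case DPX: return (num_xcc % 4) == 0;
	case TPX: return (num_xcc % 3) == 0;
	case QPX: return (num_xcc / 4) >= 2;
	case CPX: return num_xcc > 1;
	}
	return false;
}

int main(void)
{
	static const char * const names[] = { "SPX", "DPX", "TPX", "QPX", "CPX" };
	int num_xcc = 8;	/* example: fully populated part */

	for (int m = SPX; m <= CPX; m++) {
		if (!mode_valid(m, num_xcc))
			continue;
		printf("%s: %d partition(s) x %d XCC(s)\n", names[m],
		       num_xcc / xcc_per_xcp(m, num_xcc),
		       xcc_per_xcp(m, num_xcc));
	}
	return 0;
}
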
Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 31 ++---------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 5 -- .../gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 4 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 59 ++++------------------ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 8 +-- 5 files changed, 20 insertions(+), 87 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 5ff49737d7c6..f895a4b8ca0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -28,6 +28,7 @@ #include "amdgpu_gfx.h" #include "amdgpu_rlc.h" #include "amdgpu_ras.h" +#include "amdgpu_xcp.h" /* delay 0.1 second to enable gfx off feature */ #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100) @@ -1170,10 +1171,10 @@ static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - enum amdgpu_gfx_partition mode; + int mode; char *partition_mode; - mode = adev->gfx.funcs->query_partition_mode(adev); + mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr); switch (mode) { case AMDGPU_SPX_PARTITION_MODE: @@ -1254,31 +1255,7 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, return -EINVAL; } - if (!adev->kfd.init_complete) - return -EPERM; - - mutex_lock(&adev->gfx.partition_mutex); - - if (mode == adev->gfx.funcs->query_partition_mode(adev)) - goto out; - - ret = amdgpu_amdkfd_check_and_lock_kfd(adev); - if (ret) - goto out; - - amdgpu_amdkfd_device_fini_sw(adev); - - adev->gfx.funcs->switch_partition_mode(adev, mode); - - amdgpu_amdkfd_device_probe(adev); - amdgpu_amdkfd_device_init(adev); - /* If KFD init failed, return failure */ - if (!adev->kfd.init_complete) - ret = -EIO; - - amdgpu_amdkfd_unlock_kfd(adev); -out: - mutex_unlock(&adev->gfx.partition_mutex); + ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 8be4ab50b171..2287768ed141 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -278,11 +278,7 @@ struct amdgpu_gfx_funcs { (*query_partition_mode)(struct amdgpu_device *adev); enum amdgpu_memory_partition (*query_mem_partition_mode)(struct amdgpu_device *adev); - int (*switch_partition_mode)(struct amdgpu_device *adev, - enum amdgpu_gfx_partition mode); - - int (*switch_gfx_partition_mode)(struct amdgpu_device *adev, int num_xccs_per_xcp); }; @@ -416,7 +412,6 @@ struct amdgpu_gfx { bool cp_gfx_shadow; /* for gfx11 */ - enum amdgpu_gfx_partition partition_mode; uint16_t xcc_mask; enum amdgpu_memory_partition mem_partition_mode; uint32_t num_xcc_per_xcp; diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index a9de229a2828..bbcdececfd2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -307,8 +307,8 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, goto unlock; num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode); - if (adev->gfx.funcs->switch_gfx_partition_mode) - 
adev->gfx.funcs->switch_gfx_partition_mode(xcp_mgr->adev, + if (adev->gfx.funcs->switch_partition_mode) + adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev, num_xcc_per_xcp); if (adev->nbio.funcs->set_compute_partition_mode) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 93a0baa4515c..d684037a7a5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -38,6 +38,7 @@ #include "gc/gc_9_4_3_sh_mask.h" #include "gfx_v9_4_3.h" +#include "amdgpu_xcp.h" MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin"); @@ -614,61 +615,23 @@ gfx_v9_4_3_query_memory_partition(struct amdgpu_device *adev) return mode; } -static enum amdgpu_gfx_partition -gfx_v9_4_3_query_compute_partition(struct amdgpu_device *adev) -{ - enum amdgpu_gfx_partition mode = adev->gfx.partition_mode; - - if (adev->nbio.funcs->get_compute_partition_mode) - mode = adev->nbio.funcs->get_compute_partition_mode(adev); - - return mode; -} - static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, - enum amdgpu_gfx_partition mode) + int num_xccs_per_xcp) { + int i, num_xcc; u32 tmp = 0; - int num_xcc_per_partition, i, num_xcc; num_xcc = NUM_XCC(adev->gfx.xcc_mask); - switch (mode) { - case AMDGPU_SPX_PARTITION_MODE: - num_xcc_per_partition = num_xcc; - break; - case AMDGPU_DPX_PARTITION_MODE: - num_xcc_per_partition = num_xcc / 2; - break; - case AMDGPU_TPX_PARTITION_MODE: - num_xcc_per_partition = num_xcc / 3; - break; - case AMDGPU_QPX_PARTITION_MODE: - num_xcc_per_partition = num_xcc / 4; - break; - case AMDGPU_CPX_PARTITION_MODE: - num_xcc_per_partition = 1; - break; - default: - return -EINVAL; - } - - /* TODO: - * Stop user queues and threads, and make sure GPU is empty of work. 
- */ for (i = 0; i < num_xcc; i++) { tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP, - num_xcc_per_partition); + num_xccs_per_xcp); tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID, - i % num_xcc_per_partition); + i % num_xccs_per_xcp); WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL, tmp); } - if (adev->nbio.funcs->set_compute_partition_mode) - adev->nbio.funcs->set_compute_partition_mode(adev, mode); - - adev->gfx.num_xcc_per_xcp = num_xcc_per_partition; - adev->gfx.partition_mode = mode; + adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp; return 0; } @@ -680,7 +643,6 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { .read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs, .read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs, .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q, - .query_partition_mode = &gfx_v9_4_3_query_compute_partition, .switch_partition_mode = &gfx_v9_4_3_switch_compute_partition, .query_mem_partition_mode = &gfx_v9_4_3_query_memory_partition, }; @@ -1899,10 +1861,6 @@ static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id) return r; } - if (adev->gfx.partition_mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) - gfx_v9_4_3_switch_compute_partition(adev, - amdgpu_user_partt_mode); - /* set the virtual and physical id based on partition_mode */ gfx_v9_4_3_xcc_program_xcc_id(adev, xcc_id); @@ -1931,6 +1889,9 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) { int r, i, num_xcc; + if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr) == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) + amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, amdgpu_user_partt_mode); + num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { r = gfx_v9_4_3_xcc_cp_resume(adev, i); @@ -2146,8 +2107,6 @@ static int gfx_v9_4_3_early_init(void *handle) num_xcc = NUM_XCC(adev->gfx.xcc_mask); - adev->gfx.partition_mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; - adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), AMDGPU_MAX_COMPUTE_RINGS); gfx_v9_4_3_set_kiq_pm4_funcs(adev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 293787290e36..7a963d0a34e2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -34,6 +34,7 @@ #include "kfd_smi_events.h" #include "kfd_migrate.h" #include "amdgpu.h" +#include "amdgpu_xcp.h" #define MQD_SIZE_ALIGNED 768 @@ -592,7 +593,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, struct kfd_node *node; uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd; unsigned int max_proc_per_quantum; - int num_xcd; + int num_xcd, partition_mode; kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, KGD_ENGINE_MEC1); @@ -644,8 +645,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, * If the VMID range changes for GFX9.4.3, then this code MUST be * revisited. 
*/ + partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr); if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && - kfd->adev->gfx.partition_mode == AMDGPU_CPX_PARTITION_MODE && + partition_mode == AMDGPU_CPX_PARTITION_MODE && kfd->num_nodes != 1) { vmid_num_kfd /= 2; first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd*2; @@ -761,7 +763,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, node->start_xcc_id = node->num_xcc_per_node * i; if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && - kfd->adev->gfx.partition_mode == AMDGPU_CPX_PARTITION_MODE && + partition_mode == AMDGPU_CPX_PARTITION_MODE && kfd->num_nodes != 1) { /* For GFX9.4.3 and CPX mode, first XCD gets VMID range * 4-9 and second XCD gets VMID range 10-15. -- cgit v1.2.3 From 845c9b313f3122191c847fcc4092492ce039542a Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Fri, 23 Sep 2022 17:15:15 +0530 Subject: drm/amdgpu: Add XCP IP callback funcs for each IP Initialize with the IP specific functions needed for GFXHUB, GFX and SDMA. Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 9 ++++++--- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h | 2 ++ drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h | 2 ++ drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h | 2 ++ 4 files changed, 12 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index bbcdececfd2f..2cbac0bccd80 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -25,6 +25,9 @@ #include "soc15_common.h" #include "amdgpu_xcp.h" +#include "gfx_v9_4_3.h" +#include "gfxhub_v1_2.h" +#include "sdma_v4_4_2.h" #define XCP_INST_MASK(num_inst, xcp_id) \ (num_inst ? 
GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0) @@ -205,15 +208,15 @@ int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, switch (ip_id) { case AMDGPU_XCP_GFXHUB: ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id); - /* TODO : Assign IP funcs */ + ip->ip_funcs = &gfxhub_v1_2_xcp_funcs; break; case AMDGPU_XCP_GFX: ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id); - /* TODO : Assign IP funcs */ + ip->ip_funcs = &gfx_v9_4_3_xcp_funcs; break; case AMDGPU_XCP_SDMA: ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id); - /* TODO : Assign IP funcs */ + ip->ip_funcs = &sdma_v4_4_2_xcp_funcs; break; case AMDGPU_XCP_VCN: ip->inst_mask = XCP_INST_MASK(num_vcn_xcp, xcp_id); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h index 4b530f4c1295..42d67ee0e7ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h @@ -26,4 +26,6 @@ extern const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block; +extern struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs; + #endif /* __GFX_V9_4_3_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h index e2d508f5a7ee..997e9f90c990 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h @@ -26,4 +26,6 @@ extern const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs; +extern struct amdgpu_xcp_ip_funcs gfxhub_v1_2_xcp_funcs; + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h index 4814e8a074d6..d516145529bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h @@ -27,4 +27,6 @@ extern const struct amd_ip_funcs sdma_v4_4_2_ip_funcs; extern const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block; +extern struct amdgpu_xcp_ip_funcs sdma_v4_4_2_xcp_funcs; + #endif -- cgit v1.2.3 From dc6df2095deaaefe38a94d62a51b0d07c0794eaf Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Mon, 28 Nov 2022 09:57:51 +0530 Subject: drm/amdgpu: Move generic logic to soc config Move soc specific configuration details to aqua vanjaram specific file. 
Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 2cbac0bccd80..a6204b588829 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -359,6 +359,15 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) { int ret; + /* generally 1 AID supports 4 instances */ + adev->sdma.num_inst_per_aid = 4; + adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask); + + adev->vcn.num_inst_per_aid = 1; + adev->vcn.num_vcn_inst = adev->vcn.num_inst_per_aid * adev->num_aid; + adev->jpeg.num_inst_per_aid = 1; + adev->jpeg.num_jpeg_inst = adev->jpeg.num_inst_per_aid * adev->num_aid; + ret = aqua_vanjaram_xcp_mgr_init(adev); if (ret) return ret; -- cgit v1.2.3 From bbca579fd2ea8cbc170df33587f8a4b572a4f025 Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Mon, 28 Nov 2022 11:17:15 +0530 Subject: drm/amdgpu: Derive active clusters from SDMA SDMA instances per active cluster and SDMA instance mask are used to find the number of active clusters. Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index a6204b588829..6f7226b5d446 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -357,12 +357,22 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) { + u32 inst_mask = adev->sdma.sdma_mask; int ret; /* generally 1 AID supports 4 instances */ adev->sdma.num_inst_per_aid = 4; adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask); + adev->num_aid = 1; + inst_mask >>= adev->sdma.num_inst_per_aid; + + for (const u32 mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask; + inst_mask >>= adev->sdma.num_inst_per_aid) { + if ((inst_mask & mask) == mask) + adev->num_aid++; + } + adev->vcn.num_inst_per_aid = 1; adev->vcn.num_vcn_inst = adev->vcn.num_inst_per_aid * adev->num_aid; adev->jpeg.num_inst_per_aid = 1; -- cgit v1.2.3 From 7a1efad04c210594069c4ab9f9c25039cd6915e4 Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Tue, 29 Nov 2022 14:00:37 +0530 Subject: drm/amdgpu: Use mask for active clusters Use a mask of available active clusters instead of using only the number of active clusters. 
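
Worked out as a standalone sketch (the shift/compare loop mirrors the aid_mask derivation as
it looks after this patch; the sample SDMA masks are made up), the SDMA instance mask folds
down to an AID mask like this:

#include <stdint.h>
#include <stdio.h>

/* AID0 is assumed present; every further fully-populated group of
 * num_inst_per_aid SDMA instances marks one more active cluster.
 */
static uint32_t derive_aid_mask(uint32_t sdma_mask, int num_inst_per_aid)
{
	uint32_t mask = (1u << num_inst_per_aid) - 1;
	uint32_t aid_mask = 1;
	int i = 1;

	sdma_mask >>= num_inst_per_aid;
	for (; sdma_mask; sdma_mask >>= num_inst_per_aid, ++i) {
		if ((sdma_mask & mask) == mask)
			aid_mask |= (1u << i);
	}
	return aid_mask;
}

int main(void)
{
	/* examples with 4 SDMA instances per AID */
	printf("sdma 0x00ff -> aid_mask 0x%x\n", derive_aid_mask(0x00ff, 4));
	printf("sdma 0xffff -> aid_mask 0x%x\n", derive_aid_mask(0xffff, 4));
	printf("sdma 0xf0ff -> aid_mask 0x%x\n", derive_aid_mask(0xf0ff, 4));
	return 0;
}

Consumers such as mmhub_v1_8 then walk the mask with for_each_inst() instead of assuming
clusters 0..N-1 are contiguous.
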
Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 +- .../gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 13 ++--- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 5 +- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 56 ++++++++++++++-------- 4 files changed, 49 insertions(+), 28 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 3858d29baef1..279057ec7a0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1070,7 +1070,8 @@ struct amdgpu_device { bool job_hang; bool dc_enabled; - uint32_t num_aid; + /* Mask of active clusters */ + uint32_t aid_mask; }; static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 6f7226b5d446..0d7bc212def1 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -358,25 +358,26 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) { u32 inst_mask = adev->sdma.sdma_mask; - int ret; + int ret, i, num_inst; /* generally 1 AID supports 4 instances */ adev->sdma.num_inst_per_aid = 4; adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask); - adev->num_aid = 1; + adev->aid_mask = i = 1; inst_mask >>= adev->sdma.num_inst_per_aid; for (const u32 mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask; - inst_mask >>= adev->sdma.num_inst_per_aid) { + inst_mask >>= adev->sdma.num_inst_per_aid, ++i) { if ((inst_mask & mask) == mask) - adev->num_aid++; + adev->aid_mask |= (1 << i); } + num_inst = hweight32(adev->aid_mask); adev->vcn.num_inst_per_aid = 1; - adev->vcn.num_vcn_inst = adev->vcn.num_inst_per_aid * adev->num_aid; + adev->vcn.num_vcn_inst = adev->vcn.num_inst_per_aid * num_inst; adev->jpeg.num_inst_per_aid = 1; - adev->jpeg.num_jpeg_inst = adev->jpeg.num_inst_per_aid * adev->num_aid; + adev->jpeg.num_jpeg_inst = adev->jpeg.num_inst_per_aid * num_inst; ret = aqua_vanjaram_xcp_mgr_init(adev); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 0792c48fe347..b3f64f2f306d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1674,6 +1674,7 @@ static int gmc_v9_0_sw_init(void *handle) { int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + unsigned long inst_mask = adev->aid_mask; adev->gfxhub.funcs->init(adev); @@ -1757,7 +1758,9 @@ static int gmc_v9_0_sw_init(void *handle) case IP_VERSION(9, 4, 3): bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0), NUM_XCC(adev->gfx.xcc_mask)); - bitmap_set(adev->vmhubs_mask, AMDGPU_MMHUB0(0), adev->num_aid); + + inst_mask <<= AMDGPU_MMHUB0(0); + bitmap_or(adev->vmhubs_mask, adev->vmhubs_mask, &inst_mask, 32); amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); break; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 6f469b9aa9a0..a530e2a3cc28 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -54,9 +54,11 @@ static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t 
vmi uint64_t page_table_base) { struct amdgpu_vmhub *hub; + u32 inst_mask; int i; - for (i = 0; i < adev->num_aid; i++) { + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) { hub = &adev->vmhub[AMDGPU_MMHUB0(i)]; WREG32_SOC15_OFFSET(MMHUB, i, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, @@ -73,6 +75,7 @@ static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmi static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev) { uint64_t pt_base; + u32 inst_mask; int i; if (adev->gmc.pdb0_bo) @@ -85,7 +88,8 @@ static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev) /* If use GART for FB translation, vmid0 page table covers both * vram and system memory (gart) */ - for (i = 0; i < adev->num_aid; i++) { + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) { if (adev->gmc.pdb0_bo) { WREG32_SOC15(MMHUB, i, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, @@ -121,11 +125,12 @@ static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev) static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev) { + uint32_t tmp, inst_mask; uint64_t value; - uint32_t tmp; int i; - for (i = 0; i < adev->num_aid; i++) { + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) { /* Program the AGP BAR */ WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BASE, 0); WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT, @@ -183,11 +188,12 @@ static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev) static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev) { - uint32_t tmp; + uint32_t tmp, inst_mask; int i; /* Setup TLB control */ - for (i = 0; i < adev->num_aid; i++) { + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) { tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, @@ -208,14 +214,15 @@ static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev) static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev) { - uint32_t tmp; + uint32_t tmp, inst_mask; int i; if (amdgpu_sriov_vf(adev)) return; /* Setup L2 cache */ - for (i = 0; i < adev->num_aid; i++) { + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) { tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, @@ -267,10 +274,11 @@ static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev) static void mmhub_v1_8_enable_system_domain(struct amdgpu_device *adev) { - uint32_t tmp; + uint32_t tmp, inst_mask; int i; - for (i = 0; i < adev->num_aid; i++) { + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) { tmp = RREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, @@ -286,12 +294,14 @@ static void mmhub_v1_8_enable_system_domain(struct amdgpu_device *adev) static void mmhub_v1_8_disable_identity_aperture(struct amdgpu_device *adev) { + u32 inst_mask; int i; if (amdgpu_sriov_vf(adev)) return; - for (i = 0; i < adev->num_aid; i++) { + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) { WREG32_SOC15(MMHUB, i, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, 0XFFFFFFFF); @@ -317,7 +327,7 @@ static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub; unsigned num_level, block_size; - uint32_t tmp; + uint32_t tmp, inst_mask; int i, j; num_level = adev->vm_manager.num_level; @@ -327,7 +337,8 @@ static void 
mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev) else block_size -= 9; - for (j = 0; j < adev->num_aid; j++) { + inst_mask = adev->aid_mask; + for_each_inst(j, inst_mask) { hub = &adev->vmhub[AMDGPU_MMHUB0(j)]; for (i = 0; i <= 14; i++) { tmp = RREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL, @@ -382,9 +393,10 @@ static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v1_8_program_invalidation(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub; - unsigned i, j; + u32 i, j, inst_mask; - for (j = 0; j < adev->num_aid; j++) { + inst_mask = adev->aid_mask; + for_each_inst(j, inst_mask) { hub = &adev->vmhub[AMDGPU_MMHUB0(j)]; for (i = 0; i < 18; ++i) { WREG32_SOC15_OFFSET(MMHUB, j, @@ -429,10 +441,11 @@ static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub; u32 tmp; - u32 i, j; + u32 i, j, inst_mask; /* Disable all tables */ - for (j = 0; j < adev->num_aid; j++) { + inst_mask = adev->aid_mask; + for_each_inst(j, inst_mask) { hub = &adev->vmhub[AMDGPU_MMHUB0(j)]; for (i = 0; i < 16; i++) WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT0_CNTL, @@ -465,13 +478,14 @@ static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev) */ static void mmhub_v1_8_set_fault_enable_default(struct amdgpu_device *adev, bool value) { - u32 tmp; + u32 tmp, inst_mask; int i; if (amdgpu_sriov_vf(adev)) return; - for (i = 0; i < adev->num_aid; i++) { + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) { tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); @@ -510,9 +524,11 @@ static void mmhub_v1_8_set_fault_enable_default(struct amdgpu_device *adev, bool static void mmhub_v1_8_init(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub; + u32 inst_mask; int i; - for (i = 0; i < adev->num_aid; i++) { + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) { hub = &adev->vmhub[AMDGPU_MMHUB0(i)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, i, -- cgit v1.2.3 From 13a94f3f130ee4db6b4d2a0843104807a7299aa4 Mon Sep 17 00:00:00 2001 From: James Zhu <James.Zhu@amd.com> Date: Tue, 10 Jan 2023 09:05:35 -0500 Subject: drm/amdgpu: add num_xcps return Add num_xcps return. Signed-off-by: James Zhu <James.Zhu@amd.com> Reviewed-by: Lijo Lazar <lijo.lazar@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 0d7bc212def1..6591d39c6518 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -318,6 +318,8 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, adev->nbio.funcs->set_compute_partition_mode(adev, mode); ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags); + + *num_xcps = num_xcc / num_xcc_per_xcp; unlock: if (flags & AMDGPU_XCP_OPS_KFD) amdgpu_amdkfd_unlock_kfd(adev); -- cgit v1.2.3 From 233bb3733bd43966696f4a5e95129476e86bf4e3 Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Thu, 19 Jan 2023 14:47:22 +0530 Subject: drm/amdgpu: Use unique doorbell range per xcc Program different ranges in each XCC with MEC_DOORBELL_RANGE_LOWER/HIGHER. 
Keeping the same range causes CPF in other XCCs also to be busy when an IB packet is submitted to KCQ. Only the XCC which processes the packet comes back to idle afterwards and this causes other CPs not be idle. This in turn affects clockgating behavior as RLC doesn't get idle interrupt. LOWER/HIGHER covers only KIQ/KCQs which are per XCC queues. Assigning different ranges doesn't seem to have any side effect as user queue ranges are outside of this range. User queue tests - PM4 through KFD and AQL through rocr - have the same results after this change. Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h | 35 ++++++++++++---------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 5 +++- .../gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 1 + drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 31 +++++++++++++------ 4 files changed, 47 insertions(+), 25 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h index 3c3ae2b4dbc8..f637574644c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h @@ -86,6 +86,8 @@ struct amdgpu_doorbell_index { uint32_t max_assignment; /* Per engine SDMA doorbell size in dword */ uint32_t sdma_doorbell_range; + /* Per xcc doorbell size for KIQ/KCQ */ + uint32_t xcc_doorbell_range; }; typedef enum _AMDGPU_DOORBELL_ASSIGNMENT @@ -309,28 +311,31 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT AMDGPU_DOORBELL64_INVALID = 0xFFFF } AMDGPU_DOORBELL64_ASSIGNMENT; -typedef enum _AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1 -{ - /* KIQ: 0~7 for maximum 8 XCD */ - AMDGPU_DOORBELL_LAYOUT1_KIQ_START = 0x000, - AMDGPU_DOORBELL_LAYOUT1_HIQ = 0x008, - AMDGPU_DOORBELL_LAYOUT1_DIQ = 0x009, - /* Compute: 0x0A ~ 0x49 */ - AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START = 0x00A, - AMDGPU_DOORBELL_LAYOUT1_MEC_RING_END = 0x049, - AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START = 0x04A, - AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END = 0x0C9, +typedef enum _AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1 { + /* XCC0: 0x00 ~20, XCC1: 20 ~ 2F ... 
*/ + + /* KIQ/HIQ/DIQ */ + AMDGPU_DOORBELL_LAYOUT1_KIQ_START = 0x000, + AMDGPU_DOORBELL_LAYOUT1_HIQ = 0x001, + AMDGPU_DOORBELL_LAYOUT1_DIQ = 0x002, + /* Compute: 0x08 ~ 0x20 */ + AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START = 0x008, + AMDGPU_DOORBELL_LAYOUT1_MEC_RING_END = 0x00F, + AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START = 0x010, + AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END = 0x01F, + AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE = 0x020, + /* SDMA: 0x100 ~ 0x19F */ - AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START = 0x100, - AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_END = 0x19F, + AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START = 0x100, + AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_END = 0x19F, /* IH: 0x1A0 ~ 0x1AF */ AMDGPU_DOORBELL_LAYOUT1_IH = 0x1A0, /* VCN: 0x1B0 ~ 0x1D4 */ AMDGPU_DOORBELL_LAYOUT1_VCN_START = 0x1B0, AMDGPU_DOORBELL_LAYOUT1_VCN_END = 0x1D4, - AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP = AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START, - AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP = AMDGPU_DOORBELL_LAYOUT1_VCN_END, + AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP = AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START, + AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP = AMDGPU_DOORBELL_LAYOUT1_VCN_END, AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT = 0x1D4, AMDGPU_DOORBELL_LAYOUT1_INVALID = 0xFFFF diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index f895a4b8ca0d..70c6099353b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -316,7 +316,10 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, ring->use_doorbell = true; ring->xcc_id = xcc_id; ring->vm_hub = AMDGPU_GFXHUB(xcc_id); - ring->doorbell_index = (adev->doorbell_index.kiq + xcc_id) << 1; + ring->doorbell_index = + (adev->doorbell_index.kiq + + xcc_id * adev->doorbell_index.xcc_doorbell_range) + << 1; r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 6591d39c6518..55a6ebb940ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -44,6 +44,7 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev) adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START; adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END; + adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE; adev->doorbell_index.sdma_doorbell_range = 20; for (i = 0; i < adev->sdma.num_instances; i++) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 55d99c4ea48c..557a2458ef5e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -729,8 +729,10 @@ static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id, unsigned irq_type; struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; unsigned int hw_prio; + uint32_t xcc_doorbell_start; - ring = &adev->gfx.compute_ring[ring_id]; + ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings + + ring_id]; /* mec0 is me1 */ ring->xcc_id = xcc_id; @@ -740,9 +742,12 @@ static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id, ring->ring_obj = NULL; ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; - ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr - + (ring_id * GFX9_MEC_HPD_SIZE); + xcc_doorbell_start = adev->doorbell_index.mec_ring0 + + 
xcc_id * adev->doorbell_index.xcc_doorbell_range; + ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1; + ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + + (ring_id + xcc_id * adev->gfx.num_compute_rings) * + GFX9_MEC_HPD_SIZE; ring->vm_hub = AMDGPU_GFXHUB(xcc_id); sprintf(ring->name, "comp_%d.%d.%d.%d", ring->xcc_id, ring->me, ring->pipe, ring->queue); @@ -801,8 +806,8 @@ static int gfx_v9_4_3_sw_init(void *handle) } /* set up the compute queues - allocate horizontally across pipes */ - ring_id = 0; for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { + ring_id = 0; for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; @@ -1654,10 +1659,18 @@ static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring, /* enable the doorbell if requested */ if (ring->use_doorbell) { - WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DOORBELL_RANGE_LOWER, - (adev->doorbell_index.kiq * 2) << 2); - WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEC_DOORBELL_RANGE_UPPER, - (adev->doorbell_index.userqueue_end * 2) << 2); + WREG32_SOC15( + GC, GET_INST(GC, xcc_id), + regCP_MEC_DOORBELL_RANGE_LOWER, + ((adev->doorbell_index.kiq + + xcc_id * adev->doorbell_index.xcc_doorbell_range) * + 2) << 2); + WREG32_SOC15( + GC, GET_INST(GC, xcc_id), + regCP_MEC_DOORBELL_RANGE_UPPER, + ((adev->doorbell_index.userqueue_end + + xcc_id * adev->doorbell_index.xcc_doorbell_range) * + 2) << 2); } WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, -- cgit v1.2.3 From 5ca1ceebab140b36928dabc9a5b36b9a3010e844 Mon Sep 17 00:00:00 2001 From: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com> Date: Sat, 21 Jan 2023 15:47:11 -0500 Subject: drm/amd: fix compiler error to support older compilers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ‘for’ loop initial declarations are only allowed in C99 or C11 mode Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com> Reviewed-by: Mukul Joshi <mukul.joshi@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 55a6ebb940ba..fdc728f678d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -360,7 +360,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) { - u32 inst_mask = adev->sdma.sdma_mask; + u32 mask, inst_mask = adev->sdma.sdma_mask; int ret, i, num_inst; /* generally 1 AID supports 4 instances */ @@ -370,7 +370,7 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) adev->aid_mask = i = 1; inst_mask >>= adev->sdma.num_inst_per_aid; - for (const u32 mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask; + for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask; inst_mask >>= adev->sdma.num_inst_per_aid, ++i) { if ((inst_mask & mask) == mask) adev->aid_mask |= (1 << i); -- cgit v1.2.3 From 07bc0ac8ff49c9868a66526634fbc21cb194afca Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Fri, 17 Feb 2023 18:44:20 +0530 Subject: drm/amdgpu: Add VCN logical to physical id mapping Add mappings for logical to 
physical id for VCN/JPEG 4.0.3 v2: make local function static (Alex) Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Acked-by: Leo Liu <leo.liu@amd.com> Tested-by: James Zhu <James.Zhu@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index fdc728f678d7..90fe77db9bee 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -82,6 +82,21 @@ static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev, return dev_inst; } +static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev, + enum amd_hw_ip_block_type ip_block, + uint32_t inst_mask) +{ + int l = 0, i; + + while (inst_mask) { + i = ffs(inst_mask) - 1; + adev->ip_map.dev_inst[ip_block][l++] = i; + inst_mask &= ~(1 << i); + } + for (; l < HWIP_MAX_INSTANCE; l++) + adev->ip_map.dev_inst[ip_block][l] = -1; +} + void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev) { int xcc_mask, sdma_mask; @@ -108,6 +123,9 @@ void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev) for (; l < HWIP_MAX_INSTANCE; l++) adev->ip_map.dev_inst[SDMA0_HWIP][l] = -1; + /* This covers both VCN and JPEG, JPEG is only alias of VCN */ + aqua_vanjaram_populate_ip_map(adev, VCN_HWIP, adev->vcn.inst_mask); + adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst; } -- cgit v1.2.3 From fd91d38b5275959a5b0804d4b4dbc5a4c0a8aac9 Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Fri, 17 Feb 2023 19:41:06 +0530 Subject: drm/amdgpu: Use logical ids for VCN/JPEG v4.0.3 Address VCN/JPEG instances using logical ids. Whenever register access is required, get the physical instance using GET_INST. 
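For illustration only, a minimal sketch of the access pattern this change introduces is shown below; the helper name vcn_v4_0_3_poke_example() and the choice of register are made up for the example, while GET_INST(), WREG32_SOC15() and the adev->vcn fields are the ones used throughout this series (this sketch is not part of the patch):

	/* Sketch only: iterate over logical VCN instances, but translate to
	 * the physical (device) instance whenever a register is touched.
	 */
	static void vcn_v4_0_3_poke_example(struct amdgpu_device *adev)
	{
		int i, vcn_inst;

		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			/* logical id -> physical id for register access */
			vcn_inst = GET_INST(VCN, i);
			WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0);

			/* driver-side bookkeeping keeps using the logical id */
			adev->vcn.inst[i].ring_enc[0].me = i;
		}
	}
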
Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Acked-by: Leo Liu <leo.liu@amd.com> Tested-by: James Zhu <James.Zhu@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 29 +- .../gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 14 +- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 251 +++++++----- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 431 +++++++++++---------- 4 files changed, 408 insertions(+), 317 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 47463ef10fce..1eb9ccd1d83d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -141,18 +141,23 @@ RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA); \ }) -#define WREG32_SOC15_DPG_MODE(inst_idx, offset, value, mask_en, indirect) \ - do { \ - if (!indirect) { \ - WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA, value); \ - WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, \ - (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \ - mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \ - offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \ - } else { \ - *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = offset; \ - *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = value; \ - } \ +#define WREG32_SOC15_DPG_MODE(inst_idx, offset, value, mask_en, indirect) \ + do { \ + if (!indirect) { \ + WREG32_SOC15(VCN, GET_INST(VCN, inst_idx), \ + mmUVD_DPG_LMA_DATA, value); \ + WREG32_SOC15( \ + VCN, GET_INST(VCN, inst_idx), \ + mmUVD_DPG_LMA_CTL, \ + (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \ + mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \ + offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \ + } else { \ + *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = \ + offset; \ + *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = \ + value; \ + } \ } while (0) #define AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE (1 << 2) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 90fe77db9bee..51d3cb81e37a 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -70,6 +70,8 @@ static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev, switch (block) { case GC_HWIP: case SDMA0_HWIP: + /* Both JPEG and VCN as JPEG is only alias of VCN */ + case VCN_HWIP: dev_inst = adev->ip_map.dev_inst[block][inst]; break; default: @@ -379,7 +381,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) { u32 mask, inst_mask = adev->sdma.sdma_mask; - int ret, i, num_inst; + int ret, i; /* generally 1 AID supports 4 instances */ adev->sdma.num_inst_per_aid = 4; @@ -394,11 +396,15 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) adev->aid_mask |= (1 << i); } - num_inst = hweight32(adev->aid_mask); + /* Harvest config is not used for aqua vanjaram. VCN and JPEGs will be + * addressed based on logical instance ids. 
+ */ + adev->vcn.harvest_config = 0; adev->vcn.num_inst_per_aid = 1; - adev->vcn.num_vcn_inst = adev->vcn.num_inst_per_aid * num_inst; + adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask); + adev->jpeg.harvest_config = 0; adev->jpeg.num_inst_per_aid = 1; - adev->jpeg.num_jpeg_inst = adev->jpeg.num_inst_per_aid * num_inst; + adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask); ret = aqua_vanjaram_xcp_mgr_init(adev); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index aa14a6619e9a..c0e90e27f24b 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -82,7 +82,7 @@ static int jpeg_v4_0_3_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring; - int i, j, r; + int i, j, r, jpeg_inst; for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { /* JPEG TRAP */ @@ -101,14 +101,15 @@ static int jpeg_v4_0_3_sw_init(void *handle) return r; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) - continue; + jpeg_inst = GET_INST(JPEG, i); + for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { ring = &adev->jpeg.inst[i].ring_dec[j]; ring->use_doorbell = true; ring->vm_hub = AMDGPU_MMHUB0(adev->jpeg.inst[i].aid_id); ring->doorbell_index = - (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + j + 9 * i; + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 1 + j + 9 * jpeg_inst; sprintf(ring->name, "jpeg_dec_%d.%d", i, j); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); @@ -118,8 +119,10 @@ static int jpeg_v4_0_3_sw_init(void *handle) adev->jpeg.internal.jpeg_pitch[j] = regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET; adev->jpeg.inst[i].external.jpeg_pitch[j] = - SOC15_REG_OFFSET1(JPEG, i, regUVD_JRBC0_UVD_JRBC_SCRATCH0, - (j?(0x40 * j - 0xc80):0)); + SOC15_REG_OFFSET1( + JPEG, jpeg_inst, + regUVD_JRBC0_UVD_JRBC_SCRATCH0, + (j ? (0x40 * j - 0xc80) : 0)); } } @@ -157,25 +160,30 @@ static int jpeg_v4_0_3_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring; - int i, j, r; + int i, j, r, jpeg_inst; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) - continue; + jpeg_inst = GET_INST(JPEG, i); + ring = adev->jpeg.inst[i].ring_dec; if (ring->use_doorbell) - adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, - (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * i, + adev->nbio.funcs->vcn_doorbell_range( + adev, ring->use_doorbell, + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 9 * jpeg_inst, adev->jpeg.inst[i].aid_id); for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { ring = &adev->jpeg.inst[i].ring_dec[j]; if (ring->use_doorbell) - WREG32_SOC15_OFFSET(VCN, i, regVCN_JPEG_DB_CTRL, - (ring->pipe?(ring->pipe - 0x15):0), - ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT | - VCN_JPEG_DB_CTRL__EN_MASK); + WREG32_SOC15_OFFSET( + VCN, GET_INST(VCN, i), + regVCN_JPEG_DB_CTRL, + (ring->pipe ? 
(ring->pipe - 0x15) : 0), + ring->doorbell_index + << VCN_JPEG_DB_CTRL__OFFSET__SHIFT | + VCN_JPEG_DB_CTRL__EN_MASK); r = amdgpu_ring_test_helper(ring); if (r) return r; @@ -250,10 +258,11 @@ static int jpeg_v4_0_3_resume(void *handle) static void jpeg_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx) { + int i, jpeg_inst; uint32_t data; - int i; - data = RREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_CTRL); + jpeg_inst = GET_INST(JPEG, inst_idx); + data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL); if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) { data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; data &= (~(JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK << 1)); @@ -263,21 +272,22 @@ static void jpeg_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int ins data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_CTRL, data); + WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL, data); - data = RREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_GATE); + data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE); data &= ~(JPEG_CGC_GATE__JMCIF_MASK | JPEG_CGC_GATE__JRBBM_MASK); for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) data &= ~(JPEG_CGC_GATE__JPEG0_DEC_MASK << i); - WREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_GATE, data); + WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE, data); } static void jpeg_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx) { + int i, jpeg_inst; uint32_t data; - int i; - data = RREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_CTRL); + jpeg_inst = GET_INST(JPEG, inst_idx); + data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL); if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) { data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; data |= (JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK << 1); @@ -287,13 +297,13 @@ static void jpeg_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_CTRL, data); + WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL, data); - data = RREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_GATE); + data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE); data |= (JPEG_CGC_GATE__JMCIF_MASK | JPEG_CGC_GATE__JRBBM_MASK); for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i) data |= (JPEG_CGC_GATE__JPEG0_DEC_MASK << i); - WREG32_SOC15(JPEG, inst_idx, regJPEG_CGC_GATE, data); + WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE, data); } /** @@ -306,34 +316,36 @@ static void jpeg_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst static int jpeg_v4_0_3_start(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - int i, j; + int i, j, jpeg_inst; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) - continue; - WREG32_SOC15(JPEG, i, regUVD_PGFSM_CONFIG, - 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT); - SOC15_WAIT_ON_RREG(JPEG, i, regUVD_PGFSM_STATUS, - UVD_PGFSM_STATUS__UVDJ_PWR_ON << - UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT, + jpeg_inst = GET_INST(JPEG, i); + + WREG32_SOC15(JPEG, jpeg_inst, regUVD_PGFSM_CONFIG, + 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG( + JPEG, jpeg_inst, regUVD_PGFSM_STATUS, + UVD_PGFSM_STATUS__UVDJ_PWR_ON + << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT, UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK); /* disable anti hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JPEG_POWER_STATUS), 0, - 
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, + regUVD_JPEG_POWER_STATUS), + 0, ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); /* JPEG disable CGC */ jpeg_v4_0_3_disable_clock_gating(adev, i); /* MJPEG global tiling registers */ - WREG32_SOC15(JPEG, i, regJPEG_DEC_GFX8_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - WREG32_SOC15(JPEG, i, regJPEG_DEC_GFX10_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); + WREG32_SOC15(JPEG, jpeg_inst, regJPEG_DEC_GFX8_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(JPEG, jpeg_inst, regJPEG_DEC_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); /* enable JMI channel */ - WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JMI_CNTL), 0, - ~UVD_JMI_CNTL__SOFT_RESET_MASK); + WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { unsigned int reg_offset = (j?(0x40 * j - 0xc80):0); @@ -341,25 +353,40 @@ static int jpeg_v4_0_3_start(struct amdgpu_device *adev) ring = &adev->jpeg.inst[i].ring_dec[j]; /* enable System Interrupt for JRBC */ - WREG32_P(SOC15_REG_OFFSET(JPEG, i, regJPEG_SYS_INT_EN), - JPEG_SYS_INT_EN__DJRBC0_MASK << j, - ~(JPEG_SYS_INT_EN__DJRBC0_MASK << j)); - - WREG32_SOC15_OFFSET(JPEG, i, - regUVD_JMI0_UVD_LMI_JRBC_RB_VMID, reg_offset, 0); - WREG32_SOC15_OFFSET(JPEG, i, regUVD_JRBC0_UVD_JRBC_RB_CNTL, reg_offset, - (0x00000001L | 0x00000002L)); - WREG32_SOC15_OFFSET(JPEG, i, regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW, + WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, + regJPEG_SYS_INT_EN), + JPEG_SYS_INT_EN__DJRBC0_MASK << j, + ~(JPEG_SYS_INT_EN__DJRBC0_MASK << j)); + + WREG32_SOC15_OFFSET(JPEG, jpeg_inst, + regUVD_JMI0_UVD_LMI_JRBC_RB_VMID, + reg_offset, 0); + WREG32_SOC15_OFFSET(JPEG, jpeg_inst, + regUVD_JRBC0_UVD_JRBC_RB_CNTL, + reg_offset, + (0x00000001L | 0x00000002L)); + WREG32_SOC15_OFFSET( + JPEG, jpeg_inst, + regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW, reg_offset, lower_32_bits(ring->gpu_addr)); - WREG32_SOC15_OFFSET(JPEG, i, regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH, + WREG32_SOC15_OFFSET( + JPEG, jpeg_inst, + regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH, reg_offset, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15_OFFSET(JPEG, i, regUVD_JRBC0_UVD_JRBC_RB_RPTR, reg_offset, 0); - WREG32_SOC15_OFFSET(JPEG, i, regUVD_JRBC0_UVD_JRBC_RB_WPTR, reg_offset, 0); - WREG32_SOC15_OFFSET(JPEG, i, regUVD_JRBC0_UVD_JRBC_RB_CNTL, reg_offset, - 0x00000002L); - WREG32_SOC15_OFFSET(JPEG, i, regUVD_JRBC0_UVD_JRBC_RB_SIZE, reg_offset, - ring->ring_size / 4); - ring->wptr = RREG32_SOC15_OFFSET(JPEG, i, regUVD_JRBC0_UVD_JRBC_RB_WPTR, + WREG32_SOC15_OFFSET(JPEG, jpeg_inst, + regUVD_JRBC0_UVD_JRBC_RB_RPTR, + reg_offset, 0); + WREG32_SOC15_OFFSET(JPEG, jpeg_inst, + regUVD_JRBC0_UVD_JRBC_RB_WPTR, + reg_offset, 0); + WREG32_SOC15_OFFSET(JPEG, jpeg_inst, + regUVD_JRBC0_UVD_JRBC_RB_CNTL, + reg_offset, 0x00000002L); + WREG32_SOC15_OFFSET(JPEG, jpeg_inst, + regUVD_JRBC0_UVD_JRBC_RB_SIZE, + reg_offset, ring->ring_size / 4); + ring->wptr = RREG32_SOC15_OFFSET( + JPEG, jpeg_inst, regUVD_JRBC0_UVD_JRBC_RB_WPTR, reg_offset); } } @@ -376,29 +403,29 @@ static int jpeg_v4_0_3_start(struct amdgpu_device *adev) */ static int jpeg_v4_0_3_stop(struct amdgpu_device *adev) { - int i; + int i, jpeg_inst; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) - continue; - + jpeg_inst = GET_INST(JPEG, i); /* reset JMI */ - WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JMI_CNTL), - 
UVD_JMI_CNTL__SOFT_RESET_MASK, - ~UVD_JMI_CNTL__SOFT_RESET_MASK); + WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), + UVD_JMI_CNTL__SOFT_RESET_MASK, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); jpeg_v4_0_3_enable_clock_gating(adev, i); /* enable anti hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JPEG_POWER_STATUS), - UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK, - ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); - - WREG32_SOC15(JPEG, i, regUVD_PGFSM_CONFIG, - 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT); - SOC15_WAIT_ON_RREG(JPEG, i, regUVD_PGFSM_STATUS, - UVD_PGFSM_STATUS__UVDJ_PWR_OFF << - UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT, + WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, + regUVD_JPEG_POWER_STATUS), + UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK, + ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + + WREG32_SOC15(JPEG, jpeg_inst, regUVD_PGFSM_CONFIG, + 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT); + SOC15_WAIT_ON_RREG( + JPEG, jpeg_inst, regUVD_PGFSM_STATUS, + UVD_PGFSM_STATUS__UVDJ_PWR_OFF + << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT, UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK); } @@ -416,8 +443,9 @@ static uint64_t jpeg_v4_0_3_dec_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - return RREG32_SOC15_OFFSET(JPEG, ring->me, regUVD_JRBC0_UVD_JRBC_RB_RPTR, - ring->pipe?(0x40 * ring->pipe - 0xc80):0); + return RREG32_SOC15_OFFSET( + JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC0_UVD_JRBC_RB_RPTR, + ring->pipe ? (0x40 * ring->pipe - 0xc80) : 0); } /** @@ -434,8 +462,10 @@ static uint64_t jpeg_v4_0_3_dec_ring_get_wptr(struct amdgpu_ring *ring) if (ring->use_doorbell) return adev->wb.wb[ring->wptr_offs]; else - return RREG32_SOC15_OFFSET(JPEG, ring->me, regUVD_JRBC0_UVD_JRBC_RB_WPTR, - ring->pipe?(0x40 * ring->pipe - 0xc80):0); + return RREG32_SOC15_OFFSET( + JPEG, GET_INST(JPEG, ring->me), + regUVD_JRBC0_UVD_JRBC_RB_WPTR, + ring->pipe ? (0x40 * ring->pipe - 0xc80) : 0); } /** @@ -453,8 +483,11 @@ static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring) adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); } else { - WREG32_SOC15_OFFSET(JPEG, ring->me, regUVD_JRBC0_UVD_JRBC_RB_WPTR, - (ring->pipe?(0x40 * ring->pipe - 0xc80):0), lower_32_bits(ring->wptr)); + WREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me), + regUVD_JRBC0_UVD_JRBC_RB_WPTR, + (ring->pipe ? 
(0x40 * ring->pipe - 0xc80) : + 0), + lower_32_bits(ring->wptr)); } } @@ -703,15 +736,15 @@ static bool jpeg_v4_0_3_is_idle(void *handle) int i, j; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) - continue; for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { unsigned int reg_offset = (j?(0x40 * j - 0xc80):0); - ret &= ((RREG32_SOC15_OFFSET(JPEG, i, - regUVD_JRBC0_UVD_JRBC_STATUS, reg_offset) & - UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK) == - UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK); + ret &= ((RREG32_SOC15_OFFSET( + JPEG, GET_INST(JPEG, i), + regUVD_JRBC0_UVD_JRBC_STATUS, + reg_offset) & + UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK) == + UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK); } } @@ -725,12 +758,11 @@ static int jpeg_v4_0_3_wait_for_idle(void *handle) int i, j; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) - continue; for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { unsigned int reg_offset = (j?(0x40 * j - 0xc80):0); - ret &= SOC15_WAIT_ON_RREG_OFFSET(JPEG, i, + ret &= SOC15_WAIT_ON_RREG_OFFSET( + JPEG, GET_INST(JPEG, i), regUVD_JRBC0_UVD_JRBC_STATUS, reg_offset, UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK, UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK); @@ -747,8 +779,6 @@ static int jpeg_v4_0_3_set_clockgating_state(void *handle, int i; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) - continue; if (enable) { if (!jpeg_v4_0_3_is_idle(handle)) return -EBUSY; @@ -792,35 +822,46 @@ static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - uint32_t i; + uint32_t i, inst; i = node_id_to_phys_map[entry->node_id]; DRM_DEV_DEBUG(adev->dev, "IH: JPEG TRAP\n"); + for (inst = 0; inst < adev->jpeg.num_jpeg_inst; ++inst) + if (adev->jpeg.inst[inst].aid_id == i) + break; + + if (inst >= adev->jpeg.num_jpeg_inst) { + dev_WARN_ONCE(adev->dev, 1, + "Interrupt received for unknown JPEG instance %d", + entry->node_id); + return 0; + } + switch (entry->src_id) { case VCN_4_0__SRCID__JPEG_DECODE: - amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[0]); + amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[0]); break; case VCN_4_0__SRCID__JPEG1_DECODE: - amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[1]); + amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[1]); break; case VCN_4_0__SRCID__JPEG2_DECODE: - amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[2]); + amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[2]); break; case VCN_4_0__SRCID__JPEG3_DECODE: - amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[3]); + amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[3]); break; case VCN_4_0__SRCID__JPEG4_DECODE: - amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[4]); + amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[4]); break; case VCN_4_0__SRCID__JPEG5_DECODE: - amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[5]); + amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[5]); break; case VCN_4_0__SRCID__JPEG6_DECODE: - amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[6]); + amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[6]); break; case VCN_4_0__SRCID__JPEG7_DECODE: - amdgpu_fence_process(&adev->jpeg.inst[i].ring_dec[7]); + amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[7]); break; default: DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n", @@ -882,17 +923,17 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = { static void 
jpeg_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev) { - int i, j; + int i, j, jpeg_inst; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) - continue; for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { adev->jpeg.inst[i].ring_dec[j].funcs = &jpeg_v4_0_3_dec_ring_vm_funcs; adev->jpeg.inst[i].ring_dec[j].me = i; adev->jpeg.inst[i].ring_dec[j].pipe = j; } - adev->jpeg.inst[i].aid_id = i / adev->jpeg.num_inst_per_aid; + jpeg_inst = GET_INST(JPEG, i); + adev->jpeg.inst[i].aid_id = + jpeg_inst / adev->jpeg.num_inst_per_aid; } DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n"); } @@ -907,8 +948,6 @@ static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev) int i; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { - if (adev->jpeg.harvest_config & (1 << i)) - continue; adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings; } adev->jpeg.inst->irq.funcs = &jpeg_v4_0_3_irq_funcs; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 308dfe80a87c..49b07843efd1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -83,7 +83,7 @@ static int vcn_v4_0_3_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring; - int i, r; + int i, r, vcn_inst; r = amdgpu_vcn_sw_init(adev); if (r) @@ -104,12 +104,13 @@ static int vcn_v4_0_3_sw_init(void *handle) for (i = 0; i < adev->vcn.num_vcn_inst; i++) { volatile struct amdgpu_vcn4_fw_shared *fw_shared; - if (adev->vcn.harvest_config & (1 << i)) - continue; + vcn_inst = GET_INST(VCN, i); ring = &adev->vcn.inst[i].ring_enc[0]; ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * i; + ring->doorbell_index = + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 9 * vcn_inst; ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id); sprintf(ring->name, "vcn_unified_%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, @@ -148,8 +149,6 @@ static int vcn_v4_0_3_sw_fini(void *handle) for (i = 0; i < adev->vcn.num_vcn_inst; i++) { volatile struct amdgpu_vcn4_fw_shared *fw_shared; - if (adev->vcn.harvest_config & (1 << i)) - continue; fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; fw_shared->present_flag_0 = 0; fw_shared->sq.is_enabled = cpu_to_le32(false); @@ -177,21 +176,25 @@ static int vcn_v4_0_3_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring; - int i, r; + int i, r, vcn_inst; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; + vcn_inst = GET_INST(VCN, i); ring = &adev->vcn.inst[i].ring_enc[0]; if (ring->use_doorbell) { - adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, - (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * i, - adev->vcn.inst[i].aid_id); - - WREG32_SOC15(VCN, ring->me, regVCN_RB1_DB_CTRL, - ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT | - VCN_RB1_DB_CTRL__EN_MASK); + adev->nbio.funcs->vcn_doorbell_range( + adev, ring->use_doorbell, + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 9 * vcn_inst, + adev->vcn.inst[i].aid_id); + + WREG32_SOC15( + VCN, GET_INST(VCN, ring->me), + regVCN_RB1_DB_CTRL, + ring->doorbell_index + << VCN_RB1_DB_CTRL__OFFSET__SHIFT | + VCN_RB1_DB_CTRL__EN_MASK); } r = amdgpu_ring_test_helper(ring); @@ -278,54 +281,67 @@ static int vcn_v4_0_3_resume(void *handle) */ static void vcn_v4_0_3_mc_resume(struct amdgpu_device 
*adev, int inst_idx) { - uint32_t offset, size; + uint32_t offset, size, vcn_inst; const struct common_firmware_header *hdr; hdr = (const struct common_firmware_header *)adev->vcn.fw->data; size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); + vcn_inst = GET_INST(VCN, inst_idx); /* cache window 0: fw */ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo)); - WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi)); - WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0, 0); + WREG32_SOC15( + VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx] + .tmr_mc_addr_lo)); + WREG32_SOC15( + VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx] + .tmr_mc_addr_hi)); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0); offset = 0; } else { - WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr)); - WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, + regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr)); offset = size; - WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0, - AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); } - WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0, size); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size); /* cache window 1: stack */ - WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset)); - WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset)); - WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1, 0); - WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset)); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset)); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1, + AMDGPU_VCN_STACK_SIZE); /* cache window 2: context */ - WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); - WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); - WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2, 0); - WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE)); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE)); + WREG32_SOC15(VCN, 
vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2, + AMDGPU_VCN_CONTEXT_SIZE); /* non-cache window */ - WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW, + WREG32_SOC15( + VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW, lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr)); - WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH, + WREG32_SOC15( + VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH, upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr)); - WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0, 0); - WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0, + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0); + WREG32_SOC15( + VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0, AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared))); } @@ -454,18 +470,21 @@ static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx) { uint32_t data; + int vcn_inst; if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) return; + vcn_inst = GET_INST(VCN, inst_idx); + /* VCN disable CGC */ - data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL); + data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL); data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL, data); + WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data); - data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_GATE); + data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_GATE); data &= ~(UVD_CGC_GATE__SYS_MASK | UVD_CGC_GATE__MPEG2_MASK | UVD_CGC_GATE__REGS_MASK @@ -479,10 +498,10 @@ static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst | UVD_CGC_GATE__VCPU_MASK | UVD_CGC_GATE__MMSCH_MASK); - WREG32_SOC15(VCN, inst_idx, regUVD_CGC_GATE, data); - SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_CGC_GATE, 0, 0xFFFFFFFF); + WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_GATE, data); + SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_CGC_GATE, 0, 0xFFFFFFFF); - data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL); + data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL); data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK | UVD_CGC_CTRL__MPEG2_MODE_MASK | UVD_CGC_CTRL__REGS_MODE_MASK @@ -495,9 +514,9 @@ static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst | UVD_CGC_CTRL__WCB_MODE_MASK | UVD_CGC_CTRL__VCPU_MODE_MASK | UVD_CGC_CTRL__MMSCH_MODE_MASK); - WREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL, data); + WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data); - data = RREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_GATE); + data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_GATE); data |= (UVD_SUVD_CGC_GATE__SRE_MASK | UVD_SUVD_CGC_GATE__SIT_MASK | UVD_SUVD_CGC_GATE__SMP_MASK @@ -519,9 +538,9 @@ static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK | UVD_SUVD_CGC_GATE__SDB_VP9_MASK | UVD_SUVD_CGC_GATE__IME_HEVC_MASK); - WREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_GATE, data); + WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_GATE, data); - data = RREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_CTRL); + data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL); data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK @@ -530,7 +549,7 @@ static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst | 
UVD_SUVD_CGC_CTRL__ENT_MODE_MASK | UVD_SUVD_CGC_CTRL__IME_MODE_MASK | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK); - WREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_CTRL, data); + WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL, data); } /** @@ -595,18 +614,21 @@ static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx) { uint32_t data; + int vcn_inst; if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) return; + vcn_inst = GET_INST(VCN, inst_idx); + /* enable VCN CGC */ - data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL); + data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL); data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL, data); + WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data); - data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL); + data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL); data |= (UVD_CGC_CTRL__SYS_MODE_MASK | UVD_CGC_CTRL__MPEG2_MODE_MASK | UVD_CGC_CTRL__REGS_MODE_MASK @@ -618,9 +640,9 @@ static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_ | UVD_CGC_CTRL__LRBBM_MODE_MASK | UVD_CGC_CTRL__WCB_MODE_MASK | UVD_CGC_CTRL__VCPU_MODE_MASK); - WREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL, data); + WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data); - data = RREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_CTRL); + data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL); data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK @@ -629,7 +651,7 @@ static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_ | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK | UVD_SUVD_CGC_CTRL__IME_MODE_MASK | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK); - WREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_CTRL, data); + WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL, data); } /** @@ -646,16 +668,18 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; struct amdgpu_ring *ring; + int vcn_inst; uint32_t tmp; + vcn_inst = GET_INST(VCN, inst_idx); /* disable register anti-hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1, - ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1, + ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); /* enable dynamic power gating mode */ - tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS); + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS); tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK; tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK; - WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp); + WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp); if (indirect) adev->vcn.inst[inst_idx].dpg_sram_curr_addr = @@ -737,27 +761,28 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b ring = &adev->vcn.inst[inst_idx].ring_enc[0]; /* program the RB_BASE for ring buffer */ - WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO, - lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI, - upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, + upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(VCN, inst_idx, 
regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t)); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, + ring->ring_size / sizeof(uint32_t)); /* resetting ring, fw should not check RB ring */ - tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE); + tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK); - WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp); + WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET; /* Initialize the ring buffer's read and write pointers */ - WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0); - WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0); - ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0); + ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR); - tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE); + tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); tmp |= VCN_RB_ENABLE__RB_EN_MASK; - WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp); + WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); /*resetting done, fw can check RB ring */ @@ -777,99 +802,101 @@ static int vcn_v4_0_3_start(struct amdgpu_device *adev) { volatile struct amdgpu_vcn4_fw_shared *fw_shared; struct amdgpu_ring *ring; + int i, j, k, r, vcn_inst; uint32_t tmp; - int i, j, k, r; if (adev->pm.dpm_enabled) amdgpu_dpm_enable_uvd(adev, true); for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { r = vcn_v4_0_3_start_dpg_mode(adev, i, adev->vcn.indirect_sram); continue; } + vcn_inst = GET_INST(VCN, i); /* set VCN status busy */ - tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY; - WREG32_SOC15(VCN, i, regUVD_STATUS, tmp); + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) | + UVD_STATUS__UVD_BUSY; + WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp); /*SW clock gating */ vcn_v4_0_3_disable_clock_gating(adev, i); /* enable VCPU clock */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), - UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__CLK_EN_MASK, + ~UVD_VCPU_CNTL__CLK_EN_MASK); /* disable master interrupt */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0, - ~UVD_MASTINT_EN__VCPU_EN_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0, + ~UVD_MASTINT_EN__VCPU_EN_MASK); /* enable LMI MC and UMC channels */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0, - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); - tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET); + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET); tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; - WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp); + WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp); /* setup regUVD_LMI_CTRL */ - tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL); - WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp | - UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | - UVD_LMI_CTRL__MASK_MC_URGENT_MASK | - UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | - UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK); + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL, + tmp | 
UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__MASK_MC_URGENT_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK); /* setup regUVD_MPC_CNTL */ - tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL); + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL); tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK; tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT; - WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp); + WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL, tmp); /* setup UVD_MPC_SET_MUXA0 */ - WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0, - ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | - (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | - (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | - (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT))); + WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXA0, + ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT))); /* setup UVD_MPC_SET_MUXB0 */ - WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0, - ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | - (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | - (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | - (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT))); + WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXB0, + ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT))); /* setup UVD_MPC_SET_MUX */ - WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX, - ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | - (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | - (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT))); + WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUX, + ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | + (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT))); vcn_v4_0_3_mc_resume(adev, i); /* VCN global tiling registers */ - WREG32_SOC15(VCN, i, regUVD_GFX8_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); + WREG32_SOC15(VCN, vcn_inst, regUVD_GFX8_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); /* unblock VCPU register access */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0, - ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0, + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); /* release VCPU reset to boot */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0, - ~UVD_VCPU_CNTL__BLK_RST_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0, + ~UVD_VCPU_CNTL__BLK_RST_MASK); for (j = 0; j < 10; ++j) { uint32_t status; for (k = 0; k < 100; ++k) { - status = RREG32_SOC15(VCN, i, regUVD_STATUS); + status = RREG32_SOC15(VCN, vcn_inst, + regUVD_STATUS); if (status & 2) break; mdelay(10); @@ -880,12 +907,14 @@ static int vcn_v4_0_3_start(struct amdgpu_device *adev) DRM_DEV_ERROR(adev->dev, "VCN decode not responding, trying to reset the VCPU!!!\n"); - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), - UVD_VCPU_CNTL__BLK_RST_MASK, - ~UVD_VCPU_CNTL__BLK_RST_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, + regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__BLK_RST_MASK, + ~UVD_VCPU_CNTL__BLK_RST_MASK); mdelay(10); - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0, - ~UVD_VCPU_CNTL__BLK_RST_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, + regUVD_VCPU_CNTL), + 0, 
~UVD_VCPU_CNTL__BLK_RST_MASK); mdelay(10); r = -1; @@ -897,39 +926,40 @@ static int vcn_v4_0_3_start(struct amdgpu_device *adev) } /* enable master interrupt */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), - UVD_MASTINT_EN__VCPU_EN_MASK, - ~UVD_MASTINT_EN__VCPU_EN_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), + UVD_MASTINT_EN__VCPU_EN_MASK, + ~UVD_MASTINT_EN__VCPU_EN_MASK); /* clear the busy bit of VCN_STATUS */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0, - ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0, + ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); ring = &adev->vcn.inst[i].ring_enc[0]; fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; /* program the RB_BASE for ring buffer */ - WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, - lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, - upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, + upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t)); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, + ring->ring_size / sizeof(uint32_t)); /* resetting ring, fw should not check RB ring */ - tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE); + tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK); - WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp); + WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); /* Initialize the ring buffer's read and write pointers */ - WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0); - WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0); - tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE); + tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); tmp |= VCN_RB_ENABLE__RB_EN_MASK; - WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp); + WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); - ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR); + ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR); fw_shared->sq.queue_mode &= cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF)); @@ -948,21 +978,24 @@ static int vcn_v4_0_3_start(struct amdgpu_device *adev) static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) { uint32_t tmp; + int vcn_inst; + + vcn_inst = GET_INST(VCN, inst_idx); /* Wait for power status to be 1 */ - SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1, - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); /* wait for read ptr to be equal to write ptr */ - tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR); - SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF); + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR); + SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF); - SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1, - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); /* disable dynamic power gating mode */ - WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0, - ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0, + ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); return 0; } @@ -976,12 +1009,11 @@ static int vcn_v4_0_3_stop_dpg_mode(struct 
amdgpu_device *adev, int inst_idx) static int vcn_v4_0_3_stop(struct amdgpu_device *adev) { volatile struct amdgpu_vcn4_fw_shared *fw_shared; + int i, r = 0, vcn_inst; uint32_t tmp; - int i, r = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; + vcn_inst = GET_INST(VCN, i); fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF; @@ -992,7 +1024,8 @@ static int vcn_v4_0_3_stop(struct amdgpu_device *adev) } /* wait for vcn idle */ - r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7); + r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS, + UVD_STATUS__IDLE, 0x7); if (r) goto Done; @@ -1000,45 +1033,47 @@ static int vcn_v4_0_3_stop(struct amdgpu_device *adev) UVD_LMI_STATUS__READ_CLEAN_MASK | UVD_LMI_STATUS__WRITE_CLEAN_MASK | UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK; - r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp); + r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, + tmp); if (r) goto Done; /* stall UMC channel */ - tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2); + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2); tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK; - WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp); tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK | UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK; - r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp); + r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, + tmp); if (r) goto Done; /* Unblock VCPU Register access */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), - UVD_RB_ARB_CTRL__VCPU_DIS_MASK, - ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), + UVD_RB_ARB_CTRL__VCPU_DIS_MASK, + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); /* release VCPU reset to boot */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), - UVD_VCPU_CNTL__BLK_RST_MASK, - ~UVD_VCPU_CNTL__BLK_RST_MASK); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__BLK_RST_MASK, + ~UVD_VCPU_CNTL__BLK_RST_MASK); /* disable VCPU clock */ - WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0, - ~(UVD_VCPU_CNTL__CLK_EN_MASK)); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0, + ~(UVD_VCPU_CNTL__CLK_EN_MASK)); /* reset LMI UMC/LMI/VCPU */ - tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET); + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET); tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; - WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp); + WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp); - tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET); + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET); tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; - WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp); + WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp); /* clear VCN status */ - WREG32_SOC15(VCN, i, regUVD_STATUS, 0); + WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0); /* apply HW clock gating */ vcn_v4_0_3_enable_clock_gating(adev, i); @@ -1080,7 +1115,7 @@ static uint64_t vcn_v4_0_3_unified_ring_get_rptr(struct amdgpu_ring *ring) if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) DRM_ERROR("wrong ring id is identified in %s", __func__); - return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR); + return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR); } /** @@ -1100,7 +1135,8 @@ static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring) if (ring->use_doorbell) return *ring->wptr_cpu_addr; 
else - return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR); + return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), + regUVD_RB_WPTR); } /** @@ -1121,7 +1157,8 @@ static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring) *ring->wptr_cpu_addr = lower_32_bits(ring->wptr); WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); } else { - WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR, + lower_32_bits(ring->wptr)); } } @@ -1163,14 +1200,14 @@ static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = { */ static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev) { - int i; + int i, vcn_inst; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_3_unified_ring_vm_funcs; adev->vcn.inst[i].ring_enc[0].me = i; - adev->vcn.inst[i].aid_id = i / adev->vcn.num_inst_per_aid; + vcn_inst = GET_INST(VCN, i); + adev->vcn.inst[i].aid_id = + vcn_inst / adev->vcn.num_inst_per_aid; } DRM_DEV_INFO(adev->dev, "VCN decode is enabled in VM mode\n"); } @@ -1188,9 +1225,8 @@ static bool vcn_v4_0_3_is_idle(void *handle) int i, ret = 1; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE); + ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) == + UVD_STATUS__IDLE); } return ret; @@ -1209,10 +1245,8 @@ static int vcn_v4_0_3_wait_for_idle(void *handle) int i, ret = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, - UVD_STATUS__IDLE); + ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS, + UVD_STATUS__IDLE, UVD_STATUS__IDLE); if (ret) return ret; } @@ -1235,10 +1269,9 @@ static int vcn_v4_0_3_set_clockgating_state(void *handle, int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; if (enable) { - if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE) + if (RREG32_SOC15(VCN, GET_INST(VCN, i), + regUVD_STATUS) != UVD_STATUS__IDLE) return -EBUSY; vcn_v4_0_3_enable_clock_gating(adev, i); } else { @@ -1307,15 +1340,26 @@ static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - uint32_t i; + uint32_t i, inst; i = node_id_to_phys_map[entry->node_id]; DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n"); + for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst) + if (adev->vcn.inst[inst].aid_id == i) + break; + + if (inst >= adev->vcn.num_vcn_inst) { + dev_WARN_ONCE(adev->dev, 1, + "Interrupt received for unknown VCN instance %d", + entry->node_id); + return 0; + } + switch (entry->src_id) { case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE: - amdgpu_fence_process(&adev->vcn.inst[i].ring_enc[0]); + amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]); break; default: DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n", @@ -1343,9 +1387,6 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev) int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - adev->vcn.inst->irq.num_types++; } adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs; -- cgit v1.2.3 From 672c883c26c68fe49b161d7ceab94bdc69e57b0e Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: 
Mon, 20 Feb 2023 12:04:30 +0530 Subject: drm/amdgpu: Simplify aquavanjram instance mapping Simplify so as to use the same sequence to assign logical to physical ids for all IPs. Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Acked-by: Leo Liu <leo.liu@amd.com> Tested-by: James Zhu <James.Zhu@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- .../gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 33 ++++++---------------- 1 file changed, 8 insertions(+), 25 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 51d3cb81e37a..68d732dd9ecb 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -101,32 +101,15 @@ static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev, void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev) { - int xcc_mask, sdma_mask; - int l, i; - - /* Map GC instances */ - l = 0; - xcc_mask = adev->gfx.xcc_mask; - while (xcc_mask) { - i = ffs(xcc_mask) - 1; - adev->ip_map.dev_inst[GC_HWIP][l++] = i; - xcc_mask &= ~(1 << i); - } - for (; l < HWIP_MAX_INSTANCE; l++) - adev->ip_map.dev_inst[GC_HWIP][l] = -1; - - l = 0; - sdma_mask = adev->sdma.sdma_mask; - while (sdma_mask) { - i = ffs(sdma_mask) - 1; - adev->ip_map.dev_inst[SDMA0_HWIP][l++] = i; - sdma_mask &= ~(1 << i); - } - for (; l < HWIP_MAX_INSTANCE; l++) - adev->ip_map.dev_inst[SDMA0_HWIP][l] = -1; + u32 ip_map[][2] = { + { GC_HWIP, adev->gfx.xcc_mask }, + { SDMA0_HWIP, adev->sdma.sdma_mask }, + { VCN_HWIP, adev->vcn.inst_mask }, + }; + int i; - /* This covers both VCN and JPEG, JPEG is only alias of VCN */ - aqua_vanjaram_populate_ip_map(adev, VCN_HWIP, adev->vcn.inst_mask); + for (i = 0; i < ARRAY_SIZE(ip_map); ++i) + aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]); adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst; } -- cgit v1.2.3 From 15e3eee8d3939d1f28cd314a5db2590ab94109d6 Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Fri, 3 Feb 2023 17:14:12 +0530 Subject: drm/amdgpu: Fill xcp mem node in aquavanjaram Implement callbacks to fill memory node information in aquavanjaram. 
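As a rough, hypothetical usage sketch (the caller below is illustrative and assumes the manager keeps its ops in xcp_mgr->funcs; the real consumer of this callback is the xcp init path in amdgpu_xcp.c), the new hook could be exercised like this:

	struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
	uint8_t mem_id;

	/* Ask the SoC-specific backend which memory node backs partition 0 */
	if (xcp_mgr->funcs->get_xcp_mem_id &&
	    !xcp_mgr->funcs->get_xcp_mem_id(xcp_mgr, &xcp_mgr->xcp[0], &mem_id))
		dev_dbg(adev->dev, "xcp 0 maps to mem node %u\n", mem_id);
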
Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Le Ma <le.ma@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- .../gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 61 +++++++++++++++++++++- 1 file changed, 60 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 68d732dd9ecb..aa1bb7883158 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -331,6 +331,64 @@ out: return ret; } +static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev, + int xcc_id, uint8_t *mem_id) +{ + /* TODO: Check if any validation is required based on current + * memory/spatial modes + */ + *mem_id = xcc_id / adev->gfx.num_xcc_per_xcp; + + return 0; +} + +static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr, + struct amdgpu_xcp *xcp, uint8_t *mem_id) +{ + struct amdgpu_numa_info numa_info; + struct amdgpu_device *adev; + uint32_t xcc_mask; + int r, i, xcc_id; + + adev = xcp_mgr->adev; + /* TODO: BIOS is not returning the right info now + * Check on this later + */ + /* + if (adev->gmc.gmc_funcs->query_mem_partition_mode) + mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); + */ + if (adev->gmc.num_mem_partitions == 1) { + /* Only one range */ + *mem_id = 0; + return 0; + } + + r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask); + if (r || !xcc_mask) + return -EINVAL; + + xcc_id = ffs(xcc_mask) - 1; + if (!adev->gmc.is_app_apu) + return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id); + + r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info); + + if (r) + return r; + + r = -EINVAL; + for (i = 0; i < adev->gmc.num_mem_partitions; ++i) { + if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) { + *mem_id = i; + r = 0; + break; + } + } + + return r; +} + int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, enum AMDGPU_XCP_IP_BLOCK ip_id, struct amdgpu_xcp_ip *ip) @@ -344,7 +402,8 @@ int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = { .switch_partition_mode = &aqua_vanjaram_switch_partition_mode, .query_partition_mode = &aqua_vanjaram_query_partition_mode, - .get_ip_details = &aqua_vanjaram_get_xcp_ip_details + .get_ip_details = &aqua_vanjaram_get_xcp_ip_details, + .get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id }; static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) -- cgit v1.2.3 From e47947abb9e71176ea2d9c8f55e03134dabd2605 Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Fri, 3 Feb 2023 18:46:40 +0530 Subject: drm/amdgpu: Move initialization of xcp before kfd After partition switch, fill all relevant xcp information before kfd starts initialization. 
Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Le Ma <le.ma@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 16 +++++++--------- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 1 + drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 6 ++++-- 3 files changed, 12 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index e1d3727036a1..bca226cc4e0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -113,12 +113,17 @@ static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, xcp->valid = true; } -static int __amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps) +int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode) { struct amdgpu_xcp_ip ip; uint8_t mem_id; int i, j, ret; + if (!num_xcps || num_xcps > MAX_XCP) + return -EINVAL; + + xcp_mgr->mode = mode; + for (i = 0; i < MAX_XCP; ++i) xcp_mgr->xcp[i].valid = false; @@ -181,13 +186,6 @@ int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode) goto out; } - if (!num_xcps || num_xcps > MAX_XCP) { - ret = -EINVAL; - goto out; - } - - xcp_mgr->mode = mode; - __amdgpu_xcp_init(xcp_mgr, num_xcps); out: mutex_unlock(&xcp_mgr->xcp_lock); @@ -240,7 +238,7 @@ int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode, mutex_init(&xcp_mgr->xcp_lock); if (init_mode != AMDGPU_XCP_MODE_NONE) - __amdgpu_xcp_init(xcp_mgr, init_num_xcps); + amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode); adev->xcp_mgr = xcp_mgr; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index 7e7e458d307e..e1319b887bf3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -105,6 +105,7 @@ int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode, int init_xcps, struct amdgpu_xcp_mgr_funcs *xcp_funcs); +int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode); int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags); int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode); int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr, diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index aa1bb7883158..004400fb89b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -321,9 +321,11 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, if (adev->nbio.funcs->set_compute_partition_mode) adev->nbio.funcs->set_compute_partition_mode(adev, mode); - ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags); - + /* Init info about new xcps */ *num_xcps = num_xcc / num_xcc_per_xcp; + amdgpu_xcp_init(xcp_mgr, *num_xcps, mode); + + ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags); unlock: if (flags & AMDGPU_XCP_OPS_KFD) amdgpu_amdkfd_unlock_kfd(adev); -- cgit v1.2.3 From 1589c82a10852c6de742e5d6a92042a3fd68d753 Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Mon, 13 Feb 2023 18:50:07 +0530 Subject: drm/amdgpu: Check memory ranges for valid xcp mode Check the memory ranges available to the device also for deciding a valid partition 
mode. Only select combinations are valid for a particular mode. Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Le Ma <le.ma@amd.com> Reviewed-by: Philip Yang <philip.yang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 004400fb89b0..7469de3fd6fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -238,21 +238,28 @@ int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr, enum amdgpu_gfx_partition mode) { + struct amdgpu_device *adev = xcp_mgr->adev; int num_xcc, num_xccs_per_xcp; - num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask); + num_xcc = NUM_XCC(adev->gfx.xcc_mask); switch (mode) { case AMDGPU_SPX_PARTITION_MODE: - return num_xcc > 0; + return adev->gmc.num_mem_partitions == 1 && num_xcc > 0; case AMDGPU_DPX_PARTITION_MODE: - return (num_xcc % 4) == 0; + return adev->gmc.num_mem_partitions != 8 && (num_xcc % 4) == 0; case AMDGPU_TPX_PARTITION_MODE: - return (num_xcc % 3) == 0; + return (adev->gmc.num_mem_partitions == 1 || + adev->gmc.num_mem_partitions == 3) && + ((num_xcc % 3) == 0); case AMDGPU_QPX_PARTITION_MODE: num_xccs_per_xcp = num_xcc / 4; - return (num_xccs_per_xcp >= 2); + return (adev->gmc.num_mem_partitions == 1 || + adev->gmc.num_mem_partitions == 4) && + (num_xccs_per_xcp >= 2); case AMDGPU_CPX_PARTITION_MODE: - return (num_xcc > 1); + return (num_xcc > 1) && + (adev->gmc.num_mem_partitions == 1 || + adev->gmc.num_mem_partitions == num_xcc); default: return false; } -- cgit v1.2.3 From 570de94b9c5d93e1c5bc4e357946efb93c662da9 Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Mon, 13 Feb 2023 19:26:18 +0530 Subject: drm/amdgpu: Add auto mode for compute partition When auto mode is specified, driver will choose the right compute partition mode. 
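The choice boils down to matching the number of memory partitions against the XCC count. A self-contained sketch of that decision table follows; the enum, the function name and the 8-XCC example in main() are assumptions for illustration, not driver code.

#include <stdbool.h>
#include <stdio.h>

enum part_mode { SPX, DPX, TPX, QPX, CPX, UNKNOWN };

/* Mirrors the shape of the auto-mode selection: the ratio of memory
 * partitions to XCCs decides the compute partition mode.
 */
static enum part_mode pick_auto_mode(unsigned int num_mem_parts,
				     unsigned int num_xcc, bool is_apu)
{
	if (num_mem_parts == 1)
		return SPX;
	if (num_mem_parts == num_xcc)
		return CPX;
	if (num_mem_parts == num_xcc / 2)
		return is_apu ? TPX : QPX;
	if (num_mem_parts == 2 && !is_apu)
		return DPX;
	return UNKNOWN;
}

int main(void)
{
	/* assumed example: discrete part, 8 XCCs, NPS4 (4 memory ranges) */
	printf("auto mode = %d\n", pick_auto_mode(4, 8, false)); /* 3 == QPX */
	return 0;
}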
Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Le Ma <le.ma@amd.com> Reviewed-by: Philip Yang <philip.yang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 8 ++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2 ++ .../gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 28 +++++++++++++++++++++- 4 files changed, 35 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index f2bafab15ceb..cb9373f8c25a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -242,7 +242,7 @@ extern int amdgpu_num_kcq; extern int amdgpu_vcnfw_log; extern int amdgpu_sg_display; -extern uint amdgpu_user_partt_mode; +extern int amdgpu_user_partt_mode; #define AMDGPU_VM_MAX_NUM_CTX 4096 #define AMDGPU_SG_THRESHOLD (256*1024*1024) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f319318a8813..da4e50aef95a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -193,7 +193,7 @@ int amdgpu_smartshift_bias; int amdgpu_use_xgmi_p2p = 1; int amdgpu_vcnfw_log; int amdgpu_sg_display = -1; /* auto */ -uint amdgpu_user_partt_mode; +int amdgpu_user_partt_mode = AMDGPU_AUTO_COMPUTE_PARTITION_MODE; static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work); @@ -955,8 +955,10 @@ module_param_named(smu_pptable_id, amdgpu_smu_pptable_id, int, 0444); * DOC: partition_mode (int) * Used to override the default SPX mode. */ -MODULE_PARM_DESC(user_partt_mode, - "specify partition mode to be used (0 = AMDGPU_SPX_PARTITION_MODE(default value), \ +MODULE_PARM_DESC( + user_partt_mode, + "specify partition mode to be used (-2 = AMDGPU_AUTO_COMPUTE_PARTITION_MODE(default value) \ + 0 = AMDGPU_SPX_PARTITION_MODE, \ 1 = AMDGPU_DPX_PARTITION_MODE, \ 2 = AMDGPU_TPX_PARTITION_MODE, \ 3 = AMDGPU_QPX_PARTITION_MODE, \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 728977f8afe7..e9c93f6e12b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -62,6 +62,8 @@ enum amdgpu_gfx_partition { AMDGPU_QPX_PARTITION_MODE = 3, AMDGPU_CPX_PARTITION_MODE = 4, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE = -1, + /* Automatically choose the right mode */ + AMDGPU_AUTO_COMPUTE_PARTITION_MODE = -2, }; #define NUM_XCC(x) hweight16(x) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 7469de3fd6fe..a165b51e9e58 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -235,6 +235,30 @@ int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, return 0; } +static enum amdgpu_gfx_partition +__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr) +{ + struct amdgpu_device *adev = xcp_mgr->adev; + int num_xcc; + + num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask); + + if (adev->gmc.num_mem_partitions == 1) + return AMDGPU_SPX_PARTITION_MODE; + + if (adev->gmc.num_mem_partitions == num_xcc) + return AMDGPU_CPX_PARTITION_MODE; + + if (adev->gmc.num_mem_partitions == num_xcc / 2) + return (adev->flags & AMD_IS_APU) ? 
AMDGPU_TPX_PARTITION_MODE : + AMDGPU_QPX_PARTITION_MODE; + + if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU)) + return AMDGPU_DPX_PARTITION_MODE; + + return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; +} + static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr, enum amdgpu_gfx_partition mode) { @@ -304,7 +328,9 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, adev = xcp_mgr->adev; num_xcc = NUM_XCC(adev->gfx.xcc_mask); - if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) + if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) + mode = __aqua_vanjaram_get_auto_mode(xcp_mgr); + else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) return -EINVAL; if (adev->kfd.init_complete) -- cgit v1.2.3 From b6b85c8b43a85988ecd06f039f8f90c041842812 Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Tue, 7 Mar 2023 10:36:08 +0530 Subject: drm/amdgpu: Return error on invalid compute mode Return error if an invalid compute partition mode is requested. Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Le Ma <le.ma@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 8 ++++++-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 6 +++++- 2 files changed, 11 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index a165b51e9e58..848049db00ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -328,10 +328,14 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, adev = xcp_mgr->adev; num_xcc = NUM_XCC(adev->gfx.xcc_mask); - if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) + if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) { mode = __aqua_vanjaram_get_auto_mode(xcp_mgr); - else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) + } else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) { + dev_err(adev->dev, + "Invalid compute partition mode requested, requested: %s, available memory partitions: %d", + amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions); return -EINVAL; + } if (adev->kfd.init_complete) flags |= AMDGPU_XCP_OPS_KFD; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 81ab3cd2f229..d0ddcd751432 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1933,7 +1933,11 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr, AMDGPU_XCP_FL_NONE) == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) - amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, amdgpu_user_partt_mode); + r = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, + amdgpu_user_partt_mode); + + if (r) + return r; num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { -- cgit v1.2.3 From fc021438d0ab7863dc93f84a557af6dc6255b881 Mon Sep 17 00:00:00 2001 From: Philip Yang <Philip.Yang@amd.com> Date: Wed, 19 Apr 2023 17:43:26 -0400 Subject: drm/amdgpu: Enable NPS4 CPX mode CPX compute mode is valid mode for NPS4 memory partition mode. 
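In effect the CPX validity test becomes the check sketched below: only one (NPS1) or four (NPS4) memory partitions are accepted, and the XCCs must divide evenly across them. The standalone helper and the assert() cases are illustrative assumptions rather than the driver function.

#include <assert.h>
#include <stdbool.h>

/* Approximation of the relaxed CPX check added here. */
static bool cpx_mode_valid(unsigned int num_xcc, unsigned int num_mem_parts)
{
	if (num_xcc <= 1)
		return false;
	if (num_mem_parts != 1 && num_mem_parts != 4)
		return false;
	return (num_xcc % num_mem_parts) == 0;
}

int main(void)
{
	assert(cpx_mode_valid(8, 4));	/* NPS4: now accepted */
	assert(cpx_mode_valid(8, 1));	/* NPS1: still accepted */
	assert(!cpx_mode_valid(8, 2));	/* NPS2: rejected for CPX */
	return 0;
}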
Signed-off-by: Philip Yang <Philip.Yang@amd.com> Reviewed-by: Lijo Lazar <lijo.lazar@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 848049db00ab..97011e7e031d 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -281,9 +281,9 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr, adev->gmc.num_mem_partitions == 4) && (num_xccs_per_xcp >= 2); case AMDGPU_CPX_PARTITION_MODE: - return (num_xcc > 1) && - (adev->gmc.num_mem_partitions == 1 || - adev->gmc.num_mem_partitions == num_xcc); + return ((num_xcc > 1) && + (adev->gmc.num_mem_partitions == 1 || adev->gmc.num_mem_partitions == 4) && + (num_xcc % adev->gmc.num_mem_partitions) == 0); default: return false; } -- cgit v1.2.3 From 797a0a142ca7f3b823ae1032983111c055bc50fb Mon Sep 17 00:00:00 2001 From: James Zhu <James.Zhu@amd.com> Date: Mon, 15 Aug 2022 16:45:12 -0400 Subject: drm/amdgpu: add partition ID track in ring Keep track partition ID in ring. Signed-off-by: James Zhu <James.Zhu@amd.com> Acked-by: Lijo Lazar <lijo.lazar@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 1 + .../gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 41 ++++++++++++++++++++++ 2 files changed, 42 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index d2b1a8854603..4a4c9f89c302 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -252,6 +252,7 @@ struct amdgpu_ring { uint32_t buf_mask; u32 idx; u32 xcc_id; + u32 xcp_id; u32 me; u32 pipe; u32 queue; diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 97011e7e031d..c90ea34ef9ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -61,6 +61,47 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev) adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1; } +static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev, + uint32_t inst_idx, struct amdgpu_ring *ring) +{ + int xcp_id; + enum AMDGPU_XCP_IP_BLOCK ip_blk; + uint32_t inst_mask; + + ring->xcp_id = ~0; + if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) + return; + + inst_mask = 1 << inst_idx; + + switch (ring->funcs->type) { + case AMDGPU_HW_IP_GFX: + case AMDGPU_RING_TYPE_COMPUTE: + case AMDGPU_RING_TYPE_KIQ: + ip_blk = AMDGPU_XCP_GFX; + break; + case AMDGPU_RING_TYPE_SDMA: + ip_blk = AMDGPU_XCP_SDMA; + break; + case AMDGPU_RING_TYPE_VCN_ENC: + case AMDGPU_RING_TYPE_VCN_JPEG: + ip_blk = AMDGPU_XCP_VCN; + if (adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE) + inst_mask = 1 << (inst_idx * 2); + break; + default: + DRM_ERROR("Not support ring type %d!", ring->funcs->type); + return; + } + + for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) { + if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) { + ring->xcp_id = xcp_id; + break; + } + } +} + static int8_t aqua_vanjaram_logical_to_dev_inst(struct 
amdgpu_device *adev, enum amd_hw_ip_block_type block, int8_t inst) -- cgit v1.2.3 From d425c6f48b189f0a5a7c7d26980fd7a2114fb35d Mon Sep 17 00:00:00 2001 From: James Zhu <James.Zhu@amd.com> Date: Mon, 15 Aug 2022 17:19:11 -0400 Subject: drm/amdgpu: add partition scheduler list update Add partition scheduler list update in late init and xcp partition mode switch. Signed-off-by: James Zhu <James.Zhu@amd.com> Acked-by: Lijo Lazar <lijo.lazar@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 2 + .../gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 67 +++++++++++++++++++++- 3 files changed, 70 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 4d9c535bcb0c..02ee79b7b56d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2474,6 +2474,8 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev) } } + amdgpu_xcp_update_partition_sched_list(adev); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index 9b627a8b1d5c..78fce5aab218 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -118,6 +118,7 @@ static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode) { + struct amdgpu_device *adev = xcp_mgr->adev; struct amdgpu_xcp_ip ip; uint8_t mem_id; int i, j, ret; @@ -153,6 +154,7 @@ int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode) } xcp_mgr->num_xcps = num_xcps; + amdgpu_xcp_update_partition_sched_list(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index c90ea34ef9ec..073ae95e6dd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -102,6 +102,70 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev, } } +static void aqua_vanjaram_xcp_gpu_sched_update( + struct amdgpu_device *adev, + struct amdgpu_ring *ring, + unsigned int sel_xcp_id) +{ + unsigned int *num_gpu_sched; + + num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id] + .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds; + adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio] + .sched[(*num_gpu_sched)++] = &ring->sched; + DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name, + sel_xcp_id, ring->funcs->type, + ring->hw_prio, *num_gpu_sched); +} + +static int aqua_vanjaram_xcp_sched_list_update( + struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int i; + + for (i = 0; i < MAX_XCP; i++) { + atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0); + memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched)); + } + + if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) + return 0; + + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { + ring = adev->rings[i]; + if (!ring || !ring->sched.ready) + continue; + + aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id); + + /* VCN is shared by two partitions under CPX MODE */ + if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC || + ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) && + adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE) + 
aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1); + } + + return 0; +} + +static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->num_rings; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE || + ring->funcs->type == AMDGPU_RING_TYPE_KIQ) + aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring); + else + aqua_vanjaram_set_xcp_id(adev, ring->me, ring); + } + + return aqua_vanjaram_xcp_sched_list_update(adev); +} + static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev, enum amd_hw_ip_block_type block, int8_t inst) @@ -483,7 +547,8 @@ struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = { .switch_partition_mode = &aqua_vanjaram_switch_partition_mode, .query_partition_mode = &aqua_vanjaram_query_partition_mode, .get_ip_details = &aqua_vanjaram_get_xcp_ip_details, - .get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id + .get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id, + .update_partition_sched_list = &aqua_vanjaram_update_partition_sched_list }; static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) -- cgit v1.2.3 From cd7d8400aa04ba989a87949cf4611b7e16af274f Mon Sep 17 00:00:00 2001 From: James Zhu <James.Zhu@amd.com> Date: Mon, 15 Aug 2022 17:00:54 -0400 Subject: drm/amdgpu: add partition schedule for GC(9, 4, 3) Implement partition schedule for GC(9, 4, 3). Signed-off-by: James Zhu <James.Zhu@amd.com> Acked-by: Lijo Lazar <lijo.lazar@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- .../gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 41 ++++++++++++++++++++++ 1 file changed, 41 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 073ae95e6dd6..4ca932a62ce6 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -166,6 +166,46 @@ static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev) return aqua_vanjaram_xcp_sched_list_update(adev); } +int aqua_vanjaram_select_scheds( + struct amdgpu_device *adev, + u32 hw_ip, + u32 hw_prio, + struct amdgpu_fpriv *fpriv, + unsigned int *num_scheds, + struct drm_gpu_scheduler ***scheds) +{ + u32 sel_xcp_id; + int i; + + if (fpriv->xcp_id == ~0) { + u32 least_ref_cnt = ~0; + + fpriv->xcp_id = 0; + for (i = 0; i < adev->xcp_mgr->num_xcps; i++) { + u32 total_ref_cnt; + + total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt); + if (total_ref_cnt < least_ref_cnt) { + fpriv->xcp_id = i; + least_ref_cnt = total_ref_cnt; + } + } + } + sel_xcp_id = fpriv->xcp_id; + + if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) { + *num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds; + *scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched; + atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt); + DRM_DEBUG("Selected partition #%d", sel_xcp_id); + } else { + DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id); + return -ENOENT; + } + + return 0; +} + static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev, enum amd_hw_ip_block_type block, int8_t inst) @@ -548,6 +588,7 @@ struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = { .query_partition_mode = &aqua_vanjaram_query_partition_mode, .get_ip_details = &aqua_vanjaram_get_xcp_ip_details, .get_xcp_mem_id = 
&aqua_vanjaram_get_xcp_mem_id, + .select_scheds = &aqua_vanjaram_select_scheds, .update_partition_sched_list = &aqua_vanjaram_update_partition_sched_list }; -- cgit v1.2.3 From 3cde91172d2e9d8d8dc6e0d62b7c829de503825c Mon Sep 17 00:00:00 2001 From: Philip Yang <Philip.Yang@amd.com> Date: Wed, 19 Apr 2023 17:39:35 -0400 Subject: drm/amdgpu: Correct get_xcp_mem_id calculation Current calculation only works for NPS4/QPX mode, correct it for NPS4/CPX mode. Signed-off-by: Philip Yang <Philip.Yang@amd.com> Reviewed-by: Lijo Lazar <lijo.lazar@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 4ca932a62ce6..93e9f947a85d 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -518,10 +518,9 @@ out: static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev, int xcc_id, uint8_t *mem_id) { - /* TODO: Check if any validation is required based on current - * memory/spatial modes - */ + /* memory/spatial modes validation check is already done */ *mem_id = xcc_id / adev->gfx.num_xcc_per_xcp; + *mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition; return 0; } -- cgit v1.2.3 From af2ba368838ee4913e758f34e3d8bbfeb110be36 Mon Sep 17 00:00:00 2001 From: Tao Zhou <tao.zhou1@amd.com> Date: Mon, 27 Feb 2023 16:31:56 +0800 Subject: drm/amdgpu: convert logical instance mask to physical one Convert instance mask for the convenience of RAS TA. Signed-off-by: Tao Zhou <tao.zhou1@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Reviewed-by: Stanley.Yang <Stanley.Yang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 ++++-- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 18 ++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/soc15_common.h | 7 ++++++- 3 files changed, 28 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index bba8cfeff71f..a4d0bc80ac92 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -677,12 +677,14 @@ enum amd_hw_ip_block_type { #define IP_VERSION_REV(ver) ((ver) & 0xFF) struct amdgpu_ip_map_info { - /* Map of logical to actual dev instances */ + /* Map of logical to actual dev instances/mask */ uint32_t dev_inst[MAX_HWIP][HWIP_MAX_INSTANCE]; int8_t (*logical_to_dev_inst)(struct amdgpu_device *adev, enum amd_hw_ip_block_type block, int8_t inst); - + uint32_t (*logical_to_dev_mask)(struct amdgpu_device *adev, + enum amd_hw_ip_block_type block, + uint32_t mask); }; struct amd_powerplay { diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 93e9f947a85d..68d1a0fc5f5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -229,6 +229,23 @@ static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev, return dev_inst; } +static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev, + enum amd_hw_ip_block_type block, + uint32_t mask) +{ + uint32_t dev_mask = 0; + int8_t log_inst, dev_inst; + + while 
(mask) { + log_inst = ffs(mask) - 1; + dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst); + dev_mask |= (1 << dev_inst); + mask &= ~(1 << log_inst); + } + + return dev_mask; +} + static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev, enum amd_hw_ip_block_type ip_block, uint32_t inst_mask) @@ -257,6 +274,7 @@ void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev) aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]); adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst; + adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask; } /* Fixed pattern for smn addressing on different AIDs: diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index 3730c5ec202f..96948a59f8dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -25,7 +25,12 @@ #define __SOC15_COMMON_H__ /* GET_INST returns the physical instance corresponding to a logical instance */ -#define GET_INST(ip, inst) (adev->ip_map.logical_to_dev_inst? adev->ip_map.logical_to_dev_inst(adev, ip##_HWIP, inst): inst) +#define GET_INST(ip, inst) \ + (adev->ip_map.logical_to_dev_inst ? \ + adev->ip_map.logical_to_dev_inst(adev, ip##_HWIP, inst) : inst) +#define GET_MASK(ip, mask) \ + (adev->ip_map.logical_to_dev_mask ? \ + adev->ip_map.logical_to_dev_mask(adev, ip##_HWIP, mask) : mask) /* Register Access Macros */ #define SOC15_REG_OFFSET(ip, inst, reg) (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) -- cgit v1.2.3 From 1b177b5c6846f20be013b45c36c24264049c81bf Mon Sep 17 00:00:00 2001 From: Arnd Bergmann <arnd@arndb.de> Date: Mon, 22 May 2023 13:50:30 +0200 Subject: drm/amdgpu:mark aqua_vanjaram_reg_init.c function as static A few newly added global functions have no prototype, which causes warnings: drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c:169:5: error: no previous prototype for 'aqua_vanjaram_select_scheds' [-Werror=missing-prototypes] drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c:310:5: error: no previous prototype for '__aqua_vanjaram_get_xcc_per_xcp' [-Werror=missing-prototypes] drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c:337:5: error: no previous prototype for '__aqua_vanjaram_get_xcp_ip_info' [-Werror=missing-prototypes] drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c:593:5: error: no previous prototype for 'aqua_vanjaram_get_xcp_ip_details' [-Werror=missing-prototypes] There are no callers from other files, so just mark them as 'static'. 
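The warning fires for any external-linkage function defined without a prior declaration; marking a file-local function static removes the expectation of a prototype. A minimal generic illustration (not driver code) of the pattern being fixed:

/* demo.c, compile with: gcc -Wmissing-prototypes -c demo.c */

int visible_helper(int x)	/* warns: no previous prototype */
{
	return x + 1;
}

static int local_helper(int x)	/* file-local, no prototype expected */
{
	return x + 1;
}

int use_them(int x)		/* would also warn unless declared in a header */
{
	return visible_helper(local_helper(x));
}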
Fixes: cd7d8400aa04 ("drm/amdgpu: add partition schedule for GC(9, 4, 3)") Fixes: 9cb18287d8f1 ("drm/amdgpu: Add SOC partition funcs for GC v9.4.3") Signed-off-by: Arnd Bergmann <arnd@arndb.de> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c') diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 68d1a0fc5f5d..a595bb958215 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -166,7 +166,7 @@ static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev) return aqua_vanjaram_xcp_sched_list_update(adev); } -int aqua_vanjaram_select_scheds( +static int aqua_vanjaram_select_scheds( struct amdgpu_device *adev, u32 hw_ip, u32 hw_prio, @@ -307,7 +307,7 @@ static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) return mode; } -int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode) +static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode) { int num_xcc, num_xcc_per_xcp = 0; @@ -334,7 +334,7 @@ int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode) return num_xcc_per_xcp; } -int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, +static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, enum AMDGPU_XCP_IP_BLOCK ip_id, struct amdgpu_xcp_ip *ip) { @@ -590,7 +590,7 @@ static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr, return r; } -int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, +static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, enum AMDGPU_XCP_IP_BLOCK ip_id, struct amdgpu_xcp_ip *ip) { -- cgit v1.2.3
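As a closing note on the instance-mapping pieces above, the per-bit loop in aqua_vanjaram_logical_to_dev_mask translates a logical instance bitmask into a physical one for consumers such as the RAS TA. A standalone model follows; the mapping table and the example mask in main() are made-up values for illustration only.

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Walk the set bits of the logical mask, translate each logical
 * instance through a lookup table, and set the matching bit in the
 * device (physical) mask.
 */
static unsigned int logical_to_dev_mask(const int *dev_inst_map,
					unsigned int mask)
{
	unsigned int dev_mask = 0;
	int log_inst;

	while (mask) {
		log_inst = ffs(mask) - 1;
		dev_mask |= 1u << dev_inst_map[log_inst];
		mask &= ~(1u << log_inst);
	}
	return dev_mask;
}

int main(void)
{
	int vcn_map[] = { 1, 3, 5, 7 };	/* assumed logical-to-physical table */

	/* logical instances 0 and 2 map to physical 1 and 5: prints 0x22 */
	printf("0x%x\n", logical_to_dev_mask(vcn_map, 0x5));
	return 0;
}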