author | Lijo Lazar <lijo.lazar@amd.com> | 2024-07-03 11:52:47 +0530
committer | Alex Deucher <alexander.deucher@amd.com> | 2024-07-10 10:12:28 -0400
commit | d02ddefc7eedaa6394279bad522c70fd5d63e163 (patch)
tree | 3b76a3e2fa60bfed4b89d064045fda0f4bdf50f4 /drivers/gpu
parent | 5d64af40e3a99c3bbe7a66c3cfe23295f8e97130 (diff)
drm/amdgpu: Initialize VF partition mode
For SOCs with GFX v9.4.3, a VF may have multiple compute partitions.
Fetch the partition information during init and initialize partition
nodes. Switching partition mode is not supported in VF mode, so it is
disabled.
Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
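
As a rough illustration of the derivation this patch adds (a VF cannot rely on the NBIO-reported partition mode, so it infers the mode from the XCC counts), here is a standalone user-space sketch. It mirrors the logic of __aqua_vanjaram_calc_xcp_mode(), but the simplified enum names, helper name and sample numbers are illustrative only; it is not the kernel code itself.

/* Standalone sketch only: mirrors __aqua_vanjaram_calc_xcp_mode() with
 * simplified names and sample values. Not the kernel implementation. */
#include <stdio.h>

enum gfx_partition_mode {
	SPX_MODE,	/* single partition      */
	DPX_MODE,	/* two partitions        */
	TPX_MODE,	/* three partitions      */
	QPX_MODE,	/* four partitions       */
	CPX_MODE,	/* one XCC per partition */
	UNKNOWN_MODE,
};

static enum gfx_partition_mode derive_mode(int num_xcc, int num_xcc_per_xcp)
{
	/* Reject a zero/garbage register value or an uneven split. */
	if (num_xcc_per_xcp <= 0 || num_xcc % num_xcc_per_xcp)
		return UNKNOWN_MODE;

	/* One XCC per partition is always CPX, regardless of XCC count. */
	if (num_xcc_per_xcp == 1)
		return CPX_MODE;

	switch (num_xcc / num_xcc_per_xcp) {
	case 1: return SPX_MODE;
	case 2: return DPX_MODE;
	case 3: return TPX_MODE;
	case 4: return QPX_MODE;
	default: return UNKNOWN_MODE;
	}
}

int main(void)
{
	/* Example: 8 XCCs with 4 XCCs per partition -> DPX (two partitions). */
	printf("derived mode: %d\n", derive_mode(8, 4));
	return 0;
}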
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 1
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 12
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 50
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 37
4 files changed, 88 insertions, 12 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 6b0416777c5b..ddda94e49db4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -297,6 +297,7 @@ struct amdgpu_gfx_funcs {
 	int (*switch_partition_mode)(struct amdgpu_device *adev,
 				     int num_xccs_per_xcp);
 	int (*ih_node_to_logical_xcc)(struct amdgpu_device *adev, int ih_node);
+	int (*get_xccs_per_xcp)(struct amdgpu_device *adev);
 };

 struct sq_work {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index 2b99eed5ba19..a6d456ec6aeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -219,7 +219,8 @@ int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
 {
 	int mode;

-	if (xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+	if (!amdgpu_sriov_vf(xcp_mgr->adev) &&
+	    xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
 		return xcp_mgr->mode;

 	if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
@@ -228,6 +229,12 @@ int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
 	if (!(flags & AMDGPU_XCP_FL_LOCKED))
 		mutex_lock(&xcp_mgr->xcp_lock);
 	mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr);
+
+	/* First time query for VF, set the mode here */
+	if (amdgpu_sriov_vf(xcp_mgr->adev) &&
+	    xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+		xcp_mgr->mode = mode;
+
 	if (xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS && mode != xcp_mgr->mode)
 		dev_WARN(
 			xcp_mgr->adev->dev,
@@ -282,8 +289,7 @@ int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
 {
 	struct amdgpu_xcp_mgr *xcp_mgr;

-	if (!xcp_funcs || !xcp_funcs->switch_partition_mode ||
-	    !xcp_funcs->get_ip_details)
+	if (!xcp_funcs || !xcp_funcs->get_ip_details)
 		return -EINVAL;

 	xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index 2c9a0aa41e2d..228fd4dd32f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -304,13 +304,56 @@ u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
 	return ext_offset;
 }

+static enum amdgpu_gfx_partition
+__aqua_vanjaram_calc_xcp_mode(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+	struct amdgpu_device *adev = xcp_mgr->adev;
+	int num_xcc, num_xcc_per_xcp = 0, mode = 0;
+
+	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
+	if (adev->gfx.funcs->get_xccs_per_xcp)
+		num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
+	if ((num_xcc_per_xcp) && (num_xcc % num_xcc_per_xcp == 0))
+		mode = num_xcc / num_xcc_per_xcp;
+
+	if (num_xcc_per_xcp == 1)
+		return AMDGPU_CPX_PARTITION_MODE;
+
+	switch (mode) {
+	case 1:
+		return AMDGPU_SPX_PARTITION_MODE;
+	case 2:
+		return AMDGPU_DPX_PARTITION_MODE;
+	case 3:
+		return AMDGPU_TPX_PARTITION_MODE;
+	case 4:
+		return AMDGPU_QPX_PARTITION_MODE;
+	default:
+		return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
+	}
+
+	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
+}
+
 static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
 {
-	enum amdgpu_gfx_partition mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
+	enum amdgpu_gfx_partition derv_mode,
+		mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
 	struct amdgpu_device *adev = xcp_mgr->adev;

-	if (adev->nbio.funcs->get_compute_partition_mode)
+	derv_mode = __aqua_vanjaram_calc_xcp_mode(xcp_mgr);
+
+	if (amdgpu_sriov_vf(adev))
+		return derv_mode;
+
+	if (adev->nbio.funcs->get_compute_partition_mode) {
 		mode = adev->nbio.funcs->get_compute_partition_mode(adev);
+		if (mode != derv_mode)
+			dev_warn(
+				adev->dev,
+				"Mismatch in compute partition mode - reported : %d derived : %d",
+				mode, derv_mode);
+	}

 	return mode;
 }
@@ -624,6 +667,9 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
 {
 	int ret;

+	if (amdgpu_sriov_vf(adev))
+		aqua_vanjaram_xcp_funcs.switch_partition_mode = NULL;
+
 	ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
 				  &aqua_vanjaram_xcp_funcs);
 	if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index c908e585b9ec..20ea6cb01edf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -652,6 +652,15 @@ static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
 	soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
 }

+static int gfx_v9_4_3_get_xccs_per_xcp(struct amdgpu_device *adev)
+{
+	u32 xcp_ctl;
+
+	/* Value is expected to be the same on all, fetch from first instance */
+	xcp_ctl = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HYP_XCP_CTL);
+
+	return REG_GET_FIELD(xcp_ctl, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP);
+}

 static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
 						int num_xccs_per_xcp)
@@ -706,6 +715,7 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
 	.select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
 	.switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
 	.ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
+	.get_xccs_per_xcp = &gfx_v9_4_3_get_xccs_per_xcp,
 };

 static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
@@ -2050,18 +2060,31 @@ static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)

 static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
 {
-	int r = 0, i, num_xcc;
+	int r = 0, i, num_xcc, num_xcp, num_xcc_per_xcp;
+
+	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+	if (amdgpu_sriov_vf(adev)) {
+		enum amdgpu_gfx_partition mode;

-	if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
-					    AMDGPU_XCP_FL_NONE) ==
-	    AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
-		r = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr,
-						     amdgpu_user_partt_mode);
+		mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
+						       AMDGPU_XCP_FL_NONE);
+		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
+			return -EINVAL;
+		num_xcc_per_xcp = gfx_v9_4_3_get_xccs_per_xcp(adev);
+		adev->gfx.num_xcc_per_xcp = num_xcc_per_xcp;
+		num_xcp = num_xcc / num_xcc_per_xcp;
+		r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);
+	} else {
+		if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
+						    AMDGPU_XCP_FL_NONE) ==
+		    AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
+			r = amdgpu_xcp_switch_partition_mode(
+				adev->xcp_mgr, amdgpu_user_partt_mode);
+	}

 	if (r)
 		return r;

-	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
 	for (i = 0; i < num_xcc; i++) {
 		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
 		if (r)
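
For reference, the new gfx_v9_4_3_get_xccs_per_xcp() helper is a single register read followed by a plain mask-and-shift via REG_GET_FIELD(). The user-space sketch below reproduces that extraction; the mask and shift values are placeholders for illustration, not the real CP_HYP_XCP_CTL field definitions from the generated register headers.

/* Sketch of the REG_GET_FIELD()-style extraction used by
 * gfx_v9_4_3_get_xccs_per_xcp(). Mask/shift values are hypothetical. */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: NUM_XCC_IN_XCP occupying bits [2:0]. */
#define NUM_XCC_IN_XCP_SHIFT	0
#define NUM_XCC_IN_XCP_MASK	0x7u

static unsigned int num_xcc_in_xcp(uint32_t xcp_ctl)
{
	/* Mask out the field, then shift it down to bit 0. */
	return (xcp_ctl & NUM_XCC_IN_XCP_MASK) >> NUM_XCC_IN_XCP_SHIFT;
}

int main(void)
{
	uint32_t xcp_ctl = 0x4;	/* pretend the register read returned 4 */

	printf("XCCs per XCP: %u\n", num_xcc_in_xcp(xcp_ctl));
	return 0;
}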