From 5aa998baab3360d0f1b93d6aff3df924045f956c Mon Sep 17 00:00:00 2001 From: Le Ma <le.ma@amd.com> Date: Wed, 17 Nov 2021 16:28:51 +0800 Subject: drm/amdgpu: add xcc index argument to soc15_grbm_select To support grbm select for multiple XCD case. v2: unify naming style Signed-off-by: Le Ma <le.ma@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index 3a797424579c..93438770ca1a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -761,7 +761,7 @@ void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev, for (i = first_vmid; i < last_vmid; i++) { data = 0; - soc15_grbm_select(adev, 0, 0, 0, i); + soc15_grbm_select(adev, 0, 0, 0, i, 0); data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0); data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, @@ -769,7 +769,7 @@ void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev, WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL), data); } - soc15_grbm_select(adev, 0, 0, 0, 0); + soc15_grbm_select(adev, 0, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } -- cgit v1.2.3 From d51ac6d0a23caf1005cb640f8533161c5d2dd0c0 Mon Sep 17 00:00:00 2001 From: Le Ma <le.ma@amd.com> Date: Tue, 24 May 2022 11:22:49 +0800 Subject: drm/amdgpu: add xcc index argument to select_sh_se function v2 v1: To support multiple XCD case (Le) v2: introduce xcc index to gfx_v11_0_select_sh_se (Hawking) Signed-off-by: Le Ma <le.ma@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 4 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 16 +++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 4 +-- drivers/gpu/drm/amd/amdgpu/cik.c | 4 +-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 16 +++++------ drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 8 +++--- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 24 ++++++++--------- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 33 ++++++++++++----------- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 33 ++++++++++++----------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 30 ++++++++++----------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 20 +++++++------- drivers/gpu/drm/amd/amdgpu/nv.c | 4 +-- drivers/gpu/drm/amd/amdgpu/si.c | 4 +-- drivers/gpu/drm/amd/amdgpu/soc15.c | 4 +-- drivers/gpu/drm/amd/amdgpu/soc21.c | 4 +-- drivers/gpu/drm/amd/amdgpu/vi.c | 4 +-- 18 files changed, 110 insertions(+), 106 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index ebb35633058c..ae06d1f2af93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -787,7 +787,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, for (se_idx = 0; se_idx < se_cnt; se_idx++) { for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) { - amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 
0xffffffff); + amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 0xffffffff, 0); queue_map = RREG32_SOC15(GC, 0, mmSPI_CSQ_WF_ACTIVE_STATUS); /* @@ -820,7 +820,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, } } - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); soc15_grbm_select(adev, 0, 0, 0, 0, 0); unlock_spi_csq_mutexes(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index f60753f97ac5..cc64ae550bc1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -136,7 +136,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, } mutex_lock(&adev->grbm_idx_mutex); amdgpu_gfx_select_se_sh(adev, se_bank, - sh_bank, instance_bank); + sh_bank, instance_bank, 0); } else if (use_ring) { mutex_lock(&adev->srbm_mutex); amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid); @@ -169,7 +169,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, end: if (use_bank) { - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } else if (use_ring) { amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0); @@ -263,7 +263,7 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off mutex_lock(&adev->grbm_idx_mutex); amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se, rd->id.grbm.sh, - rd->id.grbm.instance); + rd->id.grbm.instance, 0); } if (rd->id.use_srbm) { @@ -295,7 +295,7 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off } end: if (rd->id.use_grbm) { - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -907,13 +907,13 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, /* switch to the specific se/sh/cu */ mutex_lock(&adev->grbm_idx_mutex); - amdgpu_gfx_select_se_sh(adev, se, sh, cu); + amdgpu_gfx_select_se_sh(adev, se, sh, cu, 0); x = 0; if (adev->gfx.funcs->read_wave_data) adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x); - amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); + amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0); mutex_unlock(&adev->grbm_idx_mutex); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); @@ -1001,7 +1001,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, /* switch to the specific se/sh/cu */ mutex_lock(&adev->grbm_idx_mutex); - amdgpu_gfx_select_se_sh(adev, se, sh, cu); + amdgpu_gfx_select_se_sh(adev, se, sh, cu, 0); if (bank == 0) { if (adev->gfx.funcs->read_wave_vgprs) @@ -1011,7 +1011,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data); } - amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); + amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0); mutex_unlock(&adev->grbm_idx_mutex); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index a9e41d7970ea..77e2f714e357 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -237,7 +237,7 @@ struct amdgpu_gfx_funcs { 
/* get the gpu clock counter */ uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, - u32 sh_num, u32 instance); + u32 sh_num, u32 instance, int xcc_id); void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields); void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, @@ -386,7 +386,7 @@ struct amdgpu_gfx { }; #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) -#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance)) +#define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id))) #define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid)) #define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev)) diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index de6d10390ab2..5641cf05d856 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1141,12 +1141,12 @@ static uint32_t cik_get_register_value(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0); val = RREG32(reg_offset); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); return val; } else { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index accc0a7251b9..323f5b8927ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -3490,7 +3490,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info); static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev); static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, - u32 sh_num, u32 instance); + u32 sh_num, u32 instance, int xcc_id); static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev); static int gfx_v10_0_rlc_backdoor_autoload_buffer_init(struct amdgpu_device *adev); @@ -4712,7 +4712,7 @@ static int gfx_v10_0_sw_fini(void *handle) } static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, - u32 sh_num, u32 instance) + u32 sh_num, u32 instance, int xcc_id) { u32 data; @@ -4772,13 +4772,13 @@ static void gfx_v10_0_setup_rb(struct amdgpu_device *adev) (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 6))) && ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1)) continue; - gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff, 0); data = gfx_v10_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); adev->gfx.config.backend_enable_mask = active_rbs; @@ -4907,7 +4907,7 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < 
adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff, 0); wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev); /* * Set corresponding TCP bits for the inactive WGPs in @@ -4940,7 +4940,7 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) } } - gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -9540,7 +9540,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev, mask = 1; ao_bitmap = 0; counter = 0; - gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff, 0); if (i < 4 && j < 2) gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh( adev, disable_masks[i * 2 + j]); @@ -9561,7 +9561,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev, cu_info->ao_cu_bitmap[i][j] = ao_bitmap; } } - gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 4fbefe236fc7..50d0ff9ca259 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -112,7 +112,7 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info); static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev); static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, - u32 sh_num, u32 instance); + u32 sh_num, u32 instance, int xcc_id); static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev); static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume); @@ -1484,7 +1484,7 @@ static int gfx_v11_0_sw_fini(void *handle) } static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, - u32 sh_num, u32 instance) + u32 sh_num, u32 instance, int xcc_id) { u32 data; @@ -6473,7 +6473,7 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev, for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { mask = 1; counter = 0; - gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0); if (i < 8 && j < 2) gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh( adev, disable_masks[i * 2 + j]); @@ -6505,7 +6505,7 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev, active_cu_number += counter; } } - gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index d9ce4d1c50e4..7cb72bf1acdd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1285,7 +1285,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev) } static void gfx_v6_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, - u32 sh_num, u32 instance) + u32 sh_num, u32 instance, int xcc_id) { u32 data; @@ -1438,12 +1438,12 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev, } /* GRBM_GFX_INDEX has a different offset on SI */ - gfx_v6_0_select_se_sh(adev, se, 
0xffffffff, 0xffffffff); + gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0); WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se); } /* GRBM_GFX_INDEX has a different offset on SI */ - gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); } static void gfx_v6_0_setup_rb(struct amdgpu_device *adev) @@ -1459,14 +1459,14 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0); data = gfx_v6_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); adev->gfx.config.backend_enable_mask = active_rbs; adev->gfx.config.num_rbs = hweight32(active_rbs); @@ -1487,7 +1487,7 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev) /* cache the values for userspace */ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0); adev->gfx.config.rb_config[i][j].rb_backend_disable = RREG32(mmCC_RB_BACKEND_DISABLE); adev->gfx.config.rb_config[i][j].user_rb_backend_disable = @@ -1496,7 +1496,7 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev) RREG32(mmPA_SC_RASTER_CONFIG); } } - gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -1535,7 +1535,7 @@ static void gfx_v6_0_setup_spi(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0); data = RREG32(mmSPI_STATIC_THREAD_MGMT_3); active_cu = gfx_v6_0_get_cu_enabled(adev); @@ -1550,7 +1550,7 @@ static void gfx_v6_0_setup_spi(struct amdgpu_device *adev) } } } - gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -2391,7 +2391,7 @@ static void gfx_v6_0_enable_lbpw(struct amdgpu_device *adev, bool enable) WREG32_FIELD(RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 
1 : 0); if (!enable) { - gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32(mmSPI_LB_CU_MASK, 0x00ff); } } @@ -3571,7 +3571,7 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev) mask = 1; ao_bitmap = 0; counter = 0; - gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0); if (i < 4 && j < 2) gfx_v6_0_set_user_cu_inactive_bitmap( adev, disable_masks[i * 2 + j]); @@ -3593,7 +3593,7 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev) } } - gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 46740ad9a80f..d055e44eee1d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -1552,7 +1552,8 @@ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev) * Select which SE, SH combinations to address. */ static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, - u32 se_num, u32 sh_num, u32 instance) + u32 se_num, u32 sh_num, u32 instance, + int xcc_id) { u32 data; @@ -1732,13 +1733,13 @@ gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev, } /* GRBM_GFX_INDEX has a different offset on CI+ */ - gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0); WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se); WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1); } /* GRBM_GFX_INDEX has a different offset on CI+ */ - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); } /** @@ -1761,13 +1762,13 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0); data = gfx_v7_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); adev->gfx.config.backend_enable_mask = active_rbs; adev->gfx.config.num_rbs = hweight32(active_rbs); @@ -1790,7 +1791,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) /* cache the values for userspace */ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0); adev->gfx.config.rb_config[i][j].rb_backend_disable = RREG32(mmCC_RB_BACKEND_DISABLE); adev->gfx.config.rb_config[i][j].user_rb_backend_disable = @@ -1801,7 +1802,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) RREG32(mmPA_SC_RASTER_CONFIG_1); } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -1911,7 +1912,7 @@ static void gfx_v7_0_constants_init(struct amdgpu_device *adev) * making sure that the following register writes will be broadcasted * to all the 
shaders */ - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ @@ -3301,7 +3302,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0); for (k = 0; k < adev->usec_timeout; k++) { if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0) break; @@ -3309,7 +3310,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | @@ -3474,7 +3475,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) WREG32(mmRLC_LB_CNTR_MAX, 0x00008000); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff); WREG32(mmRLC_LB_PARAMS, 0x00600408); WREG32(mmRLC_LB_CNTL, 0x80000004); @@ -3530,7 +3531,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable) tmp = gfx_v7_0_halt_rlc(adev); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | @@ -3584,7 +3585,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) tmp = gfx_v7_0_halt_rlc(adev); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | @@ -3635,7 +3636,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) tmp = gfx_v7_0_halt_rlc(adev); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK; @@ -5115,7 +5116,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev) mask = 1; ao_bitmap = 0; counter = 0; - gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0); if (i < 4 && j < 2) gfx_v7_0_set_user_cu_inactive_bitmap( adev, disable_masks[i * 2 + j]); @@ -5136,7 +5137,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev) cu_info->ao_cu_bitmap[i][j] = ao_bitmap; } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 
8a43e87de49f..b60480876149 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -3395,7 +3395,8 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) } static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, - u32 se_num, u32 sh_num, u32 instance) + u32 se_num, u32 sh_num, u32 instance, + int xcc_id) { u32 data; @@ -3579,13 +3580,13 @@ gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev, } /* GRBM_GFX_INDEX has a different offset on VI */ - gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0); WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se); WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1); } /* GRBM_GFX_INDEX has a different offset on VI */ - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); } static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) @@ -3601,13 +3602,13 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0); data = gfx_v8_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); adev->gfx.config.backend_enable_mask = active_rbs; adev->gfx.config.num_rbs = hweight32(active_rbs); @@ -3630,7 +3631,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) /* cache the values for userspace */ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0); adev->gfx.config.rb_config[i][j].rb_backend_disable = RREG32(mmCC_RB_BACKEND_DISABLE); adev->gfx.config.rb_config[i][j].user_rb_backend_disable = @@ -3641,7 +3642,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) RREG32(mmPA_SC_RASTER_CONFIG_1); } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -3788,7 +3789,7 @@ static void gfx_v8_0_constants_init(struct amdgpu_device *adev) * making sure that the following register writes will be broadcasted * to all the shaders */ - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32(mmPA_SC_FIFO_SIZE, (adev->gfx.config.sc_prim_fifo_size_frontend << @@ -3819,7 +3820,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0); for (k = 0; k < adev->usec_timeout; k++) { if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0) break; @@ -3827,7 +3828,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } if (k == adev->usec_timeout) { gfx_v8_0_select_se_sh(adev, 0xffffffff, - 0xffffffff, 0xffffffff); + 0xffffffff, 0xffffffff, 0); 
mutex_unlock(&adev->grbm_idx_mutex); DRM_INFO("Timeout wait for RLC serdes %u,%u\n", i, j); @@ -3835,7 +3836,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | @@ -5481,7 +5482,7 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev, { uint32_t data; - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); @@ -6723,11 +6724,11 @@ static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data, */ if (from_wq) { mutex_lock(&adev->grbm_idx_mutex); - gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id); + gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id, 0); sq_edc_source = REG_GET_FIELD(RREG32(mmSQ_EDC_INFO), SQ_EDC_INFO, SOURCE); - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -7116,7 +7117,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev) mask = 1; ao_bitmap = 0; counter = 0; - gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0); if (i < 4 && j < 2) gfx_v8_0_set_user_cu_inactive_bitmap( adev, disable_masks[i * 2 + j]); @@ -7137,7 +7138,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev) cu_info->ao_cu_bitmap[i][j] = ao_bitmap; } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 4939fd61355b..2fa7adef18a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1504,7 +1504,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev) mask = 1; cu_bitmap = 0; counter = 0; - amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0); for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) { if (cu_info->bitmap[i][j] & mask) { @@ -1523,7 +1523,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev) cu_info->ao_cu_bitmap[i][j] = cu_bitmap; } } - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -1545,7 +1545,7 @@ static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); /* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/ - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff); /* set mmRLC_LB_PARAMS = 0x003F_1006 */ @@ -1594,7 +1594,7 @@ static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); /* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/ - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 
WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff); /* set mmRLC_LB_PARAMS = 0x003F_1006 */ @@ -2241,7 +2241,7 @@ static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev) } void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, - u32 instance) + u32 instance, int xcc_id) { u32 data; @@ -2290,13 +2290,13 @@ static void gfx_v9_0_setup_rb(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0); data = gfx_v9_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); adev->gfx.config.backend_enable_mask = active_rbs; @@ -2433,7 +2433,7 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0); for (k = 0; k < adev->usec_timeout; k++) { if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0) break; @@ -2441,7 +2441,7 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } if (k == adev->usec_timeout) { amdgpu_gfx_select_se_sh(adev, 0xffffffff, - 0xffffffff, 0xffffffff); + 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); DRM_INFO("Timeout wait for RLC serdes %u,%u\n", i, j); @@ -2449,7 +2449,7 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } } } - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | @@ -6608,7 +6608,7 @@ static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev) for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) { for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) { for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) { - amdgpu_gfx_select_se_sh(adev, j, 0x0, k); + amdgpu_gfx_select_se_sh(adev, j, 0x0, k, 0); RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i])); } } @@ -6670,7 +6670,7 @@ static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) { for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) { for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) { - amdgpu_gfx_select_se_sh(adev, j, 0, k); + amdgpu_gfx_select_se_sh(adev, j, 0, k, 0); reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i])); if (reg_value) @@ -6685,7 +6685,7 @@ static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, err_data->ce_count += sec_count; err_data->ue_count += ded_count; - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); gfx_v9_0_query_utc_edc_status(adev, err_data); @@ -7145,7 +7145,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, mask = 1; ao_bitmap = 0; counter = 0; - 
amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0); gfx_v9_0_set_user_cu_inactive_bitmap( adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]); bitmap = gfx_v9_0_get_cu_active_bitmap(adev); @@ -7178,7 +7178,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap; } } - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h index dfe8d4841f58..f9f6edc5e558 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h @@ -27,6 +27,6 @@ extern const struct amdgpu_ip_block_version gfx_v9_0_ip_block; void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, - u32 instance); + u32 instance, int xcc_id); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index 93438770ca1a..d648a29c33e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -777,7 +777,7 @@ void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev) { u32 tmp; - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); tmp = 0; tmp = REG_SET_FIELD(tmp, GC_THROTTLE_CTRL, PATTERN_MODE, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index f1c04140e717..b67be666f38a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -568,7 +568,8 @@ static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev) static void gfx_v9_4_3_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, - u32 instance) + u32 instance, + int xcc_id) { u32 data; @@ -591,7 +592,7 @@ static void gfx_v9_4_3_select_se_sh(struct amdgpu_device *adev, else data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); - WREG32_SOC15_RLC_SHADOW_EX(reg, GC, 0, regGRBM_GFX_INDEX, data); + WREG32_SOC15_RLC_SHADOW_EX(reg, GC, xcc_id, regGRBM_GFX_INDEX, data); } static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address) @@ -907,13 +908,13 @@ static void gfx_v9_4_3_setup_rb(struct amdgpu_device *adev, int xcc_id) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff); + gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff, xcc_id); data = gfx_v9_4_3_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, xcc_id); mutex_unlock(&adev->grbm_idx_mutex); adev->gfx.config.backend_enable_mask = active_rbs; @@ -1146,7 +1147,7 @@ static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff); + gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff, xcc_id); for (k = 0; k < adev->usec_timeout; k++) { if 
(RREG32_SOC15(GC, 0, regRLC_SERDES_CU_MASTER_BUSY) == 0) break; @@ -1154,7 +1155,8 @@ static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev, } if (k == adev->usec_timeout) { gfx_v9_4_3_select_se_sh(adev, 0xffffffff, - 0xffffffff, 0xffffffff); + 0xffffffff, 0xffffffff, + xcc_id); mutex_unlock(&adev->grbm_idx_mutex); DRM_INFO("Timeout wait for RLC serdes %u,%u\n", i, j); @@ -1162,7 +1164,7 @@ static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev, } } } - gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, xcc_id); mutex_unlock(&adev->grbm_idx_mutex); mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | @@ -3065,7 +3067,7 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev, mask = 1; ao_bitmap = 0; counter = 0; - gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff); + gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff, 0); gfx_v9_4_3_set_user_cu_inactive_bitmap( adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]); bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev); @@ -3098,7 +3100,7 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev, cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap; } } - gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 47420b403871..148049782f50 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -371,12 +371,12 @@ static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num, mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0); val = RREG32(reg_offset); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); return val; } diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 7f99e130acd0..f64b87b11b1b 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1181,12 +1181,12 @@ static uint32_t si_get_register_value(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0); val = RREG32(reg_offset); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); return val; } else { diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 4b79a8933476..4d1487a9836c 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -363,12 +363,12 @@ static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_n mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0); val = RREG32(reg_offset); 
if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); return val; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 514bfc705d5a..6ef4be9322d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -288,12 +288,12 @@ static uint32_t soc21_read_indexed_register(struct amdgpu_device *adev, u32 se_n mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0); val = RREG32(reg_offset); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); return val; } diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 531f173ade2d..8e70581960fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -762,12 +762,12 @@ static uint32_t vi_get_register_value(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0); val = RREG32(reg_offset); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); return val; } else { -- cgit v1.2.3 From f4caf5842652f08e024741ef6d423cb0c101d863 Mon Sep 17 00:00:00 2001 From: Hawking Zhang <Hawking.Zhang@amd.com> Date: Wed, 14 Sep 2022 16:35:50 +0800 Subject: drm/amdgpu: introduce vmhub definition for multi-partition cases (v3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v1: Each partition has its own gfxhub or mmhub. 
adjust the num of MAX_VMHUBS and the GFXHUB/MMHUB layout (Le) v2: re-design the AMDGPU_GFXHUB/AMDGPU_MMHUB layout (Le) v3: apply the gfxhub/mmhub layout to new IPs (Hawking) v4: fix up gmc11 (Alex) v5: rebase (Alex) Signed-off-by: Le Ma <le.ma@amd.com> Acked-by: Christian König <christian.koenig@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 13 +++-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 6 +-- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 8 +-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 8 +-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 4 +- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c | 12 ++--- drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 33 ++++++------- drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 26 +++++----- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 78 +++++++++++++++--------------- drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 4 +- drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/mes_v10_1.c | 2 +- drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 12 ++--- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 4 +- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 2 +- drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 4 +- drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 4 +- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 4 +- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 8 +-- drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 4 +- drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 2 +- 47 files changed, 204 insertions(+), 204 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index a46285841d17..f0a136d35279 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -736,7 +736,7 @@ int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev, for (i = 0; i < adev->num_vmhubs; i++) amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0); } else { - amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0); + amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0), 0); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 87e1a1a9f298..488b3bb6dcb1 
100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -315,7 +315,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, ring->use_doorbell = true; ring->doorbell_index = adev->doorbell_index.kiq; ring->xcc_id = xcc_id; - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); if (xcc_id >= 1) ring->doorbell_index = adev->doorbell_index.xcc1_kiq_start + xcc_id - 1; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 4e2531758866..0a4e5fcfec6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -670,7 +670,7 @@ void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type, for (i = 0; i < 16; i++) { reg = hub->vm_context0_cntl + hub->ctx_distance * i; - tmp = (hub_type == AMDGPU_GFXHUB_0) ? + tmp = (hub_type == AMDGPU_GFXHUB(0)) ? RREG32_SOC15_IP(GC, reg) : RREG32_SOC15_IP(MMHUB, reg); @@ -679,7 +679,7 @@ void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type, else tmp &= ~hub->vm_cntx_cntl_vm_fault; - (hub_type == AMDGPU_GFXHUB_0) ? + (hub_type == AMDGPU_GFXHUB(0)) ? WREG32_SOC15_IP(GC, reg, tmp) : WREG32_SOC15_IP(MMHUB, reg, tmp); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b6bd667df676..c3964c14f215 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2374,12 +2374,12 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) case AMDGPU_VM_OP_RESERVE_VMID: /* We only have requirement to reserve vmid from gfxhub */ r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, - AMDGPU_GFXHUB_0); + AMDGPU_GFXHUB(0)); if (r) return r; break; case AMDGPU_VM_OP_UNRESERVE_VMID: - amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); + amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB(0)); break; default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 6f085f0b4ef3..9f5d32b0fda1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -111,11 +111,14 @@ struct amdgpu_mem_stats; /* Reserve 4MB VRAM for page tables */ #define AMDGPU_VM_RESERVED_VRAM (8ULL << 20) -/* max number of VMHUB */ -#define AMDGPU_MAX_VMHUBS 3 -#define AMDGPU_GFXHUB_0 0 -#define AMDGPU_MMHUB_0 1 -#define AMDGPU_MMHUB_1 2 +/* + * max number of VMHUB + * layout: max 8 GFXHUB + 4 MMHUB0 + 1 MMHUB1 + */ +#define AMDGPU_MAX_VMHUBS 13 +#define AMDGPU_GFXHUB(x) (x) +#define AMDGPU_MMHUB0(x) (8 + x) +#define AMDGPU_MMHUB1(x) (8 + 4 + x) /* Reserve 2MB at top/bottom of address space for kernel use */ #define AMDGPU_VA_RESERVED_SIZE (2ULL << 20) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 8e86b2c23c0a..7b585141e10e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4461,7 +4461,7 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id, ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; else ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1; - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe; @@ -4490,7 +4490,7 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, ring->doorbell_index = 
(adev->doorbell_index.mec_ring0 + ring_id) << 1; ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring_id * GFX10_MEC_HPD_SIZE); - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP @@ -4978,7 +4978,7 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev) /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ mutex_lock(&adev->srbm_mutex); - for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) { + for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { nv_grbm_select(adev, 0, 0, 0, i); /* CP and shaders */ WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index f77779c31043..790df2cc3480 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -906,7 +906,7 @@ static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id, ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; else ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1; - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe; @@ -937,7 +937,7 @@ static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring_id * GFX11_MEC_HPD_SIZE); - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP @@ -1707,7 +1707,7 @@ static void gfx_v11_0_constants_init(struct amdgpu_device *adev) /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ mutex_lock(&adev->srbm_mutex); - for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) { + for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { soc21_grbm_select(adev, 0, 0, 0, i); /* CP and shaders */ WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG); @@ -4190,7 +4190,7 @@ static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev) false : true; adev->gfxhub.funcs->set_fault_enable_default(adev, value); - amdgpu_gmc_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0); + amdgpu_gmc_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 46577b59cb04..91814dc083c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2005,7 +2005,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring_id * GFX9_MEC_HPD_SIZE); - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP @@ -2105,7 +2105,7 @@ static int gfx_v9_0_sw_init(void *handle) /* disable scheduler on the real ring */ ring->no_scheduler = true; - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, 
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, AMDGPU_RING_PRIO_DEFAULT, NULL); @@ -2123,7 +2123,7 @@ static int gfx_v9_0_sw_init(void *handle) ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; ring->is_sw_ring = true; hw_prio = amdgpu_sw_ring_priority(i); - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, hw_prio, NULL); @@ -2393,7 +2393,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev) /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ mutex_lock(&adev->srbm_mutex); - for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) { + for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { soc15_grbm_select(adev, 0, 0, 0, i, 0); /* CP and shaders */ if (i == 0) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index d648a29c33e0..ec7c049c5952 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -1935,7 +1935,7 @@ static bool gfx_v9_4_2_query_uctl2_poison_status(struct amdgpu_device *adev) u32 status = 0; struct amdgpu_vmhub *hub; - hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; status = RREG32(hub->vm_l2_pro_fault_status); /* reset page fault status */ WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 9d17dcfae130..f5104b982633 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -757,7 +757,7 @@ static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id, (adev->doorbell_index.mec_ring0 + ring_id) << 1; ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring_id * GFX9_MEC_HPD_SIZE); - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "comp_%d.%d.%d.%d", ring->xcc_id, ring->me, ring->pipe, ring->queue); @@ -996,7 +996,7 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ mutex_lock(&adev->srbm_mutex); - for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) { + for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { for (j = 0; j < adev->gfx.num_xcd; j++) { soc15_grbm_select(adev, 0, 0, 0, i, j); /* CP and shaders */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index ab2325f6c7ac..d94cc1ec7242 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -40,7 +40,7 @@ static void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -247,7 +247,7 @@ static void gfxhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev) static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned num_level, block_size; uint32_t tmp; int i; @@ -307,7 +307,7 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v1_0_program_invalidation(struct 
amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned i; for (i = 0 ; i < 18; ++i) { @@ -338,7 +338,7 @@ static int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev) static void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; u32 tmp; u32 i; @@ -411,7 +411,7 @@ static void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, static void gfxhub_v1_0_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(GC, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index 79af32bb078c..9c385ce3a8c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -42,7 +42,7 @@ static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; int i; for (i = 0; i < adev->gfx.num_xcd; i++) { @@ -291,7 +291,7 @@ static void gfxhub_v1_2_disable_identity_aperture(struct amdgpu_device *adev) static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned num_level, block_size; uint32_t tmp; int i, j; @@ -357,7 +357,7 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v1_2_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned i, j; for (j = 0; j < adev->gfx.num_xcd; j++) { @@ -406,7 +406,7 @@ static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev) static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; u32 tmp; u32 i, j; @@ -483,7 +483,7 @@ static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev, static void gfxhub_v1_2_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(GC, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c index 9b3a02527318..f173a61c6c15 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c @@ -120,7 +120,7 @@ static u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev) static void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -282,7 +282,7 @@ static void gfxhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev) static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = 
&adev->vmhub[AMDGPU_GFXHUB(0)]; int i; uint32_t tmp; @@ -331,7 +331,7 @@ static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned i; for (i = 0 ; i < 18; ++i) { @@ -360,7 +360,7 @@ static int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev) static void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; u32 tmp; u32 i; @@ -433,7 +433,7 @@ static const struct amdgpu_vmhub_funcs gfxhub_v2_0_vmhub_funcs = { static void gfxhub_v2_0_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(GC, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c index 4aacbbec31e2..d8fc3e8088cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c @@ -123,7 +123,7 @@ static u64 gfxhub_v2_1_get_mc_fb_offset(struct amdgpu_device *adev) static void gfxhub_v2_1_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -291,7 +291,7 @@ static void gfxhub_v2_1_disable_identity_aperture(struct amdgpu_device *adev) static void gfxhub_v2_1_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; int i; uint32_t tmp; @@ -340,7 +340,7 @@ static void gfxhub_v2_1_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v2_1_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned i; for (i = 0 ; i < 18; ++i) { @@ -381,7 +381,7 @@ static int gfxhub_v2_1_gart_enable(struct amdgpu_device *adev) static void gfxhub_v2_1_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; u32 tmp; u32 i; @@ -462,7 +462,7 @@ static const struct amdgpu_vmhub_funcs gfxhub_v2_1_vmhub_funcs = { static void gfxhub_v2_1_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(GC, 0, @@ -651,7 +651,7 @@ static void gfxhub_v2_1_restore_regs(struct amdgpu_device *adev) static void gfxhub_v2_1_halt(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; int i; uint32_t tmp; int time = 1000; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c index 13712640fa46..c53147f9c9fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c @@ -119,7 +119,7 @@ static u64 gfxhub_v3_0_get_mc_fb_offset(struct amdgpu_device *adev) static void gfxhub_v3_0_setup_vm_pt_regs(struct 
amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -290,7 +290,7 @@ static void gfxhub_v3_0_disable_identity_aperture(struct amdgpu_device *adev) static void gfxhub_v3_0_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; int i; uint32_t tmp; @@ -339,7 +339,7 @@ static void gfxhub_v3_0_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v3_0_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned i; for (i = 0 ; i < 18; ++i) { @@ -380,7 +380,7 @@ static int gfxhub_v3_0_gart_enable(struct amdgpu_device *adev) static void gfxhub_v3_0_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; u32 tmp; u32 i; @@ -463,7 +463,7 @@ static const struct amdgpu_vmhub_funcs gfxhub_v3_0_vmhub_funcs = { static void gfxhub_v3_0_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(GC, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c index 6e0bd628c889..ae777487d72e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c @@ -122,7 +122,7 @@ static u64 gfxhub_v3_0_3_get_mc_fb_offset(struct amdgpu_device *adev) static void gfxhub_v3_0_3_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -295,7 +295,7 @@ static void gfxhub_v3_0_3_disable_identity_aperture(struct amdgpu_device *adev) static void gfxhub_v3_0_3_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; int i; uint32_t tmp; @@ -344,7 +344,7 @@ static void gfxhub_v3_0_3_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v3_0_3_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned i; for (i = 0 ; i < 18; ++i) { @@ -373,7 +373,7 @@ static int gfxhub_v3_0_3_gart_enable(struct amdgpu_device *adev) static void gfxhub_v3_0_3_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; u32 tmp; u32 i; @@ -451,7 +451,7 @@ static const struct amdgpu_vmhub_funcs gfxhub_v3_0_3_vmhub_funcs = { static void gfxhub_v3_0_3_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(GC, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 
5697b66bf0de..ea2a448147e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -76,7 +76,7 @@ gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev, switch (state) { case AMDGPU_IRQ_STATE_DISABLE: /* MM HUB */ - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false); /* GFX HUB */ /* This works because this interrupt is only * enabled at init/resume and disabled in @@ -84,11 +84,11 @@ gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev, * change over the course of suspend/resume. */ if (!adev->in_s0ix) - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false); break; case AMDGPU_IRQ_STATE_ENABLE: /* MM HUB */ - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true); /* GFX HUB */ /* This works because this interrupt is only * enabled at init/resume and disabled in @@ -96,7 +96,7 @@ gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev, * change over the course of suspend/resume. */ if (!adev->in_s0ix) - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true); break; default: break; @@ -149,7 +149,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, * be updated to avoid reading an incorrect value due to * the new fast GRBM interface. */ - if ((entry->vmid_src == AMDGPU_GFXHUB_0) && + if ((entry->vmid_src == AMDGPU_GFXHUB(0)) && (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0))) RREG32(hub->vm_l2_pro_fault_status); @@ -212,8 +212,7 @@ static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev) static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev, uint32_t vmhub) { - return ((vmhub == AMDGPU_MMHUB_0 || - vmhub == AMDGPU_MMHUB_1) && + return ((vmhub == AMDGPU_MMHUB0(0)) && (!amdgpu_sriov_vf(adev))); } @@ -249,7 +248,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, unsigned int i; unsigned char hub_ip = 0; - hub_ip = (vmhub == AMDGPU_GFXHUB_0) ? + hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP; spin_lock(&adev->gmc.invalidate_lock); @@ -284,7 +283,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, * Issue a dummy read to wait for the ACK register to be cleared * to avoid a false ACK due to the new fast GRBM interface. 
*/ - if ((vmhub == AMDGPU_GFXHUB_0) && + if ((vmhub == AMDGPU_GFXHUB(0)) && (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0))) RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, hub_ip); @@ -361,19 +360,19 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, mutex_lock(&adev->mman.gtt_window_lock); - if (vmhub == AMDGPU_MMHUB_0) { - gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0); + if (vmhub == AMDGPU_MMHUB0(0)) { + gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB0(0), 0); mutex_unlock(&adev->mman.gtt_window_lock); return; } - BUG_ON(vmhub != AMDGPU_GFXHUB_0); + BUG_ON(vmhub != AMDGPU_GFXHUB(0)); if (!adev->mman.buffer_funcs_enabled || !adev->ib_pool_ready || amdgpu_in_reset(adev) || ring->sched.ready == false) { - gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0); + gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB(0), 0); mutex_unlock(&adev->mman.gtt_window_lock); return; } @@ -466,7 +465,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, i, flush_type); } else { gmc_v10_0_flush_gpu_tlb(adev, vmid, - AMDGPU_GFXHUB_0, flush_type); + AMDGPU_GFXHUB(0), flush_type); } if (!adev->enable_mes) break; @@ -534,7 +533,7 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid if (ring->is_mes_queue) return; - if (ring->vm_hub == AMDGPU_GFXHUB_0) + if (ring->vm_hub == AMDGPU_GFXHUB(0)) reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid; else reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid; @@ -1075,9 +1074,9 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) if (!adev->in_s0ix) adev->gfxhub.funcs->set_fault_enable_default(adev, value); adev->mmhub.funcs->set_fault_enable_default(adev, value); - gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0); + gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0); if (!adev->in_s0ix) - gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0); + gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", (unsigned)(adev->gmc.gart_size >> 20), diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 2f570fb5febe..fb2ac31cbba7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -64,7 +64,7 @@ gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev, switch (state) { case AMDGPU_IRQ_STATE_DISABLE: /* MM HUB */ - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false); /* GFX HUB */ /* This works because this interrupt is only * enabled at init/resume and disabled in @@ -72,11 +72,11 @@ gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev, * change over the course of suspend/resume. */ if (!adev->in_s0ix) - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false); break; case AMDGPU_IRQ_STATE_ENABLE: /* MM HUB */ - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true); /* GFX HUB */ /* This works because this interrupt is only * enabled at init/resume and disabled in @@ -84,7 +84,7 @@ gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev, * change over the course of suspend/resume. 
*/ if (!adev->in_s0ix) - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true); break; default: break; @@ -110,7 +110,7 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev, * be updated to avoid reading an incorrect value due to * the new fast GRBM interface. */ - if (entry->vmid_src == AMDGPU_GFXHUB_0) + if (entry->vmid_src == AMDGPU_GFXHUB(0)) RREG32(hub->vm_l2_pro_fault_status); status = RREG32(hub->vm_l2_pro_fault_status); @@ -170,7 +170,7 @@ static void gmc_v11_0_set_irq_funcs(struct amdgpu_device *adev) static bool gmc_v11_0_use_invalidate_semaphore(struct amdgpu_device *adev, uint32_t vmhub) { - return ((vmhub == AMDGPU_MMHUB_0) && + return ((vmhub == AMDGPU_MMHUB0(0)) && (!amdgpu_sriov_vf(adev))); } @@ -202,7 +202,7 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, unsigned int i; unsigned char hub_ip = 0; - hub_ip = (vmhub == AMDGPU_GFXHUB_0) ? + hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP; spin_lock(&adev->gmc.invalidate_lock); @@ -251,7 +251,7 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, hub->eng_distance * eng, 0, hub_ip); /* Issue additional private vm invalidation to MMHUB */ - if ((vmhub != AMDGPU_GFXHUB_0) && + if ((vmhub != AMDGPU_GFXHUB(0)) && (hub->vm_l2_bank_select_reserved_cid2) && !amdgpu_sriov_vf(adev)) { inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2); @@ -284,7 +284,7 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, uint32_t vmhub, uint32_t flush_type) { - if ((vmhub == AMDGPU_GFXHUB_0) && !adev->gfx.is_poweron) + if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron) return; /* flush hdp cache */ @@ -369,7 +369,7 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, i, flush_type); } else { gmc_v11_0_flush_gpu_tlb(adev, vmid, - AMDGPU_GFXHUB_0, flush_type); + AMDGPU_GFXHUB(0), flush_type); } } } @@ -435,7 +435,7 @@ static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid if (ring->is_mes_queue) return; - if (ring->vm_hub == AMDGPU_GFXHUB_0) + if (ring->vm_hub == AMDGPU_GFXHUB(0)) reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid; else reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid; @@ -886,7 +886,7 @@ static int gmc_v11_0_sw_fini(void *handle) static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev) { if (amdgpu_sriov_vf(adev)) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32(hub->vm_contexts_disable, 0); return; @@ -921,7 +921,7 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev) false : true; adev->mmhub.funcs->set_fault_enable_default(adev, value); - gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0); + gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", (unsigned)(adev->gmc.gart_size >> 20), diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 6ae5cee9b64b..193ba4d912a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -491,20 +491,20 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, * fini/suspend, so the overall state doesn't * change over the course of suspend/resume. 
*/ - if (adev->in_s0ix && (j == AMDGPU_GFXHUB_0)) + if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0))) continue; - if (j == AMDGPU_GFXHUB_0) - tmp = RREG32_SOC15_IP(GC, reg); - else + if (j >= AMDGPU_MMHUB0(0)) tmp = RREG32_SOC15_IP(MMHUB, reg); + else + tmp = RREG32_SOC15_IP(GC, reg); tmp &= ~bits; - if (j == AMDGPU_GFXHUB_0) - WREG32_SOC15_IP(GC, reg, tmp); - else + if (j >= AMDGPU_MMHUB0(0)) WREG32_SOC15_IP(MMHUB, reg, tmp); + else + WREG32_SOC15_IP(GC, reg, tmp); } } break; @@ -519,20 +519,20 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, * fini/suspend, so the overall state doesn't * change over the course of suspend/resume. */ - if (adev->in_s0ix && (j == AMDGPU_GFXHUB_0)) + if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0))) continue; - if (j == AMDGPU_GFXHUB_0) - tmp = RREG32_SOC15_IP(GC, reg); - else + if (j >= AMDGPU_MMHUB0(0)) tmp = RREG32_SOC15_IP(MMHUB, reg); + else + tmp = RREG32_SOC15_IP(GC, reg); tmp |= bits; - if (j == AMDGPU_GFXHUB_0) - WREG32_SOC15_IP(GC, reg, tmp); - else + if (j >= AMDGPU_MMHUB0(0)) WREG32_SOC15_IP(MMHUB, reg, tmp); + else + WREG32_SOC15_IP(GC, reg, tmp); } } break; @@ -605,13 +605,13 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, if (entry->client_id == SOC15_IH_CLIENTID_VMC) { hub_name = "mmhub0"; - hub = &adev->vmhub[AMDGPU_MMHUB_0]; + hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) { hub_name = "mmhub1"; - hub = &adev->vmhub[AMDGPU_MMHUB_1]; + hub = &adev->vmhub[AMDGPU_MMHUB1(0)]; } else { hub_name = "gfxhub0"; - hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; } memset(&task_info, 0, sizeof(struct amdgpu_task_info)); @@ -636,7 +636,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, * be updated to avoid reading an incorrect value due to * the new fast GRBM interface. */ - if ((entry->vmid_src == AMDGPU_GFXHUB_0) && + if ((entry->vmid_src == AMDGPU_GFXHUB(0)) && (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) RREG32(hub->vm_l2_pro_fault_status); @@ -649,7 +649,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", status); - if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) { + if (hub == &adev->vmhub[AMDGPU_GFXHUB(0)]) { dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", cid >= ARRAY_SIZE(gfxhub_client_ids) ? 
"unknown" : gfxhub_client_ids[cid], @@ -759,8 +759,8 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) return false; - return ((vmhub == AMDGPU_MMHUB_0 || - vmhub == AMDGPU_MMHUB_1) && + return ((vmhub == AMDGPU_MMHUB0(0) || + vmhub == AMDGPU_MMHUB1(0)) && (!amdgpu_sriov_vf(adev)) && (!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) && (adev->apu_flags & AMD_APU_IS_PICASSO)))); @@ -849,11 +849,10 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, if (use_semaphore) { for (j = 0; j < adev->usec_timeout; j++) { /* a read return value of 1 means semaphore acquire */ - if (vmhub == AMDGPU_GFXHUB_0) - tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng); - else + if (vmhub >= AMDGPU_MMHUB0(0)) tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng); - + else + tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng); if (tmp & 0x1) break; udelay(1); @@ -864,27 +863,26 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, } do { - if (vmhub == AMDGPU_GFXHUB_0) - WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); - else + if (vmhub >= AMDGPU_MMHUB0(0)) WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); + else + WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); /* * Issue a dummy read to wait for the ACK register to * be cleared to avoid a false ACK due to the new fast * GRBM interface. */ - if ((vmhub == AMDGPU_GFXHUB_0) && + if ((vmhub == AMDGPU_GFXHUB(0)) && (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) RREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng); for (j = 0; j < adev->usec_timeout; j++) { - if (vmhub == AMDGPU_GFXHUB_0) - tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_ack + hub->eng_distance * eng); - else + if (vmhub >= AMDGPU_MMHUB0(0)) tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_ack + hub->eng_distance * eng); - + else + tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_ack + hub->eng_distance * eng); if (tmp & (1 << vmid)) break; udelay(1); @@ -900,10 +898,10 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, * add semaphore release after invalidation, * write with 0 means semaphore release */ - if (vmhub == AMDGPU_GFXHUB_0) - WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0); + if (vmhub >= AMDGPU_MMHUB0(0)) + WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); else - WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0); + WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); } spin_unlock(&adev->gmc.invalidate_lock); @@ -994,7 +992,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, i, flush_type); } else { gmc_v9_0_flush_gpu_tlb(adev, vmid, - AMDGPU_GFXHUB_0, flush_type); + AMDGPU_GFXHUB(0), flush_type); } break; } @@ -1060,10 +1058,10 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, uint32_t reg; /* Do nothing because there's no lut register for mmhub1. 
*/ - if (ring->vm_hub == AMDGPU_MMHUB_1) + if (ring->vm_hub == AMDGPU_MMHUB1(0)) return; - if (ring->vm_hub == AMDGPU_GFXHUB_0) + if (ring->vm_hub == AMDGPU_GFXHUB(0)) reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid; else reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid; @@ -1947,7 +1945,7 @@ static int gmc_v9_0_hw_init(void *handle) adev->mmhub.funcs->set_fault_enable_default(adev, value); } for (i = 0; i < adev->num_vmhubs; ++i) { - if (adev->in_s0ix && (i == AMDGPU_GFXHUB_0)) + if (adev->in_s0ix && (i == AMDGPU_GFXHUB(0))) continue; gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c index a3076eb8af6a..71fe7f6f9889 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c @@ -485,7 +485,7 @@ int jpeg_v1_0_sw_init(void *handle) return r; ring = &adev->jpeg.inst->ring_dec; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "jpeg_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index 0eddf7c824a7..3a43e42f4834 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -86,7 +86,7 @@ static int jpeg_v2_0_sw_init(void *handle) ring = &adev->jpeg.inst->ring_dec; ring->use_doorbell = true; ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "jpeg_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index b040f51d9aa9..259b7ba6a842 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -128,9 +128,9 @@ static int jpeg_v2_5_sw_init(void *handle) ring = &adev->jpeg.inst[i].ring_dec; ring->use_doorbell = true; if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0)) - ring->vm_hub = AMDGPU_MMHUB_1; + ring->vm_hub = AMDGPU_MMHUB1(0); else - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i; sprintf(ring->name, "jpeg_dec_%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index 1c2292cc5f2c..c55386c22311 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -101,7 +101,7 @@ static int jpeg_v3_0_sw_init(void *handle) ring = &adev->jpeg.inst->ring_dec; ring->use_doorbell = true; ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "jpeg_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index 77e1e64aa1d1..d7d5ffc29393 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -108,7 +108,7 @@ static int jpeg_v4_0_sw_init(void *handle) ring = &adev->jpeg.inst->ring_dec; ring->use_doorbell = true; ring->doorbell_index = amdgpu_sriov_vf(adev) ? 
(((adev->doorbell_index.vcn.vcn_ring0_1) << 1) + 4) : ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1); - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "jpeg_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c index 4560476c7c31..f1a6abdad21b 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c @@ -149,7 +149,7 @@ static int mes_v10_1_add_hw_queue(struct amdgpu_mes *mes, { struct amdgpu_device *adev = mes->adev; union MESAPI__ADD_QUEUE mes_add_queue_pkt; - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; uint32_t vm_cntx_cntl = hub->vm_cntx_cntl; memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt)); diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 3adb450eec07..9791f3581786 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -164,7 +164,7 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes, { struct amdgpu_device *adev = mes->adev; union MESAPI__ADD_QUEUE mes_add_queue_pkt; - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; uint32_t vm_cntx_cntl = hub->vm_cntx_cntl; memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt)); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 15e7cbeae75b..fb91b31056ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -54,7 +54,7 @@ static u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev) static void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -229,7 +229,7 @@ static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned num_level, block_size; uint32_t tmp; int i; @@ -285,7 +285,7 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -338,7 +338,7 @@ static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev) static void mmhub_v1_0_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -415,7 +415,7 @@ static void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool static void mmhub_v1_0_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c index 73afbf2facc9..9086f2fdfaf4 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c @@ -54,7 +54,7 @@ static u64 mmhub_v1_7_get_fb_location(struct amdgpu_device *adev) static void mmhub_v1_7_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base)); @@ -261,7 +261,7 @@ static void mmhub_v1_7_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v1_7_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned num_level, block_size; uint32_t tmp; int i; @@ -319,7 +319,7 @@ static void mmhub_v1_7_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v1_7_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -348,7 +348,7 @@ static int mmhub_v1_7_gart_enable(struct amdgpu_device *adev) static void mmhub_v1_7_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -425,7 +425,7 @@ static void mmhub_v1_7_set_fault_enable_default(struct amdgpu_device *adev, bool static void mmhub_v1_7_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 342d1702104c..9ec06f9db761 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -53,7 +53,7 @@ static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev) static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base)); @@ -253,7 +253,7 @@ static void mmhub_v1_8_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned num_level, block_size; uint32_t tmp; int i; @@ -311,7 +311,7 @@ static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v1_8_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -352,7 +352,7 @@ static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev) static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -426,7 +426,7 @@ static void mmhub_v1_8_set_fault_enable_default(struct amdgpu_device *adev, bool static 
void mmhub_v1_8_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index 278e32db878d..8f76c6ecf50a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -187,7 +187,7 @@ mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev, static void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -362,7 +362,7 @@ static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; int i; uint32_t tmp; @@ -412,7 +412,7 @@ static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -441,7 +441,7 @@ static int mmhub_v2_0_gart_enable(struct amdgpu_device *adev) static void mmhub_v2_0_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -520,7 +520,7 @@ static const struct amdgpu_vmhub_funcs mmhub_v2_0_vmhub_funcs = { static void mmhub_v2_0_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c index fcf2813e70db..8bd0fc8d9d25 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c @@ -121,7 +121,7 @@ static void mmhub_v2_3_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base)); @@ -280,7 +280,7 @@ static void mmhub_v2_3_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; int i; uint32_t tmp; @@ -330,7 +330,7 @@ static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v2_3_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -373,7 +373,7 @@ static int mmhub_v2_3_gart_enable(struct amdgpu_device *adev) static void mmhub_v2_3_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = 
&adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -446,7 +446,7 @@ static const struct amdgpu_vmhub_funcs mmhub_v2_3_vmhub_funcs = { static void mmhub_v2_3_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c index 17a792616979..441379e91cfa 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c @@ -136,7 +136,7 @@ mmhub_v3_0_print_l2_protection_fault_status(struct amdgpu_device *adev, static void mmhub_v3_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -319,7 +319,7 @@ static void mmhub_v3_0_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v3_0_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; int i; uint32_t tmp; @@ -369,7 +369,7 @@ static void mmhub_v3_0_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v3_0_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -398,7 +398,7 @@ static int mmhub_v3_0_gart_enable(struct amdgpu_device *adev) static void mmhub_v3_0_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -477,7 +477,7 @@ static const struct amdgpu_vmhub_funcs mmhub_v3_0_vmhub_funcs = { static void mmhub_v3_0_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c index 26509b6b8c24..12c7f4b46ea9 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c @@ -138,7 +138,7 @@ static void mmhub_v3_0_1_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -306,7 +306,7 @@ static void mmhub_v3_0_1_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v3_0_1_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; int i; uint32_t tmp; @@ -356,7 +356,7 @@ static void mmhub_v3_0_1_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v3_0_1_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -385,7 
+385,7 @@ static int mmhub_v3_0_1_gart_enable(struct amdgpu_device *adev) static void mmhub_v3_0_1_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -459,7 +459,7 @@ static const struct amdgpu_vmhub_funcs mmhub_v3_0_1_vmhub_funcs = { static void mmhub_v3_0_1_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c index 26abbc6a47ab..5dadc85abf7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c @@ -129,7 +129,7 @@ mmhub_v3_0_2_print_l2_protection_fault_status(struct amdgpu_device *adev, static void mmhub_v3_0_2_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -311,7 +311,7 @@ static void mmhub_v3_0_2_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v3_0_2_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; int i; uint32_t tmp; @@ -361,7 +361,7 @@ static void mmhub_v3_0_2_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v3_0_2_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -390,7 +390,7 @@ static int mmhub_v3_0_2_gart_enable(struct amdgpu_device *adev) static void mmhub_v3_0_2_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -469,7 +469,7 @@ static const struct amdgpu_vmhub_funcs mmhub_v3_0_2_vmhub_funcs = { static void mmhub_v3_0_2_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index 72083e96222f..e790f890aec6 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -57,7 +57,7 @@ static u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev) static void mmhub_v9_4_setup_hubid_vm_pt_regs(struct amdgpu_device *adev, int hubid, uint32_t vmid, uint64_t value) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, @@ -294,7 +294,7 @@ static void mmhub_v9_4_disable_identity_aperture(struct amdgpu_device *adev, static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned int num_level, block_size; uint32_t tmp; int i; @@ -363,7 +363,7 @@ static void 
mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid) static void mmhub_v9_4_program_invalidation(struct amdgpu_device *adev, int hubid) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -404,7 +404,7 @@ static int mmhub_v9_4_gart_enable(struct amdgpu_device *adev) static void mmhub_v9_4_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i, j; @@ -507,8 +507,8 @@ static void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool static void mmhub_v9_4_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub[MMHUB_NUM_INSTANCES] = - {&adev->vmhub[AMDGPU_MMHUB_0], &adev->vmhub[AMDGPU_MMHUB_1]}; + struct amdgpu_vmhub *hub[MMHUB_NUM_INSTANCES] = { + &adev->vmhub[AMDGPU_MMHUB0(0)], &adev->vmhub[AMDGPU_MMHUB1(0)]}; int i; for (i = 0; i < MMHUB_NUM_INSTANCES; i++) { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 9295ac7edd56..50b6eb9bcfda 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1825,12 +1825,12 @@ static int sdma_v4_0_sw_init(void *handle) /* * On Arcturus, SDMA instance 5~7 has a different vmhub - * type(AMDGPU_MMHUB_1). + * type(AMDGPU_MMHUB1). */ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5) - ring->vm_hub = AMDGPU_MMHUB_1; + ring->vm_hub = AMDGPU_MMHUB1(0); else - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, @@ -1851,9 +1851,9 @@ static int sdma_v4_0_sw_init(void *handle) ring->doorbell_index += 0x400; if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5) - ring->vm_hub = AMDGPU_MMHUB_1; + ring->vm_hub = AMDGPU_MMHUB1(0); else - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "page%d", i); r = amdgpu_ring_init(adev, ring, 1024, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 64dcaa2670dd..7efe7c43fffb 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -1309,7 +1309,7 @@ static int sdma_v4_4_2_sw_init(void *handle) /* doorbell size is 2 dwords, get DWORD offset */ ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, @@ -1328,7 +1328,7 @@ static int sdma_v4_4_2_sw_init(void *handle) */ ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1; ring->doorbell_index += 0x400; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "page%d", i); r = amdgpu_ring_init(adev, ring, 1024, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 92e1299be021..a0077cf41295 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -1389,7 +1389,7 @@ static int sdma_v5_0_sw_init(void *handle) (adev->doorbell_index.sdma_engine[0] << 1) //get DWORD offset : (adev->doorbell_index.sdma_engine[1] << 1); // get DWORD offset - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "sdma%d", i); r = 
amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 : diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index ca7e8757d78e..efa2c84ee78e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -1253,7 +1253,7 @@ static int sdma_v5_2_sw_init(void *handle) ring->doorbell_index = (adev->doorbell_index.sdma_engine[i] << 1); //get DWORD offset - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, AMDGPU_SDMA_IRQ_INSTANCE0 + i, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index 3d9a80511a45..79d09792d2ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -1298,7 +1298,7 @@ static int sdma_v6_0_sw_init(void *handle) ring->doorbell_index = (adev->doorbell_index.sdma_engine[i] << 1); // get DWORD offset - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index e32b656b3dab..abaa4463e906 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -444,7 +444,7 @@ static int uvd_v7_0_sw_init(void *handle) continue; if (!amdgpu_sriov_vf(adev)) { ring = &adev->uvd.inst[j].ring; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "uvd_%d", ring->me); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0, @@ -455,7 +455,7 @@ static int uvd_v7_0_sw_init(void *handle) for (i = 0; i < adev->uvd.num_enc_rings; ++i) { ring = &adev->uvd.inst[j].ring_enc[i]; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i); if (amdgpu_sriov_vf(adev)) { ring->use_doorbell = true; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 57b85bb6a1e4..e0b70cd3b697 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -466,7 +466,7 @@ static int vce_v4_0_sw_init(void *handle) enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i); ring = &adev->vce.ring[i]; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vce%d", i); if (amdgpu_sriov_vf(adev)) { /* DOORBELL only works under SRIOV */ diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 761c28fa6ec1..f877c39c7cdd 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -120,7 +120,7 @@ static int vcn_v1_0_sw_init(void *handle) return r; ring = &adev->vcn.inst->ring_dec; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); @@ -142,7 +142,7 @@ static int vcn_v1_0_sw_init(void *handle) enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i); ring = &adev->vcn.inst->ring_enc[i]; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_enc%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, hw_prio, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 7c2b3aa48083..c975aed2f6c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -129,7 +129,7 @@ static int vcn_v2_0_sw_init(void *handle) ring->use_doorbell = true; ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, @@ -160,7 +160,7 @@ static int vcn_v2_0_sw_init(void *handle) ring = &adev->vcn.inst->ring_enc[i]; ring->use_doorbell = true; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); if (!amdgpu_sriov_vf(adev)) ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i; else diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index ab0b45d0ead1..7044bd7c9f62 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -188,9 +188,9 @@ static int vcn_v2_5_sw_init(void *handle) (amdgpu_sriov_vf(adev) ? 2*j : 8*j); if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0)) - ring->vm_hub = AMDGPU_MMHUB_1; + ring->vm_hub = AMDGPU_MMHUB1(0); else - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_dec_%d", j); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, @@ -208,9 +208,9 @@ static int vcn_v2_5_sw_init(void *handle) (amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j)); if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0)) - ring->vm_hub = AMDGPU_MMHUB_1; + ring->vm_hub = AMDGPU_MMHUB1(0); else - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_enc_%d.%d", j, i); r = amdgpu_ring_init(adev, ring, 512, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 3eab186261aa..70fefbf26c48 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -189,7 +189,7 @@ static int vcn_v3_0_sw_init(void *handle) } else { ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i; } - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_dec_%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, AMDGPU_RING_PRIO_DEFAULT, @@ -213,7 +213,7 @@ static int vcn_v3_0_sw_init(void *handle) } else { ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i; } - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_enc_%d.%d", i, j); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, hw_prio, &adev->vcn.inst[i].sched_score); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index bf0674039598..81446e6996df 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -149,7 +149,7 @@ static int vcn_v4_0_sw_init(void *handle) ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + i * (adev->vcn.num_enc_rings + 1) + 1; else ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_unified_%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, -- cgit v1.2.3 From 2c22ed0bdb0cb6da9408593eafa6137325576017 Mon Sep 17 00:00:00 2001 From: Tao Zhou <tao.zhou1@amd.com> Date: Mon, 27 Feb 2023 18:25:23 
+0800 Subject: drm/amdgpu: add instance mask for RAS inject User can specify injected instances by the mask. For backward compatibility, the mask value is incorporated into sub block index without interface change of RAS TA. User uses logical mask and driver should convert it to physical value before sending it to RAS TA. v2: update parameter name. Signed-off-by: Tao Zhou <tao.zhou1@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Reviewed-by: Stanley.Yang <Stanley.Yang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 21 ++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 23 ++++++++++++++++------- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 9 ++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 5 +++-- 8 files changed, 56 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index ec79a5c2f500..59b8b26e2caf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1672,14 +1672,33 @@ int psp_ras_initialize(struct psp_context *psp) } int psp_ras_trigger_error(struct psp_context *psp, - struct ta_ras_trigger_error_input *info) + struct ta_ras_trigger_error_input *info, uint32_t instance_mask) { struct ta_ras_shared_memory *ras_cmd; + struct amdgpu_device *adev = psp->adev; int ret; + uint32_t dev_mask; if (!psp->ras_context.context.initialized) return -EINVAL; + switch (info->block_id) { + case TA_RAS_BLOCK__GFX: + dev_mask = GET_MASK(GC, instance_mask); + break; + case TA_RAS_BLOCK__SDMA: + dev_mask = GET_MASK(SDMA0, instance_mask); + break; + default: + dev_mask = instance_mask; + break; + } + + /* reuse sub_block_index for backward compatibility */ + dev_mask <<= AMDGPU_RAS_INST_SHIFT; + dev_mask &= AMDGPU_RAS_INST_MASK; + info->sub_block_index |= dev_mask; + ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 0a409da749d1..d84323923a3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -486,7 +486,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_ras_enable_features(struct psp_context *psp, union ta_ras_cmd_input *info, bool enable); int psp_ras_trigger_error(struct psp_context *psp, - struct ta_ras_trigger_error_input *info); + struct ta_ras_trigger_error_input *info, uint32_t instance_mask); int psp_ras_terminate(struct psp_context *psp); int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 64f80e8cbd63..7ae08f168f99 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -256,6 +256,8 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, int block_id; uint32_t sub_block; u64 address, value; + /* default value is 0 if the mask is not set by user */ + u32 instance_mask = 0; if (*pos) return -EINVAL; @@ -306,7 +308,11 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, data->op = op; if (op == 2) 
{ - if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx", + if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x", + &sub_block, &address, &value, &instance_mask) != 4 && + sscanf(str, "%*s %*s %*s %u %llu %llu %u", + &sub_block, &address, &value, &instance_mask) != 4 && + sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx", &sub_block, &address, &value) != 3 && sscanf(str, "%*s %*s %*s %u %llu %llu", &sub_block, &address, &value) != 3) @@ -314,6 +320,7 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, data->head.sub_block_index = sub_block; data->inject.address = address; data->inject.value = value; + data->inject.instance_mask = instance_mask; } } else { if (size < sizeof(*data)) @@ -341,7 +348,7 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, * sub_block_index: some IPs have subcomponets. say, GFX, sDMA. * name: the name of IP. * - * inject has two more members than head, they are address, value. + * inject has three more members than head, they are address, value and mask. * As their names indicate, inject operation will write the * value to the address. * @@ -365,7 +372,7 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, * * echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl * echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl - * echo "inject <block> <error> <sub-block> <address> <value> > /sys/kernel/debug/dri/<N>/ras/ras_ctrl + * echo "inject <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl * * Where N, is the card which you want to affect. * @@ -382,13 +389,14 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, * * The sub-block is a the sub-block index, pass 0 if there is no sub-block. * The address and value are hexadecimal numbers, leading 0x is optional. + * The mask means instance mask, is optional, default value is 0x1. * * For instance, * * .. code-block:: bash * * echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl - * echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl + * echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl * * How to check the result of the operation? 
@@ -1117,13 +1125,14 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, if (info->head.block == AMDGPU_RAS_BLOCK__GFX) { if (block_obj->hw_ops->ras_error_inject) - ret = block_obj->hw_ops->ras_error_inject(adev, info); + ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask); } else { /* If defined special ras_error_inject(e.g: xgmi), implement special ras_error_inject */ if (block_obj->hw_ops->ras_error_inject) - ret = block_obj->hw_ops->ras_error_inject(adev, &block_info); + ret = block_obj->hw_ops->ras_error_inject(adev, &block_info, + info->instance_mask); else /*If not defined .ras_error_inject, use default ras_error_inject*/ - ret = psp_ras_trigger_error(&adev->psp, &block_info); + ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask); } if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index e96333d0c269..bc43f7db17cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -32,6 +32,11 @@ struct amdgpu_iv_entry; #define AMDGPU_RAS_FLAG_INIT_BY_VBIOS (0x1 << 0) +/* position of instance value in sub_block_index of + * ta_ras_trigger_error_input, the sub block uses lower 12 bits + */ +#define AMDGPU_RAS_INST_MASK 0xfffff000 +#define AMDGPU_RAS_INST_SHIFT 0xc enum amdgpu_ras_block { AMDGPU_RAS_BLOCK__UMC = 0, @@ -508,6 +513,7 @@ struct ras_inject_if { struct ras_common_if head; uint64_t address; uint64_t value; + uint32_t instance_mask; }; struct ras_cure_if { @@ -545,7 +551,8 @@ struct amdgpu_ras_block_object { }; struct amdgpu_ras_block_hw_ops { - int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if); + int (*ras_error_inject)(struct amdgpu_device *adev, + void *inject_if, uint32_t instance_mask); void (*query_ras_error_count)(struct amdgpu_device *adev, void *ras_error_status); void (*query_ras_error_status)(struct amdgpu_device *adev); void (*query_ras_error_address)(struct amdgpu_device *adev, void *ras_error_status); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 439925477fb8..85ee1af963dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -1014,7 +1014,8 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, } /* Trigger XGMI/WAFL error */ -static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev, void *inject_if) +static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev, + void *inject_if, uint32_t instance_mask) { int ret = 0; struct ta_ras_trigger_error_input *block_info = @@ -1026,7 +1027,7 @@ static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev, void *injec if (amdgpu_dpm_allow_xgmi_power_down(adev, false)) dev_warn(adev->dev, "Failed to disallow XGMI power down"); - ret = psp_ras_trigger_error(&adev->psp, block_info); + ret = psp_ras_trigger_error(&adev->psp, block_info, instance_mask); if (amdgpu_ras_intr_triggered()) return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index cc005e3bcd40..de8e70b3db75 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -770,7 +770,7 @@ static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring); static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status); static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, - void *inject_if); + void *inject_if, uint32_t 
instance_mask); static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev); static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring, @@ -6335,7 +6335,7 @@ static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = { }; static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, - void *inject_if) + void *inject_if, uint32_t instance_mask) { struct ras_inject_if *info = (struct ras_inject_if *)inject_if; int ret; @@ -6374,7 +6374,7 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, block_info.value = info->value; mutex_lock(&adev->grbm_idx_mutex); - ret = psp_ras_trigger_error(&adev->psp, &block_info); + ret = psp_ras_trigger_error(&adev->psp, &block_info, instance_mask); mutex_unlock(&adev->grbm_idx_mutex); return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c index c67e387a97f5..59abe162bbaf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c @@ -971,7 +971,7 @@ static void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev) } static int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, - void *inject_if) + void *inject_if, uint32_t instance_mask) { struct ras_inject_if *info = (struct ras_inject_if *)inject_if; int ret; @@ -987,7 +987,7 @@ static int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, block_info.value = info->value; mutex_lock(&adev->grbm_idx_mutex); - ret = psp_ras_trigger_error(&adev->psp, &block_info); + ret = psp_ras_trigger_error(&adev->psp, &block_info, instance_mask); mutex_unlock(&adev->grbm_idx_mutex); return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index ec7c049c5952..4906affa6f8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -1699,7 +1699,8 @@ static void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev) gfx_v9_4_2_query_utc_edc_count(adev, NULL, NULL); } -static int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, void *inject_if) +static int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, + void *inject_if, uint32_t instance_mask) { struct ras_inject_if *info = (struct ras_inject_if *)inject_if; int ret; @@ -1715,7 +1716,7 @@ static int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, void *inject_ block_info.value = info->value; mutex_lock(&adev->grbm_idx_mutex); - ret = psp_ras_trigger_error(&adev->psp, &block_info); + ret = psp_ras_trigger_error(&adev->psp, &block_info, instance_mask); mutex_unlock(&adev->grbm_idx_mutex); return ret; -- cgit v1.2.3 From 6e3c51a5814aff70b72e8b2a9953bdac7aea2f44 Mon Sep 17 00:00:00 2001 From: Tao Zhou <tao.zhou1@amd.com> Date: Mon, 13 Mar 2023 16:34:19 +0800 Subject: drm/amdgpu: remove RAS GFX injection for gfx_v9_4/gfx_v9_4_2 No special requirement in RAS injection for the two versions, switch to use default injection interface. 
Signed-off-by: Tao Zhou <tao.zhou1@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Reviewed-by: Stanley.Yang <Stanley.Yang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c | 24 ------------------------ drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 24 ------------------------ 2 files changed, 48 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c index 59abe162bbaf..bc8416afb62c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c @@ -970,29 +970,6 @@ static void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255); } -static int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, - void *inject_if, uint32_t instance_mask) -{ - struct ras_inject_if *info = (struct ras_inject_if *)inject_if; - int ret; - struct ta_ras_trigger_error_input block_info = { 0 }; - - if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) - return -EINVAL; - - block_info.block_id = amdgpu_ras_block_to_ta(info->head.block); - block_info.sub_block_index = info->head.sub_block_index; - block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type); - block_info.address = info->address; - block_info.value = info->value; - - mutex_lock(&adev->grbm_idx_mutex); - ret = psp_ras_trigger_error(&adev->psp, &block_info, instance_mask); - mutex_unlock(&adev->grbm_idx_mutex); - - return ret; -} - static const struct soc15_reg_entry gfx_v9_4_ea_err_status_regs = { SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 }; @@ -1030,7 +1007,6 @@ static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev) const struct amdgpu_ras_block_hw_ops gfx_v9_4_ras_ops = { - .ras_error_inject = &gfx_v9_4_ras_error_inject, .query_ras_error_count = &gfx_v9_4_query_ras_error_count, .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count, .query_ras_error_status = &gfx_v9_4_query_ras_error_status, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index 4906affa6f8c..2cc3a7cb1f54 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -1699,29 +1699,6 @@ static void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev) gfx_v9_4_2_query_utc_edc_count(adev, NULL, NULL); } -static int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, - void *inject_if, uint32_t instance_mask) -{ - struct ras_inject_if *info = (struct ras_inject_if *)inject_if; - int ret; - struct ta_ras_trigger_error_input block_info = { 0 }; - - if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) - return -EINVAL; - - block_info.block_id = amdgpu_ras_block_to_ta(info->head.block); - block_info.sub_block_index = info->head.sub_block_index; - block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type); - block_info.address = info->address; - block_info.value = info->value; - - mutex_lock(&adev->grbm_idx_mutex); - ret = psp_ras_trigger_error(&adev->psp, &block_info, instance_mask); - mutex_unlock(&adev->grbm_idx_mutex); - - return ret; -} - static void gfx_v9_4_2_query_ea_err_status(struct amdgpu_device *adev) { uint32_t i, j; @@ -1945,7 +1922,6 @@ static bool gfx_v9_4_2_query_uctl2_poison_status(struct amdgpu_device *adev) } struct amdgpu_ras_block_hw_ops gfx_v9_4_2_ras_ops = { - .ras_error_inject = &gfx_v9_4_2_ras_error_inject, .query_ras_error_count = 
&gfx_v9_4_2_query_ras_error_count, .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count, .query_ras_error_status = &gfx_v9_4_2_query_ras_error_status, -- cgit v1.2.3 From 4504f14338cdc43586189558113faafa8acb9ffe Mon Sep 17 00:00:00 2001 From: Jonathan Kim <jonathan.kim@amd.com> Date: Thu, 31 Mar 2022 12:05:00 -0400 Subject: drm/amdgpu: setup hw debug registers on driver initialization Add missing debug trap register references and initialize all debug registers on boot by clearing the hardware exception overrides and the wave allocation ID index. The debugger requires that TTMPs 6 & 7 save the dispatch ID to map waves onto dispatch during compute context inspection. In order to correctly set this up, set the special reserved CP bit by default whenever the MQD is initialized. Signed-off-by: Jonathan Kim <jonathan.kim@amd.com> Reviewed-by: Felix Kuehling <felix.kuehling@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 26 ++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 1 + drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 30 ++++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 3 + drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 5 ++ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 5 ++ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 5 ++ .../drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h | 14 +++++ .../amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h | 69 ++++++++++++++++++++++ .../drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h | 10 ++++ .../amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h | 4 ++ .../amd/include/asic_reg/gc/gc_11_0_0_sh_mask.h | 4 ++ 12 files changed, 176 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index f7ad883a70fa..be984f8c71c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4825,6 +4825,29 @@ static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *ade #define DEFAULT_SH_MEM_BASES (0x6000) +static void gfx_v10_0_debug_trap_config_init(struct amdgpu_device *adev, + uint32_t first_vmid, + uint32_t last_vmid) +{ + uint32_t data; + uint32_t trap_config_vmid_mask = 0; + int i; + + /* Calculate trap config vmid mask */ + for (i = first_vmid; i < last_vmid; i++) + trap_config_vmid_mask |= (1 << i); + + data = REG_SET_FIELD(0, SPI_GDBG_TRAP_CONFIG, + VMID_SEL, trap_config_vmid_mask); + data = REG_SET_FIELD(data, SPI_GDBG_TRAP_CONFIG, + TRAP_EN, 1); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_CONFIG), data); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA0), 0); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA1), 0); +} + static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev) { int i; @@ -4856,6 +4879,9 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev) WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0); WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0); } + + gfx_v10_0_debug_trap_config_init(adev, adev->vm_manager.first_kfd_vmid, + AMDGPU_NUM_VMID); } static void gfx_v10_0_init_gds_vmid(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index da21bf868080..690e121d9dda 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -1638,6 +1638,7 @@ static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev) /* 
Enable trap for each kfd vmid. */ data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL); data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); + WREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL, data); } soc21_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 0189e50bd89f..7f17e0061027 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2303,6 +2303,29 @@ static void gfx_v9_0_setup_rb(struct amdgpu_device *adev) adev->gfx.config.num_rbs = hweight32(active_rbs); } +static void gfx_v9_0_debug_trap_config_init(struct amdgpu_device *adev, + uint32_t first_vmid, + uint32_t last_vmid) +{ + uint32_t data; + uint32_t trap_config_vmid_mask = 0; + int i; + + /* Calculate trap config vmid mask */ + for (i = first_vmid; i < last_vmid; i++) + trap_config_vmid_mask |= (1 << i); + + data = REG_SET_FIELD(0, SPI_GDBG_TRAP_CONFIG, + VMID_SEL, trap_config_vmid_mask); + data = REG_SET_FIELD(data, SPI_GDBG_TRAP_CONFIG, + TRAP_EN, 1); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_CONFIG), data); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA0), 0); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA1), 0); +} + #define DEFAULT_SH_MEM_BASES (0x6000) static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev) { @@ -4602,6 +4625,13 @@ static int gfx_v9_0_late_init(void *handle) if (r) return r; + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) + gfx_v9_4_2_debug_trap_config_init(adev, + adev->vm_manager.first_kfd_vmid, AMDGPU_NUM_VMID); + else + gfx_v9_0_debug_trap_config_init(adev, + adev->vm_manager.first_kfd_vmid, AMDGPU_NUM_VMID); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index 2cc3a7cb1f54..63f6843a069e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -771,6 +771,9 @@ void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev, soc15_grbm_select(adev, 0, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + + WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_TRAP_DATA0), 0); + WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_TRAP_DATA1), 0); } void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 772c09b5821b..eaaa4f4ddaaa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -117,6 +117,11 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT | 1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT; + /* Set cp_hqd_hq_scheduler0 bit 14 to 1 to have the CP set up the + * DISPATCH_PTR. 
This is required for the kfd debugger + */ + m->cp_hqd_hq_scheduler0 = 1 << 14; + if (q->format == KFD_QUEUE_FORMAT_AQL) { m->cp_hqd_aql_control = 1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index 632344b95d90..3a48bbc589fe 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -143,6 +143,11 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT | 1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT; + /* Set cp_hqd_hq_scheduler0 bit 14 to 1 to have the CP set up the + * DISPATCH_PTR. This is required for the kfd debugger + */ + m->cp_hqd_hq_status0 = 1 << 14; + /* * GFX11 RS64 CPFW version >= 509 supports PCIe atomics support * acknowledgment. diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 226132ec3714..b7c95158d4a0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -182,6 +182,11 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT | 1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT; + /* Set cp_hqd_hq_scheduler0 bit 14 to 1 to have the CP set up the + * DISPATCH_PTR. This is required for the kfd debugger + */ + m->cp_hqd_hq_status0 = 1 << 14; + if (q->format == KFD_QUEUE_FORMAT_AQL) m->cp_hqd_aql_control = 1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT; diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h index 79c41004c0b6..4908044f7409 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h @@ -5194,6 +5194,20 @@ #define mmSPI_WCL_PIPE_PERCENT_CS6_BASE_IDX 0 #define mmSPI_WCL_PIPE_PERCENT_CS7 0x1f70 #define mmSPI_WCL_PIPE_PERCENT_CS7_BASE_IDX 0 +#define mmSPI_GDBG_WAVE_CNTL 0x1f71 +#define mmSPI_GDBG_WAVE_CNTL_BASE_IDX 0 +#define mmSPI_GDBG_TRAP_CONFIG 0x1f72 +#define mmSPI_GDBG_TRAP_CONFIG_BASE_IDX 0 +#define mmSPI_GDBG_TRAP_MASK 0x1f73 +#define mmSPI_GDBG_TRAP_MASK_BASE_IDX 0 +#define mmSPI_GDBG_WAVE_CNTL2 0x1f74 +#define mmSPI_GDBG_WAVE_CNTL2_BASE_IDX 0 +#define mmSPI_GDBG_WAVE_CNTL3 0x1f75 +#define mmSPI_GDBG_WAVE_CNTL3_BASE_IDX 0 +#define mmSPI_GDBG_TRAP_DATA0 0x1f78 +#define mmSPI_GDBG_TRAP_DATA0_BASE_IDX 0 +#define mmSPI_GDBG_TRAP_DATA1 0x1f79 +#define mmSPI_GDBG_TRAP_DATA1_BASE_IDX 0 #define mmSPI_COMPUTE_QUEUE_RESET 0x1f7b #define mmSPI_COMPUTE_QUEUE_RESET_BASE_IDX 0 #define mmSPI_RESOURCE_RESERVE_CU_0 0x1f7c diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h index 52043e143067..9b7d219e7954 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h @@ -19700,6 +19700,75 @@ //SPI_WCL_PIPE_PERCENT_CS7 #define SPI_WCL_PIPE_PERCENT_CS7__VALUE__SHIFT 0x0 #define SPI_WCL_PIPE_PERCENT_CS7__VALUE_MASK 0x7FL +//SPI_GDBG_WAVE_CNTL +#define SPI_GDBG_WAVE_CNTL__STALL_RA__SHIFT 0x0 +#define SPI_GDBG_WAVE_CNTL__STALL_VMID__SHIFT 0x1 +#define SPI_GDBG_WAVE_CNTL__STALL_RA_MASK 0x00000001L +#define SPI_GDBG_WAVE_CNTL__STALL_VMID_MASK 0x0001FFFEL +//SPI_GDBG_TRAP_CONFIG +#define SPI_GDBG_TRAP_CONFIG__ME_SEL__SHIFT 0x0 +#define SPI_GDBG_TRAP_CONFIG__PIPE_SEL__SHIFT 0x2 +#define 
SPI_GDBG_TRAP_CONFIG__QUEUE_SEL__SHIFT 0x4 +#define SPI_GDBG_TRAP_CONFIG__ME_MATCH__SHIFT 0x7 +#define SPI_GDBG_TRAP_CONFIG__PIPE_MATCH__SHIFT 0x8 +#define SPI_GDBG_TRAP_CONFIG__QUEUE_MATCH__SHIFT 0x9 +#define SPI_GDBG_TRAP_CONFIG__TRAP_EN__SHIFT 0xf +#define SPI_GDBG_TRAP_CONFIG__VMID_SEL__SHIFT 0x10 +#define SPI_GDBG_TRAP_CONFIG__ME_SEL_MASK 0x00000003L +#define SPI_GDBG_TRAP_CONFIG__PIPE_SEL_MASK 0x0000000CL +#define SPI_GDBG_TRAP_CONFIG__QUEUE_SEL_MASK 0x00000070L +#define SPI_GDBG_TRAP_CONFIG__ME_MATCH_MASK 0x00000080L +#define SPI_GDBG_TRAP_CONFIG__PIPE_MATCH_MASK 0x00000100L +#define SPI_GDBG_TRAP_CONFIG__QUEUE_MATCH_MASK 0x00000200L +#define SPI_GDBG_TRAP_CONFIG__TRAP_EN_MASK 0x00008000L +#define SPI_GDBG_TRAP_CONFIG__VMID_SEL_MASK 0xFFFF0000L +//SPI_GDBG_TRAP_MASK +#define SPI_GDBG_TRAP_MASK__EXCP_EN__SHIFT 0x0 +#define SPI_GDBG_TRAP_MASK__REPLACE__SHIFT 0x9 +#define SPI_GDBG_TRAP_MASK__EXCP_EN_MASK 0x01FFL +#define SPI_GDBG_TRAP_MASK__REPLACE_MASK 0x0200L +//SPI_GDBG_WAVE_CNTL2 +#define SPI_GDBG_WAVE_CNTL2__VMID_MASK__SHIFT 0x0 +#define SPI_GDBG_WAVE_CNTL2__MODE__SHIFT 0x10 +#define SPI_GDBG_WAVE_CNTL2__VMID_MASK_MASK 0x0000FFFFL +#define SPI_GDBG_WAVE_CNTL2__MODE_MASK 0x00030000L +//SPI_GDBG_WAVE_CNTL3 +#define SPI_GDBG_WAVE_CNTL3__STALL_PS__SHIFT 0x0 +#define SPI_GDBG_WAVE_CNTL3__STALL_VS__SHIFT 0x1 +#define SPI_GDBG_WAVE_CNTL3__STALL_GS__SHIFT 0x2 +#define SPI_GDBG_WAVE_CNTL3__STALL_HS__SHIFT 0x3 +#define SPI_GDBG_WAVE_CNTL3__STALL_CSG__SHIFT 0x4 +#define SPI_GDBG_WAVE_CNTL3__STALL_CS0__SHIFT 0x5 +#define SPI_GDBG_WAVE_CNTL3__STALL_CS1__SHIFT 0x6 +#define SPI_GDBG_WAVE_CNTL3__STALL_CS2__SHIFT 0x7 +#define SPI_GDBG_WAVE_CNTL3__STALL_CS3__SHIFT 0x8 +#define SPI_GDBG_WAVE_CNTL3__STALL_CS4__SHIFT 0x9 +#define SPI_GDBG_WAVE_CNTL3__STALL_CS5__SHIFT 0xa +#define SPI_GDBG_WAVE_CNTL3__STALL_CS6__SHIFT 0xb +#define SPI_GDBG_WAVE_CNTL3__STALL_CS7__SHIFT 0xc +#define SPI_GDBG_WAVE_CNTL3__STALL_DURATION__SHIFT 0xd +#define SPI_GDBG_WAVE_CNTL3__STALL_MULT__SHIFT 0x1c +#define SPI_GDBG_WAVE_CNTL3__STALL_PS_MASK 0x00000001L +#define SPI_GDBG_WAVE_CNTL3__STALL_VS_MASK 0x00000002L +#define SPI_GDBG_WAVE_CNTL3__STALL_GS_MASK 0x00000004L +#define SPI_GDBG_WAVE_CNTL3__STALL_HS_MASK 0x00000008L +#define SPI_GDBG_WAVE_CNTL3__STALL_CSG_MASK 0x00000010L +#define SPI_GDBG_WAVE_CNTL3__STALL_CS0_MASK 0x00000020L +#define SPI_GDBG_WAVE_CNTL3__STALL_CS1_MASK 0x00000040L +#define SPI_GDBG_WAVE_CNTL3__STALL_CS2_MASK 0x00000080L +#define SPI_GDBG_WAVE_CNTL3__STALL_CS3_MASK 0x00000100L +#define SPI_GDBG_WAVE_CNTL3__STALL_CS4_MASK 0x00000200L +#define SPI_GDBG_WAVE_CNTL3__STALL_CS5_MASK 0x00000400L +#define SPI_GDBG_WAVE_CNTL3__STALL_CS6_MASK 0x00000800L +#define SPI_GDBG_WAVE_CNTL3__STALL_CS7_MASK 0x00001000L +#define SPI_GDBG_WAVE_CNTL3__STALL_DURATION_MASK 0x0FFFE000L +#define SPI_GDBG_WAVE_CNTL3__STALL_MULT_MASK 0x10000000L +//SPI_GDBG_TRAP_DATA0 +#define SPI_GDBG_TRAP_DATA0__DATA__SHIFT 0x0 +#define SPI_GDBG_TRAP_DATA0__DATA_MASK 0xFFFFFFFFL +//SPI_GDBG_TRAP_DATA1 +#define SPI_GDBG_TRAP_DATA1__DATA__SHIFT 0x0 +#define SPI_GDBG_TRAP_DATA1__DATA_MASK 0xFFFFFFFFL //SPI_COMPUTE_QUEUE_RESET #define SPI_COMPUTE_QUEUE_RESET__RESET__SHIFT 0x0 #define SPI_COMPUTE_QUEUE_RESET__RESET_MASK 0x01L diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h index a734abaa91a5..5e15ac14b63c 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h @@ -26,6 +26,8 @@ #define 
mmSQ_DEBUG_STS_GLOBAL_BASE_IDX 0 #define mmSQ_DEBUG_STS_GLOBAL2 0x10B0 #define mmSQ_DEBUG_STS_GLOBAL2_BASE_IDX 0 +#define mmSQ_DEBUG 0x10B1 +#define mmSQ_DEBUG_BASE_IDX 0 // addressBlock: gc_sdma0_sdma0dec // base address: 0x4980 @@ -4853,10 +4855,18 @@ #define mmSPI_WCL_PIPE_PERCENT_CS3_BASE_IDX 0 #define mmSPI_GDBG_WAVE_CNTL 0x1f71 #define mmSPI_GDBG_WAVE_CNTL_BASE_IDX 0 +#define mmSPI_GDBG_TRAP_CONFIG 0x1f72 +#define mmSPI_GDBG_TRAP_CONFIG_BASE_IDX 0 #define mmSPI_GDBG_TRAP_MASK 0x1f73 #define mmSPI_GDBG_TRAP_MASK_BASE_IDX 0 #define mmSPI_GDBG_WAVE_CNTL2 0x1f74 #define mmSPI_GDBG_WAVE_CNTL2_BASE_IDX 0 +#define mmSPI_GDBG_WAVE_CNTL3 0x1f75 +#define mmSPI_GDBG_WAVE_CNTL3_BASE_IDX 0 +#define mmSPI_GDBG_TRAP_DATA0 0x1f78 +#define mmSPI_GDBG_TRAP_DATA0_BASE_IDX 0 +#define mmSPI_GDBG_TRAP_DATA1 0x1f79 +#define mmSPI_GDBG_TRAP_DATA1_BASE_IDX 0 #define mmSPI_COMPUTE_QUEUE_RESET 0x1f7b #define mmSPI_COMPUTE_QUEUE_RESET_BASE_IDX 0 #define mmSPI_RESOURCE_RESERVE_CU_0 0x1f7c diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h index d7a17bae2584..e4ecd6c2d20e 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h @@ -47907,6 +47907,10 @@ // addressBlock: sqind +//SQ_DEBUG +#define SQ_DEBUG__SINGLE_MEMOP_MASK 0x00000001L +#define SQ_DEBUG__SINGLE_MEMOP__SHIFT 0x00000000 + //SQ_DEBUG_STS_GLOBAL #define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_GFX0_MASK 0x000000ffL #define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_GFX0__SHIFT 0x00000000 diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_sh_mask.h index 4f08f90856fc..3088a4a13cb5 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_sh_mask.h @@ -17216,11 +17216,15 @@ #define SPI_GDBG_PER_VMID_CNTL__TRAP_EN__SHIFT 0x3 #define SPI_GDBG_PER_VMID_CNTL__EXCP_EN__SHIFT 0x4 #define SPI_GDBG_PER_VMID_CNTL__EXCP_REPLACE__SHIFT 0xd +#define SPI_GDBG_PER_VMID_CNTL__TRAP_ON_START__SHIFT 0xe +#define SPI_GDBG_PER_VMID_CNTL__TRAP_ON_END__SHIFT 0xf #define SPI_GDBG_PER_VMID_CNTL__STALL_VMID_MASK 0x00000001L #define SPI_GDBG_PER_VMID_CNTL__LAUNCH_MODE_MASK 0x00000006L #define SPI_GDBG_PER_VMID_CNTL__TRAP_EN_MASK 0x00000008L #define SPI_GDBG_PER_VMID_CNTL__EXCP_EN_MASK 0x00001FF0L #define SPI_GDBG_PER_VMID_CNTL__EXCP_REPLACE_MASK 0x00002000L +#define SPI_GDBG_PER_VMID_CNTL__TRAP_ON_START_MASK 0x00004000L +#define SPI_GDBG_PER_VMID_CNTL__TRAP_ON_END_MASK 0x00008000L //SPI_COMPUTE_QUEUE_RESET #define SPI_COMPUTE_QUEUE_RESET__RESET__SHIFT 0x0 #define SPI_COMPUTE_QUEUE_RESET__RESET_MASK 0x01L -- cgit v1.2.3
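
The RAS instance-mask patches in this series fold the user-supplied mask into the upper bits of sub_block_index so the RAS TA interface stays unchanged: psp_ras_trigger_error() converts the logical mask to a physical one for GFX/SDMA via GET_MASK, shifts it by AMDGPU_RAS_INST_SHIFT, clamps it with AMDGPU_RAS_INST_MASK, and ORs it into the existing field. The short C sketch below illustrates only that packing step, outside the kernel; the constant values mirror the defines added to amdgpu_ras.h, but pack_sub_block_index() and the example values are invented here for illustration and are not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the defines added to amdgpu_ras.h: the sub block keeps the low
 * 12 bits of sub_block_index, the injected-instance mask sits above them.
 */
#define RAS_INST_SHIFT 12
#define RAS_INST_MASK  0xfffff000u

/* Illustrative helper (not a kernel function): pack an instance mask into
 * sub_block_index the same way psp_ras_trigger_error() reuses the field,
 * skipping the logical-to-physical mask conversion done for GFX/SDMA.
 */
static uint32_t pack_sub_block_index(uint32_t sub_block, uint32_t instance_mask)
{
	return sub_block | ((instance_mask << RAS_INST_SHIFT) & RAS_INST_MASK);
}

int main(void)
{
	/* "echo inject umc ce 0 0 0 3" from the documented ras_ctrl interface:
	 * sub-block 0 with instances 0 and 1 selected (mask 0x3).
	 */
	printf("sub_block_index = 0x%08x\n", pack_sub_block_index(0x0, 0x3));
	return 0;
}

Packing the mask this way keeps struct ta_ras_trigger_error_input unchanged, which is why the commit message describes it as reusing sub_block_index for backward compatibility.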