From 277bd3371f11400d5b02df54f057569be4b10cea Mon Sep 17 00:00:00 2001 From: Le Ma <le.ma@amd.com> Date: Tue, 24 May 2022 10:51:43 +0800 Subject: drm/amdgpu: convert gfx.kiq to array type (v3) v1: more kiq instances are available in SOC (Le) v2: squash commits to avoid breaking the build (Le) v3: make the conversion for gfx/mec v11_0 (Hawking) Signed-off-by: Le Ma <le.ma@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index bfabea76d166..c742b4a36979 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -296,7 +296,7 @@ struct amdgpu_gfx { struct amdgpu_ce ce; struct amdgpu_me me; struct amdgpu_mec mec; - struct amdgpu_kiq kiq; + struct amdgpu_kiq kiq[AMDGPU_MAX_GC_INSTANCES]; struct amdgpu_imu imu; bool rs64_enable; /* firmware format */ const struct firmware *me_fw; /* ME firmware */ -- cgit v1.2.3 From be697aa3a78ef83a6b8d49e1f0671a002e502cd0 Mon Sep 17 00:00:00 2001 From: Le Ma <le.ma@amd.com> Date: Wed, 27 Jul 2022 14:35:49 +0800 Subject: drm/amdgpu: move queue_bitmap to an independent structure (v3) Allocate an independent queue_bitmap for each XCD, so the old bitmap policy can continue to be used with clear logic. Use mec_bitmap[0] as default for all non-GC 9.4.3 IPs. v2: squash commits to avoid breaking the build v3: unify naming style Signed-off-by: Le Ma <le.ma@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 41 ++++++++++++++--------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 7 ++-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 6 ++-- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 6 ++-- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 5 +-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 7 ++-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 5 +-- 9 files changed, 48 insertions(+), 33 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 0385f7f69278..fed8bb9a721f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -162,7 +162,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) * clear */ bitmap_complement(gpu_resources.cp_queue_bitmap, - adev->gfx.mec.queue_bitmap, + adev->gfx.mec_bitmap[0].queue_bitmap, KGD_MAX_QUEUES); /* According to linux/bitmap.h we shouldn't use bitmap_clear if diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index bc944ae4fd5b..03875b971ba6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -778,7 +778,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, * Iterate through the shader engines and arrays of the device * to get number of waves in flight */ - bitmap_complement(cp_queue_bitmap,
adev->gfx.mec_bitmap[0].queue_bitmap, KGD_MAX_QUEUES); max_queue_cnt = adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_queue_per_pipe; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 9b6071df1fa7..b300b1784210 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -63,10 +63,10 @@ void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit, } bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, - int mec, int pipe, int queue) + int xcc_id, int mec, int pipe, int queue) { return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue), - adev->gfx.mec.queue_bitmap); + adev->gfx.mec_bitmap[xcc_id].queue_bitmap); } int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, @@ -204,29 +204,38 @@ bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) { - int i, queue, pipe; + int i, j, queue, pipe; bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev); int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_queue_per_pipe, adev->gfx.num_compute_rings); + int num_xcd = (adev->gfx.num_xcd > 1) ? adev->gfx.num_xcd : 1; if (multipipe_policy) { - /* policy: make queues evenly cross all pipes on MEC1 only */ - for (i = 0; i < max_queues_per_mec; i++) { - pipe = i % adev->gfx.mec.num_pipe_per_mec; - queue = (i / adev->gfx.mec.num_pipe_per_mec) % - adev->gfx.mec.num_queue_per_pipe; - - set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue, - adev->gfx.mec.queue_bitmap); + /* policy: make queues evenly cross all pipes on MEC1 only + * for multiple xcc, just use the original policy for simplicity */ + for (j = 0; j < num_xcd; j++) { + for (i = 0; i < max_queues_per_mec; i++) { + pipe = i % adev->gfx.mec.num_pipe_per_mec; + queue = (i / adev->gfx.mec.num_pipe_per_mec) % + adev->gfx.mec.num_queue_per_pipe; + + set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue, + adev->gfx.mec_bitmap[j].queue_bitmap); + } } } else { /* policy: amdgpu owns all queues in the given pipe */ - for (i = 0; i < max_queues_per_mec; ++i) - set_bit(i, adev->gfx.mec.queue_bitmap); + for (j = 0; j < num_xcd; j++) { + for (i = 0; i < max_queues_per_mec; ++i) + set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap); + } } - dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)); + for (j = 0; j < num_xcd; j++) { + dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", + bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)); + } } void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev) @@ -268,7 +277,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev, * adev->gfx.mec.num_queue_per_pipe; while (--queue_bit >= 0) { - if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap)) + if (test_bit(queue_bit, adev->gfx.mec_bitmap[0].queue_bitmap)) continue; amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue); @@ -516,7 +525,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) return -EINVAL; for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) { - if (!test_bit(i, adev->gfx.mec.queue_bitmap)) + if (!test_bit(i, adev->gfx.mec_bitmap[0].queue_bitmap)) continue; /* This situation may be hit in the future if a new HW diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index c742b4a36979..830323310694 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -76,7 +76,9 @@ struct amdgpu_mec { u32 num_pipe_per_mec; u32 num_queue_per_pipe; void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1]; +}; +struct amdgpu_mec_bitmap { /* These are the resources for which amdgpu takes ownership */ DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); }; @@ -296,6 +298,7 @@ struct amdgpu_gfx { struct amdgpu_ce ce; struct amdgpu_me me; struct amdgpu_mec mec; + struct amdgpu_mec_bitmap mec_bitmap[AMDGPU_MAX_GC_INSTANCES]; struct amdgpu_kiq kiq[AMDGPU_MAX_GC_INSTANCES]; struct amdgpu_imu imu; bool rs64_enable; /* firmware format */ @@ -425,8 +428,8 @@ int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec, int pipe, int queue); void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit, int *mec, int *pipe, int *queue); -bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec, - int pipe, int queue); +bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int inst, + int mec, int pipe, int queue); bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring); bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index d4e7de8fd9da..88f8424ea1e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4219,7 +4219,7 @@ static int gfx_v10_0_mec_init(struct amdgpu_device *adev) const struct gfx_firmware_header_v1_0 *mec_hdr = NULL; - bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); + bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); /* take ownership of the relevant compute queues */ amdgpu_gfx_compute_queue_acquire(adev); @@ -4614,8 +4614,8 @@ static int gfx_v10_0_sw_init(void *handle) for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { - if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, - j)) + if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, + k, j)) continue; r = gfx_v10_0_compute_ring_init(adev, ring_id, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 6a5435255e6d..3e42a44f10a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -699,7 +699,7 @@ static int gfx_v11_0_mec_init(struct amdgpu_device *adev) u32 *hpd; size_t mec_hpd_size; - bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); + bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); /* take ownership of the relevant compute queues */ amdgpu_gfx_compute_queue_acquire(adev); @@ -1374,8 +1374,8 @@ static int gfx_v11_0_sw_init(void *handle) for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { - if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, - j)) + if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, + k, j)) continue; r = gfx_v11_0_compute_ring_init(adev, ring_id, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 9d5c1e29b4a3..46740ad9a80f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2728,7 +2728,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev) u32 *hpd; size_t 
mec_hpd_size; - bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); + bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); /* take ownership of the relevant compute queues */ amdgpu_gfx_compute_queue_acquire(adev); @@ -4456,7 +4456,8 @@ static int gfx_v7_0_sw_init(void *handle) for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { - if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j)) + if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, + k, j)) continue; r = gfx_v7_0_compute_ring_init(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index ed04bad8543d..18722450e265 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1304,7 +1304,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev) u32 *hpd; size_t mec_hpd_size; - bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); + bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); /* take ownership of the relevant compute queues */ amdgpu_gfx_compute_queue_acquire(adev); @@ -2001,7 +2001,8 @@ static int gfx_v8_0_sw_init(void *handle) for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { - if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j)) + if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, + k, j)) continue; r = gfx_v8_0_compute_ring_init(adev, @@ -4319,7 +4320,7 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev) int r, i; for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) { - if (!test_bit(i, adev->gfx.mec.queue_bitmap)) + if (!test_bit(i, adev->gfx.mec_bitmap[0].queue_bitmap)) continue; /* This situation may be hit in the future if a new HW diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index adf86bc7ed36..49adc36dcc6f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1713,7 +1713,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev) const struct gfx_firmware_header_v1_0 *mec_hdr; - bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); + bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); /* take ownership of the relevant compute queues */ amdgpu_gfx_compute_queue_acquire(adev); @@ -2154,7 +2154,8 @@ static int gfx_v9_0_sw_init(void *handle) for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { - if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j)) + if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, + k, j)) continue; r = gfx_v9_0_compute_ring_init(adev, -- cgit v1.2.3 From c38be07035bcb31274ce5f85e3b249f691c5b8db Mon Sep 17 00:00:00 2001 From: Le Ma <le.ma@amd.com> Date: Tue, 16 Nov 2021 21:56:34 +0800 Subject: drm/amdgpu: separate the mqd_backup for kiq from kcq This will benefit the mqd indexing for kiq/kcq in the multi-XCD case.
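(Illustrative sketch, not part of the patch: with this change the KIQ backup moves off the mec.mqd_backup[] tail slot and onto struct amdgpu_kiq itself, while the follow-up patch below sizes and indexes the KCQ backups per XCC, roughly:

	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];	/* per-instance KIQ */
	void *kiq_mqd = kiq->mqd_backup;	/* KIQ backup, one per KIQ instance */
	void *kcq_mqd = adev->gfx.mec.mqd_backup[i + xcc_id * adev->gfx.num_compute_rings];	/* KCQ i on XCC xcc_id */

where 'xcc_id' and 'i' are illustrative loop variables.)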
Signed-off-by: Le Ma <le.ma@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 830323310694..d811cb038e94 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -75,7 +75,7 @@ struct amdgpu_mec { u32 num_mec; u32 num_pipe_per_mec; u32 num_queue_per_pipe; - void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1]; + void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES]; }; struct amdgpu_mec_bitmap { @@ -122,6 +122,7 @@ struct amdgpu_kiq { struct amdgpu_ring ring; struct amdgpu_irq_src irq; const struct kiq_pm4_funcs *pmf; + void *mqd_backup; }; /* -- cgit v1.2.3 From def799c6596d078112095c24c25e162cb5102d90 Mon Sep 17 00:00:00 2001 From: Le Ma <le.ma@amd.com> Date: Tue, 24 May 2022 12:23:03 +0800 Subject: drm/amdgpu: add multi-xcc support to amdgpu_gfx interfaces (v4) v1: Modify kiq_init/fini, mqd_sw_init/fini and enable/disable_kcq to adapt to multi-die case. Pass 0 as default to all asics with single xcc (Le) v2: squash commits to avoid breaking the build (Le) v3: unify naming style (Le) v4: apply the changes to gc v11_0 (Hawking) Signed-off-by: Le Ma <le.ma@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 75 ++++++++++++++++++--------------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 16 +++---- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 23 +++++----- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 23 +++++----- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 19 ++++----- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 25 ++++++----- 6 files changed, 93 insertions(+), 88 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index b300b1784210..7f5c60381103 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -267,7 +267,7 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev) } static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev, - struct amdgpu_ring *ring) + struct amdgpu_ring *ring, int xcc_id) { int queue_bit; int mec, pipe, queue; @@ -277,7 +277,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev, * adev->gfx.mec.num_queue_per_pipe; while (--queue_bit >= 0) { - if (test_bit(queue_bit, adev->gfx.mec_bitmap[0].queue_bitmap)) + if (test_bit(queue_bit, adev->gfx.mec_bitmap[xcc_id].queue_bitmap)) continue; amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue); @@ -303,9 +303,9 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev, int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, struct amdgpu_ring *ring, - struct amdgpu_irq_src *irq) + struct amdgpu_irq_src *irq, int xcc_id) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; int r = 0; spin_lock_init(&kiq->ring_lock); @@ -314,15 +314,16 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, ring->ring_obj = NULL; ring->use_doorbell = true; ring->doorbell_index = adev->doorbell_index.kiq; + ring->xcc_id = xcc_id; ring->vm_hub = AMDGPU_GFXHUB_0; - r = 
amdgpu_gfx_kiq_acquire(adev, ring); + r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id); if (r) return r; ring->eop_gpu_addr = kiq->eop_gpu_addr; ring->no_scheduler = true; - sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue); + sprintf(ring->name, "kiq_%d.%d.%d.%d", xcc_id, ring->me, ring->pipe, ring->queue); r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0, AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) @@ -336,19 +337,19 @@ void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring) amdgpu_ring_fini(ring); } -void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev) +void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL); } int amdgpu_gfx_kiq_init(struct amdgpu_device *adev, - unsigned hpd_size) + unsigned hpd_size, int xcc_id) { int r; u32 *hpd; - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj, @@ -371,13 +372,13 @@ int amdgpu_gfx_kiq_init(struct amdgpu_device *adev, /* create MQD for each compute/gfx queue */ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, - unsigned mqd_size) + unsigned mqd_size, int xcc_id) { - struct amdgpu_ring *ring = NULL; int r, i; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; + struct amdgpu_ring *ring = &kiq->ring; /* create MQD for KIQ */ - ring = &adev->gfx.kiq[0].ring; if (!adev->enable_mes_kiq && !ring->mqd_obj) { /* originaly the KIQ MQD is put in GTT domain, but for SRIOV VRAM domain is a must * otherwise hypervisor trigger SAVE_VF fail after driver unloaded which mean MQD @@ -396,8 +397,8 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, } /* prepare MQD backup */ - adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL); - if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]) + kiq->mqd_backup = kmalloc(mqd_size, GFP_KERNEL); + if (!kiq->mqd_backup) dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); } @@ -424,7 +425,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, /* create MQD for each KCQ */ for (i = 0; i < adev->gfx.num_compute_rings; i++) { - ring = &adev->gfx.compute_ring[i]; + ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings]; if (!ring->mqd_obj) { r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj, @@ -435,7 +436,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, } /* prepare MQD backup */ - adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL); + adev->gfx.mec.mqd_backup[i + xcc_id * adev->gfx.num_compute_rings] = kmalloc(mqd_size, GFP_KERNEL); if (!adev->gfx.mec.mqd_backup[i]) dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); } @@ -444,10 +445,11 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, return 0; } -void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev) +void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id) { struct amdgpu_ring *ring = NULL; - int i; + int i, j; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) { for (i = 0; i < adev->gfx.num_gfx_rings; i++) { @@ -460,6 +462,7 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev) } for (i = 0; i < adev->gfx.num_compute_rings; i++) { + j = i + xcc_id * 
adev->gfx.num_compute_rings; ring = &adev->gfx.compute_ring[i]; kfree(adev->gfx.mec.mqd_backup[i]); amdgpu_bo_free_kernel(&ring->mqd_obj, @@ -467,36 +470,40 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev) &ring->mqd_ptr); } - ring = &adev->gfx.kiq[0].ring; + ring = &kiq->ring; + kfree(kiq->mqd_backup); kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]); amdgpu_bo_free_kernel(&ring->mqd_obj, &ring->mqd_gpu_addr, &ring->mqd_ptr); } -int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev) +int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; struct amdgpu_ring *kiq_ring = &kiq->ring; int i, r = 0; + int j; if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; - spin_lock(&adev->gfx.kiq[0].ring_lock); + spin_lock(&kiq->ring_lock); if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * adev->gfx.num_compute_rings)) { spin_unlock(&adev->gfx.kiq[0].ring_lock); return -ENOMEM; } - for (i = 0; i < adev->gfx.num_compute_rings; i++) + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + j = i + xcc_id * adev->gfx.num_compute_rings; kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i], RESET_QUEUES, 0, 0); + } if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang) r = amdgpu_ring_test_helper(kiq_ring); - spin_unlock(&adev->gfx.kiq[0].ring_lock); + spin_unlock(&kiq->ring_lock); return r; } @@ -514,18 +521,18 @@ int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev, return set_resource_bit; } -int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) +int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) { - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; + struct amdgpu_ring *kiq_ring = &kiq->ring; uint64_t queue_mask = 0; - int r, i; + int r, i, j; if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources) return -EINVAL; for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) { - if (!test_bit(i, adev->gfx.mec_bitmap[0].queue_bitmap)) + if (!test_bit(i, adev->gfx.mec_bitmap[xcc_id].queue_bitmap)) continue; /* This situation may be hit in the future if a new HW @@ -541,7 +548,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe, kiq_ring->queue); - spin_lock(&adev->gfx.kiq[0].ring_lock); + spin_lock(&kiq->ring_lock); r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * adev->gfx.num_compute_rings + kiq->pmf->set_resources_size); @@ -555,11 +562,13 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) queue_mask = ~0ULL; kiq->pmf->kiq_set_resources(kiq_ring, queue_mask); - for (i = 0; i < adev->gfx.num_compute_rings; i++) + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + j = i + xcc_id * adev->gfx.num_compute_rings; kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]); + } r = amdgpu_ring_test_helper(kiq_ring); - spin_unlock(&adev->gfx.kiq[0].ring_lock); + spin_unlock(&kiq->ring_lock); if (r) DRM_ERROR("KCQ enable failed\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index d811cb038e94..a9e41d7970ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -408,19 +408,19 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, struct 
amdgpu_ring *ring, - struct amdgpu_irq_src *irq); + struct amdgpu_irq_src *irq, int xcc_id); void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring); -void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev); +void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id); int amdgpu_gfx_kiq_init(struct amdgpu_device *adev, - unsigned hpd_size); + unsigned hpd_size, int xcc_id); int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, - unsigned mqd_size); -void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev); -int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev); -int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev); + unsigned mqd_size, int xcc_id); +void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id); +int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id); +int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id); void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev); void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev); @@ -429,7 +429,7 @@ int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec, int pipe, int queue); void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit, int *mec, int *pipe, int *queue); -bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int inst, +bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int xcc_id, int mec, int pipe, int queue); bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 88f8424ea1e4..accc0a7251b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4629,19 +4629,19 @@ static int gfx_v10_0_sw_init(void *handle) } if (!adev->enable_mes_kiq) { - r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE); + r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE, 0); if (r) { DRM_ERROR("Failed to init KIQ BOs!\n"); return r; } kiq = &adev->gfx.kiq[0]; - r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq); + r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0); if (r) return r; } - r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v10_compute_mqd)); + r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v10_compute_mqd), 0); if (r) return r; @@ -4690,11 +4690,11 @@ static int gfx_v10_0_sw_fini(void *handle) for (i = 0; i < adev->gfx.num_compute_rings; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); - amdgpu_gfx_mqd_sw_fini(adev); + amdgpu_gfx_mqd_sw_fini(adev, 0); if (!adev->enable_mes_kiq) { amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); - amdgpu_gfx_kiq_fini(adev); + amdgpu_gfx_kiq_fini(adev, 0); } gfx_v10_0_pfp_fini(adev); @@ -6812,14 +6812,13 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct v10_compute_mqd *mqd = ring->mqd_ptr; - int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS; gfx_v10_0_kiq_setting(ring); if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ /* reset MQD to a clean status */ - if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); + if (adev->gfx.kiq[0].mqd_backup) + memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd)); /* reset ring buffer */ ring->wptr = 0; @@ -6841,8 +6840,8 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring) nv_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); - if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 
+ if (adev->gfx.kiq[0].mqd_backup) + memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd)); } return 0; @@ -6927,7 +6926,7 @@ static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev) goto done; } - r = amdgpu_gfx_enable_kcq(adev); + r = amdgpu_gfx_enable_kcq(adev, 0); done: return r; } @@ -7280,7 +7279,7 @@ static int gfx_v10_0_hw_fini(void *handle) DRM_ERROR("KGQ disable failed\n"); } #endif - if (amdgpu_gfx_disable_kcq(adev)) + if (amdgpu_gfx_disable_kcq(adev, 0)) DRM_ERROR("KCQ disable failed\n"); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 3e42a44f10a7..4fbefe236fc7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -1389,19 +1389,19 @@ static int gfx_v11_0_sw_init(void *handle) } if (!adev->enable_mes_kiq) { - r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE); + r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE, 0); if (r) { DRM_ERROR("Failed to init KIQ BOs!\n"); return r; } kiq = &adev->gfx.kiq[0]; - r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq); + r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0); if (r) return r; } - r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd)); + r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd), 0); if (r) return r; @@ -1463,11 +1463,11 @@ static int gfx_v11_0_sw_fini(void *handle) for (i = 0; i < adev->gfx.num_compute_rings; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); - amdgpu_gfx_mqd_sw_fini(adev); + amdgpu_gfx_mqd_sw_fini(adev, 0); if (!adev->enable_mes_kiq) { amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); - amdgpu_gfx_kiq_fini(adev); + amdgpu_gfx_kiq_fini(adev, 0); } gfx_v11_0_pfp_fini(adev); @@ -4035,14 +4035,13 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct v11_compute_mqd *mqd = ring->mqd_ptr; - int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS; gfx_v11_0_kiq_setting(ring); if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ /* reset MQD to a clean status */ - if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); + if (adev->gfx.kiq[0].mqd_backup) + memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd)); /* reset ring buffer */ ring->wptr = 0; @@ -4064,8 +4063,8 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring) soc21_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); - if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); + if (adev->gfx.kiq[0].mqd_backup) + memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd)); } return 0; @@ -4153,7 +4152,7 @@ static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev) goto done; } - r = amdgpu_gfx_enable_kcq(adev); + r = amdgpu_gfx_enable_kcq(adev, 0); done: return r; } @@ -4456,7 +4455,7 @@ static int gfx_v11_0_hw_fini(void *handle) DRM_ERROR("KGQ disable failed\n"); } #endif - if (amdgpu_gfx_disable_kcq(adev)) + if (amdgpu_gfx_disable_kcq(adev, 0)) DRM_ERROR("KCQ disable failed\n"); amdgpu_mes_kiq_hw_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 18722450e265..8a43e87de49f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -2016,19 +2016,19 @@ static int gfx_v8_0_sw_init(void *handle) } } - r = amdgpu_gfx_kiq_init(adev, GFX8_MEC_HPD_SIZE); + r = amdgpu_gfx_kiq_init(adev, GFX8_MEC_HPD_SIZE, 0); if (r) { DRM_ERROR("Failed to init KIQ BOs!\n"); return r; } 
kiq = &adev->gfx.kiq[0]; - r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq); + r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0); if (r) return r; /* create MQD for all compute queues as well as KIQ for SRIOV case */ - r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct vi_mqd_allocation)); + r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct vi_mqd_allocation), 0); if (r) return r; @@ -2051,9 +2051,9 @@ static int gfx_v8_0_sw_fini(void *handle) for (i = 0; i < adev->gfx.num_compute_rings; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); - amdgpu_gfx_mqd_sw_fini(adev); + amdgpu_gfx_mqd_sw_fini(adev, 0); amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); - amdgpu_gfx_kiq_fini(adev); + amdgpu_gfx_kiq_fini(adev, 0); gfx_v8_0_mec_fini(adev); amdgpu_gfx_rlc_fini(adev); @@ -4596,14 +4596,13 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct vi_mqd *mqd = ring->mqd_ptr; - int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS; gfx_v8_0_kiq_setting(ring); if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ /* reset MQD to a clean status */ - if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation)); + if (adev->gfx.kiq[0].mqd_backup) + memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct vi_mqd_allocation)); /* reset ring buffer */ ring->wptr = 0; @@ -4626,8 +4625,8 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring) vi_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); - if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation)); + if (adev->gfx.kiq[0].mqd_backup) + memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct vi_mqd_allocation)); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 49adc36dcc6f..62af92e5be51 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2169,19 +2169,19 @@ static int gfx_v9_0_sw_init(void *handle) } } - r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE); + r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0); if (r) { DRM_ERROR("Failed to init KIQ BOs!\n"); return r; } kiq = &adev->gfx.kiq[0]; - r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq); + r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0); if (r) return r; /* create MQD for all compute queues as wel as KIQ for SRIOV case */ - r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation)); + r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation), 0); if (r) return r; @@ -2216,9 +2216,9 @@ static int gfx_v9_0_sw_fini(void *handle) for (i = 0; i < adev->gfx.num_compute_rings; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); - amdgpu_gfx_mqd_sw_fini(adev); + amdgpu_gfx_mqd_sw_fini(adev, 0); amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring); - amdgpu_gfx_kiq_fini(adev); + amdgpu_gfx_kiq_fini(adev, 0); gfx_v9_0_mec_fini(adev); amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, @@ -3520,7 +3520,6 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct v9_mqd *mqd = ring->mqd_ptr; - int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS; struct v9_mqd *tmp_mqd; gfx_v9_0_kiq_setting(ring); @@ -3530,11 +3529,11 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring) * driver need to re-init the mqd. 
* check mqd->cp_hqd_pq_control since this value should not be 0 */ - tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx]; + tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[0].mqd_backup; if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control){ /* for GPU_RESET case , reset MQD to a clean status */ - if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation)); + if (adev->gfx.kiq[0].mqd_backup) + memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct v9_mqd_allocation)); /* reset ring buffer */ ring->wptr = 0; @@ -3558,8 +3557,8 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring) soc15_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); - if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation)); + if (adev->gfx.kiq[0].mqd_backup) + memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct v9_mqd_allocation)); } return 0; @@ -3653,7 +3652,7 @@ static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev) goto done; } - r = amdgpu_gfx_enable_kcq(adev); + r = amdgpu_gfx_enable_kcq(adev, 0); done: return r; } @@ -3772,7 +3771,7 @@ static int gfx_v9_0_hw_fini(void *handle) /* DF freeze and kcq disable will fail */ if (!amdgpu_ras_intr_triggered()) /* disable KCQ to avoid CPC touch memory not valid anymore */ - amdgpu_gfx_disable_kcq(adev); + amdgpu_gfx_disable_kcq(adev, 0); if (amdgpu_sriov_vf(adev)) { gfx_v9_0_cp_gfx_enable(adev, false); -- cgit v1.2.3 From d51ac6d0a23caf1005cb640f8533161c5d2dd0c0 Mon Sep 17 00:00:00 2001 From: Le Ma <le.ma@amd.com> Date: Tue, 24 May 2022 11:22:49 +0800 Subject: drm/amdgpu: add xcc index argument to select_sh_se function v2 v1: To support multiple XCD case (Le) v2: introduce xcc index to gfx_v11_0_select_sh_se (Hawking) Signed-off-by: Le Ma <le.ma@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 4 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 16 +++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 4 +-- drivers/gpu/drm/amd/amdgpu/cik.c | 4 +-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 16 +++++------ drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 8 +++--- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 24 ++++++++--------- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 33 ++++++++++++----------- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 33 ++++++++++++----------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 30 ++++++++++----------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 20 +++++++------- drivers/gpu/drm/amd/amdgpu/nv.c | 4 +-- drivers/gpu/drm/amd/amdgpu/si.c | 4 +-- drivers/gpu/drm/amd/amdgpu/soc15.c | 4 +-- drivers/gpu/drm/amd/amdgpu/soc21.c | 4 +-- drivers/gpu/drm/amd/amdgpu/vi.c | 4 +-- 18 files changed, 110 insertions(+), 106 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index ebb35633058c..ae06d1f2af93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -787,7 +787,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, for (se_idx = 0; se_idx < se_cnt; se_idx++) { for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) { - amdgpu_gfx_select_se_sh(adev, 
se_idx, sh_idx, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 0xffffffff, 0); queue_map = RREG32_SOC15(GC, 0, mmSPI_CSQ_WF_ACTIVE_STATUS); /* @@ -820,7 +820,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, } } - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); soc15_grbm_select(adev, 0, 0, 0, 0, 0); unlock_spi_csq_mutexes(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index f60753f97ac5..cc64ae550bc1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -136,7 +136,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, } mutex_lock(&adev->grbm_idx_mutex); amdgpu_gfx_select_se_sh(adev, se_bank, - sh_bank, instance_bank); + sh_bank, instance_bank, 0); } else if (use_ring) { mutex_lock(&adev->srbm_mutex); amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid); @@ -169,7 +169,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, end: if (use_bank) { - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } else if (use_ring) { amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0); @@ -263,7 +263,7 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off mutex_lock(&adev->grbm_idx_mutex); amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se, rd->id.grbm.sh, - rd->id.grbm.instance); + rd->id.grbm.instance, 0); } if (rd->id.use_srbm) { @@ -295,7 +295,7 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off } end: if (rd->id.use_grbm) { - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -907,13 +907,13 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, /* switch to the specific se/sh/cu */ mutex_lock(&adev->grbm_idx_mutex); - amdgpu_gfx_select_se_sh(adev, se, sh, cu); + amdgpu_gfx_select_se_sh(adev, se, sh, cu, 0); x = 0; if (adev->gfx.funcs->read_wave_data) adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x); - amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); + amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0); mutex_unlock(&adev->grbm_idx_mutex); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); @@ -1001,7 +1001,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, /* switch to the specific se/sh/cu */ mutex_lock(&adev->grbm_idx_mutex); - amdgpu_gfx_select_se_sh(adev, se, sh, cu); + amdgpu_gfx_select_se_sh(adev, se, sh, cu, 0); if (bank == 0) { if (adev->gfx.funcs->read_wave_vgprs) @@ -1011,7 +1011,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data); } - amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); + amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0); mutex_unlock(&adev->grbm_idx_mutex); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index a9e41d7970ea..77e2f714e357 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -237,7 +237,7 @@ struct 
amdgpu_gfx_funcs { /* get the gpu clock counter */ uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, - u32 sh_num, u32 instance); + u32 sh_num, u32 instance, int xcc_id); void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields); void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, @@ -386,7 +386,7 @@ struct amdgpu_gfx { }; #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) -#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance)) +#define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id))) #define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid)) #define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev)) diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index de6d10390ab2..5641cf05d856 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1141,12 +1141,12 @@ static uint32_t cik_get_register_value(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0); val = RREG32(reg_offset); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); return val; } else { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index accc0a7251b9..323f5b8927ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -3490,7 +3490,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info); static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev); static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, - u32 sh_num, u32 instance); + u32 sh_num, u32 instance, int xcc_id); static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev); static int gfx_v10_0_rlc_backdoor_autoload_buffer_init(struct amdgpu_device *adev); @@ -4712,7 +4712,7 @@ static int gfx_v10_0_sw_fini(void *handle) } static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, - u32 sh_num, u32 instance) + u32 sh_num, u32 instance, int xcc_id) { u32 data; @@ -4772,13 +4772,13 @@ static void gfx_v10_0_setup_rb(struct amdgpu_device *adev) (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 6))) && ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1)) continue; - gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff, 0); data = gfx_v10_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); adev->gfx.config.backend_enable_mask = active_rbs; @@ -4907,7 +4907,7 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) 
mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff, 0); wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev); /* * Set corresponding TCP bits for the inactive WGPs in @@ -4940,7 +4940,7 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) } } - gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -9540,7 +9540,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev, mask = 1; ao_bitmap = 0; counter = 0; - gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff, 0); if (i < 4 && j < 2) gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh( adev, disable_masks[i * 2 + j]); @@ -9561,7 +9561,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev, cu_info->ao_cu_bitmap[i][j] = ao_bitmap; } } - gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 4fbefe236fc7..50d0ff9ca259 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -112,7 +112,7 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info); static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev); static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, - u32 sh_num, u32 instance); + u32 sh_num, u32 instance, int xcc_id); static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev); static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume); @@ -1484,7 +1484,7 @@ static int gfx_v11_0_sw_fini(void *handle) } static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, - u32 sh_num, u32 instance) + u32 sh_num, u32 instance, int xcc_id) { u32 data; @@ -6473,7 +6473,7 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev, for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { mask = 1; counter = 0; - gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0); if (i < 8 && j < 2) gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh( adev, disable_masks[i * 2 + j]); @@ -6505,7 +6505,7 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev, active_cu_number += counter; } } - gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index d9ce4d1c50e4..7cb72bf1acdd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1285,7 +1285,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev) } static void gfx_v6_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, - u32 sh_num, u32 instance) + u32 sh_num, u32 instance, int xcc_id) { u32 data; @@ -1438,12 +1438,12 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev, } /* GRBM_GFX_INDEX has a different offset 
on SI */ - gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff); + gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0); WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se); } /* GRBM_GFX_INDEX has a different offset on SI */ - gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); } static void gfx_v6_0_setup_rb(struct amdgpu_device *adev) @@ -1459,14 +1459,14 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0); data = gfx_v6_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); adev->gfx.config.backend_enable_mask = active_rbs; adev->gfx.config.num_rbs = hweight32(active_rbs); @@ -1487,7 +1487,7 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev) /* cache the values for userspace */ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0); adev->gfx.config.rb_config[i][j].rb_backend_disable = RREG32(mmCC_RB_BACKEND_DISABLE); adev->gfx.config.rb_config[i][j].user_rb_backend_disable = @@ -1496,7 +1496,7 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev) RREG32(mmPA_SC_RASTER_CONFIG); } } - gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -1535,7 +1535,7 @@ static void gfx_v6_0_setup_spi(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0); data = RREG32(mmSPI_STATIC_THREAD_MGMT_3); active_cu = gfx_v6_0_get_cu_enabled(adev); @@ -1550,7 +1550,7 @@ static void gfx_v6_0_setup_spi(struct amdgpu_device *adev) } } } - gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -2391,7 +2391,7 @@ static void gfx_v6_0_enable_lbpw(struct amdgpu_device *adev, bool enable) WREG32_FIELD(RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 
1 : 0); if (!enable) { - gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32(mmSPI_LB_CU_MASK, 0x00ff); } } @@ -3571,7 +3571,7 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev) mask = 1; ao_bitmap = 0; counter = 0; - gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0); if (i < 4 && j < 2) gfx_v6_0_set_user_cu_inactive_bitmap( adev, disable_masks[i * 2 + j]); @@ -3593,7 +3593,7 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev) } } - gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 46740ad9a80f..d055e44eee1d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -1552,7 +1552,8 @@ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev) * Select which SE, SH combinations to address. */ static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, - u32 se_num, u32 sh_num, u32 instance) + u32 se_num, u32 sh_num, u32 instance, + int xcc_id) { u32 data; @@ -1732,13 +1733,13 @@ gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev, } /* GRBM_GFX_INDEX has a different offset on CI+ */ - gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0); WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se); WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1); } /* GRBM_GFX_INDEX has a different offset on CI+ */ - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); } /** @@ -1761,13 +1762,13 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0); data = gfx_v7_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); adev->gfx.config.backend_enable_mask = active_rbs; adev->gfx.config.num_rbs = hweight32(active_rbs); @@ -1790,7 +1791,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) /* cache the values for userspace */ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0); adev->gfx.config.rb_config[i][j].rb_backend_disable = RREG32(mmCC_RB_BACKEND_DISABLE); adev->gfx.config.rb_config[i][j].user_rb_backend_disable = @@ -1801,7 +1802,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) RREG32(mmPA_SC_RASTER_CONFIG_1); } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -1911,7 +1912,7 @@ static void gfx_v7_0_constants_init(struct amdgpu_device *adev) * making sure that the following register writes will be broadcasted * to all the 
shaders */ - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ @@ -3301,7 +3302,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0); for (k = 0; k < adev->usec_timeout; k++) { if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0) break; @@ -3309,7 +3310,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | @@ -3474,7 +3475,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) WREG32(mmRLC_LB_CNTR_MAX, 0x00008000); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff); WREG32(mmRLC_LB_PARAMS, 0x00600408); WREG32(mmRLC_LB_CNTL, 0x80000004); @@ -3530,7 +3531,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable) tmp = gfx_v7_0_halt_rlc(adev); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | @@ -3584,7 +3585,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) tmp = gfx_v7_0_halt_rlc(adev); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | @@ -3635,7 +3636,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) tmp = gfx_v7_0_halt_rlc(adev); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK; @@ -5115,7 +5116,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev) mask = 1; ao_bitmap = 0; counter = 0; - gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0); if (i < 4 && j < 2) gfx_v7_0_set_user_cu_inactive_bitmap( adev, disable_masks[i * 2 + j]); @@ -5136,7 +5137,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev) cu_info->ao_cu_bitmap[i][j] = ao_bitmap; } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 
8a43e87de49f..b60480876149 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -3395,7 +3395,8 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) } static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, - u32 se_num, u32 sh_num, u32 instance) + u32 se_num, u32 sh_num, u32 instance, + int xcc_id) { u32 data; @@ -3579,13 +3580,13 @@ gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev, } /* GRBM_GFX_INDEX has a different offset on VI */ - gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0); WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se); WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1); } /* GRBM_GFX_INDEX has a different offset on VI */ - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); } static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) @@ -3601,13 +3602,13 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0); data = gfx_v8_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); adev->gfx.config.backend_enable_mask = active_rbs; adev->gfx.config.num_rbs = hweight32(active_rbs); @@ -3630,7 +3631,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) /* cache the values for userspace */ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0); adev->gfx.config.rb_config[i][j].rb_backend_disable = RREG32(mmCC_RB_BACKEND_DISABLE); adev->gfx.config.rb_config[i][j].user_rb_backend_disable = @@ -3641,7 +3642,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) RREG32(mmPA_SC_RASTER_CONFIG_1); } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -3788,7 +3789,7 @@ static void gfx_v8_0_constants_init(struct amdgpu_device *adev) * making sure that the following register writes will be broadcasted * to all the shaders */ - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32(mmPA_SC_FIFO_SIZE, (adev->gfx.config.sc_prim_fifo_size_frontend << @@ -3819,7 +3820,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0); for (k = 0; k < adev->usec_timeout; k++) { if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0) break; @@ -3827,7 +3828,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } if (k == adev->usec_timeout) { gfx_v8_0_select_se_sh(adev, 0xffffffff, - 0xffffffff, 0xffffffff); + 0xffffffff, 0xffffffff, 0); 
mutex_unlock(&adev->grbm_idx_mutex); DRM_INFO("Timeout wait for RLC serdes %u,%u\n", i, j); @@ -3835,7 +3836,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | @@ -5481,7 +5482,7 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev, { uint32_t data; - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); @@ -6723,11 +6724,11 @@ static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data, */ if (from_wq) { mutex_lock(&adev->grbm_idx_mutex); - gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id); + gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id, 0); sq_edc_source = REG_GET_FIELD(RREG32(mmSQ_EDC_INFO), SQ_EDC_INFO, SOURCE); - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -7116,7 +7117,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev) mask = 1; ao_bitmap = 0; counter = 0; - gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0); if (i < 4 && j < 2) gfx_v8_0_set_user_cu_inactive_bitmap( adev, disable_masks[i * 2 + j]); @@ -7137,7 +7138,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev) cu_info->ao_cu_bitmap[i][j] = ao_bitmap; } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 4939fd61355b..2fa7adef18a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1504,7 +1504,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev) mask = 1; cu_bitmap = 0; counter = 0; - amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0); for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) { if (cu_info->bitmap[i][j] & mask) { @@ -1523,7 +1523,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev) cu_info->ao_cu_bitmap[i][j] = cu_bitmap; } } - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } @@ -1545,7 +1545,7 @@ static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); /* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/ - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff); /* set mmRLC_LB_PARAMS = 0x003F_1006 */ @@ -1594,7 +1594,7 @@ static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); /* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/ - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 
WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff); /* set mmRLC_LB_PARAMS = 0x003F_1006 */ @@ -2241,7 +2241,7 @@ static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev) } void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, - u32 instance) + u32 instance, int xcc_id) { u32 data; @@ -2290,13 +2290,13 @@ static void gfx_v9_0_setup_rb(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0); data = gfx_v9_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); adev->gfx.config.backend_enable_mask = active_rbs; @@ -2433,7 +2433,7 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0); for (k = 0; k < adev->usec_timeout; k++) { if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0) break; @@ -2441,7 +2441,7 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } if (k == adev->usec_timeout) { amdgpu_gfx_select_se_sh(adev, 0xffffffff, - 0xffffffff, 0xffffffff); + 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); DRM_INFO("Timeout wait for RLC serdes %u,%u\n", i, j); @@ -2449,7 +2449,7 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } } } - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | @@ -6608,7 +6608,7 @@ static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev) for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) { for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) { for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) { - amdgpu_gfx_select_se_sh(adev, j, 0x0, k); + amdgpu_gfx_select_se_sh(adev, j, 0x0, k, 0); RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i])); } } @@ -6670,7 +6670,7 @@ static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) { for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) { for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) { - amdgpu_gfx_select_se_sh(adev, j, 0, k); + amdgpu_gfx_select_se_sh(adev, j, 0, k, 0); reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i])); if (reg_value) @@ -6685,7 +6685,7 @@ static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, err_data->ce_count += sec_count; err_data->ue_count += ded_count; - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); gfx_v9_0_query_utc_edc_status(adev, err_data); @@ -7145,7 +7145,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, mask = 1; ao_bitmap = 0; counter = 0; - 
amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0); gfx_v9_0_set_user_cu_inactive_bitmap( adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]); bitmap = gfx_v9_0_get_cu_active_bitmap(adev); @@ -7178,7 +7178,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap; } } - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h index dfe8d4841f58..f9f6edc5e558 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h @@ -27,6 +27,6 @@ extern const struct amdgpu_ip_block_version gfx_v9_0_ip_block; void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, - u32 instance); + u32 instance, int xcc_id); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index 93438770ca1a..d648a29c33e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -777,7 +777,7 @@ void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev) { u32 tmp; - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); tmp = 0; tmp = REG_SET_FIELD(tmp, GC_THROTTLE_CTRL, PATTERN_MODE, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index f1c04140e717..b67be666f38a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -568,7 +568,8 @@ static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev) static void gfx_v9_4_3_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, - u32 instance) + u32 instance, + int xcc_id) { u32 data; @@ -591,7 +592,7 @@ static void gfx_v9_4_3_select_se_sh(struct amdgpu_device *adev, else data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); - WREG32_SOC15_RLC_SHADOW_EX(reg, GC, 0, regGRBM_GFX_INDEX, data); + WREG32_SOC15_RLC_SHADOW_EX(reg, GC, xcc_id, regGRBM_GFX_INDEX, data); } static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address) @@ -907,13 +908,13 @@ static void gfx_v9_4_3_setup_rb(struct amdgpu_device *adev, int xcc_id) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff); + gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff, xcc_id); data = gfx_v9_4_3_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, xcc_id); mutex_unlock(&adev->grbm_idx_mutex); adev->gfx.config.backend_enable_mask = active_rbs; @@ -1146,7 +1147,7 @@ static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff); + gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff, xcc_id); for (k = 0; k < adev->usec_timeout; k++) { if 
(RREG32_SOC15(GC, 0, regRLC_SERDES_CU_MASTER_BUSY) == 0) break; @@ -1154,7 +1155,8 @@ static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev, } if (k == adev->usec_timeout) { gfx_v9_4_3_select_se_sh(adev, 0xffffffff, - 0xffffffff, 0xffffffff); + 0xffffffff, 0xffffffff, + xcc_id); mutex_unlock(&adev->grbm_idx_mutex); DRM_INFO("Timeout wait for RLC serdes %u,%u\n", i, j); @@ -1162,7 +1164,7 @@ static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev, } } } - gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, xcc_id); mutex_unlock(&adev->grbm_idx_mutex); mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | @@ -3065,7 +3067,7 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev, mask = 1; ao_bitmap = 0; counter = 0; - gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff); + gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff, 0); gfx_v9_4_3_set_user_cu_inactive_bitmap( adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]); bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev); @@ -3098,7 +3100,7 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev, cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap; } } - gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 47420b403871..148049782f50 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -371,12 +371,12 @@ static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num, mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0); val = RREG32(reg_offset); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); return val; } diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 7f99e130acd0..f64b87b11b1b 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1181,12 +1181,12 @@ static uint32_t si_get_register_value(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0); val = RREG32(reg_offset); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); return val; } else { diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 4b79a8933476..4d1487a9836c 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -363,12 +363,12 @@ static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_n mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0); val = RREG32(reg_offset); 
if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); return val; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 514bfc705d5a..6ef4be9322d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -288,12 +288,12 @@ static uint32_t soc21_read_indexed_register(struct amdgpu_device *adev, u32 se_n mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0); val = RREG32(reg_offset); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); return val; } diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 531f173ade2d..8e70581960fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -762,12 +762,12 @@ static uint32_t vi_get_register_value(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0); val = RREG32(reg_offset); if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); return val; } else { -- cgit v1.2.3 From 66daccde429611530db82605c197be01adadb389 Mon Sep 17 00:00:00 2001 From: Le Ma <le.ma@amd.com> Date: Wed, 1 Dec 2021 16:44:18 +0800 Subject: drm/amdgpu: add master/slave check in init phase Skip KCQ setup on slave xcc as there's no use case. 
Signed-off-by: Le Ma <le.ma@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 59 ++++++++++++++++++++------------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 5 +++ drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 10 ++++-- 4 files changed, 50 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 7f5c60381103..c83fb4277233 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -489,16 +489,19 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id) return -EINVAL; spin_lock(&kiq->ring_lock); - if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * - adev->gfx.num_compute_rings)) { - spin_unlock(&adev->gfx.kiq[0].ring_lock); - return -ENOMEM; - } + if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) { + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * + adev->gfx.num_compute_rings)) { + spin_unlock(&kiq->ring_lock); + return -ENOMEM; + } - for (i = 0; i < adev->gfx.num_compute_rings; i++) { - j = i + xcc_id * adev->gfx.num_compute_rings; - kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i], - RESET_QUEUES, 0, 0); + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + j = i + xcc_id * adev->gfx.num_compute_rings; + kiq->pmf->kiq_unmap_queues(kiq_ring, + &adev->gfx.compute_ring[i], + RESET_QUEUES, 0, 0); + } } if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang) @@ -549,22 +552,26 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe, kiq_ring->queue); spin_lock(&kiq->ring_lock); - r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * - adev->gfx.num_compute_rings + - kiq->pmf->set_resources_size); - if (r) { - DRM_ERROR("Failed to lock KIQ (%d).\n", r); - spin_unlock(&adev->gfx.kiq[0].ring_lock); - return r; - } + /* No need to map kcq on the slave */ + if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) { + r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * + adev->gfx.num_compute_rings + + kiq->pmf->set_resources_size); + if (r) { + DRM_ERROR("Failed to lock KIQ (%d).\n", r); + spin_unlock(&adev->gfx.kiq[0].ring_lock); + return r; + } - if (adev->enable_mes) - queue_mask = ~0ULL; + if (adev->enable_mes) + queue_mask = ~0ULL; - kiq->pmf->kiq_set_resources(kiq_ring, queue_mask); - for (i = 0; i < adev->gfx.num_compute_rings; i++) { - j = i + xcc_id * adev->gfx.num_compute_rings; - kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]); + kiq->pmf->kiq_set_resources(kiq_ring, queue_mask); + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + j = i + xcc_id * adev->gfx.num_compute_rings; + kiq->pmf->kiq_map_queues(kiq_ring, + &adev->gfx.compute_ring[i]); + } } r = amdgpu_ring_test_helper(kiq_ring); @@ -1078,3 +1085,9 @@ void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE); } } + +bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id) +{ + return !(xcc_id % (adev->gfx.num_xcc_per_xcp ? 
+ adev->gfx.num_xcc_per_xcp : 1)); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 77e2f714e357..a2d311a4da5a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -462,4 +462,6 @@ void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id) int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev); int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry); + +bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 4ff348e10e4d..ef96ff2f4272 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -406,6 +406,11 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) else tmo = tmo_gfx; + /* skip ib test on the slave kcq */ + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE && + !amdgpu_gfx_is_master_xcc(adev, ring->xcc_id)) + continue; + r = amdgpu_ring_test_ib(ring, tmo); if (!r) { DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n", diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index baa10ee8ec69..572f84f487cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1885,9 +1885,13 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) if (r) return r; - for (j = 0; j < adev->gfx.num_compute_rings; j++) { - ring = &adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings]; - amdgpu_ring_test_helper(ring); + /* skip ring test on slave kcq */ + if (amdgpu_gfx_is_master_xcc(adev, i)) { + for (j = 0; j < adev->gfx.num_compute_rings; j++) { + ring = &adev->gfx.compute_ring[j + + i * adev->gfx.num_compute_rings]; + amdgpu_ring_test_helper(ring); + } } gfx_v9_4_3_enable_gui_idle_interrupt(adev, true, i); -- cgit v1.2.3 From 89d8445e4f5cfaf5c017c0bafb05f8bbaac59bf9 Mon Sep 17 00:00:00 2001 From: Alex Deucher <alexander.deucher@amd.com> Date: Mon, 20 Mar 2023 12:22:49 -0400 Subject: drm/amdgpu/gfx11: add FW version check for new CP GFX shadow feature Use this to determine if we support the new SET_Q_PREEMPTION_MODE packet. 
Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2 ++ drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 13 +++++++++++++ 2 files changed, 15 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index a2d311a4da5a..6c5f58a34e84 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -380,6 +380,8 @@ struct amdgpu_gfx { struct amdgpu_ring sw_gfx_ring[AMDGPU_MAX_SW_GFX_RINGS]; struct amdgpu_ring_mux muxer; + bool cp_gfx_shadow; /* for gfx11 */ + enum amdgpu_gfx_partition partition_mode; uint32_t num_xcd; uint32_t num_xcc_per_xcp; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index d3c89e6c0c03..f1ad6bd1eda8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -463,6 +463,18 @@ out: return err; } +static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 3): + default: + adev->gfx.cp_gfx_shadow = false; + break; + } +} + static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) { char fw_name[40]; @@ -539,6 +551,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) /* only one MEC for gfx 11.0.0. */ adev->gfx.mec2_fw = NULL; + gfx_v11_0_check_fw_cp_gfx_shadow(adev); out: if (err) { amdgpu_ucode_release(&adev->gfx.pfp_fw); -- cgit v1.2.3 From 0db0c0379d15cd811214bdb631a0b6bdcdd22c84 Mon Sep 17 00:00:00 2001 From: Alex Deucher <alexander.deucher@amd.com> Date: Thu, 9 Mar 2023 14:56:07 -0500 Subject: drm/amdgpu: add gfx shadow callback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To provide IP specific shadow sizes. UMDs will use this to query the kernel driver for the size of the shadow buffers. 
v2: make callback return an int (Alex) v3: drop GDS (Alex) Reviewed-by: Christian König <christian.koenig@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 6c5f58a34e84..f1a2ce7b6aca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -233,6 +233,13 @@ struct amdgpu_gfx_ras { struct amdgpu_iv_entry *entry); }; +struct amdgpu_gfx_shadow_info { + u32 shadow_size; + u32 shadow_alignment; + u32 csa_size; + u32 csa_alignment; +}; + struct amdgpu_gfx_funcs { /* get the gpu clock counter */ uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); @@ -250,6 +257,8 @@ struct amdgpu_gfx_funcs { u32 queue, u32 vmid); void (*init_spm_golden)(struct amdgpu_device *adev); void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable); + int (*get_gfx_shadow_info)(struct amdgpu_device *adev, + struct amdgpu_gfx_shadow_info *shadow_info); }; struct sq_work { @@ -391,6 +400,7 @@ struct amdgpu_gfx { #define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id))) #define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid)) #define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev)) +#define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si))) /** * amdgpu_gfx_create_bitmask - create a bitmask -- cgit v1.2.3 From 1156e1a60f024ce29fbb88dd330c2be81c4303ea Mon Sep 17 00:00:00 2001 From: Alex Deucher <alexander.deucher@amd.com> Date: Mon, 24 Apr 2023 14:24:20 -0400 Subject: drm/amdgpu: add [en/dis]able_kgq() functions To replace the IP specific variants which are largely duplicate. 
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 68 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2 + 2 files changed, 70 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 66b9740ec376..90f5d302d5f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -519,6 +519,39 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id) return r; } +int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id) +{ + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; + struct amdgpu_ring *kiq_ring = &kiq->ring; + int i, r = 0; + int j; + + if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) + return -EINVAL; + + spin_lock(&kiq->ring_lock); + if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) { + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * + adev->gfx.num_gfx_rings)) { + spin_unlock(&kiq->ring_lock); + return -ENOMEM; + } + + for (i = 0; i < adev->gfx.num_gfx_rings; i++) { + j = i + xcc_id * adev->gfx.num_gfx_rings; + kiq->pmf->kiq_unmap_queues(kiq_ring, + &adev->gfx.gfx_ring[i], + PREEMPT_QUEUES, 0, 0); + } + } + + if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang) + r = amdgpu_ring_test_helper(kiq_ring); + spin_unlock(&kiq->ring_lock); + + return r; +} + int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev, int queue_bit) { @@ -590,6 +623,41 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) return r; } +int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id) +{ + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id]; + struct amdgpu_ring *kiq_ring = &kiq->ring; + int r, i, j; + + if (!kiq->pmf || !kiq->pmf->kiq_map_queues) + return -EINVAL; + + spin_lock(&kiq->ring_lock); + /* No need to map kcq on the slave */ + if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) { + r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * + adev->gfx.num_gfx_rings); + if (r) { + DRM_ERROR("Failed to lock KIQ (%d).\n", r); + spin_unlock(&adev->gfx.kiq[0].ring_lock); + return r; + } + + for (i = 0; i < adev->gfx.num_gfx_rings; i++) { + j = i + xcc_id * adev->gfx.num_gfx_rings; + kiq->pmf->kiq_map_queues(kiq_ring, + &adev->gfx.gfx_ring[i]); + } + } + + r = amdgpu_ring_test_helper(kiq_ring); + spin_unlock(&kiq->ring_lock); + if (r) + DRM_ERROR("KCQ enable failed\n"); + + return r; +} + /* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable * * @adev: amdgpu_device pointer diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index f1a2ce7b6aca..2755f00ac19a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -433,6 +433,8 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id); int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id); int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id); +int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id); +int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id); void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev); void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev); -- cgit v1.2.3 From 98a54e88e87f7291d4bbc6ec646c498f64ae042f Mon Sep 17 00:00:00 2001 From: Le Ma <le.ma@amd.com> Date: 
Tue, 17 May 2022 22:20:10 +0800 Subject: drm/amdgpu: add sysfs node for compute partition mode Add current/available compute partition mode sysfs node. v2: make the sysfs node an IP-independent one in amdgpu_gfx.c Signed-off-by: Le Ma <le.ma@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 132 +++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 6 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 3 + drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 67 +++++++++++++++ drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 32 +++++++ 6 files changed, 241 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index e94507a10e15..f432064a0535 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3634,6 +3634,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->srbm_mutex); mutex_init(&adev->gfx.pipe_reserve_mutex); mutex_init(&adev->gfx.gfx_off_mutex); + mutex_init(&adev->gfx.partition_mutex); mutex_init(&adev->grbm_idx_mutex); mutex_init(&adev->mn_lock); mutex_init(&adev->virt.vf_errors.lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index e5ff0bf9f23b..4a4d71ff9b95 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1174,3 +1174,135 @@ bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id) return !(xcc_id % (adev->gfx.num_xcc_per_xcp ? adev->gfx.num_xcc_per_xcp : 1)); } + +static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev, + struct device_attribute *addr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + enum amdgpu_gfx_partition mode; + char *partition_mode; + + mode = adev->gfx.funcs->query_partition_mode(adev); + + switch (mode) { + case AMDGPU_SPX_PARTITION_MODE: + partition_mode = "SPX"; + break; + case AMDGPU_DPX_PARTITION_MODE: + partition_mode = "DPX"; + break; + case AMDGPU_TPX_PARTITION_MODE: + partition_mode = "TPX"; + break; + case AMDGPU_QPX_PARTITION_MODE: + partition_mode = "QPX"; + break; + case AMDGPU_CPX_PARTITION_MODE: + partition_mode = "CPX"; + break; + default: + partition_mode = "UNKNOWN"; + break; + } + + return sysfs_emit(buf, "%s\n", partition_mode); + } + +static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, + struct device_attribute *addr, + const char *buf, size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + enum amdgpu_gfx_partition mode; + int ret; + + if (adev->gfx.num_xcd % 2 != 0) + return -EINVAL; + + if (!strncasecmp("SPX", buf, strlen("SPX"))) { + mode = AMDGPU_SPX_PARTITION_MODE; + } else if (!strncasecmp("DPX", buf, strlen("DPX"))) { + if (adev->gfx.num_xcd != 4 || adev->gfx.num_xcd != 8) + return -EINVAL; + mode = AMDGPU_DPX_PARTITION_MODE; + } else if (!strncasecmp("TPX", buf, strlen("TPX"))) { + if (adev->gfx.num_xcd != 6) + return -EINVAL; + mode = AMDGPU_TPX_PARTITION_MODE; + } else if (!strncasecmp("QPX", buf, strlen("QPX"))) { + if (adev->gfx.num_xcd != 8) + return -EINVAL; + mode = AMDGPU_QPX_PARTITION_MODE; + } else if (!strncasecmp("CPX", buf, strlen("CPX"))) { + mode = AMDGPU_CPX_PARTITION_MODE; + } else { + return
-EINVAL; + } + + mutex_lock(&adev->gfx.partition_mutex); + + ret = adev->gfx.funcs->switch_partition_mode(adev, mode); + + mutex_unlock(&adev->gfx.partition_mutex); + + if (ret) + return ret; + + return count; +} + +static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev, + struct device_attribute *addr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + char *supported_partition; + + /* TBD */ + switch (adev->gfx.num_xcd) { + case 8: + supported_partition = "SPX, DPX, QPX, CPX"; + break; + case 6: + supported_partition = "SPX, TPX, CPX"; + break; + case 4: + supported_partition = "SPX, DPX, CPX"; + break; + /* this seems only existing in emulation phase */ + case 2: + supported_partition = "SPX, CPX"; + break; + default: + supported_partition = "Not supported"; + break; + } + + return sysfs_emit(buf, "%s\n", supported_partition); +} + +static DEVICE_ATTR(current_compute_partition, S_IRUGO | S_IWUSR, + amdgpu_gfx_get_current_compute_partition, + amdgpu_gfx_set_compute_partition); + +static DEVICE_ATTR(available_compute_partition, S_IRUGO, + amdgpu_gfx_get_available_compute_partition, NULL); + +int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev) +{ + int r; + + r = device_create_file(adev->dev, &dev_attr_current_compute_partition); + if (r) + return r; + + r = device_create_file(adev->dev, &dev_attr_available_compute_partition); + if (r) + return r; + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 2755f00ac19a..8df36527aee9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -259,6 +259,10 @@ struct amdgpu_gfx_funcs { void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable); int (*get_gfx_shadow_info)(struct amdgpu_device *adev, struct amdgpu_gfx_shadow_info *shadow_info); + enum amdgpu_gfx_partition + (*query_partition_mode)(struct amdgpu_device *adev); + int (*switch_partition_mode)(struct amdgpu_device *adev, + enum amdgpu_gfx_partition mode); }; struct sq_work { @@ -394,6 +398,7 @@ struct amdgpu_gfx { enum amdgpu_gfx_partition partition_mode; uint32_t num_xcd; uint32_t num_xcc_per_xcp; + struct mutex partition_mutex; }; #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) @@ -478,4 +483,5 @@ int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry); bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id); +int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index c686ff4bcc39..6a636c34b717 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -95,6 +95,9 @@ struct amdgpu_nbio_funcs { void (*apply_l1_link_width_reconfig_wa)(struct amdgpu_device *adev); void (*clear_doorbell_interrupt)(struct amdgpu_device *adev); u32 (*get_rom_offset)(struct amdgpu_device *adev); + u32 (*get_compute_partition_mode)(struct amdgpu_device *adev); + void (*set_compute_partition_mode)(struct amdgpu_device *adev, + enum amdgpu_gfx_partition mode); }; struct amdgpu_nbio { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 064cd02451c2..1d15db9423c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -662,6 +662,67 @@ static void 
gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev, soc15_grbm_select(adev, me, pipe, q, vm, 0); } +static enum amdgpu_gfx_partition +gfx_v9_4_3_query_compute_partition(struct amdgpu_device *adev) +{ + enum amdgpu_gfx_partition mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; + + if (adev->nbio.funcs->get_compute_partition_mode) + mode = adev->nbio.funcs->get_compute_partition_mode(adev); + + return mode; +} + +static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, + enum amdgpu_gfx_partition mode) +{ + u32 tmp = 0; + int num_xcc_per_partition, i; + + if (mode == adev->gfx.partition_mode) + return mode; + + switch (mode) { + case AMDGPU_SPX_PARTITION_MODE: + num_xcc_per_partition = adev->gfx.num_xcd; + break; + case AMDGPU_DPX_PARTITION_MODE: + num_xcc_per_partition = adev->gfx.num_xcd / 2; + break; + case AMDGPU_TPX_PARTITION_MODE: + num_xcc_per_partition = adev->gfx.num_xcd / 3; + break; + case AMDGPU_QPX_PARTITION_MODE: + num_xcc_per_partition = adev->gfx.num_xcd / 4; + break; + case AMDGPU_CPX_PARTITION_MODE: + num_xcc_per_partition = 1; + break; + default: + return -EINVAL; + } + + /* TODO: + * Stop user queues and threads, and make sure GPU is empty of work. + */ + + for (i = 0; i < adev->gfx.num_xcd; i++) { + tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP, + num_xcc_per_partition); + tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID, + i % num_xcc_per_partition); + WREG32_SOC15(GC, i, regCP_HYP_XCP_CTL, tmp); + } + + if (adev->nbio.funcs->set_compute_partition_mode) + adev->nbio.funcs->set_compute_partition_mode(adev, mode); + + adev->gfx.num_xcc_per_xcp = num_xcc_per_partition; + adev->gfx.partition_mode = mode; + + return 0; +} + static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { .get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter, .select_se_sh = &gfx_v9_4_3_select_se_sh, @@ -669,6 +730,8 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { .read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs, .read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs, .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q, + .query_partition_mode = &gfx_v9_4_3_query_compute_partition, + .switch_partition_mode = &gfx_v9_4_3_switch_compute_partition, }; static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev) @@ -858,6 +921,10 @@ static int gfx_v9_4_3_sw_init(void *handle) if (r) return r; + r = amdgpu_gfx_sysfs_init(adev); + if (r) + return r; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index a331a59c49e3..bdb84a53f0e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -347,6 +347,36 @@ static void nbio_v7_9_enable_doorbell_interrupt(struct amdgpu_device *adev, DOORBELL_INTERRUPT_DISABLE, enable ? 
0 : 1); } +static enum amdgpu_gfx_partition nbio_v7_9_get_compute_partition_mode(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_COMPUTE_CAP); + + if (REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_COMPUTE_CAP, SPX_SUPPORT)) + return AMDGPU_SPX_PARTITION_MODE; + else if (REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_COMPUTE_CAP, DPX_SUPPORT)) + return AMDGPU_DPX_PARTITION_MODE; + else if (REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_COMPUTE_CAP, TPX_SUPPORT)) + return AMDGPU_TPX_PARTITION_MODE; + else if (REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_COMPUTE_CAP, CPX_SUPPORT)) + return AMDGPU_CPX_PARTITION_MODE; + else + return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; +} + +static void nbio_v7_9_set_compute_partition_mode(struct amdgpu_device *adev, + enum amdgpu_gfx_partition mode) +{ + u32 tmp; + + tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_COMPUTE_CAP); + tmp &= ~0x1f; + tmp |= 1 << mode; + + WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_COMPUTE_CAP, tmp); +} + const struct amdgpu_nbio_funcs nbio_v7_9_funcs = { .get_hdp_flush_req_offset = nbio_v7_9_get_hdp_flush_req_offset, .get_hdp_flush_done_offset = nbio_v7_9_get_hdp_flush_done_offset, @@ -366,4 +396,6 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = { .get_clockgating_state = nbio_v7_9_get_clockgating_state, .ih_control = nbio_v7_9_ih_control, .remap_hdp_registers = nbio_v7_9_remap_hdp_registers, + .get_compute_partition_mode = nbio_v7_9_get_compute_partition_mode, + .set_compute_partition_mode = nbio_v7_9_set_compute_partition_mode, }; -- cgit v1.2.3 From 8078f1c610fdcdd8003e2c538fb04af41fa5c269 Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Wed, 29 Jun 2022 11:41:53 +0530 Subject: drm/amdgpu: Change num_xcd to xcc_mask Instead of number of XCCs, keep a mask of XCCs for the exact XCCs available on the ASIC. XCC configuration could differ based on different ASIC configs. v2: Rename num_xcd to num_xcc (Hawking) Use smaller xcc_mask size, changed to u16 (Le) Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Reviewed-by: Le Ma <Le.Ma@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 21 ++--- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 4 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 133 ++++++++++++++++++------------- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 67 ++++++++++------ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3 +- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 10 +-- 7 files changed, 141 insertions(+), 99 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 76438f197de1..069b259f384c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -209,12 +209,12 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_queue_per_pipe, adev->gfx.num_compute_rings); - int num_xcd = (adev->gfx.num_xcd > 1) ? adev->gfx.num_xcd : 1; + int num_xcc = adev->gfx.xcc_mask ? 
NUM_XCC(adev->gfx.xcc_mask) : 1; if (multipipe_policy) { /* policy: make queues evenly cross all pipes on MEC1 only * for multiple xcc, just use the original policy for simplicity */ - for (j = 0; j < num_xcd; j++) { + for (j = 0; j < num_xcc; j++) { for (i = 0; i < max_queues_per_mec; i++) { pipe = i % adev->gfx.mec.num_pipe_per_mec; queue = (i / adev->gfx.mec.num_pipe_per_mec) % @@ -226,13 +226,13 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) } } else { /* policy: amdgpu owns all queues in the given pipe */ - for (j = 0; j < num_xcd; j++) { + for (j = 0; j < num_xcc; j++) { for (i = 0; i < max_queues_per_mec; ++i) set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap); } } - for (j = 0; j < num_xcd; j++) { + for (j = 0; j < num_xcc; j++) { dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)); } @@ -1207,23 +1207,24 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); enum amdgpu_gfx_partition mode; - int ret; + int ret = 0, num_xcc; - if (adev->gfx.num_xcd % 2 != 0) + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + if (num_xcc % 2 != 0) return -EINVAL; if (!strncasecmp("SPX", buf, strlen("SPX"))) { mode = AMDGPU_SPX_PARTITION_MODE; } else if (!strncasecmp("DPX", buf, strlen("DPX"))) { - if (adev->gfx.num_xcd != 4 || adev->gfx.num_xcd != 8) + if (num_xcc != 4 || num_xcc != 8) return -EINVAL; mode = AMDGPU_DPX_PARTITION_MODE; } else if (!strncasecmp("TPX", buf, strlen("TPX"))) { - if (adev->gfx.num_xcd != 6) + if (num_xcc != 6) return -EINVAL; mode = AMDGPU_TPX_PARTITION_MODE; } else if (!strncasecmp("QPX", buf, strlen("QPX"))) { - if (adev->gfx.num_xcd != 8) + if (num_xcc != 8) return -EINVAL; mode = AMDGPU_QPX_PARTITION_MODE; } else if (!strncasecmp("CPX", buf, strlen("CPX"))) { @@ -1253,7 +1254,7 @@ static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev, char *supported_partition; /* TBD */ - switch (adev->gfx.num_xcd) { + switch (NUM_XCC(adev->gfx.xcc_mask)) { case 8: supported_partition = "SPX, DPX, QPX, CPX"; break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 8df36527aee9..93f9875154db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -64,6 +64,8 @@ enum amdgpu_gfx_partition { AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, }; +#define NUM_XCC(x) hweight16(x) + struct amdgpu_mec { struct amdgpu_bo *hpd_eop_obj; u64 hpd_eop_gpu_addr; @@ -396,7 +398,7 @@ struct amdgpu_gfx { bool cp_gfx_shadow; /* for gfx11 */ enum amdgpu_gfx_partition partition_mode; - uint32_t num_xcd; + uint16_t xcc_mask; uint32_t num_xcc_per_xcp; struct mutex partition_mutex; }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 91814dc083c9..da69177dc76f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -4536,7 +4536,7 @@ static int gfx_v9_0_early_init(void *handle) adev->gfx.num_gfx_rings = 0; else adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS; - adev->gfx.num_xcd = 1; + adev->gfx.xcc_mask = 1; adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), AMDGPU_MAX_COMPUTE_RINGS); gfx_v9_0_set_kiq_pm4_funcs(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 52185b1d5d31..c776fc5884de 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -177,16 +177,19 @@ static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = { static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev) { - int i; - for (i = 0; i < adev->gfx.num_xcd; i++) + int i, num_xcc; + + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs; } static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev) { - int i; + int i, num_xcc; - for (i = 2; i < adev->gfx.num_xcd; i++) + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 2; i < num_xcc; i++) WREG32_SOC15(GC, i, regGRBM_MCM_ADDR, 0x4); } @@ -499,7 +502,7 @@ static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev) static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev) { - int r, i; + int r, i, num_xcc; u32 *hpd; const __le32 *fw_data; unsigned fw_size; @@ -508,7 +511,8 @@ static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev) const struct gfx_firmware_header_v1_0 *mec_hdr; - for (i = 0; i < adev->gfx.num_xcd; i++) + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); @@ -683,23 +687,24 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, enum amdgpu_gfx_partition mode) { u32 tmp = 0; - int num_xcc_per_partition, i; + int num_xcc_per_partition, i, num_xcc; if (mode == adev->gfx.partition_mode) return mode; + num_xcc = NUM_XCC(adev->gfx.xcc_mask); switch (mode) { case AMDGPU_SPX_PARTITION_MODE: - num_xcc_per_partition = adev->gfx.num_xcd; + num_xcc_per_partition = num_xcc; break; case AMDGPU_DPX_PARTITION_MODE: - num_xcc_per_partition = adev->gfx.num_xcd / 2; + num_xcc_per_partition = num_xcc / 2; break; case AMDGPU_TPX_PARTITION_MODE: - num_xcc_per_partition = adev->gfx.num_xcd / 3; + num_xcc_per_partition = num_xcc / 3; break; case AMDGPU_QPX_PARTITION_MODE: - num_xcc_per_partition = adev->gfx.num_xcd / 4; + num_xcc_per_partition = num_xcc / 4; break; case AMDGPU_CPX_PARTITION_MODE: num_xcc_per_partition = 1; @@ -712,7 +717,7 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, * Stop user queues and threads, and make sure GPU is empty of work. 
*/ - for (i = 0; i < adev->gfx.num_xcd; i++) { + for (i = 0; i < num_xcc; i++) { tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP, num_xcc_per_partition); tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID, @@ -836,7 +841,7 @@ static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id, static int gfx_v9_4_3_sw_init(void *handle) { - int i, j, k, r, ring_id, xcc_id; + int i, j, k, r, ring_id, xcc_id, num_xcc; struct amdgpu_kiq *kiq; struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -844,6 +849,8 @@ static int gfx_v9_4_3_sw_init(void *handle) adev->gfx.mec.num_pipe_per_mec = 4; adev->gfx.mec.num_queue_per_pipe = 8; + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + /* EOP Event */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq); if (r) @@ -877,8 +884,7 @@ static int gfx_v9_4_3_sw_init(void *handle) /* set up the compute queues - allocate horizontally across pipes */ ring_id = 0; - for (xcc_id = 0; xcc_id < adev->gfx.num_xcd; xcc_id++) { - + for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; @@ -930,14 +936,14 @@ static int gfx_v9_4_3_sw_init(void *handle) static int gfx_v9_4_3_sw_fini(void *handle) { - int i; + int i, num_xcc; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - for (i = 0; i < adev->gfx.num_compute_rings * - adev->gfx.num_xcd; i++) + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); - for (i = 0; i < adev->gfx.num_xcd; i++) { + for (i = 0; i < num_xcc; i++) { amdgpu_gfx_mqd_sw_fini(adev, i); amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring); amdgpu_gfx_kiq_fini(adev, i); @@ -1050,9 +1056,10 @@ static void gfx_v9_4_3_init_gds_vmid(struct amdgpu_device *adev, int xcc_id) static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) { u32 tmp; - int i, j; + int i, j, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { WREG32_FIELD15_PREREG(GC, i, GRBM_CNTL, READ_TIMEOUT, 0xff); gfx_v9_4_3_setup_rb(adev, i); } @@ -1064,7 +1071,7 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) /* where to put LDS, scratch, GPUVM in FSA64 space */ mutex_lock(&adev->srbm_mutex); for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { - for (j = 0; j < adev->gfx.num_xcd; j++) { + for (j = 0; j < num_xcc; j++) { soc15_grbm_select(adev, 0, 0, 0, i, j); /* CP and shaders */ if (i == 0) { @@ -1092,7 +1099,7 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) mutex_unlock(&adev->srbm_mutex); - for (i = 0; i < adev->gfx.num_xcd; i++) { + for (i = 0; i < num_xcc; i++) { gfx_v9_4_3_init_compute_vmid(adev, i); gfx_v9_4_3_init_gds_vmid(adev, i); } @@ -1150,8 +1157,10 @@ static void gfx_v9_4_3_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id) static void gfx_v9_4_3_program_xcc_id(struct amdgpu_device *adev, int xcc_id) { uint32_t tmp = 0; + int num_xcc; - switch (adev->gfx.num_xcd) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + switch (num_xcc) { /* directly config VIRTUAL_XCC_ID to 0 for 1-XCC */ case 1: WREG32_SOC15(GC, xcc_id, regCP_HYP_XCP_CTL, 0x8); @@ -1288,9 +1297,10 @@ static void gfx_v9_4_3_enable_gui_idle_interrupt(struct amdgpu_device *adev, static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev) { - int i; + 
int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { WREG32_FIELD15_PREREG(GC, i, RLC_CNTL, RLC_ENABLE_F32, 0); gfx_v9_4_3_enable_gui_idle_interrupt(adev, false, i); gfx_v9_4_3_wait_for_rlc_serdes(adev, i); @@ -1299,9 +1309,10 @@ static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev) static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev) { - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { WREG32_FIELD15_PREREG(GC, i, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); udelay(50); WREG32_FIELD15_PREREG(GC, i, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); @@ -1314,9 +1325,10 @@ static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev) #ifdef AMDGPU_RLC_DEBUG_RETRY u32 rlc_ucode_ver; #endif - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { WREG32_FIELD15_PREREG(GC, i, RLC_CNTL, RLC_ENABLE_F32, 1); udelay(50); @@ -1377,11 +1389,12 @@ static int gfx_v9_4_3_rlc_load_microcode(struct amdgpu_device *adev, int xcc_id) static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev) { - int r, i; + int r, i, num_xcc; adev->gfx.rlc.funcs->stop(adev); - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { /* disable CG */ WREG32_SOC15(GC, i, regRLC_CGCG_CGLS_CTRL, 0); @@ -1954,10 +1967,11 @@ done: static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) { - int r, i, j; + int r, i, j, num_xcc; struct amdgpu_ring *ring; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { gfx_v9_4_3_enable_gui_idle_interrupt(adev, false, i); if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { @@ -2021,12 +2035,13 @@ static int gfx_v9_4_3_hw_init(void *handle) static int gfx_v9_4_3_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int i; + int i, num_xcc; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { if (amdgpu_gfx_disable_kcq(adev, i)) DRM_ERROR("XCD %d KCQ disable failed\n", i); @@ -2069,9 +2084,10 @@ static int gfx_v9_4_3_resume(void *handle) static bool gfx_v9_4_3_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { if (REG_GET_FIELD(RREG32_SOC15(GC, i, regGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)) return false; @@ -2183,30 +2199,30 @@ static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring, static int gfx_v9_4_3_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int num_xcc; - /* hardcode in emulation phase */ - adev->gfx.num_xcd = 1; + num_xcc = NUM_XCC(adev->gfx.xcc_mask); adev->gfx.partition_mode = amdgpu_user_partt_mode; /* calculate the num_xcc_in_xcp for the partition mode*/ switch (amdgpu_user_partt_mode) { case AMDGPU_SPX_PARTITION_MODE: - adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd; + adev->gfx.num_xcc_per_xcp = num_xcc; break; case AMDGPU_DPX_PARTITION_MODE: - adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd / 2; + adev->gfx.num_xcc_per_xcp = num_xcc / 2; break; case 
AMDGPU_TPX_PARTITION_MODE: - adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd / 3; + adev->gfx.num_xcc_per_xcp = num_xcc / 3; break; case AMDGPU_QPX_PARTITION_MODE: - adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd / 4; + adev->gfx.num_xcc_per_xcp = num_xcc / 4; break; case AMDGPU_CPX_PARTITION_MODE: adev->gfx.num_xcc_per_xcp = 1; break; default: - adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd; + adev->gfx.num_xcc_per_xcp = num_xcc; break; } @@ -2404,14 +2420,15 @@ static int gfx_v9_4_3_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int i; + int i, num_xcc; if (amdgpu_sriov_vf(adev)) return 0; + num_xcc = NUM_XCC(adev->gfx.xcc_mask); switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 4, 3): - for (i = 0; i < adev->gfx.num_xcd; i++) + for (i = 0; i < num_xcc; i++) gfx_v9_4_3_update_gfx_clock_gating(adev, state == AMD_CG_STATE_GATE, i); break; @@ -2739,12 +2756,13 @@ static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - int i; + int i, num_xcc; + num_xcc = NUM_XCC(adev->gfx.xcc_mask); switch (state) { case AMDGPU_IRQ_STATE_DISABLE: case AMDGPU_IRQ_STATE_ENABLE: - for (i = 0; i < adev->gfx.num_xcd; i++) + for (i = 0; i < num_xcc; i++) WREG32_FIELD15_PREREG(GC, i, CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE, state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); @@ -2761,12 +2779,13 @@ static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - int i; + int i, num_xcc; + num_xcc = NUM_XCC(adev->gfx.xcc_mask); switch (state) { case AMDGPU_IRQ_STATE_DISABLE: case AMDGPU_IRQ_STATE_ENABLE: - for (i = 0; i < adev->gfx.num_xcd; i++) + for (i = 0; i < num_xcc; i++) WREG32_FIELD15_PREREG(GC, i, CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE, state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); @@ -2783,8 +2802,10 @@ static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - int i; - for (i = 0; i < adev->gfx.num_xcd; i++) { + int i, num_xcc; + + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { switch (type) { case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 0, state, i); @@ -2842,6 +2863,7 @@ static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev, /* Per-queue interrupt is supported for MEC starting from VI. * The interrupt can only be enabled/disabled per pipe instead of per queue. 
*/ + if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id)) amdgpu_fence_process(ring); } @@ -3056,9 +3078,10 @@ static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = { static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev) { - int i, j; + int i, j, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq; for (j = 0; j < adev->gfx.num_compute_rings; j++) diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index 1bb17d95f720..e35365ab3f1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -43,9 +43,10 @@ static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev, uint64_t page_table_base) { struct amdgpu_vmhub *hub; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { hub = &adev->vmhub[AMDGPU_GFXHUB(i)]; WREG32_SOC15_OFFSET(GC, i, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, @@ -56,13 +57,14 @@ static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, hub->ctx_addr_distance * vmid, upper_32_bits(page_table_base)); + } } static void gfxhub_v1_2_init_gart_aperture_regs(struct amdgpu_device *adev) { uint64_t pt_base; - int i; + int i, num_xcc; if (adev->gmc.pdb0_bo) pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo); @@ -74,7 +76,8 @@ static void gfxhub_v1_2_init_gart_aperture_regs(struct amdgpu_device *adev) /* If use GART for FB translation, vmid0 page table covers both * vram and system memory (gart) */ - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { if (adev->gmc.pdb0_bo) { WREG32_SOC15(GC, i, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, @@ -111,9 +114,10 @@ static void gfxhub_v1_2_init_system_aperture_regs(struct amdgpu_device *adev) { uint64_t value; uint32_t tmp; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { /* Program the AGP BAR */ WREG32_SOC15_RLC(GC, i, regMC_VM_AGP_BASE, 0); WREG32_SOC15_RLC(GC, i, regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); @@ -177,9 +181,10 @@ static void gfxhub_v1_2_init_system_aperture_regs(struct amdgpu_device *adev) static void gfxhub_v1_2_init_tlb_regs(struct amdgpu_device *adev) { uint32_t tmp; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { /* Setup TLB control */ tmp = RREG32_SOC15(GC, i, regMC_VM_MX_L1_TLB_CNTL); @@ -202,9 +207,10 @@ static void gfxhub_v1_2_init_tlb_regs(struct amdgpu_device *adev) static void gfxhub_v1_2_init_cache_regs(struct amdgpu_device *adev) { uint32_t tmp; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { /* Setup L2 cache */ tmp = RREG32_SOC15(GC, i, regVM_L2_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); @@ -249,9 +255,10 @@ static void gfxhub_v1_2_init_cache_regs(struct amdgpu_device *adev) static void gfxhub_v1_2_enable_system_domain(struct amdgpu_device *adev) { uint32_t tmp; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { tmp = 
RREG32_SOC15(GC, i, regVM_CONTEXT0_CNTL); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, @@ -266,9 +273,10 @@ static void gfxhub_v1_2_enable_system_domain(struct amdgpu_device *adev) static void gfxhub_v1_2_disable_identity_aperture(struct amdgpu_device *adev) { - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { WREG32_SOC15(GC, i, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, 0XFFFFFFFF); @@ -295,7 +303,7 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) struct amdgpu_vmhub *hub; unsigned num_level, block_size; uint32_t tmp; - int i, j; + int i, j, num_xcc; num_level = adev->vm_manager.num_level; block_size = adev->vm_manager.block_size; @@ -304,7 +312,8 @@ else block_size -= 9; - for (j = 0; j < adev->gfx.num_xcd; j++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (j = 0; j < num_xcc; j++) { hub = &adev->vmhub[AMDGPU_GFXHUB(j)]; for (i = 0; i <= 14; i++) { tmp = RREG32_SOC15_OFFSET(GC, j, regVM_CONTEXT1_CNTL, i); @@ -362,10 +371,12 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v1_2_program_invalidation(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub; - unsigned i, j; + unsigned i, j, num_xcc; - for (j = 0; j < adev->gfx.num_xcd; j++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (j = 0; j < num_xcc; j++) { hub = &adev->vmhub[AMDGPU_GFXHUB(j)]; + for (i = 0 ; i < 18; ++i) { WREG32_SOC15_OFFSET(GC, j, regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, i * hub->eng_addr_distance, 0xffffffff); @@ -377,9 +388,10 @@ static void gfxhub_v1_2_program_invalidation(struct amdgpu_device *adev) static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev) { - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { if (amdgpu_sriov_vf(adev)) { /* * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are @@ -413,9 +425,10 @@ static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub; u32 tmp; - u32 i, j; + u32 i, j, num_xcc; - for (j = 0; j < adev->gfx.num_xcd; j++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (j = 0; j < num_xcc; j++) { hub = &adev->vmhub[AMDGPU_GFXHUB(j)]; /* Disable all tables */ for (i = 0; i < 16; i++) @@ -449,9 +462,10 @@ static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev, bool value) { u32 tmp; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { tmp = RREG32_SOC15(GC, i, regVM_L2_PROTECTION_FAULT_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); @@ -490,9 +504,10 @@ static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev, static void gfxhub_v1_2_init(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { hub = &adev->vmhub[AMDGPU_GFXHUB(i)]; hub->ctx0_ptb_addr_lo32 = diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 4b2c4ecd7253..2c322a25bf1c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1733,7 +1733,8 @@ 
static int gmc_v9_0_sw_init(void *handle) adev->gmc.translate_further = adev->vm_manager.num_level > 1; break; case IP_VERSION(9, 4, 3): - bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0), adev->gfx.num_xcd); + bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0), + NUM_XCC(adev->gfx.xcc_mask)); bitmap_set(adev->vmhubs_mask, AMDGPU_MMHUB0(0), adev->num_aid); amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 829e32433faf..df96c4c508a0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -592,6 +592,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, struct kfd_node *node; uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd; unsigned int max_proc_per_quantum; + int num_xcd; kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, KGD_ENGINE_MEC1); @@ -601,16 +602,15 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, KGD_ENGINE_SDMA1); kfd->shared_resources = *gpu_resources; - if (kfd->adev->gfx.num_xcd == 0 || kfd->adev->gfx.num_xcd == 1 || - kfd->adev->gfx.num_xcc_per_xcp == 0) + num_xcd = NUM_XCC(kfd->adev->gfx.xcc_mask); + if (num_xcd == 0 || num_xcd == 1 || kfd->adev->gfx.num_xcc_per_xcp == 0) kfd->num_nodes = 1; else - kfd->num_nodes = - kfd->adev->gfx.num_xcd/kfd->adev->gfx.num_xcc_per_xcp; + kfd->num_nodes = num_xcd / kfd->adev->gfx.num_xcc_per_xcp; if (kfd->num_nodes == 0) { dev_err(kfd_device, "KFD num nodes cannot be 0, GC inst: %d, num_xcc_in_node: %d\n", - kfd->adev->gfx.num_xcd, kfd->adev->gfx.num_xcc_per_xcp); + num_xcd, kfd->adev->gfx.num_xcc_per_xcp); goto out; } -- cgit v1.2.3 From 553f973a0d7bbe95ea5da46979d926a9c0ada109 Mon Sep 17 00:00:00 2001 From: Tom St Denis <tom.stdenis@amd.com> Date: Tue, 11 Oct 2022 09:52:58 -0400 Subject: drm/amd/amdgpu: Update debugfs for XCC support (v3) This patch updates the 'regs2' interface for MMIO registers to add a new IOCTL command for a 'v2' state data that includes the XCC ID. This patch then updates amdgpu_gfx_select_se_sh() and amdgpu_gfx_select_me_pipe_q() (and the implementations in the gfx drivers) to support an additional parameter. This patch then creates a new debugfs interface "gprwave" which is a merge of shader GPR and wave status access. This new interface uses an IOCTL to select banks as well as XCC identity. 
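[Editor's note, illustration only and not part of the original commit message: a minimal userspace sketch of driving the new "gprwave" interface, assuming the usual debugfs location for card 0 and mirroring the ioctl and struct definitions this patch adds to amdgpu_umr.h; the local struct and macro names here are hypothetical stand-ins and must be kept in sync with the kernel header, and error handling is elided:

	/* hypothetical reader: dump the status words of one wave on XCC 0 */
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	/* mirrors struct amdgpu_debugfs_gprwave_iocdata from amdgpu_umr.h */
	struct gprwave_iocdata {
		uint32_t gpr_or_wave, se, sh, cu, wave, simd, xcc_id;
		struct { uint32_t thread, vpgr_or_sgpr; } gpr;
	};
	#define GPRWAVE_IOC_SET_STATE _IOWR(0x20, 0, struct gprwave_iocdata)

	int main(void)
	{
		struct gprwave_iocdata id;
		uint32_t data[32] = { 0 };
		int fd = open("/sys/kernel/debug/dri/0/amdgpu_gprwave", O_RDONLY);

		memset(&id, 0, sizeof(id));	/* gpr_or_wave == 0 selects wave status */
		id.xcc_id = 0;			/* the new XCC selector this patch introduces */
		ioctl(fd, GPRWAVE_IOC_SET_STATE, &id);

		/* reads must be 4-byte sized and aligned; data[0] is the type tag */
		pread(fd, data, sizeof(data), 0);
		printf("SQ_WAVE_STATUS = 0x%08x\n", data[1]);
		close(fd);
		return 0;
	}

The SET_STATE ioctl latches the SE/SH/CU/wave/XCC selection in the per-file-handle state, after which plain reads on the same handle return the selected wave status or GPR words.]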
(v2) Fix missing xcc_id in wave_ind function (v3) Fix pm runtime calls and mutex locking (v4) Fix bad label Signed-off-by: Tom St Denis <tom.stdenis@amd.com> Reviewed-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 202 +++++++++++++++++++++++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 10 +- drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h | 36 ++++- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 8 +- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 8 +- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 6 +- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 6 +- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 6 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 8 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 60 ++++----- 10 files changed, 275 insertions(+), 75 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index df94cd2c4b39..e94d0cf3f793 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -139,7 +139,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, sh_bank, instance_bank, 0); } else if (use_ring) { mutex_lock(&adev->srbm_mutex); - amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid); + amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid, 0); } if (pm_pg_lock) @@ -172,7 +172,7 @@ end: amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); mutex_unlock(&adev->grbm_idx_mutex); } else if (use_ring) { - amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0); + amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } @@ -263,14 +263,14 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off } mutex_lock(&adev->grbm_idx_mutex); amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se, - rd->id.grbm.sh, - rd->id.grbm.instance, 0); + rd->id.grbm.sh, + rd->id.grbm.instance, rd->id.xcc_id); } if (rd->id.use_srbm) { mutex_lock(&adev->srbm_mutex); amdgpu_gfx_select_me_pipe_q(adev, rd->id.srbm.me, rd->id.srbm.pipe, - rd->id.srbm.queue, rd->id.srbm.vmid); + rd->id.srbm.queue, rd->id.srbm.vmid, rd->id.xcc_id); } if (rd->id.pg_lock) @@ -296,12 +296,12 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off } end: if (rd->id.use_grbm) { - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, rd->id.xcc_id); mutex_unlock(&adev->grbm_idx_mutex); } if (rd->id.use_srbm) { - amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0); + amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, rd->id.xcc_id); mutex_unlock(&adev->srbm_mutex); } @@ -320,19 +320,45 @@ end: static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigned long data) { struct amdgpu_debugfs_regs2_data *rd = f->private_data; + struct amdgpu_debugfs_regs2_iocdata v1_data; int r; + mutex_lock(&rd->lock); + switch (cmd) { - case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE: - mutex_lock(&rd->lock); - r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata *)data, + case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE_V2: + r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata_v2 *)data, sizeof(rd->id)); - mutex_unlock(&rd->lock); - return r ? 
-EINVAL : 0; + if (r) + r = -EINVAL; + goto done; + case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE: + r = copy_from_user(&v1_data, (struct amdgpu_debugfs_regs2_iocdata *)data, + sizeof(v1_data)); + if (r) { + r = -EINVAL; + goto done; + } + goto v1_copy; default: - return -EINVAL; - } - return 0; + r = -EINVAL; + goto done; + } + +v1_copy: + rd->id.use_srbm = v1_data.use_srbm; + rd->id.use_grbm = v1_data.use_grbm; + rd->id.pg_lock = v1_data.pg_lock; + rd->id.grbm.se = v1_data.grbm.se; + rd->id.grbm.sh = v1_data.grbm.sh; + rd->id.grbm.instance = v1_data.grbm.instance; + rd->id.srbm.me = v1_data.srbm.me; + rd->id.srbm.pipe = v1_data.srbm.pipe; + rd->id.srbm.queue = v1_data.srbm.queue; + rd->id.xcc_id = 0; +done: + mutex_unlock(&rd->lock); + return r; } static ssize_t amdgpu_debugfs_regs2_read(struct file *f, char __user *buf, size_t size, loff_t *pos) @@ -345,6 +371,135 @@ static ssize_t amdgpu_debugfs_regs2_write(struct file *f, const char __user *buf return amdgpu_debugfs_regs2_op(f, (char __user *)buf, *pos, size, 1); } +static int amdgpu_debugfs_gprwave_open(struct inode *inode, struct file *file) +{ + struct amdgpu_debugfs_gprwave_data *rd; + + rd = kzalloc(sizeof *rd, GFP_KERNEL); + if (!rd) + return -ENOMEM; + rd->adev = file_inode(file)->i_private; + file->private_data = rd; + mutex_init(&rd->lock); + + return 0; +} + +static int amdgpu_debugfs_gprwave_release(struct inode *inode, struct file *file) +{ + struct amdgpu_debugfs_gprwave_data *rd = file->private_data; + mutex_destroy(&rd->lock); + kfree(file->private_data); + return 0; +} + +static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, size_t size, loff_t *pos) +{ + struct amdgpu_debugfs_gprwave_data *rd = f->private_data; + struct amdgpu_device *adev = rd->adev; + ssize_t result = 0; + int r; + uint32_t *data, x; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + data = kcalloc(1024, sizeof(*data), GFP_KERNEL); + if (!data) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_virt_disable_access_debugfs(adev); + return -ENOMEM; + } + + /* switch to the specific se/sh/cu */ + mutex_lock(&adev->grbm_idx_mutex); + amdgpu_gfx_select_se_sh(adev, rd->id.se, rd->id.sh, rd->id.cu, rd->id.xcc_id); + + if (!rd->id.gpr_or_wave) { + x = 0; + if (adev->gfx.funcs->read_wave_data) + adev->gfx.funcs->read_wave_data(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, data, &x); + } else { + x = size >> 2; + if (rd->id.gpr.vpgr_or_sgpr) { + if (adev->gfx.funcs->read_wave_vgprs) + adev->gfx.funcs->read_wave_vgprs(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, rd->id.gpr.thread, *pos, size>>2, data); + } else { + if (adev->gfx.funcs->read_wave_sgprs) + adev->gfx.funcs->read_wave_sgprs(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, *pos, size>>2, data); + } + } + + amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, rd->id.xcc_id); + mutex_unlock(&adev->grbm_idx_mutex); + + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + if (!x) { + result = -EINVAL; + goto done; + } + + while (size && (*pos < x * 4)) { + uint32_t value; + + value = data[*pos >> 2]; + r = put_user(value, (uint32_t *)buf); + if (r) { + result = r; + goto done; + } + + result += 4; + buf += 4; + *pos += 4; + size -= 
4; + } + +done: + amdgpu_virt_disable_access_debugfs(adev); + kfree(data); + return result; +} + +static long amdgpu_debugfs_gprwave_ioctl(struct file *f, unsigned int cmd, unsigned long data) +{ + struct amdgpu_debugfs_gprwave_data *rd = f->private_data; + int r; + + mutex_lock(&rd->lock); + + switch (cmd) { + case AMDGPU_DEBUGFS_GPRWAVE_IOC_SET_STATE: + r = copy_from_user(&rd->id, (struct amdgpu_debugfs_gprwave_iocdata *)data, sizeof rd->id); + if (r) + return r ? -EINVAL : 0; + goto done; + default: + r = -EINVAL; + goto done; + } + +done: + mutex_unlock(&rd->lock); + return r; +} + + + /** * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register @@ -913,7 +1068,7 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, x = 0; if (adev->gfx.funcs->read_wave_data) - adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x); + adev->gfx.funcs->read_wave_data(adev, 0, simd, wave, data, &x); amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0); mutex_unlock(&adev->grbm_idx_mutex); @@ -1007,10 +1162,10 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, if (bank == 0) { if (adev->gfx.funcs->read_wave_vgprs) - adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data); + adev->gfx.funcs->read_wave_vgprs(adev, 0, simd, wave, thread, offset, size>>2, data); } else { if (adev->gfx.funcs->read_wave_sgprs) - adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data); + adev->gfx.funcs->read_wave_sgprs(adev, 0, simd, wave, offset, size>>2, data); } amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0); @@ -1341,6 +1496,15 @@ static const struct file_operations amdgpu_debugfs_regs2_fops = { .llseek = default_llseek }; +static const struct file_operations amdgpu_debugfs_gprwave_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = amdgpu_debugfs_gprwave_ioctl, + .read = amdgpu_debugfs_gprwave_read, + .open = amdgpu_debugfs_gprwave_open, + .release = amdgpu_debugfs_gprwave_release, + .llseek = default_llseek +}; + static const struct file_operations amdgpu_debugfs_regs_fops = { .owner = THIS_MODULE, .read = amdgpu_debugfs_regs_read, @@ -1418,6 +1582,7 @@ static const struct file_operations amdgpu_debugfs_gfxoff_residency_fops = { static const struct file_operations *debugfs_regs[] = { &amdgpu_debugfs_regs_fops, &amdgpu_debugfs_regs2_fops, + &amdgpu_debugfs_gprwave_fops, &amdgpu_debugfs_regs_didt_fops, &amdgpu_debugfs_regs_pcie_fops, &amdgpu_debugfs_regs_smc_fops, @@ -1434,6 +1599,7 @@ static const struct file_operations *debugfs_regs[] = { static const char * const debugfs_regs_names[] = { "amdgpu_regs", "amdgpu_regs2", + "amdgpu_gprwave", "amdgpu_regs_didt", "amdgpu_regs_pcie", "amdgpu_regs_smc", diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 93f9875154db..2b2d75763875 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -247,16 +247,16 @@ struct amdgpu_gfx_funcs { uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance, int xcc_id); - void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, + void (*read_wave_data)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields); - void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, + void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t 
wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst); - void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd, + void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst); void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe, - u32 queue, u32 vmid); + u32 queue, u32 vmid, u32 xcc_id); void (*init_spm_golden)(struct amdgpu_device *adev); void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable); int (*get_gfx_shadow_info)(struct amdgpu_device *adev, @@ -405,7 +405,7 @@ struct amdgpu_gfx { #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) #define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id))) -#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid)) +#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid, xcc_id) ((adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid), (xcc_id))) #define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev)) #define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si))) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h index 919d9d401750..107f9bb0e24f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h @@ -35,17 +35,51 @@ struct amdgpu_debugfs_regs2_iocdata { } srbm; }; +struct amdgpu_debugfs_regs2_iocdata_v2 { + __u32 use_srbm, use_grbm, pg_lock; + struct { + __u32 se, sh, instance; + } grbm; + struct { + __u32 me, pipe, queue, vmid; + } srbm; + u32 xcc_id; +}; + +struct amdgpu_debugfs_gprwave_iocdata { + u32 gpr_or_wave, se, sh, cu, wave, simd, xcc_id; + struct { + u32 thread, vpgr_or_sgpr; + } gpr; +}; + /* * MMIO debugfs state data (per file* handle) */ struct amdgpu_debugfs_regs2_data { struct amdgpu_device *adev; struct mutex lock; - struct amdgpu_debugfs_regs2_iocdata id; + struct amdgpu_debugfs_regs2_iocdata_v2 id; +}; + +struct amdgpu_debugfs_gprwave_data { + struct amdgpu_device *adev; + struct mutex lock; + struct amdgpu_debugfs_gprwave_iocdata id; }; enum AMDGPU_DEBUGFS_REGS2_CMDS { AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE=0, + AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE_V2, +}; + +enum AMDGPU_DEBUGFS_GPRWAVE_CMDS { + AMDGPU_DEBUGFS_GPRWAVE_CMD_SET_STATE=0, }; +//reg2 interface #define AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE _IOWR(0x20, AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE, struct amdgpu_debugfs_regs2_iocdata) +#define AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE_V2 _IOWR(0x20, AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE_V2, struct amdgpu_debugfs_regs2_iocdata_v2) + +//gprwave interface +#define AMDGPU_DEBUGFS_GPRWAVE_IOC_SET_STATE _IOWR(0x20, AMDGPU_DEBUGFS_GPRWAVE_CMD_SET_STATE, struct amdgpu_debugfs_gprwave_iocdata) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 7b585141e10e..89158c72753e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4291,7 +4291,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave, *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA); } -static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) +static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t 
xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) { /* in gfx10 the SIMD_ID is specified as part of the INSTANCE * field when performing a select_se_sh so it should be @@ -4318,7 +4318,7 @@ static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE); } -static void gfx_v10_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, +static void gfx_v10_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst) { @@ -4329,7 +4329,7 @@ static void gfx_v10_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, dst); } -static void gfx_v10_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd, +static void gfx_v10_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst) @@ -4340,7 +4340,7 @@ static void gfx_v10_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd, } static void gfx_v10_0_select_me_pipe_q(struct amdgpu_device *adev, - u32 me, u32 pipe, u32 q, u32 vm) + u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id) { nv_grbm_select(adev, me, pipe, q, vm); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 790df2cc3480..4b7224de879e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -765,7 +765,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave, *(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA); } -static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) +static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) { /* in gfx11 the SIMD_ID is specified as part of the INSTANCE * field when performing a select_se_sh so it should be @@ -791,7 +791,7 @@ static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE); } -static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, +static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst) { @@ -802,7 +802,7 @@ static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, dst); } -static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd, +static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst) @@ -813,7 +813,7 @@ static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd, } static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev, - u32 me, u32 pipe, u32 q, u32 vm) + u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id) { soc21_grbm_select(adev, me, pipe, q, vm); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 7cb72bf1acdd..809558c718e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -2968,7 +2968,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd, *(out++) = RREG32(mmSQ_IND_DATA); } -static void gfx_v6_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, 
int *no_fields) +static void gfx_v6_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) { /* type 0 wave data */ dst[(*no_fields)++] = 0; @@ -2993,7 +2993,7 @@ static void gfx_v6_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE); } -static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, +static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst) { @@ -3003,7 +3003,7 @@ static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, } static void gfx_v6_0_select_me_pipe_q(struct amdgpu_device *adev, - u32 me, u32 pipe, u32 q, u32 vm) + u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id) { DRM_INFO("Not implemented\n"); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index d56dda5fc588..0f0c12bbe228 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4112,7 +4112,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd, *(out++) = RREG32(mmSQ_IND_DATA); } -static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) +static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) { /* type 0 wave data */ dst[(*no_fields)++] = 0; @@ -4137,7 +4137,7 @@ static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE); } -static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, +static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst) { @@ -4147,7 +4147,7 @@ static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, } static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev, - u32 me, u32 pipe, u32 q, u32 vm) + u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id) { cik_srbm_select(adev, me, pipe, q, vm); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 2ae7f167985f..6d0589dc1d6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -3419,7 +3419,7 @@ static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, } static void gfx_v8_0_select_me_pipe_q(struct amdgpu_device *adev, - u32 me, u32 pipe, u32 q, u32 vm) + u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id) { vi_srbm_select(adev, me, pipe, q, vm); } @@ -5217,7 +5217,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd, *(out++) = RREG32(mmSQ_IND_DATA); } -static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) +static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) { /* type 0 wave data */ dst[(*no_fields)++] = 0; @@ -5242,7 +5242,7 @@ static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE); } -static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, +static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device 
*adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index da69177dc76f..cc005e3bcd40 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1788,7 +1788,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd, *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA); } -static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) +static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) { /* type 1 wave data */ dst[(*no_fields)++] = 1; @@ -1809,7 +1809,7 @@ static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE); } -static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, +static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst) { @@ -1818,7 +1818,7 @@ static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, start + SQIND_WAVE_SGPRS_OFFSET, size, dst); } -static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd, +static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst) @@ -1829,7 +1829,7 @@ static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd, } static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev, - u32 me, u32 pipe, u32 q, u32 vm) + u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id) { soc15_grbm_select(adev, me, pipe, q, vm, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index c5ffcd70ec7b..76e3571ec5c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -536,21 +536,21 @@ static void gfx_v9_4_3_select_se_sh(struct amdgpu_device *adev, WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data); } -static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address) +static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t address) { - WREG32_SOC15_RLC(GC, GET_INST(GC, 0), regSQ_IND_INDEX, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX, (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | (address << SQ_IND_INDEX__INDEX__SHIFT) | (SQ_IND_INDEX__FORCE_READ_MASK)); - return RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_IND_DATA); + return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA); } -static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd, +static void wave_read_regs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t regno, uint32_t num, uint32_t *out) { - WREG32_SOC15_RLC(GC, GET_INST(GC, 0), regSQ_IND_INDEX, + WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX, (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | (regno << SQ_IND_INDEX__INDEX__SHIFT) | @@ -558,53 +558,53 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd, (SQ_IND_INDEX__FORCE_READ_MASK) | 
(SQ_IND_INDEX__AUTO_INCR_MASK)); while (num--) - *(out++) = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_IND_DATA); + *(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA); } static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev, - uint32_t simd, uint32_t wave, + uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) { /* type 1 wave data */ dst[(*no_fields)++] = 1; - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0); - dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE); -} - -static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_HW_ID); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_GPR_ALLOC); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_LDS_ALLOC); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_TRAPSTS); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_DBG0); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_M0); + dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_MODE); +} + +static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst) { - wave_read_regs(adev, simd, wave, 0, + wave_read_regs(adev, xcc_id, simd, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size, dst); } -static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd, +static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst) { - wave_read_regs(adev, simd, wave, thread, + wave_read_regs(adev, xcc_id, simd, wave, thread, start + 
SQIND_WAVE_VGPRS_OFFSET, size, dst); } static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev, - u32 me, u32 pipe, u32 q, u32 vm) + u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id) { - soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, 0)); + soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id)); } static enum amdgpu_gfx_partition -- cgit v1.2.3 From 3d2ea552b229495050316e84d7cb0257cb3cd13b Mon Sep 17 00:00:00 2001 From: Rajneesh Bhardwaj <rajneesh.bhardwaj@amd.com> Date: Tue, 12 Apr 2022 23:33:20 -0400 Subject: drm/amdgpu: implement smuio v13_0_3 callbacks Add smuio v13_0_3 callbacks for SMUIO. Tested-by: Ori Messinger <Ori.Messinger@amd.com> Acked-by: Alex Deucher <alexander.deucher@amd.com> Reviewed-by: Lijo Lazar <lijo.lazar@amd.com> Signed-off-by: Rajneesh Bhardwaj <rajneesh.bhardwaj@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/Makefile | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 5 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h | 1 + drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.c | 103 +++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.h | 28 ++++++++ 5 files changed, 138 insertions(+) create mode 100644 drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.c create mode 100644 drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.h (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 90f771423c94..8418a90df493 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -205,6 +205,7 @@ amdgpu-y += \ smuio_v11_0.o \ smuio_v11_0_6.o \ smuio_v13_0.o \ + smuio_v13_0_3.o \ smuio_v13_0_6.o # add reset block diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 2b2d75763875..789115f5505f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -66,6 +66,11 @@ enum amdgpu_gfx_partition { #define NUM_XCC(x) hweight16(x) +enum amdgpu_pkg_type { + AMDGPU_PKG_TYPE_APU = 2, + AMDGPU_PKG_TYPE_UNKNOWN, +}; + struct amdgpu_mec { struct amdgpu_bo *hpd_eop_obj; u64 hpd_eop_gpu_addr; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h index c7a823f3f2c5..89c38d864471 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h @@ -30,6 +30,7 @@ struct amdgpu_smuio_funcs { void (*get_clock_gating_state)(struct amdgpu_device *adev, u64 *flags); u32 (*get_die_id)(struct amdgpu_device *adev); u32 (*get_socket_id)(struct amdgpu_device *adev); + enum amdgpu_pkg_type (*get_pkg_type)(struct amdgpu_device *adev); bool (*is_host_gpu_xgmi_supported)(struct amdgpu_device *adev); }; diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.c b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.c new file mode 100644 index 000000000000..4368a5891eeb --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.c @@ -0,0 +1,103 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "smuio_v13_0_3.h" +#include "soc15_common.h" +#include "smuio/smuio_13_0_3_offset.h" +#include "smuio/smuio_13_0_3_sh_mask.h" + +#define PKG_TYPE_MASK 0x00000003L + +/** + * smuio_v13_0_3_get_die_id - query die id from FCH. + * + * @adev: amdgpu device pointer + * + * Returns die id + */ +static u32 smuio_v13_0_3_get_die_id(struct amdgpu_device *adev) +{ + u32 data, die_id; + + data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG); + die_id = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, DIE_ID); + + return die_id; +} + +/** + * smuio_v13_0_3_get_socket_id - query socket id from FCH + * + * @adev: amdgpu device pointer + * + * Returns socket id + */ +static u32 smuio_v13_0_3_get_socket_id(struct amdgpu_device *adev) +{ + u32 data, socket_id; + + data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG); + socket_id = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, SOCKET_ID); + + return socket_id; +} + +/** + * smuio_v13_0_3_get_pkg_type - query package type set by MP1/bootcode + * + * @adev: amdgpu device pointer + * + * Returns package type + */ + +static enum amdgpu_pkg_type smuio_v13_0_3_get_pkg_type(struct amdgpu_device *adev) +{ + enum amdgpu_pkg_type pkg_type; + u32 data; + + data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG); + data = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, PKG_TYPE); + /* pkg_type[4:0] + * + * bit 1 == 1 APU form factor + * + * b0100 - b1111 - Reserved + */ + switch (data & PKG_TYPE_MASK) { + case 0x2: + pkg_type = AMDGPU_PKG_TYPE_APU; + break; + default: + pkg_type = AMDGPU_PKG_TYPE_UNKNOWN; + break; + } + + return pkg_type; +} + + +const struct amdgpu_smuio_funcs smuio_v13_0_3_funcs = { + .get_die_id = smuio_v13_0_3_get_die_id, + .get_socket_id = smuio_v13_0_3_get_socket_id, + .get_pkg_type = smuio_v13_0_3_get_pkg_type, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.h b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.h new file mode 100644 index 000000000000..aec35f7efb58 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.h @@ -0,0 +1,28 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __SMUIO_V13_0_3_H__ +#define __SMUIO_V13_0_3_H__ + +extern const struct amdgpu_smuio_funcs smuio_v13_0_3_funcs; + +#endif /* __SMUIO_V13_0_3_H__ */ -- cgit v1.2.3 From ea2d2f8ececdd4c778b66e19b82ce278dfc5e1c4 Mon Sep 17 00:00:00 2001 From: Rajneesh Bhardwaj <rajneesh.bhardwaj@amd.com> Date: Tue, 5 Apr 2022 13:00:13 -0400 Subject: drm/amdgpu: detect current GPU memory partition mode - Add helpers to detect the current GPU memory partition. - Add current memory partition mode sysfs node. Tested-by: Ori Messinger <Ori.Messinger@amd.com> Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com> Signed-off-by: Rajneesh Bhardwaj <rajneesh.bhardwaj@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 25 +++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 11 +++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 1 + drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 11 +++++++++++ drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 12 ++++++++++++ 5 files changed, 60 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 2f6d85090b55..c2e92664031b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1200,6 +1200,24 @@ static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev, return sysfs_emit(buf, "%s\n", partition_mode); } +static ssize_t amdgpu_gfx_get_current_memory_partition(struct device *dev, + struct device_attribute *addr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + enum amdgpu_memory_partition mode; + static const char *partition_modes[] = { + "UNKNOWN", "NPS1", "NPS2", "NPS4", "NPS8" + }; + BUILD_BUG_ON(ARRAY_SIZE(partition_modes) <= AMDGPU_NPS8_PARTITION_MODE); + + mode = min((int)adev->gfx.funcs->query_mem_partition_mode(adev), + AMDGPU_NPS8_PARTITION_MODE); + + return sysfs_emit(buf, "%s\n", partition_modes[mode]); +} + static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, struct device_attribute *addr, const char *buf, size_t count) @@ -1307,6 +1325,9 @@ static DEVICE_ATTR(current_compute_partition, S_IRUGO | S_IWUSR, static DEVICE_ATTR(available_compute_partition, S_IRUGO, amdgpu_gfx_get_available_compute_partition, NULL); +static DEVICE_ATTR(current_memory_partition, 
S_IRUGO, + amdgpu_gfx_get_current_memory_partition, NULL); + int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev) { int r; @@ -1319,5 +1340,9 @@ int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev) if (r) return r; + r = device_create_file(adev->dev, &dev_attr_current_memory_partition); + if (r) + return r; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 789115f5505f..e37501685fa9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -71,6 +71,14 @@ enum amdgpu_pkg_type { AMDGPU_PKG_TYPE_UNKNOWN, }; +enum amdgpu_memory_partition { + UNKNOWN_MEMORY_PARTITION_MODE = 0, + AMDGPU_NPS1_PARTITION_MODE = 1, + AMDGPU_NPS2_PARTITION_MODE = 2, + AMDGPU_NPS4_PARTITION_MODE = 3, + AMDGPU_NPS8_PARTITION_MODE = 4, +}; + struct amdgpu_mec { struct amdgpu_bo *hpd_eop_obj; u64 hpd_eop_gpu_addr; @@ -268,6 +276,8 @@ struct amdgpu_gfx_funcs { struct amdgpu_gfx_shadow_info *shadow_info); enum amdgpu_gfx_partition (*query_partition_mode)(struct amdgpu_device *adev); + enum amdgpu_memory_partition + (*query_mem_partition_mode)(struct amdgpu_device *adev); int (*switch_partition_mode)(struct amdgpu_device *adev, enum amdgpu_gfx_partition mode); }; @@ -404,6 +414,7 @@ struct amdgpu_gfx { enum amdgpu_gfx_partition partition_mode; uint16_t xcc_mask; + enum amdgpu_memory_partition mem_partition_mode; uint32_t num_xcc_per_xcp; struct mutex partition_mutex; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index 8fa3a1f3b181..639b86c4d664 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -97,6 +97,7 @@ struct amdgpu_nbio_funcs { void (*clear_doorbell_interrupt)(struct amdgpu_device *adev); u32 (*get_rom_offset)(struct amdgpu_device *adev); u32 (*get_compute_partition_mode)(struct amdgpu_device *adev); + u32 (*get_memory_partition_mode)(struct amdgpu_device *adev); void (*set_compute_partition_mode)(struct amdgpu_device *adev, enum amdgpu_gfx_partition mode); }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 6ed97371ff1e..3c19c5a0069d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -606,6 +606,16 @@ static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev, { soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id)); } +static enum amdgpu_memory_partition +gfx_v9_4_3_query_memory_partition(struct amdgpu_device *adev) +{ + enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE; + + if (adev->nbio.funcs->get_memory_partition_mode) + mode = adev->nbio.funcs->get_memory_partition_mode(adev); + + return mode; +} static enum amdgpu_gfx_partition gfx_v9_4_3_query_compute_partition(struct amdgpu_device *adev) @@ -675,6 +685,7 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q, .query_partition_mode = &gfx_v9_4_3_query_compute_partition, .switch_partition_mode = &gfx_v9_4_3_switch_compute_partition, + .query_mem_partition_mode = &gfx_v9_4_3_query_memory_partition, }; static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index 78eab4d48e38..fa4b423c399b 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -30,6 +30,8 @@ #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" #include 
<uapi/linux/kfd_ioctl.h> +#define NPS_MODE_MASK 0x000000FFL + static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev) { WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL, @@ -406,6 +408,15 @@ static void nbio_v7_9_set_compute_partition_mode(struct amdgpu_device *adev, WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_COMPUTE_STATUS, tmp); } +static enum amdgpu_memory_partition nbio_v7_9_get_memory_partition_mode(struct amdgpu_device *adev) +{ + u32 tmp; + tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_MEM_STATUS); + tmp = REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_MEM_STATUS, NPS_MODE); + + return ffs(tmp); +} + const struct amdgpu_nbio_funcs nbio_v7_9_funcs = { .get_hdp_flush_req_offset = nbio_v7_9_get_hdp_flush_req_offset, .get_hdp_flush_done_offset = nbio_v7_9_get_hdp_flush_done_offset, @@ -428,4 +439,5 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = { .remap_hdp_registers = nbio_v7_9_remap_hdp_registers, .get_compute_partition_mode = nbio_v7_9_get_compute_partition_mode, .set_compute_partition_mode = nbio_v7_9_set_compute_partition_mode, + .get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode, }; -- cgit v1.2.3 From 993d218f82211b1e17fcea7a5f727fa16efba353 Mon Sep 17 00:00:00 2001 From: Shiwu Zhang <shiwu.zhang@amd.com> Date: Mon, 14 Nov 2022 15:52:19 +0800 Subject: drm/amdgpu: remove partition attributes sys file for gfx_v9_4_3 For driver de-init like rmmod operations those partition specific attributes need to be removed accordingly. Signed-off-by: Shiwu Zhang <shiwu.zhang@amd.com> Reviewed-by: Rajneesh Bhardwaj <rajneesh.bhardwaj@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 7 +++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 1 + drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 1 + 3 files changed, 9 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 8aea9e023275..5ff49737d7c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1345,3 +1345,10 @@ int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev) return 0; } + +void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev) +{ + device_remove_file(adev->dev, &dev_attr_current_compute_partition); + device_remove_file(adev->dev, &dev_attr_available_compute_partition); + device_remove_file(adev->dev, &dev_attr_current_memory_partition); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index e37501685fa9..023c5b08929f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -502,4 +502,5 @@ int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev, bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id); int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev); +void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 9d7852ffd708..75ad5176e99e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -895,6 +895,7 @@ static int gfx_v9_4_3_sw_fini(void *handle) gfx_v9_4_3_mec_fini(adev); amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); gfx_v9_4_3_free_microcode(adev); + amdgpu_gfx_sysfs_fini(adev); return 0; } -- cgit v1.2.3 From 75d1692393cb78b510ff18733457f91d002452f7 Mon Sep 17 00:00:00 2001 From: Lijo Lazar 
<lijo.lazar@amd.com> Date: Fri, 16 Sep 2022 12:43:35 +0530 Subject: drm/amdgpu: Add initial version of XCP routines Within a device, an accelerator core partition can be constituted with different IP instances. These partitions are spatial in nature. Number of partitions which can exist at the same time depends on the 'partition mode'. Add a manager entity which is responsible for switching between different partition modes and maintaining partitions. It is also responsible for suspend/resume of different partitions. Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/Makefile | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 244 +++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 107 ++++++++++++++ 6 files changed, 356 insertions(+), 3 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 8418a90df493..74a9aa6fe18c 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -60,7 +60,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \ amdgpu_fw_attestation.o amdgpu_securedisplay.o \ amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \ - amdgpu_ring_mux.o + amdgpu_ring_mux.o amdgpu_xcp.o amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c2dd100bbd60..3858d29baef1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -283,6 +283,7 @@ extern uint amdgpu_user_partt_mode; #define AMDGPU_SMARTSHIFT_MAX_BIAS (100) #define AMDGPU_SMARTSHIFT_MIN_BIAS (-100) +struct amdgpu_xcp_mgr; struct amdgpu_device; struct amdgpu_irq_src; struct amdgpu_fpriv; @@ -765,6 +766,7 @@ struct amdgpu_device { struct amdgpu_acp acp; #endif struct amdgpu_hive_info *hive; + struct amdgpu_xcp_mgr *xcp_mgr; /* ASIC */ enum amd_asic_type asic_type; uint32_t family; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 023c5b08929f..5adfe4277641 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -61,7 +61,7 @@ enum amdgpu_gfx_partition { AMDGPU_TPX_PARTITION_MODE = 2, AMDGPU_QPX_PARTITION_MODE = 3, AMDGPU_CPX_PARTITION_MODE = 4, - AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, + AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE = -1, }; #define NUM_XCC(x) hweight16(x) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index 639b86c4d664..eb25ac98903f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -96,7 +96,7 @@ struct amdgpu_nbio_funcs { void (*apply_l1_link_width_reconfig_wa)(struct amdgpu_device *adev); void (*clear_doorbell_interrupt)(struct amdgpu_device *adev); u32 (*get_rom_offset)(struct amdgpu_device *adev); - u32 (*get_compute_partition_mode)(struct amdgpu_device *adev); + int (*get_compute_partition_mode)(struct amdgpu_device *adev); u32 (*get_memory_partition_mode)(struct amdgpu_device *adev); void 
(*set_compute_partition_mode)(struct amdgpu_device *adev, enum amdgpu_gfx_partition mode); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c new file mode 100644 index 000000000000..f59bc450cabe --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -0,0 +1,244 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "amdgpu_xcp.h" + +static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr, + struct amdgpu_xcp_ip *xcp_ip, int xcp_state) +{ + int (*run_func)(void *handle, uint32_t inst_mask); + int ret = 0; + + if (!xcp_ip || !xcp_ip->valid || !xcp_ip->ip_funcs) + return 0; + + run_func = NULL; + + switch (xcp_state) { + case AMDGPU_XCP_PREPARE_SUSPEND: + run_func = xcp_ip->ip_funcs->prepare_suspend; + break; + case AMDGPU_XCP_SUSPEND: + run_func = xcp_ip->ip_funcs->suspend; + break; + case AMDGPU_XCP_PREPARE_RESUME: + run_func = xcp_ip->ip_funcs->prepare_resume; + break; + case AMDGPU_XCP_RESUME: + run_func = xcp_ip->ip_funcs->resume; + break; + } + + if (run_func) + ret = run_func(xcp_mgr->adev, xcp_ip->inst_mask); + + return ret; +} + +static int amdgpu_xcp_run_transition(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, + int state) +{ + struct amdgpu_xcp_ip *xcp_ip; + struct amdgpu_xcp *xcp; + int i, ret; + + if (xcp_id > MAX_XCP || !xcp_mgr->xcp[xcp_id].valid) + return -EINVAL; + + xcp = &xcp_mgr->xcp[xcp_id]; + for (i = 0; i < AMDGPU_XCP_MAX_BLOCKS; ++i) { + xcp_ip = &xcp->ip[i]; + ret = __amdgpu_xcp_run(xcp_mgr, xcp_ip, state); + if (ret) + break; + } + + return ret; +} + +int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id) +{ + return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, + AMDGPU_XCP_PREPARE_SUSPEND); +} + +int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id) +{ + return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_SUSPEND); +} + +int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id) +{ + return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, + AMDGPU_XCP_PREPARE_RESUME); +} + +int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id) +{ + return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_RESUME); +} + +static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, + struct amdgpu_xcp_ip *ip) +{ + struct amdgpu_xcp *xcp; + + if (!ip) + return; + + xcp = &xcp_mgr->xcp[xcp_id]; + xcp->ip[ip->ip_id] = *ip; + 
xcp->ip[ip->ip_id].valid = true; + + xcp->valid = true; +} + +static int __amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps) +{ + struct amdgpu_xcp_ip ip; + int i, j, ret; + + for (i = 0; i < MAX_XCP; ++i) + xcp_mgr->xcp[i].valid = false; + + for (i = 0; i < num_xcps; ++i) { + for (j = AMDGPU_XCP_GFXHUB; j < AMDGPU_XCP_MAX_BLOCKS; ++j) { + ret = xcp_mgr->funcs->get_ip_details(xcp_mgr, i, j, + &ip); + if (ret) + continue; + + __amdgpu_xcp_add_block(xcp_mgr, i, &ip); + } + } + + xcp_mgr->num_xcps = num_xcps; + + return 0; +} + +int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode) +{ + int ret, num_xcps = 0; + + if (!xcp_mgr || mode == AMDGPU_XCP_MODE_NONE) + return -EINVAL; + + if (xcp_mgr->mode == mode) + return 0; + + if (!xcp_mgr->funcs || !xcp_mgr->funcs->switch_partition_mode) + return 0; + + mutex_lock(&xcp_mgr->xcp_lock); + + ret = xcp_mgr->funcs->switch_partition_mode(xcp_mgr, mode, &num_xcps); + + if (ret) + goto out; + + if (!num_xcps || num_xcps > MAX_XCP) { + ret = -EINVAL; + goto out; + } + + xcp_mgr->mode = mode; + __amdgpu_xcp_init(xcp_mgr, num_xcps); +out: + mutex_unlock(&xcp_mgr->xcp_lock); + + return ret; +} + +int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) +{ + int mode; + + if (xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) + return xcp_mgr->mode; + + if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode) + return xcp_mgr->mode; + + mutex_lock(&xcp_mgr->xcp_lock); + mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr); + if (mode != xcp_mgr->mode) + dev_WARN( + xcp_mgr->adev->dev, + "Cached partition mode %d not matching with device mode %d", + xcp_mgr->mode, mode); + + mutex_unlock(&xcp_mgr->xcp_lock); + + return mode; +} + +int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode, + int init_num_xcps, + struct amdgpu_xcp_mgr_funcs *xcp_funcs) +{ + struct amdgpu_xcp_mgr *xcp_mgr; + + if (!xcp_funcs || !xcp_funcs->switch_partition_mode || + !xcp_funcs->get_ip_details) + return -EINVAL; + + xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL); + + if (!xcp_mgr) + return -ENOMEM; + + xcp_mgr->adev = adev; + xcp_mgr->funcs = xcp_funcs; + xcp_mgr->mode = init_mode; + mutex_init(&xcp_mgr->xcp_lock); + + if (init_mode != AMDGPU_XCP_MODE_NONE) + __amdgpu_xcp_init(xcp_mgr, init_num_xcps); + + adev->xcp_mgr = xcp_mgr; + + return 0; +} + +int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr, + enum AMDGPU_XCP_IP_BLOCK ip, int instance) +{ + struct amdgpu_xcp *xcp; + int i, id_mask = 0; + + if (ip >= AMDGPU_XCP_MAX_BLOCKS) + return -EINVAL; + + for (i = 0; i < xcp_mgr->num_xcps; ++i) { + xcp = &xcp_mgr->xcp[i]; + if ((xcp->valid) && (xcp->ip[ip].valid) && + (xcp->ip[ip].inst_mask & BIT(instance))) + id_mask |= BIT(i); + } + + if (!id_mask) + id_mask = -ENXIO; + + return id_mask; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h new file mode 100644 index 000000000000..f0b973c6092f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -0,0 +1,107 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef AMDGPU_XCP_H +#define AMDGPU_XCP_H + +#include <linux/xarray.h> + +#define MAX_XCP 8 + +#define AMDGPU_XCP_MODE_NONE -1 + +enum AMDGPU_XCP_IP_BLOCK { + AMDGPU_XCP_GFXHUB, + AMDGPU_XCP_GFX, + AMDGPU_XCP_SDMA, + AMDGPU_XCP_VCN, + AMDGPU_XCP_MAX_BLOCKS +}; + +enum AMDGPU_XCP_STATE { + AMDGPU_XCP_PREPARE_SUSPEND, + AMDGPU_XCP_SUSPEND, + AMDGPU_XCP_PREPARE_RESUME, + AMDGPU_XCP_RESUME, +}; + +struct amdgpu_xcp_ip_funcs { + int (*prepare_suspend)(void *handle, uint32_t inst_mask); + int (*suspend)(void *handle, uint32_t inst_mask); + int (*prepare_resume)(void *handle, uint32_t inst_mask); + int (*resume)(void *handle, uint32_t inst_mask); +}; + +struct amdgpu_xcp_ip { + struct amdgpu_xcp_ip_funcs *ip_funcs; + uint32_t inst_mask; + + enum AMDGPU_XCP_IP_BLOCK ip_id; + bool valid; +}; + +struct amdgpu_xcp { + struct amdgpu_xcp_ip ip[AMDGPU_XCP_MAX_BLOCKS]; + + uint8_t id; + uint8_t mem_node; + bool valid; +}; + +struct amdgpu_xcp_mgr { + struct amdgpu_device *adev; + struct mutex xcp_lock; + struct amdgpu_xcp_mgr_funcs *funcs; + + struct amdgpu_xcp xcp[MAX_XCP]; + uint8_t num_xcps; + int8_t mode; +}; + +struct amdgpu_xcp_mgr_funcs { + int (*switch_partition_mode)(struct amdgpu_xcp_mgr *xcp_mgr, int mode, + int *num_xcps); + int (*query_partition_mode)(struct amdgpu_xcp_mgr *xcp_mgr); + int (*get_ip_details)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, + enum AMDGPU_XCP_IP_BLOCK ip_id, + struct amdgpu_xcp_ip *ip); + + int (*prepare_suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); + int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); + int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); + int (*resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); +}; + +int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); +int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); +int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); +int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); + +int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode, + int init_xcps, struct amdgpu_xcp_mgr_funcs *xcp_funcs); +int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr); +int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode); +int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr, + enum AMDGPU_XCP_IP_BLOCK ip, int instance); + +#endif -- cgit v1.2.3 From 
9cb18287d8f1c4549d95280e2cf60f4d1bab64f8 Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Mon, 19 Sep 2022 17:34:02 +0530 Subject: drm/amdgpu: Add SOC partition funcs for GC v9.4.3 Switching the partition mode configuration of the ASIC is an SOC-level function rather than something at the GFX core level. Add partition mode switch functions as SOC specific callbacks. Implement the XCP manager callbacks needed for partition switch for GC 9.4.3 based ASICs. Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 4 + .../gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 232 +++++++++++++++++++++ 2 files changed, 236 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 5adfe4277641..8be4ab50b171 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -278,8 +278,12 @@ struct amdgpu_gfx_funcs { (*query_partition_mode)(struct amdgpu_device *adev); enum amdgpu_memory_partition (*query_mem_partition_mode)(struct amdgpu_device *adev); + int (*switch_partition_mode)(struct amdgpu_device *adev, enum amdgpu_gfx_partition mode); + + int (*switch_gfx_partition_mode)(struct amdgpu_device *adev, + int num_xccs_per_xcp); }; struct sq_work { diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 2616bdb40418..4264fbd267ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -24,6 +24,12 @@ #include "soc15.h" #include "soc15_common.h" +#include "amdgpu_xcp.h" + +#define XCP_INST_MASK(num_inst, xcp_id) \ + (num_inst ? 
GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0) + +#define AMDGPU_XCP_OPS_KFD (1 << 0) void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev) { @@ -119,3 +125,229 @@ u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id) return ext_offset; } + +static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) +{ + enum amdgpu_gfx_partition mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; + struct amdgpu_device *adev = xcp_mgr->adev; + + if (adev->nbio.funcs->get_compute_partition_mode) + mode = adev->nbio.funcs->get_compute_partition_mode(adev); + + return mode; +} + +int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode) +{ + int num_xcc, num_xcc_per_xcp = 0; + + num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask); + + switch (mode) { + case AMDGPU_SPX_PARTITION_MODE: + num_xcc_per_xcp = num_xcc; + break; + case AMDGPU_DPX_PARTITION_MODE: + num_xcc_per_xcp = num_xcc / 2; + break; + case AMDGPU_TPX_PARTITION_MODE: + num_xcc_per_xcp = num_xcc / 3; + break; + case AMDGPU_QPX_PARTITION_MODE: + num_xcc_per_xcp = num_xcc / 4; + break; + case AMDGPU_CPX_PARTITION_MODE: + num_xcc_per_xcp = 1; + break; + } + + return num_xcc_per_xcp; +} + +int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, + enum AMDGPU_XCP_IP_BLOCK ip_id, + struct amdgpu_xcp_ip *ip) +{ + struct amdgpu_device *adev = xcp_mgr->adev; + int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp; + int num_sdma, num_vcn; + + num_sdma = adev->sdma.num_instances; + num_vcn = adev->vcn.num_vcn_inst; + + switch (xcp_mgr->mode) { + case AMDGPU_SPX_PARTITION_MODE: + num_sdma_xcp = num_sdma; + num_vcn_xcp = num_vcn; + break; + case AMDGPU_DPX_PARTITION_MODE: + num_sdma_xcp = num_sdma / 2; + num_vcn_xcp = num_vcn / 2; + break; + case AMDGPU_TPX_PARTITION_MODE: + num_sdma_xcp = num_sdma / 3; + num_vcn_xcp = num_vcn / 3; + break; + case AMDGPU_QPX_PARTITION_MODE: + num_sdma_xcp = num_sdma / 4; + num_vcn_xcp = num_vcn / 4; + break; + case AMDGPU_CPX_PARTITION_MODE: + num_sdma_xcp = 2; + num_vcn_xcp = num_vcn ? 1 : 0; + break; + default: + return -EINVAL; + } + + num_xcc_xcp = adev->gfx.num_xcc_per_xcp; + + switch (ip_id) { + case AMDGPU_XCP_GFXHUB: + ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id); + /* TODO : Assign IP funcs */ + break; + case AMDGPU_XCP_GFX: + ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id); + /* TODO : Assign IP funcs */ + break; + case AMDGPU_XCP_SDMA: + ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id); + /* TODO : Assign IP funcs */ + break; + case AMDGPU_XCP_VCN: + ip->inst_mask = XCP_INST_MASK(num_vcn_xcp, xcp_id); + /* TODO : Assign IP funcs */ + break; + default: + return -EINVAL; + } + + ip->ip_id = ip_id; + + return 0; +} + +static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr, + enum amdgpu_gfx_partition mode) +{ + int num_xcc, num_xccs_per_xcp; + + num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask); + switch (mode) { + case AMDGPU_SPX_PARTITION_MODE: + return num_xcc > 0; + case AMDGPU_DPX_PARTITION_MODE: + return (num_xcc % 4) == 0; + case AMDGPU_TPX_PARTITION_MODE: + return (num_xcc % 3) == 0; + case AMDGPU_QPX_PARTITION_MODE: + num_xccs_per_xcp = num_xcc / 4; + return (num_xccs_per_xcp >= 2); + case AMDGPU_CPX_PARTITION_MODE: + return (num_xcc > 1); + default: + return false; + } + + return false; +} + +static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) +{ + /* TODO: + * Stop user queues and threads, and make sure GPU is empty of work. 
+ */ + + if (flags & AMDGPU_XCP_OPS_KFD) + amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev); + + return 0; +} + +static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) +{ + int ret = 0; + + if (flags & AMDGPU_XCP_OPS_KFD) { + amdgpu_amdkfd_device_probe(xcp_mgr->adev); + amdgpu_amdkfd_device_init(xcp_mgr->adev); + /* If KFD init failed, return failure */ + if (!xcp_mgr->adev->kfd.init_complete) + ret = -EIO; + } + + return ret; +} + +static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, + int mode, int *num_xcps) +{ + int num_xcc_per_xcp, num_xcc, ret; + struct amdgpu_device *adev; + u32 flags = 0; + + adev = xcp_mgr->adev; + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + + if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) + return -EINVAL; + + if (adev->kfd.init_complete) + flags |= AMDGPU_XCP_OPS_KFD; + + if (flags & AMDGPU_XCP_OPS_KFD) { + ret = amdgpu_amdkfd_check_and_lock_kfd(adev); + if (ret) + goto out; + } + + ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags); + if (ret) + goto unlock; + + num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode); + if (adev->gfx.funcs->switch_gfx_partition_mode) + adev->gfx.funcs->switch_gfx_partition_mode(xcp_mgr->adev, + num_xcc_per_xcp); + + if (adev->nbio.funcs->set_compute_partition_mode) + adev->nbio.funcs->set_compute_partition_mode(adev, mode); + + ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags); +unlock: + if (flags & AMDGPU_XCP_OPS_KFD) + amdgpu_amdkfd_unlock_kfd(adev); +out: + return ret; +} + +int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, + enum AMDGPU_XCP_IP_BLOCK ip_id, + struct amdgpu_xcp_ip *ip) +{ + if (!ip) + return -EINVAL; + + return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip); +} + +struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = { + .switch_partition_mode = &aqua_vanjaram_switch_partition_mode, + .query_partition_mode = &aqua_vanjaram_query_partition_mode, + .get_ip_details = &aqua_vanjaram_get_xcp_ip_details +}; + +static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) +{ + int ret; + + ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1, + &aqua_vanjaram_xcp_funcs); + if (ret) + return ret; + + /* TODO: Default memory node affinity init */ + + return ret; +} -- cgit v1.2.3 From 8e7fd19380f9187dae3ad18a61793b1752dfa097 Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Wed, 16 Nov 2022 17:15:47 +0530 Subject: drm/amdgpu: Switch to SOC partition funcs For GFXv9.4.3, use the SOC-level partition switch implementation rather than keeping it at the GFX IP level. Change the existing implementation in GFX IP for keeping partition mode and restrict it to only the GFX-related switch.
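(A side note on the split this patch makes: the GFX IP keeps only the per-XCC register programming, in which each physical XCC is assigned a virtual ID inside its partition. Below is a minimal standalone sketch of that mapping, plain userspace C with assumed values rather than driver code; the real work is the REG_SET_FIELD loop over regCP_HYP_XCP_CTL in the gfx_v9_4_3.c hunk further down.)

#include <stdio.h>

int main(void)
{
	int num_xcc = 8;          /* assumed: total physical XCC count */
	int num_xccs_per_xcp = 2; /* assumed: e.g. QPX on an 8-XCC ASIC */
	int i;

	/* mirrors gfx_v9_4_3_switch_compute_partition(): each XCC gets
	 * VIRTUAL_XCC_ID = i % num_xccs_per_xcp within its partition */
	for (i = 0; i < num_xcc; i++)
		printf("XCC %d -> XCP %d, virtual XCC %d\n",
		       i, i / num_xccs_per_xcp, i % num_xccs_per_xcp);
	return 0;
}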
Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 31 ++---------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 5 -- .../gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 4 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 59 ++++------------------ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 8 +-- 5 files changed, 20 insertions(+), 87 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 5ff49737d7c6..f895a4b8ca0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -28,6 +28,7 @@ #include "amdgpu_gfx.h" #include "amdgpu_rlc.h" #include "amdgpu_ras.h" +#include "amdgpu_xcp.h" /* delay 0.1 second to enable gfx off feature */ #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100) @@ -1170,10 +1171,10 @@ static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - enum amdgpu_gfx_partition mode; + int mode; char *partition_mode; - mode = adev->gfx.funcs->query_partition_mode(adev); + mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr); switch (mode) { case AMDGPU_SPX_PARTITION_MODE: @@ -1254,31 +1255,7 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, return -EINVAL; } - if (!adev->kfd.init_complete) - return -EPERM; - - mutex_lock(&adev->gfx.partition_mutex); - - if (mode == adev->gfx.funcs->query_partition_mode(adev)) - goto out; - - ret = amdgpu_amdkfd_check_and_lock_kfd(adev); - if (ret) - goto out; - - amdgpu_amdkfd_device_fini_sw(adev); - - adev->gfx.funcs->switch_partition_mode(adev, mode); - - amdgpu_amdkfd_device_probe(adev); - amdgpu_amdkfd_device_init(adev); - /* If KFD init failed, return failure */ - if (!adev->kfd.init_complete) - ret = -EIO; - - amdgpu_amdkfd_unlock_kfd(adev); -out: - mutex_unlock(&adev->gfx.partition_mutex); + ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 8be4ab50b171..2287768ed141 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -278,11 +278,7 @@ struct amdgpu_gfx_funcs { (*query_partition_mode)(struct amdgpu_device *adev); enum amdgpu_memory_partition (*query_mem_partition_mode)(struct amdgpu_device *adev); - int (*switch_partition_mode)(struct amdgpu_device *adev, - enum amdgpu_gfx_partition mode); - - int (*switch_gfx_partition_mode)(struct amdgpu_device *adev, int num_xccs_per_xcp); }; @@ -416,7 +412,6 @@ struct amdgpu_gfx { bool cp_gfx_shadow; /* for gfx11 */ - enum amdgpu_gfx_partition partition_mode; uint16_t xcc_mask; enum amdgpu_memory_partition mem_partition_mode; uint32_t num_xcc_per_xcp; diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index a9de229a2828..bbcdececfd2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -307,8 +307,8 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, goto unlock; num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode); - if (adev->gfx.funcs->switch_gfx_partition_mode) - 
adev->gfx.funcs->switch_gfx_partition_mode(xcp_mgr->adev, + if (adev->gfx.funcs->switch_partition_mode) + adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev, num_xcc_per_xcp); if (adev->nbio.funcs->set_compute_partition_mode) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 93a0baa4515c..d684037a7a5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -38,6 +38,7 @@ #include "gc/gc_9_4_3_sh_mask.h" #include "gfx_v9_4_3.h" +#include "amdgpu_xcp.h" MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin"); @@ -614,61 +615,23 @@ gfx_v9_4_3_query_memory_partition(struct amdgpu_device *adev) return mode; } -static enum amdgpu_gfx_partition -gfx_v9_4_3_query_compute_partition(struct amdgpu_device *adev) -{ - enum amdgpu_gfx_partition mode = adev->gfx.partition_mode; - - if (adev->nbio.funcs->get_compute_partition_mode) - mode = adev->nbio.funcs->get_compute_partition_mode(adev); - - return mode; -} - static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, - enum amdgpu_gfx_partition mode) + int num_xccs_per_xcp) { + int i, num_xcc; u32 tmp = 0; - int num_xcc_per_partition, i, num_xcc; num_xcc = NUM_XCC(adev->gfx.xcc_mask); - switch (mode) { - case AMDGPU_SPX_PARTITION_MODE: - num_xcc_per_partition = num_xcc; - break; - case AMDGPU_DPX_PARTITION_MODE: - num_xcc_per_partition = num_xcc / 2; - break; - case AMDGPU_TPX_PARTITION_MODE: - num_xcc_per_partition = num_xcc / 3; - break; - case AMDGPU_QPX_PARTITION_MODE: - num_xcc_per_partition = num_xcc / 4; - break; - case AMDGPU_CPX_PARTITION_MODE: - num_xcc_per_partition = 1; - break; - default: - return -EINVAL; - } - - /* TODO: - * Stop user queues and threads, and make sure GPU is empty of work. 
- */ for (i = 0; i < num_xcc; i++) { tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP, - num_xcc_per_partition); + num_xccs_per_xcp); tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID, - i % num_xcc_per_partition); + i % num_xccs_per_xcp); WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL, tmp); } - if (adev->nbio.funcs->set_compute_partition_mode) - adev->nbio.funcs->set_compute_partition_mode(adev, mode); - - adev->gfx.num_xcc_per_xcp = num_xcc_per_partition; - adev->gfx.partition_mode = mode; + adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp; return 0; } @@ -680,7 +643,6 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { .read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs, .read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs, .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q, - .query_partition_mode = &gfx_v9_4_3_query_compute_partition, .switch_partition_mode = &gfx_v9_4_3_switch_compute_partition, .query_mem_partition_mode = &gfx_v9_4_3_query_memory_partition, }; @@ -1899,10 +1861,6 @@ static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id) return r; } - if (adev->gfx.partition_mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) - gfx_v9_4_3_switch_compute_partition(adev, - amdgpu_user_partt_mode); - /* set the virtual and physical id based on partition_mode */ gfx_v9_4_3_xcc_program_xcc_id(adev, xcc_id); @@ -1931,6 +1889,9 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) { int r, i, num_xcc; + if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr) == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) + amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, amdgpu_user_partt_mode); + num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { r = gfx_v9_4_3_xcc_cp_resume(adev, i); @@ -2146,8 +2107,6 @@ static int gfx_v9_4_3_early_init(void *handle) num_xcc = NUM_XCC(adev->gfx.xcc_mask); - adev->gfx.partition_mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; - adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), AMDGPU_MAX_COMPUTE_RINGS); gfx_v9_4_3_set_kiq_pm4_funcs(adev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 293787290e36..7a963d0a34e2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -34,6 +34,7 @@ #include "kfd_smi_events.h" #include "kfd_migrate.h" #include "amdgpu.h" +#include "amdgpu_xcp.h" #define MQD_SIZE_ALIGNED 768 @@ -592,7 +593,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, struct kfd_node *node; uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd; unsigned int max_proc_per_quantum; - int num_xcd; + int num_xcd, partition_mode; kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, KGD_ENGINE_MEC1); @@ -644,8 +645,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, * If the VMID range changes for GFX9.4.3, then this code MUST be * revisited. 
*/ + partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr); if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && - kfd->adev->gfx.partition_mode == AMDGPU_CPX_PARTITION_MODE && + partition_mode == AMDGPU_CPX_PARTITION_MODE && kfd->num_nodes != 1) { vmid_num_kfd /= 2; first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd*2; @@ -761,7 +763,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, node->start_xcc_id = node->num_xcc_per_node * i; if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && - kfd->adev->gfx.partition_mode == AMDGPU_CPX_PARTITION_MODE && + partition_mode == AMDGPU_CPX_PARTITION_MODE && kfd->num_nodes != 1) { /* For GFX9.4.3 and CPX mode, first XCD gets VMID range * 4-9 and second XCD gets VMID range 10-15. -- cgit v1.2.3 From 98b2e9cad2279132e3aa4b9caf9164b2e35c1a52 Mon Sep 17 00:00:00 2001 From: Le Ma <le.ma@amd.com> Date: Fri, 9 Dec 2022 19:44:05 +0800 Subject: drm/amdgpu: correct the vmhub index when page fault occurs The AMDGPU_GFXHUB was bound to each xcc in logical order. Thus convert the node_id to a logical xcc_id to index the correct AMDGPU_GFXHUB, and use "node_id / 4" to get the correct AMDGPU_MMHUB0 index. Signed-off-by: Le Ma <le.ma@amd.com> Tested-by: Asad kamal <asad.kamal@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 1 + drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 27 ++++++++++++++------------- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 14 ++++++++++---- 3 files changed, 25 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 2287768ed141..81b4c7e684af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -280,6 +280,7 @@ struct amdgpu_gfx_funcs { (*query_mem_partition_mode)(struct amdgpu_device *adev); int (*switch_partition_mode)(struct amdgpu_device *adev, int num_xccs_per_xcp); + int (*ih_node_to_logical_xcc)(struct amdgpu_device *adev, int ih_node); }; struct sq_work { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index ef552c9b19b5..6aaa810ea044 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -637,6 +637,19 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, return 0; } +static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node) +{ + int xcc; + + xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0)); + if (!xcc) { + dev_err(adev->dev, "Couldn't find xcc mapping from IH node"); + return -EINVAL; + } + + return xcc - 1; +} + static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { .get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter, .select_se_sh = &gfx_v9_4_3_xcc_select_se_sh, @@ -646,6 +659,7 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q, .switch_partition_mode = &gfx_v9_4_3_switch_compute_partition, .query_mem_partition_mode = &gfx_v9_4_3_query_memory_partition, + .ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst, }; static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev) @@ -2754,19 +2768,6 @@ static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev, return 0; } -static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node) -{ - int xcc; - - xcc = hweight8(adev->gfx.xcc_mask & 
GENMASK(ih_node / 2, 0)); - if (!xcc) { - dev_err(adev->dev, "Couldn't find xcc mapping from IH node"); - return -EINVAL; - } - - return xcc - 1; -} - static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 3765178e6fc5..841333148610 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -557,22 +557,28 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, u64 addr; uint32_t cam_index = 0; int ret; - uint32_t node_id; + uint32_t node_id, xcc_id = 0; - node_id = (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) ? entry->node_id : 0; + node_id = entry->node_id; addr = (u64)entry->src_data[0] << 12; addr |= ((u64)entry->src_data[1] & 0xf) << 44; if (entry->client_id == SOC15_IH_CLIENTID_VMC) { hub_name = "mmhub0"; - hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; + hub = &adev->vmhub[AMDGPU_MMHUB0(node_id / 4)]; } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) { hub_name = "mmhub1"; hub = &adev->vmhub[AMDGPU_MMHUB1(0)]; } else { hub_name = "gfxhub0"; - hub = &adev->vmhub[node_id/2]; + if (adev->gfx.funcs->ih_node_to_logical_xcc) { + xcc_id = adev->gfx.funcs->ih_node_to_logical_xcc(adev, + node_id); + if (xcc_id < 0) + xcc_id = 0; + } + hub = &adev->vmhub[xcc_id]; } if (retry_fault) { -- cgit v1.2.3 From b6f90baafe267a0705c5d9b1429c875d3c39fbc7 Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Tue, 31 Jan 2023 12:39:49 +0530 Subject: drm/amdgpu: Move memory partition query to gmc GMC block handles memory related information, it makes more sense to keep memory partition functions in gmc block. Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Le Ma <le.ma@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 30 +--------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 11 --------- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 44 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 16 ++++++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 10 -------- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 18 ++++++++++++++ 6 files changed, 79 insertions(+), 50 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 1487ecac2705..2f7a101593e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1204,24 +1204,6 @@ static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev, return sysfs_emit(buf, "%s\n", partition_mode); } -static ssize_t amdgpu_gfx_get_current_memory_partition(struct device *dev, - struct device_attribute *addr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = drm_to_adev(ddev); - enum amdgpu_memory_partition mode; - static const char *partition_modes[] = { - "UNKNOWN", "NPS1", "NPS2", "NPS4", "NPS8" - }; - BUILD_BUG_ON(ARRAY_SIZE(partition_modes) <= AMDGPU_NPS8_PARTITION_MODE); - - mode = min((int)adev->gfx.funcs->query_mem_partition_mode(adev), - AMDGPU_NPS8_PARTITION_MODE); - - return sysfs_emit(buf, "%s\n", partition_modes[mode]); -} - static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, struct device_attribute *addr, const char *buf, size_t count) @@ -1305,9 +1287,6 @@ static DEVICE_ATTR(current_compute_partition, S_IRUGO | 
S_IWUSR, static DEVICE_ATTR(available_compute_partition, S_IRUGO, amdgpu_gfx_get_available_compute_partition, NULL); -static DEVICE_ATTR(current_memory_partition, S_IRUGO, - amdgpu_gfx_get_current_memory_partition, NULL); - int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev) { int r; @@ -1317,19 +1296,12 @@ int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev) return r; r = device_create_file(adev->dev, &dev_attr_available_compute_partition); - if (r) - return r; - r = device_create_file(adev->dev, &dev_attr_current_memory_partition); - if (r) - return r; - - return 0; + return r; } void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev) { device_remove_file(adev->dev, &dev_attr_current_compute_partition); device_remove_file(adev->dev, &dev_attr_available_compute_partition); - device_remove_file(adev->dev, &dev_attr_current_memory_partition); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 81b4c7e684af..728977f8afe7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -71,14 +71,6 @@ enum amdgpu_pkg_type { AMDGPU_PKG_TYPE_UNKNOWN, }; -enum amdgpu_memory_partition { - UNKNOWN_MEMORY_PARTITION_MODE = 0, - AMDGPU_NPS1_PARTITION_MODE = 1, - AMDGPU_NPS2_PARTITION_MODE = 2, - AMDGPU_NPS4_PARTITION_MODE = 3, - AMDGPU_NPS8_PARTITION_MODE = 4, -}; - struct amdgpu_mec { struct amdgpu_bo *hpd_eop_obj; u64 hpd_eop_gpu_addr; @@ -276,8 +268,6 @@ struct amdgpu_gfx_funcs { struct amdgpu_gfx_shadow_info *shadow_info); enum amdgpu_gfx_partition (*query_partition_mode)(struct amdgpu_device *adev); - enum amdgpu_memory_partition - (*query_mem_partition_mode)(struct amdgpu_device *adev); int (*switch_partition_mode)(struct amdgpu_device *adev, int num_xccs_per_xcp); int (*ih_node_to_logical_xcc)(struct amdgpu_device *adev, int ih_node); @@ -414,7 +404,6 @@ struct amdgpu_gfx { bool cp_gfx_shadow; /* for gfx11 */ uint16_t xcc_mask; - enum amdgpu_memory_partition mem_partition_mode; uint32_t num_xcc_per_xcp; struct mutex partition_mutex; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index b8825a0670a4..d12625f1de5a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -891,3 +891,47 @@ int amdgpu_gmc_vram_checking(struct amdgpu_device *adev) return 0; } + +static ssize_t current_memory_partition_show( + struct device *dev, struct device_attribute *addr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + enum amdgpu_memory_partition mode; + + mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); + switch (mode) { + case AMDGPU_NPS1_PARTITION_MODE: + return sysfs_emit(buf, "NPS1\n"); + case AMDGPU_NPS2_PARTITION_MODE: + return sysfs_emit(buf, "NPS2\n"); + case AMDGPU_NPS3_PARTITION_MODE: + return sysfs_emit(buf, "NPS3\n"); + case AMDGPU_NPS4_PARTITION_MODE: + return sysfs_emit(buf, "NPS4\n"); + case AMDGPU_NPS6_PARTITION_MODE: + return sysfs_emit(buf, "NPS6\n"); + case AMDGPU_NPS8_PARTITION_MODE: + return sysfs_emit(buf, "NPS8\n"); + default: + return sysfs_emit(buf, "UNKNOWN\n"); + } + + return sysfs_emit(buf, "UNKNOWN\n"); +} + +static DEVICE_ATTR_RO(current_memory_partition); + +int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev) +{ + if (!adev->gmc.gmc_funcs->query_mem_partition_mode) + return 0; + + return device_create_file(adev->dev, + &dev_attr_current_memory_partition); +} + +void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev) +{ 
+ device_remove_file(adev->dev, &dev_attr_current_memory_partition); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index e408abfc2daf..2bd3b9665ebf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -63,6 +63,16 @@ struct firmware; +enum amdgpu_memory_partition { + UNKNOWN_MEMORY_PARTITION_MODE = 0, + AMDGPU_NPS1_PARTITION_MODE = 1, + AMDGPU_NPS2_PARTITION_MODE = 2, + AMDGPU_NPS3_PARTITION_MODE = 3, + AMDGPU_NPS4_PARTITION_MODE = 4, + AMDGPU_NPS6_PARTITION_MODE = 6, + AMDGPU_NPS8_PARTITION_MODE = 8, +}; + /* * GMC page fault information */ @@ -140,6 +150,9 @@ struct amdgpu_gmc_funcs { uint64_t *flags); /* get the amount of memory used by the vbios for pre-OS console */ unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev); + + enum amdgpu_memory_partition (*query_mem_partition_mode)( + struct amdgpu_device *adev); }; struct amdgpu_xgmi_ras { @@ -375,4 +388,7 @@ uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr); uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo); uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo); int amdgpu_gmc_vram_checking(struct amdgpu_device *adev); +int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev); +void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 69867294117e..81ab3cd2f229 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -607,16 +607,7 @@ static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev, { soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id)); } -static enum amdgpu_memory_partition -gfx_v9_4_3_query_memory_partition(struct amdgpu_device *adev) -{ - enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE; - - if (adev->nbio.funcs->get_memory_partition_mode) - mode = adev->nbio.funcs->get_memory_partition_mode(adev); - return mode; -} static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, int num_xccs_per_xcp) @@ -660,7 +651,6 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { .read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs, .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q, .switch_partition_mode = &gfx_v9_4_3_switch_compute_partition, - .query_mem_partition_mode = &gfx_v9_4_3_query_memory_partition, .ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 245de27c7540..db157a31a780 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1330,6 +1330,17 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) return size; } +static enum amdgpu_memory_partition +gmc_v9_0_query_memory_partition(struct amdgpu_device *adev) +{ + enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE; + + if (adev->nbio.funcs->get_memory_partition_mode) + mode = adev->nbio.funcs->get_memory_partition_mode(adev); + + return mode; +} + static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb, .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid, @@ -1339,6 +1350,7 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { .get_vm_pde = gmc_v9_0_get_vm_pde, .get_vm_pte = gmc_v9_0_get_vm_pte, .get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size, + 
.query_mem_partition_mode = &gmc_v9_0_query_memory_partition, }; static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) @@ -1901,6 +1913,9 @@ static int gmc_v9_0_sw_init(void *handle) if (r) return r; + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) + amdgpu_gmc_sysfs_init(adev); + return 0; } @@ -1908,6 +1923,9 @@ static int gmc_v9_0_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) + amdgpu_gmc_sysfs_fini(adev); + amdgpu_gmc_ras_fini(adev); amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); -- cgit v1.2.3 From 570de94b9c5d93e1c5bc4e357946efb93c662da9 Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Mon, 13 Feb 2023 19:26:18 +0530 Subject: drm/amdgpu: Add auto mode for compute partition When auto mode is specified, driver will choose the right compute partition mode. Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Le Ma <le.ma@amd.com> Reviewed-by: Philip Yang <philip.yang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 8 ++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2 ++ .../gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 28 +++++++++++++++++++++- 4 files changed, 35 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index f2bafab15ceb..cb9373f8c25a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -242,7 +242,7 @@ extern int amdgpu_num_kcq; extern int amdgpu_vcnfw_log; extern int amdgpu_sg_display; -extern uint amdgpu_user_partt_mode; +extern int amdgpu_user_partt_mode; #define AMDGPU_VM_MAX_NUM_CTX 4096 #define AMDGPU_SG_THRESHOLD (256*1024*1024) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f319318a8813..da4e50aef95a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -193,7 +193,7 @@ int amdgpu_smartshift_bias; int amdgpu_use_xgmi_p2p = 1; int amdgpu_vcnfw_log; int amdgpu_sg_display = -1; /* auto */ -uint amdgpu_user_partt_mode; +int amdgpu_user_partt_mode = AMDGPU_AUTO_COMPUTE_PARTITION_MODE; static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work); @@ -955,8 +955,10 @@ module_param_named(smu_pptable_id, amdgpu_smu_pptable_id, int, 0444); * DOC: partition_mode (int) * Used to override the default SPX mode. 
*/ -MODULE_PARM_DESC(user_partt_mode, - "specify partition mode to be used (0 = AMDGPU_SPX_PARTITION_MODE(default value), \ +MODULE_PARM_DESC( + user_partt_mode, + "specify partition mode to be used (-2 = AMDGPU_AUTO_COMPUTE_PARTITION_MODE(default value) \ + 0 = AMDGPU_SPX_PARTITION_MODE, \ 1 = AMDGPU_DPX_PARTITION_MODE, \ 2 = AMDGPU_TPX_PARTITION_MODE, \ 3 = AMDGPU_QPX_PARTITION_MODE, \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 728977f8afe7..e9c93f6e12b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -62,6 +62,8 @@ enum amdgpu_gfx_partition { AMDGPU_QPX_PARTITION_MODE = 3, AMDGPU_CPX_PARTITION_MODE = 4, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE = -1, + /* Automatically choose the right mode */ + AMDGPU_AUTO_COMPUTE_PARTITION_MODE = -2, }; #define NUM_XCC(x) hweight16(x) diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 7469de3fd6fe..a165b51e9e58 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -235,6 +235,30 @@ int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id, return 0; } +static enum amdgpu_gfx_partition +__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr) +{ + struct amdgpu_device *adev = xcp_mgr->adev; + int num_xcc; + + num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask); + + if (adev->gmc.num_mem_partitions == 1) + return AMDGPU_SPX_PARTITION_MODE; + + if (adev->gmc.num_mem_partitions == num_xcc) + return AMDGPU_CPX_PARTITION_MODE; + + if (adev->gmc.num_mem_partitions == num_xcc / 2) + return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE : + AMDGPU_QPX_PARTITION_MODE; + + if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU)) + return AMDGPU_DPX_PARTITION_MODE; + + return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; +} + static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr, enum amdgpu_gfx_partition mode) { @@ -304,7 +328,9 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, adev = xcp_mgr->adev; num_xcc = NUM_XCC(adev->gfx.xcc_mask); - if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) + if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) + mode = __aqua_vanjaram_get_auto_mode(xcp_mgr); + else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) return -EINVAL; if (adev->kfd.init_complete) -- cgit v1.2.3 From f9632096be49ed31e86541e3e79960e21e8f1578 Mon Sep 17 00:00:00 2001 From: Lijo Lazar <lijo.lazar@amd.com> Date: Tue, 7 Mar 2023 10:33:05 +0530 Subject: drm/amdgpu: Add compute mode descriptor function Keep a helper function to get description of compute partition mode. 
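(Taken together with the auto-mode patch above, the helper below makes the selection easy to reason about. The following standalone sketch, plain C with assumed values and stand-in names rather than driver code, mirrors the heuristic of __aqua_vanjaram_get_auto_mode() plus a descriptor lookup in the style of the new helper.)

#include <stdio.h>
#include <stdbool.h>

enum part_mode { SPX, DPX, TPX, QPX, CPX, MODE_UNKNOWN };

/* mirrors __aqua_vanjaram_get_auto_mode(): pick the compute mode whose
 * partition count lines up with the memory (NPS) partition count */
static enum part_mode auto_mode(int num_xcc, int num_mem_parts, bool is_apu)
{
	if (num_mem_parts == 1)
		return SPX;
	if (num_mem_parts == num_xcc)
		return CPX;
	if (num_mem_parts == num_xcc / 2)
		return is_apu ? TPX : QPX;
	if (num_mem_parts == 2 && !is_apu)
		return DPX;
	return MODE_UNKNOWN;
}

static const char *mode_desc(enum part_mode m)
{
	static const char * const names[] = { "SPX", "DPX", "TPX", "QPX", "CPX" };

	return m == MODE_UNKNOWN ? "UNKNOWN" : names[m];
}

int main(void)
{
	/* assumed example: 8 XCCs, 4 memory partitions, dGPU -> prints QPX */
	printf("%s\n", mode_desc(auto_mode(8, 4, false)));
	return 0;
}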
Signed-off-by: Lijo Lazar <lijo.lazar@amd.com> Reviewed-by: Le Ma <le.ma@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 24 +----------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+), 23 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 2f7a101593e7..6098b8b1ae5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1175,33 +1175,11 @@ static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); int mode; - char *partition_mode; mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr, AMDGPU_XCP_FL_NONE); - switch (mode) { - case AMDGPU_SPX_PARTITION_MODE: - partition_mode = "SPX"; - break; - case AMDGPU_DPX_PARTITION_MODE: - partition_mode = "DPX"; - break; - case AMDGPU_TPX_PARTITION_MODE: - partition_mode = "TPX"; - break; - case AMDGPU_QPX_PARTITION_MODE: - partition_mode = "QPX"; - break; - case AMDGPU_CPX_PARTITION_MODE: - partition_mode = "CPX"; - break; - default: - partition_mode = "UNKNOWN"; - break; - } - - return sysfs_emit(buf, "%s\n", partition_mode); + return sysfs_emit(buf, "%s\n", amdgpu_gfx_compute_mode_desc(mode)); } static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index e9c93f6e12b8..3d11b7a0bd75 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -494,4 +494,25 @@ int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev, bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id); int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev); void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev); + +static inline const char *amdgpu_gfx_compute_mode_desc(int mode) +{ + switch (mode) { + case AMDGPU_SPX_PARTITION_MODE: + return "SPX"; + case AMDGPU_DPX_PARTITION_MODE: + return "DPX"; + case AMDGPU_TPX_PARTITION_MODE: + return "TPX"; + case AMDGPU_QPX_PARTITION_MODE: + return "QPX"; + case AMDGPU_CPX_PARTITION_MODE: + return "CPX"; + default: + return "UNKNOWN"; + } + + return "UNKNOWN"; +} + #endif -- cgit v1.2.3 From d78c71321ea963378cd3c5646ac6c6483d8d1a50 Mon Sep 17 00:00:00 2001 From: Tao Zhou <tao.zhou1@amd.com> Date: Thu, 2 Feb 2023 18:57:04 +0800 Subject: drm/amdgpu: add GFX RAS common function The common function can help reduce redundant code. v2: remove xcp operation, only need to do RAS operations for all instances. v3: remove check for GFX RAS support, will be checked in higher level. add amdgpu prefix for the function name. 
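(The helper added below follows a simple dispatch pattern: zero the error counters once, then call the per-instance handler for every XCC set in the mask. Here is a standalone sketch of that pattern, with GENMASK and the for_each_inst walk re-implemented in plain C and a stand-in callback instead of the driver's types.)

#include <stdio.h>
#include <stdint.h>

#define GENMASK32(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

struct err_data { unsigned int ue_count, ce_count; };

/* stand-in for a per-instance RAS query callback */
static void query_one_xcc(struct err_data *d, int xcc_id)
{
	d->ce_count += 1; /* pretend each instance reports one CE */
	printf("queried xcc %d\n", xcc_id);
}

int main(void)
{
	int num_xcc = 4; /* assumed instance count */
	uint32_t xcc_mask = GENMASK32(num_xcc - 1, 0);
	struct err_data data = { 0, 0 }; /* counters reset up front */
	int i;

	for (i = 0; i < 32; i++) /* plain-C stand-in for for_each_inst() */
		if (xcc_mask & (1u << i))
			query_one_xcc(&data, i);

	printf("total ce_count=%u\n", data.ce_count);
	return 0;
}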
Signed-off-by: Tao Zhou <tao.zhou1@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 19 +++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 4 ++++ 2 files changed, 23 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 6098b8b1ae5b..8883d5ee13cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -884,6 +884,25 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, return 0; } +void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev, + void *ras_error_status, + void (*func)(struct amdgpu_device *adev, void *ras_error_status, + int xcc_id)) +{ + int i; + int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1; + uint32_t xcc_mask = GENMASK(num_xcc - 1, 0); + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + + if (err_data) { + err_data->ue_count = 0; + err_data->ce_count = 0; + } + + for_each_inst(i, xcc_mask) + func(adev, ras_error_status, i); +} + uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) { signed long r, cnt = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 3d11b7a0bd75..0df53fe7b199 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -494,6 +494,10 @@ int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev, bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id); int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev); void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev); +void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev, + void *ras_error_status, + void (*func)(struct amdgpu_device *adev, void *ras_error_status, + int xcc_id)); static inline const char *amdgpu_gfx_compute_mode_desc(int mode) { -- cgit v1.2.3 From 77462ab8c62b0dc65261c042771efea44a111131 Mon Sep 17 00:00:00 2001 From: Tao Zhou <tao.zhou1@amd.com> Date: Tue, 7 Feb 2023 18:30:55 +0800 Subject: drm/amdgpu: add RAS definitions for GFX Add common GFX RAS definitions. v2: remove instance from amdgpu_gfx_ras_reg_entry, amdgpu_ras_err_status_reg_entry has already defined it. v3: remove memory id definitions from amdgpu_gfx.h, they are related to IP version. 
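(The AMDGPU_GFX_MEMID_ENT macro added below is a table-of-tables idiom: it expands to an { array, ARRAY_SIZE(array) } initializer so per-block memory-ID lists of different lengths can live in one indexed table. A standalone sketch of the same idiom, with made-up entry names rather than real GFX memory IDs:)

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define MEMID_ENT(x) { (x), ARRAY_SIZE(x) },

struct mem_id { unsigned int id; const char *name; };
struct mem_id_list { const struct mem_id *ents; unsigned int size; };

/* made-up per-block lists of different lengths */
static const struct mem_id cp_mem[]  = { { 0, "CP_FIFO" }, { 1, "CP_CACHE" } };
static const struct mem_id lds_mem[] = { { 0, "LDS_BANK" } };

static const struct mem_id_list mem_lists[] = {
	MEMID_ENT(cp_mem)  /* trailing comma comes from the macro */
	MEMID_ENT(lds_mem)
};

int main(void)
{
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(mem_lists); i++)
		for (j = 0; j < mem_lists[i].size; j++)
			printf("block %u: %s\n", i, mem_lists[i].ents[j].name);
	return 0;
}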
Signed-off-by: Tao Zhou <tao.zhou1@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 39 +++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 0df53fe7b199..ce0f7a8ad4b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -73,6 +73,32 @@ enum amdgpu_pkg_type { AMDGPU_PKG_TYPE_UNKNOWN, }; +enum amdgpu_gfx_ras_mem_id_type { + AMDGPU_GFX_CP_MEM = 0, + AMDGPU_GFX_GCEA_MEM, + AMDGPU_GFX_GC_CANE_MEM, + AMDGPU_GFX_GCUTCL2_MEM, + AMDGPU_GFX_GDS_MEM, + AMDGPU_GFX_LDS_MEM, + AMDGPU_GFX_RLC_MEM, + AMDGPU_GFX_SP_MEM, + AMDGPU_GFX_SPI_MEM, + AMDGPU_GFX_SQC_MEM, + AMDGPU_GFX_SQ_MEM, + AMDGPU_GFX_TA_MEM, + AMDGPU_GFX_TCC_MEM, + AMDGPU_GFX_TCA_MEM, + AMDGPU_GFX_TCI_MEM, + AMDGPU_GFX_TCP_MEM, + AMDGPU_GFX_TD_MEM, + AMDGPU_GFX_TCX_MEM, + AMDGPU_GFX_ATC_L2_MEM, + AMDGPU_GFX_UTCL2_MEM, + AMDGPU_GFX_VML2_MEM, + AMDGPU_GFX_VML2_WALKER_MEM, + AMDGPU_GFX_MEM_TYPE_NUM +}; + struct amdgpu_mec { struct amdgpu_bo *hpd_eop_obj; u64 hpd_eop_gpu_addr; @@ -410,6 +436,19 @@ struct amdgpu_gfx { struct mutex partition_mutex; }; +struct amdgpu_gfx_ras_reg_entry { + struct amdgpu_ras_err_status_reg_entry reg_entry; + enum amdgpu_gfx_ras_mem_id_type mem_id_type; + uint32_t se_num; +}; + +struct amdgpu_gfx_ras_mem_id_entry { + const struct amdgpu_ras_memory_id_entry *mem_id_ent; + uint32_t size; +}; + +#define AMDGPU_GFX_MEMID_ENT(x) {(x), ARRAY_SIZE(x)}, + #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) #define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id))) #define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid, xcc_id) ((adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid), (xcc_id))) -- cgit v1.2.3
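(A closing illustration for the XCP manager introduced earlier in this series: amdgpu_xcp_get_partition() accumulates a bitmask of partitions whose inst_mask for the requested IP block contains the queried instance. The standalone sketch below uses invented masks and skips the xcp->valid and ip.valid checks of the real function.)

#include <stdio.h>

int main(void)
{
	/* assumed: 4 XCPs, each owning 2 of 8 SDMA instances */
	unsigned int inst_mask[4] = { 0x03, 0x0c, 0x30, 0xc0 };
	int instance = 5; /* which XCP(s) own SDMA instance 5? */
	int id_mask = 0;
	int i;

	for (i = 0; i < 4; i++)
		if (inst_mask[i] & (1u << instance))
			id_mask |= 1 << i;

	/* prints 0x4: instance 5 belongs to XCP 2 only */
	printf("xcp id mask for instance %d: 0x%x\n", instance, id_mask);
	return 0;
}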