Diffstat (limited to 'drivers/gpu/drm/scheduler')
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c   |  36
-rw-r--r--  drivers/gpu/drm/scheduler/sched_fence.c    |   2
-rw-r--r--  drivers/gpu/drm/scheduler/sched_internal.h |  91
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c     | 131
4 files changed, 169 insertions(+), 91 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 69bcf0e99d57..bd39db7bb240 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -28,10 +28,9 @@
 #include <drm/drm_print.h>
 #include <drm/gpu_scheduler.h>
 
-#include "gpu_scheduler_trace.h"
+#include "sched_internal.h"
 
-#define to_drm_sched_job(sched_job) \
-        container_of((sched_job), struct drm_sched_job, queue_node)
+#include "gpu_scheduler_trace.h"
 
 /**
  * drm_sched_entity_init - Init a context entity used by scheduler when
@@ -92,7 +91,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
          * the lowest priority available.
          */
         if (entity->priority >= sched_list[0]->num_rqs) {
-                drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
+                dev_err(sched_list[0]->dev, "entity has out-of-bounds priority: %u. num_rqs: %u\n",
                         entity->priority, sched_list[0]->num_rqs);
                 entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
                                          (s32) DRM_SCHED_PRIORITY_KERNEL);
@@ -152,18 +151,6 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
         return false;
 }
 
-/* Return true if entity could provide a job. */
-bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
-{
-        if (spsc_queue_peek(&entity->job_queue) == NULL)
-                return false;
-
-        if (READ_ONCE(entity->dependency))
-                return false;
-
-        return true;
-}
-
 /**
  * drm_sched_entity_error - return error of last scheduled job
  * @entity: scheduler entity to check
@@ -255,13 +242,20 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity)
         /* The entity is guaranteed to not be used by the scheduler */
         prev = rcu_dereference_check(entity->last_scheduled, true);
         dma_fence_get(prev);
-        while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
+        while ((job = drm_sched_entity_queue_pop(entity))) {
                 struct drm_sched_fence *s_fence = job->s_fence;
 
                 dma_fence_get(&s_fence->finished);
-                if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
-                                           drm_sched_entity_kill_jobs_cb))
+                if (!prev ||
+                    dma_fence_add_callback(prev, &job->finish_cb,
+                                           drm_sched_entity_kill_jobs_cb)) {
+                        /*
+                         * Adding callback above failed.
+                         * dma_fence_put() checks for NULL.
+                         */
+                        dma_fence_put(prev);
                         drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
+                }
 
                 prev = &s_fence->finished;
         }
@@ -477,7 +471,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 {
         struct drm_sched_job *sched_job;
 
-        sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
+        sched_job = drm_sched_entity_queue_peek(entity);
         if (!sched_job)
                 return NULL;
 
@@ -513,7 +507,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
         if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
                 struct drm_sched_job *next;
 
-                next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
+                next = drm_sched_entity_queue_peek(entity);
                 if (next) {
                         struct drm_sched_rq *rq;
 
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 0f35f009b9d3..e971528504a5 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -29,6 +29,8 @@
 
 #include <drm/gpu_scheduler.h>
 
+#include "sched_internal.h"
+
 static struct kmem_cache *sched_fence_slab;
 
 static int __init drm_sched_fence_slab_init(void)
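[Annotation] Besides switching to the new queue helpers, the drm_sched_entity_kill() hunk above plugs a fence reference leak: dma_fence_add_callback() returns an error when the fence is already signaled, in which case the callback will never run and the reference taken for it must be dropped before invoking the handler inline. A minimal, hypothetical sketch of that idiom (my_cleanup_cb and run_cleanup_after are illustrative names, not scheduler code; the dma_fence API is the real one):

static void my_cleanup_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
        /* ... release resources tracked by cb's container ... */
        dma_fence_put(f);       /* dma_fence_put() checks for NULL */
}

static void run_cleanup_after(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        dma_fence_get(fence);   /* reference handed over to the callback */

        if (dma_fence_add_callback(fence, cb, my_cleanup_cb)) {
                /* Fence already signaled: drop the ref, clean up inline. */
                dma_fence_put(fence);
                my_cleanup_cb(NULL, cb);
        }
}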
diff --git a/drivers/gpu/drm/scheduler/sched_internal.h b/drivers/gpu/drm/scheduler/sched_internal.h
new file mode 100644
index 000000000000..599cf6e1bb74
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/sched_internal.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef _DRM_GPU_SCHEDULER_INTERNAL_H_
+#define _DRM_GPU_SCHEDULER_INTERNAL_H_
+
+
+/* Used to choose between FIFO and RR job-scheduling */
+extern int drm_sched_policy;
+
+#define DRM_SCHED_POLICY_RR    0
+#define DRM_SCHED_POLICY_FIFO  1
+
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+
+void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
+                             struct drm_sched_entity *entity);
+void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
+                                struct drm_sched_entity *entity);
+
+void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
+                                     struct drm_sched_rq *rq, ktime_t ts);
+
+void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
+struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
+
+struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *s_entity,
+                                              void *owner);
+void drm_sched_fence_init(struct drm_sched_fence *fence,
+                          struct drm_sched_entity *entity);
+void drm_sched_fence_free(struct drm_sched_fence *fence);
+
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
+                               struct dma_fence *parent);
+void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);
+
+/**
+ * drm_sched_entity_queue_pop - Low level helper for popping queued jobs
+ *
+ * @entity: scheduler entity
+ *
+ * Low level helper for popping queued jobs.
+ *
+ * Returns: The job dequeued or NULL.
+ */
+static inline struct drm_sched_job *
+drm_sched_entity_queue_pop(struct drm_sched_entity *entity)
+{
+        struct spsc_node *node;
+
+        node = spsc_queue_pop(&entity->job_queue);
+        if (!node)
+                return NULL;
+
+        return container_of(node, struct drm_sched_job, queue_node);
+}
+
+/**
+ * drm_sched_entity_queue_peek - Low level helper for peeking at the job queue
+ *
+ * @entity: scheduler entity
+ *
+ * Low level helper for peeking at the job queue
+ *
+ * Returns: The job at the head of the queue or NULL.
+ */
+static inline struct drm_sched_job *
+drm_sched_entity_queue_peek(struct drm_sched_entity *entity)
+{
+        struct spsc_node *node;
+
+        node = spsc_queue_peek(&entity->job_queue);
+        if (!node)
+                return NULL;
+
+        return container_of(node, struct drm_sched_job, queue_node);
+}
+
+/* Return true if entity could provide a job. */
+static inline bool
+drm_sched_entity_is_ready(struct drm_sched_entity *entity)
+{
+        if (!spsc_queue_count(&entity->job_queue))
+                return false;
+
+        if (READ_ONCE(entity->dependency))
+                return false;
+
+        return true;
+}
+
+#endif
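[Annotation] The new header centralizes the to_drm_sched_job() container_of() pattern that sched_entity.c and sched_main.c previously each defined for themselves. A standalone sketch of the same pattern (my_job and my_job_pop are hypothetical; the spsc_queue API is the real one from <drm/spsc_queue.h>):

#include <drm/spsc_queue.h>

struct my_job {
        struct spsc_node queue_node;    /* embedded link, as in drm_sched_job */
        int id;
};

static struct my_job *my_job_pop(struct spsc_queue *queue)
{
        struct spsc_node *node = spsc_queue_pop(queue);

        /* container_of() maps the embedded node back to the job around it */
        return node ? container_of(node, struct my_job, queue_node) : NULL;
}

Note also that drm_sched_entity_is_ready() now tests spsc_queue_count() rather than spsc_queue_peek(), reading the queue's atomic job count instead of dereferencing its head pointer.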
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 57da84908752..bfea608a7106 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -64,12 +64,6 @@
  * credit limit, the job won't be executed. Instead, the scheduler will wait
  * until the credit count has decreased enough to not overflow its credit limit.
  * This implies waiting for previously executed jobs.
- *
- * Optionally, drivers may register a callback (update_job_credits) provided by
- * struct drm_sched_backend_ops to update the job's credits dynamically. The
- * scheduler executes this callback every time the scheduler considers a job for
- * execution and subsequently checks whether the job fits the scheduler's credit
- * limit.
  */
 
 #include <linux/wait.h>
@@ -84,6 +78,8 @@
 #include <drm/gpu_scheduler.h>
 #include <drm/spsc_queue.h>
 
+#include "sched_internal.h"
+
 #define CREATE_TRACE_POINTS
 #include "gpu_scheduler_trace.h"
 
@@ -93,9 +89,6 @@ static struct lockdep_map drm_sched_lockdep_map = {
 };
 #endif
 
-#define to_drm_sched_job(sched_job) \
-        container_of((sched_job), struct drm_sched_job, queue_node)
-
 int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
 
 /**
@@ -109,9 +102,9 @@ static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
 {
         u32 credits;
 
-        drm_WARN_ON(sched, check_sub_overflow(sched->credit_limit,
-                                              atomic_read(&sched->credit_count),
-                                              &credits));
+        WARN_ON(check_sub_overflow(sched->credit_limit,
+                                   atomic_read(&sched->credit_count),
+                                   &credits));
 
         return credits;
 }
@@ -129,23 +122,18 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
 {
         struct drm_sched_job *s_job;
 
-        s_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
+        s_job = drm_sched_entity_queue_peek(entity);
         if (!s_job)
                 return false;
 
-        if (sched->ops->update_job_credits) {
-                s_job->credits = sched->ops->update_job_credits(s_job);
-
-                drm_WARN(sched, !s_job->credits,
-                         "Jobs with zero credits bypass job-flow control.\n");
-        }
-
         /* If a job exceeds the credit limit, truncate it to the credit limit
          * itself to guarantee forward progress.
          */
-        if (drm_WARN(sched, s_job->credits > sched->credit_limit,
-                     "Jobs may not exceed the credit limit, truncate.\n"))
+        if (s_job->credits > sched->credit_limit) {
+                dev_WARN(sched->dev,
+                         "Jobs may not exceed the credit limit, truncate.\n");
                 s_job->credits = sched->credit_limit;
+        }
 
         return drm_sched_available_credits(sched) >= s_job->credits;
 }
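[Annotation] drm_sched_available_credits() computes the headroom as credit_limit minus the in-flight credit_count, asserting the subtraction cannot wrap, and drm_sched_can_queue() truncates oversized jobs to the limit so they can still make forward progress. A userspace sketch of the same arithmetic, assuming GCC/Clang (__builtin_sub_overflow() is the compiler builtin underlying the kernel's check_sub_overflow()):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t available_credits(uint32_t credit_limit, uint32_t credit_count)
{
        uint32_t credits;

        /* __builtin_sub_overflow() returns true if the subtraction wraps */
        assert(!__builtin_sub_overflow(credit_limit, credit_count, &credits));
        return credits;
}

static int can_queue(uint32_t credit_limit, uint32_t credit_count,
                     uint32_t job_credits)
{
        /* Oversized jobs are truncated so they can still make progress. */
        if (job_credits > credit_limit)
                job_credits = credit_limit;

        return available_credits(credit_limit, credit_count) >= job_credits;
}

int main(void)
{
        printf("%d\n", can_queue(10, 7, 5));    /* 0: only 3 credits free */
        printf("%d\n", can_queue(10, 0, 64));   /* 1: truncated to 10 */
        return 0;
}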
@@ -803,7 +791,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
                  * or worse--a blank screen--leave a trail in the
                  * logs, so this can be debugged easier.
                  */
-                drm_err(job->sched, "%s: entity has no rq!\n", __func__);
+                dev_err(job->sched->dev, "%s: entity has no rq!\n", __func__);
                 return -ENOENT;
         }
 
@@ -998,17 +986,42 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
 EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
 
 /**
+ * drm_sched_job_has_dependency - check whether fence is the job's dependency
+ * @job: scheduler job to check
+ * @fence: fence to look for
+ *
+ * Returns:
+ * True if @fence is found within the job's dependencies, or otherwise false.
+ */
+bool drm_sched_job_has_dependency(struct drm_sched_job *job,
+                                  struct dma_fence *fence)
+{
+        struct dma_fence *f;
+        unsigned long index;
+
+        xa_for_each(&job->dependencies, index, f) {
+                if (f == fence)
+                        return true;
+        }
+
+        return false;
+}
+EXPORT_SYMBOL(drm_sched_job_has_dependency);
+
+/**
  * drm_sched_job_cleanup - clean up scheduler job resources
  * @job: scheduler job to clean up
  *
  * Cleans up the resources allocated with drm_sched_job_init().
  *
  * Drivers should call this from their error unwind code if @job is aborted
- * before drm_sched_job_arm() is called.
+ * before it was submitted to an entity with drm_sched_entity_push_job().
  *
- * After that point of no return @job is committed to be executed by the
- * scheduler, and this function should be called from the
- * &drm_sched_backend_ops.free_job callback.
+ * Since calling drm_sched_job_arm() causes the job's fences to be initialized,
+ * it is up to the driver to ensure that fences that were exposed to external
+ * parties get signaled. drm_sched_job_cleanup() does not ensure this.
+ *
+ * This function must also be called in &struct drm_sched_backend_ops.free_job
  */
 void drm_sched_job_cleanup(struct drm_sched_job *job)
 {
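[Annotation] The newly exported drm_sched_job_has_dependency() walks the job's dependency xarray and reports whether a fence is already tracked. A hypothetical driver-side use (my_add_wait_fence is an illustrative name), assuming the documented rule that drm_sched_job_add_dependency() takes ownership of the fence reference passed to it:

static int my_add_wait_fence(struct drm_sched_job *job, struct dma_fence *fence)
{
        /* Skip fences the job already waits on instead of storing a duplicate. */
        if (drm_sched_job_has_dependency(job, fence))
                return 0;

        /* drm_sched_job_add_dependency() consumes the reference passed in. */
        return drm_sched_job_add_dependency(job, dma_fence_get(fence));
}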
@@ -1019,7 +1032,7 @@ void drm_sched_job_cleanup(struct drm_sched_job *job)
                 /* drm_sched_job_arm() has been called */
                 dma_fence_put(&job->s_fence->finished);
         } else {
-                /* aborted job before committing to run it */
+                /* aborted job before arming */
                 drm_sched_fence_free(job->s_fence);
         }
 
@@ -1166,9 +1179,6 @@ static void drm_sched_free_job_work(struct work_struct *w)
                 container_of(w, struct drm_gpu_scheduler, work_free_job);
         struct drm_sched_job *job;
 
-        if (READ_ONCE(sched->pause_submit))
-                return;
-
         job = drm_sched_get_finished_job(sched);
         if (job)
                 sched->ops->free_job(job);
@@ -1192,9 +1202,6 @@ static void drm_sched_run_job_work(struct work_struct *w)
         struct drm_sched_job *sched_job;
         int r;
 
-        if (READ_ONCE(sched->pause_submit))
-                return;
-
         /* Find entity with a ready job */
         entity = drm_sched_select_entity(sched);
         if (!entity)
@@ -1240,43 +1247,27 @@ static void drm_sched_run_job_work(struct work_struct *w)
  * drm_sched_init - Init a gpu scheduler instance
  *
  * @sched: scheduler instance
- * @ops: backend operations for this scheduler
- * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
- *             allocated and used
- * @num_rqs: number of runqueues, one for each priority, up to DRM_SCHED_PRIORITY_COUNT
- * @credit_limit: the number of credits this scheduler can hold from all jobs
- * @hang_limit: number of times to allow a job to hang before dropping it
- * @timeout: timeout value in jiffies for the scheduler
- * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
- *              used
- * @score: optional score atomic shared with other schedulers
- * @name: name used for debugging
- * @dev: target &struct device
+ * @args: scheduler initialization arguments
  *
  * Return 0 on success, otherwise error code.
  */
-int drm_sched_init(struct drm_gpu_scheduler *sched,
-                   const struct drm_sched_backend_ops *ops,
-                   struct workqueue_struct *submit_wq,
-                   u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
-                   long timeout, struct workqueue_struct *timeout_wq,
-                   atomic_t *score, const char *name, struct device *dev)
+int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_args *args)
 {
         int i;
 
-        sched->ops = ops;
-        sched->credit_limit = credit_limit;
-        sched->name = name;
-        sched->timeout = timeout;
-        sched->timeout_wq = timeout_wq ? : system_wq;
-        sched->hang_limit = hang_limit;
-        sched->score = score ? score : &sched->_score;
-        sched->dev = dev;
+        sched->ops = args->ops;
+        sched->credit_limit = args->credit_limit;
+        sched->name = args->name;
+        sched->timeout = args->timeout;
+        sched->hang_limit = args->hang_limit;
+        sched->timeout_wq = args->timeout_wq ? args->timeout_wq : system_wq;
+        sched->score = args->score ? args->score : &sched->_score;
+        sched->dev = args->dev;
 
-        if (num_rqs > DRM_SCHED_PRIORITY_COUNT) {
+        if (args->num_rqs > DRM_SCHED_PRIORITY_COUNT) {
                 /* This is a gross violation--tell drivers what the problem is. */
-                drm_err(sched, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
+                dev_err(sched->dev, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
                         __func__);
                 return -EINVAL;
         } else if (sched->sched_rq) {
@@ -1284,20 +1275,20 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
                  * fine-tune their DRM calling order, and return all
                  * is good.
                  */
-                drm_warn(sched, "%s: scheduler already initialized!\n", __func__);
+                dev_warn(sched->dev, "%s: scheduler already initialized!\n", __func__);
                 return 0;
         }
 
-        if (submit_wq) {
-                sched->submit_wq = submit_wq;
+        if (args->submit_wq) {
+                sched->submit_wq = args->submit_wq;
                 sched->own_submit_wq = false;
         } else {
 #ifdef CONFIG_LOCKDEP
-                sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name,
+                sched->submit_wq = alloc_ordered_workqueue_lockdep_map(args->name,
                                                                        WQ_MEM_RECLAIM,
                                                                        &drm_sched_lockdep_map);
 #else
-                sched->submit_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+                sched->submit_wq = alloc_ordered_workqueue(args->name, WQ_MEM_RECLAIM);
 #endif
                 if (!sched->submit_wq)
                         return -ENOMEM;
@@ -1305,11 +1296,11 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
                 sched->own_submit_wq = true;
         }
 
-        sched->sched_rq = kmalloc_array(num_rqs, sizeof(*sched->sched_rq),
+        sched->sched_rq = kmalloc_array(args->num_rqs, sizeof(*sched->sched_rq),
                                         GFP_KERNEL | __GFP_ZERO);
         if (!sched->sched_rq)
                 goto Out_check_own;
-        sched->num_rqs = num_rqs;
+        sched->num_rqs = args->num_rqs;
         for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
                 sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
                 if (!sched->sched_rq[i])
@@ -1339,7 +1330,7 @@ Out_unroll:
 Out_check_own:
         if (sched->own_submit_wq)
                 destroy_workqueue(sched->submit_wq);
-        drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
+        dev_err(sched->dev, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
         return -ENOMEM;
 }
 EXPORT_SYMBOL(drm_sched_init);
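[Annotation] For reference, a hypothetical driver conversion to the new single-argument interface (my_sched_ops, my_ring_create and the concrete values are illustrative; the field names follow the args-> accesses visible in the hunk above). Zero/NULL fields keep the fallbacks drm_sched_init() implements: an ordered workqueue is allocated when submit_wq is NULL, timeout_wq falls back to system_wq, and score falls back to the scheduler's internal counter:

/* Illustrative only: my_sched_ops is the driver's drm_sched_backend_ops. */
extern const struct drm_sched_backend_ops my_sched_ops;

static int my_ring_create(struct drm_gpu_scheduler *sched, struct device *dev)
{
        const struct drm_sched_init_args args = {
                .ops = &my_sched_ops,
                .submit_wq = NULL,              /* ordered wq gets allocated */
                .num_rqs = DRM_SCHED_PRIORITY_COUNT,
                .credit_limit = 64,
                .hang_limit = 0,
                .timeout = msecs_to_jiffies(500),
                .timeout_wq = NULL,             /* falls back to system_wq */
                .score = NULL,                  /* use sched->_score */
                .name = "my-ring",
                .dev = dev,
        };

        return drm_sched_init(sched, &args);
}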