Diffstat (limited to 'include/drm/gpu_scheduler.h')
-rw-r--r--  include/drm/gpu_scheduler.h | 178
1 file changed, 84 insertions(+), 94 deletions(-)
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 95e17504e46a..50928a7ae98e 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -71,12 +71,6 @@ enum drm_sched_priority {
DRM_SCHED_PRIORITY_COUNT
};
-/* Used to choose between FIFO and RR job-scheduling */
-extern int drm_sched_policy;
-
-#define DRM_SCHED_POLICY_RR 0
-#define DRM_SCHED_POLICY_FIFO 1
-
/**
* struct drm_sched_entity - A wrapper around a job queue (typically
* attached to the DRM file_priv).
@@ -338,8 +332,14 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* to schedule the job.
*/
struct drm_sched_job {
- struct spsc_node queue_node;
- struct list_head list;
+ u64 id;
+
+ /**
+ * @submit_ts:
+ *
+ * When the job was pushed into the entity queue.
+ */
+ ktime_t submit_ts;
/**
* @sched:
@@ -349,24 +349,30 @@ struct drm_sched_job {
* has finished.
*/
struct drm_gpu_scheduler *sched;
+
struct drm_sched_fence *s_fence;
+ struct drm_sched_entity *entity;
+ enum drm_sched_priority s_priority;
u32 credits;
+ /** @last_dependency: tracks @dependencies as they signal */
+ unsigned int last_dependency;
+ atomic_t karma;
+
+ struct spsc_node queue_node;
+ struct list_head list;
/*
* work is used only after finish_cb has been used and will not be
* accessed anymore.
*/
union {
- struct dma_fence_cb finish_cb;
- struct work_struct work;
+ struct dma_fence_cb finish_cb;
+ struct work_struct work;
};
- uint64_t id;
- atomic_t karma;
- enum drm_sched_priority s_priority;
- struct drm_sched_entity *entity;
struct dma_fence_cb cb;
+
/**
* @dependencies:
*
@@ -375,24 +381,8 @@ struct drm_sched_job {
* drm_sched_job_add_implicit_dependencies().
*/
struct xarray dependencies;
-
- /** @last_dependency: tracks @dependencies as they signal */
- unsigned long last_dependency;
-
- /**
- * @submit_ts:
- *
- * When the job was pushed into the entity queue.
- */
- ktime_t submit_ts;
};
-static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
- int threshold)
-{
- return s_job && atomic_inc_return(&s_job->karma) > threshold;
-}
-
enum drm_gpu_sched_stat {
DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
DRM_GPU_SCHED_STAT_NOMINAL,
@@ -476,19 +466,6 @@ struct drm_sched_backend_ops {
* and it's time to clean it up.
*/
void (*free_job)(struct drm_sched_job *sched_job);
-
- /**
- * @update_job_credits: Called when the scheduler is considering this
- * job for execution.
- *
- * This callback returns the number of credits the job would take if
- * pushed to the hardware. Drivers may use this to dynamically update
- * the job's credit count. For instance, deduct the number of credits
- * for already signalled native fences.
- *
- * This callback is optional.
- */
- u32 (*update_job_credits)(struct drm_sched_job *sched_job);
};
/**
@@ -553,18 +530,66 @@ struct drm_gpu_scheduler {
struct device *dev;
};
+/**
+ * struct drm_sched_init_args - parameters for initializing a DRM GPU scheduler
+ *
+ * @ops: backend operations provided by the driver
+ * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
+ * allocated and used.
+ * @num_rqs: Number of run-queues. This may be at most DRM_SCHED_PRIORITY_COUNT,
+ * as there's usually one run-queue per priority, but may be less.
+ * @credit_limit: the number of credits this scheduler can hold from all jobs
+ * @hang_limit: number of times to allow a job to hang before dropping it.
+ * This mechanism is DEPRECATED. Set it to 0.
+ * @timeout: timeout value in jiffies for submitted jobs.
+ * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is used.
+ * @score: score atomic shared with other schedulers. May be NULL.
+ * @name: name (typically the driver's name). Used for debugging
+ * @dev: associated device. Used for debugging
+ */
+struct drm_sched_init_args {
+ const struct drm_sched_backend_ops *ops;
+ struct workqueue_struct *submit_wq;
+ struct workqueue_struct *timeout_wq;
+ u32 num_rqs;
+ u32 credit_limit;
+ unsigned int hang_limit;
+ long timeout;
+ atomic_t *score;
+ const char *name;
+ struct device *dev;
+};
+
+/* Scheduler operations */
+
int drm_sched_init(struct drm_gpu_scheduler *sched,
- const struct drm_sched_backend_ops *ops,
- struct workqueue_struct *submit_wq,
- u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
- long timeout, struct workqueue_struct *timeout_wq,
- atomic_t *score, const char *name, struct device *dev);
+ const struct drm_sched_init_args *args);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
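
With drm_sched_init() now taking a parameter struct, a call site shrinks to an initializer plus one call. A hedged sketch of what a converted driver might look like; the my_* names and the credit/timeout values are illustrative only:

	static const struct drm_sched_backend_ops my_sched_ops = {
		.run_job      = my_run_job,		/* hypothetical callbacks */
		.timedout_job = my_timedout_job,
		.free_job     = my_free_job,
	};

	int my_sched_setup(struct my_device *mdev)
	{
		const struct drm_sched_init_args args = {
			.ops          = &my_sched_ops,
			.submit_wq    = NULL,	/* scheduler allocates an ordered wq */
			.timeout_wq   = NULL,	/* falls back to system_wq */
			.num_rqs      = DRM_SCHED_PRIORITY_COUNT,
			.credit_limit = 64,	/* example ring capacity */
			.hang_limit   = 0,	/* deprecated, keep at 0 */
			.timeout      = msecs_to_jiffies(500),
			.score        = NULL,
			.name         = "my-driver",
			.dev          = mdev->dev,
		};

		return drm_sched_init(&mdev->sched, &args);
	}

Designated initializers also mean unused optional fields can simply be omitted, which is the main ergonomic win over the old positional signature.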
+
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+ unsigned long remaining);
+void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
+bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
+void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
+void drm_sched_start(struct drm_gpu_scheduler *sched, int errno);
+void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
+void drm_sched_fault(struct drm_gpu_scheduler *sched);
+
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list);
+
+/* Jobs */
+
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
u32 credits, void *owner);
void drm_sched_job_arm(struct drm_sched_job *job);
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
struct dma_fence *fence);
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
@@ -577,35 +602,18 @@ int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
struct drm_gem_object *obj,
bool write);
-
-
-void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
- struct drm_gpu_scheduler **sched_list,
- unsigned int num_sched_list);
-
-void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
+bool drm_sched_job_has_dependency(struct drm_sched_job *job,
+ struct dma_fence *fence);
void drm_sched_job_cleanup(struct drm_sched_job *job);
-void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
-bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
-void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
-void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
-void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
-void drm_sched_start(struct drm_gpu_scheduler *sched, int errno);
-void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
-void drm_sched_reset_karma(struct drm_sched_job *bad);
-void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
-bool drm_sched_dependency_optimized(struct dma_fence* fence,
- struct drm_sched_entity *entity);
-void drm_sched_fault(struct drm_gpu_scheduler *sched);
-void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
- struct drm_sched_entity *entity);
-void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
- struct drm_sched_entity *entity);
+static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
+ int threshold)
+{
+ return s_job && atomic_inc_return(&s_job->karma) > threshold;
+}
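
Note that drm_sched_invalidate_job() lands here among the other job helpers rather than being dropped. A hedged sketch of the kind of timedout_job handler that might use it; the policy, the threshold of 2, and the my_mark_job_guilty() helper are illustrative, not from this patch:

	static enum drm_gpu_sched_stat
	my_timedout_job(struct drm_sched_job *sched_job)
	{
		/* Bump the job's karma; once it has been blamed for more
		 * than two hangs, stop resubmitting it (hypothetical
		 * driver policy). */
		if (drm_sched_invalidate_job(sched_job, 2))
			my_mark_job_guilty(sched_job);	/* hypothetical helper */

		return DRM_GPU_SCHED_STAT_NOMINAL;
	}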
-void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
- struct drm_sched_rq *rq, ktime_t ts);
+/* Entities */
int drm_sched_entity_init(struct drm_sched_entity *entity,
enum drm_sched_priority priority,
@@ -615,29 +623,11 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
-void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
-struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
-void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
enum drm_sched_priority priority);
-bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
int drm_sched_entity_error(struct drm_sched_entity *entity);
-
-struct drm_sched_fence *drm_sched_fence_alloc(
- struct drm_sched_entity *s_entity, void *owner);
-void drm_sched_fence_init(struct drm_sched_fence *fence,
- struct drm_sched_entity *entity);
-void drm_sched_fence_free(struct drm_sched_fence *fence);
-
-void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
- struct dma_fence *parent);
-void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);
-
-unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
-void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
- unsigned long remaining);
-struct drm_gpu_scheduler *
-drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
- unsigned int num_sched_list);
+void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
+ struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list);
#endif
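
Taken together, the job helpers grouped above are typically used in this order on a submit path: init, add dependencies, arm, push. A hedged sketch under that assumption; struct my_job embedding a drm_sched_job as its base member is hypothetical:

	struct my_job {
		struct drm_sched_job base;
		/* driver-specific payload */
	};

	int my_submit(struct drm_sched_entity *entity, void *owner,
		      struct drm_gem_object *bo)
	{
		struct my_job *job;
		int ret;

		job = kzalloc(sizeof(*job), GFP_KERNEL);
		if (!job)
			return -ENOMEM;

		/* Bind the job to the entity; one credit in this example. */
		ret = drm_sched_job_init(&job->base, entity, 1, owner);
		if (ret)
			goto err_free;

		/* Wait for prior users of the BO before this job runs. */
		ret = drm_sched_job_add_implicit_dependencies(&job->base, bo, true);
		if (ret)
			goto err_cleanup;

		/* Arming allocates the scheduler fences; keep arm and push
		 * together (e.g. under the driver's submission lock) so
		 * fence seqnos stay ordered. */
		drm_sched_job_arm(&job->base);
		drm_sched_entity_push_job(&job->base);
		return 0;

	err_cleanup:
		drm_sched_job_cleanup(&job->base);
	err_free:
		kfree(job);
		return ret;
	}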