author:    Eric Anholt <eric@anholt.net>  2018-06-06 10:48:51 -0700
committer: Eric Anholt <eric@anholt.net>  2018-06-21 14:46:04 -0700
commit:    7122b68b8a9692dcc3acf89595f04c492872115f
tree:      c1bbdd237d54dca2fb474dbcc8c5b690bd26fc98 /drivers/gpu/drm/v3d
parent:    6aa13402c11035a428d8c621b334734971d5d39d
drm/v3d: Take a lock across GPU scheduler job creation and queuing.
Between creation and queueing of a job, you need to prevent any other
job from being created and queued. Otherwise the scheduler's fences
may be signaled out of seqno order.
v2: move mutex unlock to the error label.
Signed-off-by: Eric Anholt <eric@anholt.net>
Fixes: 57692c94dcbe ("drm/v3d: Introduce a new DRM driver for Broadcom V3D V3.x+")
Link: https://patchwork.freedesktop.org/patch/msgid/20180606174851.12433-1-eric@anholt.net
Reviewed-by: Lucas Stach <l.stach@pengutronix.de>
Diffstat (limited to 'drivers/gpu/drm/v3d'):

 drivers/gpu/drm/v3d/v3d_drv.h | 5 +++++
 drivers/gpu/drm/v3d/v3d_gem.c | 4 ++++
 2 files changed, 9 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index a043ac3aae98..26005abd9c5d 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -85,6 +85,11 @@ struct v3d_dev {
 	 */
 	struct mutex reset_lock;
 
+	/* Lock taken when creating and pushing the GPU scheduler
+	 * jobs, to keep the sched-fence seqnos in order.
+	 */
+	struct mutex sched_lock;
+
 	struct {
 		u32 num_allocated;
 		u32 pages_allocated;
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index b513f9189caf..269fe16379c0 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -550,6 +550,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto fail;
 
+	mutex_lock(&v3d->sched_lock);
 	if (exec->bin.start != exec->bin.end) {
 		ret = drm_sched_job_init(&exec->bin.base,
 					 &v3d->queue[V3D_BIN].sched,
@@ -576,6 +577,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	kref_get(&exec->refcount); /* put by scheduler job completion */
 	drm_sched_entity_push_job(&exec->render.base,
 				  &v3d_priv->sched_entity[V3D_RENDER]);
+	mutex_unlock(&v3d->sched_lock);
 
 	v3d_attach_object_fences(exec);
 
@@ -594,6 +596,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	return 0;
 
 fail_unreserve:
+	mutex_unlock(&v3d->sched_lock);
 	v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
 fail:
 	v3d_exec_put(exec);
@@ -615,6 +618,7 @@ v3d_gem_init(struct drm_device *dev)
 	spin_lock_init(&v3d->job_lock);
 	mutex_init(&v3d->bo_lock);
 	mutex_init(&v3d->reset_lock);
+	mutex_init(&v3d->sched_lock);
 
 	/* Note: We don't allocate address 0.  Various bits of HW
 	 * treat 0 as special, such as the occlusion query counters
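To make the locking shape clearer, here is a minimal sketch of the pattern the
patch establishes. The names my_dev, my_job, my_job_init() and my_job_push()
are hypothetical stand-ins for the driver's exec structures and the
drm_sched_job_init()/drm_sched_entity_push_job() calls; only the locking shape
mirrors the actual change.

    #include <linux/mutex.h>

    struct my_dev {
    	/* Serializes job creation and queuing, like v3d's sched_lock. */
    	struct mutex sched_lock;
    };

    struct my_job {
    	/* Scheduler job state would live here. */
    	int placeholder;
    };

    /* Stand-in for drm_sched_job_init(): this is the step that assigns
     * the job's scheduler-fence seqno. */
    static int my_job_init(struct my_dev *dev, struct my_job *job)
    {
    	return 0;
    }

    /* Stand-in for drm_sched_entity_push_job(): queues the job so its
     * fence will eventually signal. */
    static void my_job_push(struct my_dev *dev, struct my_job *job)
    {
    }

    static int my_submit(struct my_dev *dev,
    		     struct my_job *bin, struct my_job *render)
    {
    	int ret;

    	/* Seqnos are handed out at init time. If another submitter
    	 * could run its init between our init and push, the fences
    	 * could signal out of seqno order, so both steps sit inside
    	 * one critical section. */
    	mutex_lock(&dev->sched_lock);

    	ret = my_job_init(dev, bin);
    	if (ret)
    		goto fail_unlock;

    	ret = my_job_init(dev, render);
    	if (ret)
    		goto fail_unlock;

    	my_job_push(dev, bin);
    	my_job_push(dev, render);

    	mutex_unlock(&dev->sched_lock);
    	return 0;

    fail_unlock:
    	/* v2 of the patch moved the unlock onto the error path too. */
    	mutex_unlock(&dev->sched_lock);
    	return ret;
    }

The design choice is deliberately coarse: a single per-device mutex covers
every queue, serializing all submissions, which is simple and cheap given how
short the init-and-push critical section is.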