author    Chris Wilson <chris@chris-wilson.co.uk>  2019-08-14 19:24:01 +0100
committer Chris Wilson <chris@chris-wilson.co.uk>  2019-08-16 12:40:58 +0100
commit    b016cd6ed4b772759804e0d6082bd1f5ca63b8ee (patch)
tree      278f8b1426b40316f5744cd6155ec51ecc9dd6c7 /include/linux
parent    dc2e1e5b279966affbd11ff7cfef52eb634ca2c9 (diff)
dma-buf: Restore seqlock around dma_resv updates
This reverts

67c97fb79a7f ("dma-buf: add reservation_object_fences helper")
dd7a7d1ff2f1 ("drm/i915: use new reservation_object_fences helper")
0e1d8083bddb ("dma-buf: further relax reservation_object_add_shared_fence")
5d344f58da76 ("dma-buf: nuke reservation_object seq number")

The scenario that defeats simply grabbing a set of shared/exclusive
fences and using them blissfully under RCU is that any of those fences
may be reallocated by a SLAB_TYPESAFE_BY_RCU fence slab cache. In this
scenario, while keeping the rcu_read_lock we need to establish that no
fence was changed in the dma_resv after a read (or full) memory barrier.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190814182401.25009-1-chris@chris-wilson.co.uk
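The reader-side pattern that the restored seqcount enables looks roughly
like the sketch below. This is illustrative only, not code from the
patch; the helper name dma_resv_read_fences_sketch is invented here.
Readers sample obj->seq before dereferencing the fence pointers and
retry if a writer raced with them, which closes the
SLAB_TYPESAFE_BY_RCU reallocation window described above.

    #include <linux/dma-resv.h>
    #include <linux/rcupdate.h>
    #include <linux/seqlock.h>

    /* Sketch only: snapshot the fence pointers under rcu_read_lock(),
     * guarded by obj->seq. If the seqcount changed while we were
     * reading, the pointers may refer to recycled fences, so retry. */
    static void dma_resv_read_fences_sketch(struct dma_resv *obj,
                                            struct dma_fence **excl,
                                            struct dma_resv_list **list)
    {
            unsigned int seq;

            rcu_read_lock();
            do {
                    seq = read_seqcount_begin(&obj->seq);
                    *excl = rcu_dereference(obj->fence_excl);
                    *list = rcu_dereference(obj->fence);
                    /* take any needed fence references here */
            } while (read_seqcount_retry(&obj->seq, seq));
            rcu_read_unlock();
    }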
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/dma-resv.h  |  113
1 file changed, 48 insertions(+), 65 deletions(-)
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 38f2802afabb..ee50d10f052b 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -46,6 +46,8 @@
#include <linux/rcupdate.h>
extern struct ww_class reservation_ww_class;
+extern struct lock_class_key reservation_seqcount_class;
+extern const char reservation_seqcount_string[];
/**
* struct dma_resv_list - a list of shared fences
@@ -69,6 +71,7 @@ struct dma_resv_list {
*/
struct dma_resv {
struct ww_mutex lock;
+ seqcount_t seq;
struct dma_fence __rcu *fence_excl;
struct dma_resv_list __rcu *fence;
@@ -78,24 +81,6 @@ struct dma_resv {
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
/**
- * dma_resv_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the exclusive fence (if any). Does NOT take a
- * reference. Writers must hold obj->lock, readers may only
- * hold a RCU read side lock.
- *
- * RETURNS
- * The exclusive fence or NULL
- */
-static inline struct dma_fence *dma_resv_get_excl(struct dma_resv *obj)
-{
- return rcu_dereference_protected(obj->fence_excl,
- dma_resv_held(obj));
-}
-
-/**
* dma_resv_get_list - get the reservation object's
* shared fence list, with update-side lock held
* @obj: the reservation object
@@ -110,53 +95,6 @@ static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
}
/**
- * dma_resv_fences - read consistent fence pointers
- * @obj: reservation object where we get the fences from
- * @excl: pointer for the exclusive fence
- * @list: pointer for the shared fence list
- *
- * Make sure we have a consistent exclusive fence and shared fence list.
- * Must be called with rcu read side lock held.
- */
-static inline void dma_resv_fences(struct dma_resv *obj,
- struct dma_fence **excl,
- struct dma_resv_list **list,
- u32 *shared_count)
-{
- do {
- *excl = rcu_dereference(obj->fence_excl);
- *list = rcu_dereference(obj->fence);
- *shared_count = *list ? (*list)->shared_count : 0;
- smp_rmb(); /* See dma_resv_add_excl_fence */
- } while (rcu_access_pointer(obj->fence_excl) != *excl);
-}
-
-/**
- * dma_resv_get_excl_rcu - get the reservation object's
- * exclusive fence, without lock held.
- * @obj: the reservation object
- *
- * If there is an exclusive fence, this atomically increments its
- * reference count and returns it.
- *
- * RETURNS
- * The exclusive fence or NULL if none
- */
-static inline struct dma_fence *dma_resv_get_excl_rcu(struct dma_resv *obj)
-{
- struct dma_fence *fence;
-
- if (!rcu_access_pointer(obj->fence_excl))
- return NULL;
-
- rcu_read_lock();
- fence = dma_fence_get_rcu_safe(&obj->fence_excl);
- rcu_read_unlock();
-
- return fence;
-}
-
-/**
* dma_resv_lock - lock the reservation object
* @obj: the reservation object
* @ctx: the locking context
@@ -290,6 +228,51 @@ static inline void dma_resv_unlock(struct dma_resv *obj)
ww_mutex_unlock(&obj->lock);
}
+/**
+ * dma_resv_get_excl - get the reservation object's
+ * exclusive fence, with update-side lock held
+ * @obj: the reservation object
+ *
+ * Returns the exclusive fence (if any). Does NOT take a
+ * reference. Writers must hold obj->lock, readers may only
+ * hold a RCU read side lock.
+ *
+ * RETURNS
+ * The exclusive fence or NULL
+ */
+static inline struct dma_fence *
+dma_resv_get_excl(struct dma_resv *obj)
+{
+ return rcu_dereference_protected(obj->fence_excl,
+ dma_resv_held(obj));
+}
+
+/**
+ * dma_resv_get_excl_rcu - get the reservation object's
+ * exclusive fence, without lock held.
+ * @obj: the reservation object
+ *
+ * If there is an exclusive fence, this atomically increments its
+ * reference count and returns it.
+ *
+ * RETURNS
+ * The exclusive fence or NULL if none
+ */
+static inline struct dma_fence *
+dma_resv_get_excl_rcu(struct dma_resv *obj)
+{
+ struct dma_fence *fence;
+
+ if (!rcu_access_pointer(obj->fence_excl))
+ return NULL;
+
+ rcu_read_lock();
+ fence = dma_fence_get_rcu_safe(&obj->fence_excl);
+ rcu_read_unlock();
+
+ return fence;
+}
+
void dma_resv_init(struct dma_resv *obj);
void dma_resv_fini(struct dma_resv *obj);
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
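A usage note on the re-added RCU accessor: per its kernel-doc above,
dma_resv_get_excl_rcu() may be called without obj->lock held and returns
a referenced exclusive fence, or NULL if there is none. A minimal,
hypothetical caller might look like the following; wait_for_excl_fence
is an invented name for illustration.

    #include <linux/dma-resv.h>
    #include <linux/dma-fence.h>
    #include <linux/sched.h>

    /* Hypothetical caller: grab a reference to the exclusive fence
     * without taking obj->lock, wait for it, then drop the reference. */
    static int wait_for_excl_fence(struct dma_resv *obj)
    {
            struct dma_fence *fence;
            long ret;

            fence = dma_resv_get_excl_rcu(obj); /* may return NULL */
            if (!fence)
                    return 0;

            ret = dma_fence_wait_timeout(fence, true, MAX_SCHEDULE_TIMEOUT);
            dma_fence_put(fence);

            return ret < 0 ? ret : 0;
    }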