From b016cd6ed4b772759804e0d6082bd1f5ca63b8ee Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Wed, 14 Aug 2019 19:24:01 +0100
Subject: dma-buf: Restore seqlock around dma_resv updates
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This reverts
67c97fb79a7f ("dma-buf: add reservation_object_fences helper")
dd7a7d1ff2f1 ("drm/i915: use new reservation_object_fences helper")
0e1d8083bddb ("dma-buf: further relax reservation_object_add_shared_fence")
5d344f58da76 ("dma-buf: nuke reservation_object seq number")

The scenario that defeats simply grabbing a set of shared/exclusive
fences and using them blissfully under RCU is that any of those fences
may be reallocated by a SLAB_TYPESAFE_BY_RCU fence slab cache. In this
scenario, while keeping the rcu_read_lock we need to establish that no
fence was changed in the dma_resv after a read (or full) memory barrier.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter
Acked-by: Christian König
Link: https://patchwork.freedesktop.org/patch/msgid/20190814182401.25009-1-chris@chris-wilson.co.uk
---
 include/linux/dma-resv.h | 113 ++++++++++++++++++++---------------------------
 1 file changed, 48 insertions(+), 65 deletions(-)

diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 38f2802afabb..ee50d10f052b 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -46,6 +46,8 @@
 #include <linux/rcupdate.h>
 
 extern struct ww_class reservation_ww_class;
+extern struct lock_class_key reservation_seqcount_class;
+extern const char reservation_seqcount_string[];
 
 /**
  * struct dma_resv_list - a list of shared fences
@@ -69,6 +71,7 @@ struct dma_resv_list {
  */
 struct dma_resv {
 	struct ww_mutex lock;
+	seqcount_t seq;
 
 	struct dma_fence __rcu *fence_excl;
 	struct dma_resv_list __rcu *fence;
@@ -77,24 +80,6 @@ struct dma_resv {
 #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
 #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
 
-/**
- * dma_resv_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the exclusive fence (if any). Does NOT take a
- * reference. Writers must hold obj->lock, readers may only
- * hold a RCU read side lock.
- *
- * RETURNS
- * The exclusive fence or NULL
- */
-static inline struct dma_fence *dma_resv_get_excl(struct dma_resv *obj)
-{
-	return rcu_dereference_protected(obj->fence_excl,
-					 dma_resv_held(obj));
-}
-
 /**
  * dma_resv_get_list - get the reservation object's
  * shared fence list, with update-side lock held
@@ -109,53 +94,6 @@ static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
 					 dma_resv_held(obj));
 }
 
-/**
- * dma_resv_fences - read consistent fence pointers
- * @obj: reservation object where we get the fences from
- * @excl: pointer for the exclusive fence
- * @list: pointer for the shared fence list
- *
- * Make sure we have a consisten exclusive fence and shared fence list.
- * Must be called with rcu read side lock held.
- */
-static inline void dma_resv_fences(struct dma_resv *obj,
-				   struct dma_fence **excl,
-				   struct dma_resv_list **list,
-				   u32 *shared_count)
-{
-	do {
-		*excl = rcu_dereference(obj->fence_excl);
-		*list = rcu_dereference(obj->fence);
-		*shared_count = *list ? (*list)->shared_count : 0;
-		smp_rmb(); /* See dma_resv_add_excl_fence */
-	} while (rcu_access_pointer(obj->fence_excl) != *excl);
-}
-
-/**
- * dma_resv_get_excl_rcu - get the reservation object's
- * exclusive fence, without lock held.
- * @obj: the reservation object
- *
- * If there is an exclusive fence, this atomically increments it's
- * reference count and returns it.
- *
- * RETURNS
- * The exclusive fence or NULL if none
- */
-static inline struct dma_fence *dma_resv_get_excl_rcu(struct dma_resv *obj)
-{
-	struct dma_fence *fence;
-
-	if (!rcu_access_pointer(obj->fence_excl))
-		return NULL;
-
-	rcu_read_lock();
-	fence = dma_fence_get_rcu_safe(&obj->fence_excl);
-	rcu_read_unlock();
-
-	return fence;
-}
-
 /**
  * dma_resv_lock - lock the reservation object
  * @obj: the reservation object
@@ -290,6 +228,51 @@ static inline void dma_resv_unlock(struct dma_resv *obj)
 	ww_mutex_unlock(&obj->lock);
 }
 
+/**
+ * dma_resv_get_excl - get the reservation object's
+ * exclusive fence, with update-side lock held
+ * @obj: the reservation object
+ *
+ * Returns the exclusive fence (if any). Does NOT take a
+ * reference. Writers must hold obj->lock, readers may only
+ * hold a RCU read side lock.
+ *
+ * RETURNS
+ * The exclusive fence or NULL
+ */
+static inline struct dma_fence *
+dma_resv_get_excl(struct dma_resv *obj)
+{
+	return rcu_dereference_protected(obj->fence_excl,
+					 dma_resv_held(obj));
+}
+
+/**
+ * dma_resv_get_excl_rcu - get the reservation object's
+ * exclusive fence, without lock held.
+ * @obj: the reservation object
+ *
+ * If there is an exclusive fence, this atomically increments it's
+ * reference count and returns it.
+ *
+ * RETURNS
+ * The exclusive fence or NULL if none
+ */
+static inline struct dma_fence *
+dma_resv_get_excl_rcu(struct dma_resv *obj)
+{
+	struct dma_fence *fence;
+
+	if (!rcu_access_pointer(obj->fence_excl))
+		return NULL;
+
+	rcu_read_lock();
+	fence = dma_fence_get_rcu_safe(&obj->fence_excl);
+	rcu_read_unlock();
+
+	return fence;
+}
+
 void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
 int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
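
For reference, a minimal sketch of the reader pattern the restored seqcount enables. The helper below is hypothetical and not part of this patch; the in-tree accessors such as dma_resv_get_fences_rcu() in drivers/dma-buf/dma-resv.c are the authoritative users of obj->seq. It snapshots the exclusive fence together with the shared count and retries whenever a writer raced with the read:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/rcupdate.h>
#include <linux/seqlock.h>

/*
 * Hypothetical example, not taken from this patch: grab a reference to
 * the exclusive fence plus a shared_count that is consistent with it.
 * Because fences may come from a SLAB_TYPESAFE_BY_RCU cache, holding
 * rcu_read_lock() alone does not guarantee the pointers still describe
 * the same fences by the time we use them; re-checking obj->seq detects
 * a concurrent update so we can retry.
 */
static struct dma_fence *
example_snapshot_fences(struct dma_resv *obj, unsigned int *shared_count)
{
	struct dma_resv_list *list;
	struct dma_fence *excl;
	unsigned int seq;

retry:
	rcu_read_lock();
	seq = read_seqcount_begin(&obj->seq);

	excl = rcu_dereference(obj->fence_excl);
	if (excl && !dma_fence_get_rcu(excl)) {
		/* The exclusive fence was already being released. */
		rcu_read_unlock();
		goto retry;
	}

	list = rcu_dereference(obj->fence);
	*shared_count = list ? list->shared_count : 0;

	if (read_seqcount_retry(&obj->seq, seq)) {
		/* A writer updated the dma_resv under us; drop and retry. */
		rcu_read_unlock();
		dma_fence_put(excl);
		goto retry;
	}

	rcu_read_unlock();
	return excl; /* caller owns a reference, or NULL */
}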