author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-11-24 18:44:01 -0800
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-11-24 18:44:01 -0800
commit | e2125dac22f2c9c66c412cd8e049a7305af59f73 (patch) |
tree | 8584af248ee592622bd6e8a7f9cee0b041b784cc /lib |
parent | e195ca6cb6f21633e56322d5aa11ed59cdb22fb2 (diff) |
parent | fffc9a260e38acec3187515738122a3ecb24ac90 (diff) |
download | lwn-e2125dac22f2c9c66c412cd8e049a7305af59f73.tar.gz lwn-e2125dac22f2c9c66c412cd8e049a7305af59f73.zip |
Merge tag 'xarray-4.20-rc4' of git://git.infradead.org/users/willy/linux-dax
Pull XArray updates from Matthew Wilcox:
"We found some bugs in the DAX conversion to XArray (and one bug which
predated the XArray conversion). There were a couple of bugs in some
of the higher-level functions, which aren't actually being called in
today's kernel, but surfaced as a result of converting existing radix
tree & IDR users over to the XArray.
Some of the other changes to how the higher-level APIs work were also
motivated by converting various users; again, they're not in use in
today's kernel, so changing them has a low probability of introducing
a bug.
Dan can still trigger a bug in the DAX code with hot-offline/online,
and we're working on tracking that down"
* tag 'xarray-4.20-rc4' of git://git.infradead.org/users/willy/linux-dax:
XArray tests: Add missing locking
dax: Avoid losing wakeup in dax_lock_mapping_entry
dax: Fix huge page faults
dax: Fix dax_unlock_mapping_entry for PMD pages
dax: Reinstate RCU protection of inode
dax: Make sure the unlocking entry isn't locked
dax: Remove optimisation from dax_lock_mapping_entry
XArray tests: Correct some 64-bit assumptions
XArray: Correct xa_store_range
XArray: Fix Documentation
XArray: Handle NULL pointers differently for allocation
XArray: Unify xa_store and __xa_store
XArray: Add xa_store_bh() and xa_store_irq()
XArray: Turn xa_erase into an exported function
XArray: Unify xa_cmpxchg and __xa_cmpxchg
XArray: Regularise xa_reserve
nilfs2: Use xa_erase_irq
XArray: Export __xa_foo to non-GPL modules
XArray: Fix xa_for_each with a single element at 0
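Several of the commits above unify the convenience wrappers with their locked counterparts: after this merge, xa_store() and xa_erase() simply take the xa_lock and call __xa_store()/__xa_erase(), and the erase functions are exported to all modules. As editorial context (not part of the commit), a minimal sketch of how a caller uses those wrappers; the XArray name my_cache and the helper names are hypothetical:

```c
#include <linux/xarray.h>

static DEFINE_XARRAY(my_cache);		/* hypothetical XArray instance */

/* Store an item; xa_store() takes and releases the xa_lock internally. */
static int my_cache_add(unsigned long index, void *item)
{
	void *old = xa_store(&my_cache, index, item, GFP_KERNEL);

	/* xa_err() extracts -EINVAL/-ENOMEM from an error entry, else 0 */
	return xa_err(old);
}

/* Erase an item; no GFP flags needed because erasing never allocates. */
static void *my_cache_del(unsigned long index)
{
	return xa_erase(&my_cache, index);
}
```

Callers that already hold the xa_lock use the double-underscore variants instead; that split is exactly what the "Unify xa_store and __xa_store" and "Turn xa_erase into an exported function" patches formalize.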
Diffstat (limited to 'lib')
-rw-r--r-- | lib/test_xarray.c | 50
-rw-r--r-- | lib/xarray.c | 139
2 files changed, 107 insertions, 82 deletions
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index aa47754150ce..0598e86af8fc 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -208,15 +208,19 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
 		XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2));
 
 		/* We should see two elements in the array */
+		rcu_read_lock();
 		xas_for_each(&xas, entry, ULONG_MAX)
 			seen++;
+		rcu_read_unlock();
 		XA_BUG_ON(xa, seen != 2);
 
 		/* One of which is marked */
 		xas_set(&xas, 0);
 		seen = 0;
+		rcu_read_lock();
 		xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
 			seen++;
+		rcu_read_unlock();
 		XA_BUG_ON(xa, seen != 1);
 	}
 	XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
@@ -373,6 +377,12 @@ static noinline void check_reserve(struct xarray *xa)
 	xa_erase_index(xa, 12345678);
 	XA_BUG_ON(xa, !xa_empty(xa));
 
+	/* And so does xa_insert */
+	xa_reserve(xa, 12345678, GFP_KERNEL);
+	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != 0);
+	xa_erase_index(xa, 12345678);
+	XA_BUG_ON(xa, !xa_empty(xa));
+
 	/* Can iterate through a reserved entry */
 	xa_store_index(xa, 5, GFP_KERNEL);
 	xa_reserve(xa, 6, GFP_KERNEL);
@@ -436,7 +446,9 @@ static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
 	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
 	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
 
+	xas_lock(&xas);
 	XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(min)) != xa_mk_value(index));
+	xas_unlock(&xas);
 	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(min));
 	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(min));
 	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
@@ -452,9 +464,11 @@ static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
 	XA_STATE(xas, xa, index);
 	xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);
 
+	xas_lock(&xas);
 	XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
 	XA_BUG_ON(xa, xas.xa_index != index);
 	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
+	xas_unlock(&xas);
 	XA_BUG_ON(xa, !xa_empty(xa));
 }
 #endif
@@ -498,7 +512,7 @@ static noinline void check_multi_store(struct xarray *xa)
 	rcu_read_unlock();
 
 	/* We can erase multiple values with a single store */
-	xa_store_order(xa, 0, 63, NULL, GFP_KERNEL);
+	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
 	XA_BUG_ON(xa, !xa_empty(xa));
 
 	/* Even when the first slot is empty but the others aren't */
@@ -702,7 +716,7 @@ static noinline void check_multi_find_2(struct xarray *xa)
 		}
 	}
 }
 
-static noinline void check_find(struct xarray *xa)
+static noinline void check_find_1(struct xarray *xa)
 {
 	unsigned long i, j, k;
@@ -748,6 +762,34 @@ static noinline void check_find(struct xarray *xa)
 		XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
 	}
 	XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_find_2(struct xarray *xa)
+{
+	void *entry;
+	unsigned long i, j, index = 0;
+
+	xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+		XA_BUG_ON(xa, true);
+	}
+
+	for (i = 0; i < 1024; i++) {
+		xa_store_index(xa, index, GFP_KERNEL);
+		j = 0;
+		index = 0;
+		xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+			XA_BUG_ON(xa, xa_mk_value(index) != entry);
+			XA_BUG_ON(xa, index != j++);
+		}
+	}
+
+	xa_destroy(xa);
+}
+
+static noinline void check_find(struct xarray *xa)
+{
+	check_find_1(xa);
+	check_find_2(xa);
 	check_multi_find(xa);
 	check_multi_find_2(xa);
 }
@@ -1067,7 +1109,7 @@ static noinline void check_store_range(struct xarray *xa)
 			__check_store_range(xa, 4095 + i, 4095 + j);
 			__check_store_range(xa, 4096 + i, 4096 + j);
 			__check_store_range(xa, 123456 + i, 123456 + j);
-			__check_store_range(xa, UINT_MAX + i, UINT_MAX + j);
+			__check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
 		}
 	}
 }
@@ -1146,10 +1188,12 @@ static noinline void check_account(struct xarray *xa)
 		XA_STATE(xas, xa, 1 << order);
 
 		xa_store_order(xa, 0, order, xa, GFP_KERNEL);
+		rcu_read_lock();
 		xas_load(&xas);
 		XA_BUG_ON(xa, xas.xa_node->count == 0);
 		XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
 		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
+		rcu_read_unlock();
 
 		xa_store_order(xa, 1 << order, order, xa_mk_value(1 << order),
 				GFP_KERNEL);
diff --git a/lib/xarray.c b/lib/xarray.c
index 8b176f009c08..bbacca576593 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -610,8 +610,8 @@ static int xas_expand(struct xa_state *xas, void *head)
  * (see the xa_cmpxchg() implementation for an example).
  *
  * Return: If the slot already existed, returns the contents of this slot.
- * If the slot was newly created, returns NULL. If it failed to create the
- * slot, returns NULL and indicates the error in @xas.
+ * If the slot was newly created, returns %NULL. If it failed to create the
+ * slot, returns %NULL and indicates the error in @xas.
  */
 static void *xas_create(struct xa_state *xas)
 {
@@ -1334,44 +1334,31 @@ void *__xa_erase(struct xarray *xa, unsigned long index)
 	XA_STATE(xas, xa, index);
 	return xas_result(&xas, xas_store(&xas, NULL));
 }
-EXPORT_SYMBOL_GPL(__xa_erase);
+EXPORT_SYMBOL(__xa_erase);
 
 /**
- * xa_store() - Store this entry in the XArray.
+ * xa_erase() - Erase this entry from the XArray.
  * @xa: XArray.
- * @index: Index into array.
- * @entry: New entry.
- * @gfp: Memory allocation flags.
+ * @index: Index of entry.
  *
- * After this function returns, loads from this index will return @entry.
- * Storing into an existing multislot entry updates the entry of every index.
- * The marks associated with @index are unaffected unless @entry is %NULL.
+ * This function is the equivalent of calling xa_store() with %NULL as
+ * the third argument. The XArray does not need to allocate memory, so
+ * the user does not need to provide GFP flags.
  *
- * Context: Process context. Takes and releases the xa_lock. May sleep
- * if the @gfp flags permit.
- * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
- * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
- * failed.
+ * Context: Any context. Takes and releases the xa_lock.
+ * Return: The entry which used to be at this index.
  */
-void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
+void *xa_erase(struct xarray *xa, unsigned long index)
 {
-	XA_STATE(xas, xa, index);
-	void *curr;
-
-	if (WARN_ON_ONCE(xa_is_internal(entry)))
-		return XA_ERROR(-EINVAL);
+	void *entry;
 
-	do {
-		xas_lock(&xas);
-		curr = xas_store(&xas, entry);
-		if (xa_track_free(xa) && entry)
-			xas_clear_mark(&xas, XA_FREE_MARK);
-		xas_unlock(&xas);
-	} while (xas_nomem(&xas, gfp));
+	xa_lock(xa);
+	entry = __xa_erase(xa, index);
+	xa_unlock(xa);
 
-	return xas_result(&xas, curr);
+	return entry;
 }
-EXPORT_SYMBOL(xa_store);
+EXPORT_SYMBOL(xa_erase);
 
 /**
  * __xa_store() - Store this entry in the XArray.
@@ -1395,10 +1382,12 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
 
 	if (WARN_ON_ONCE(xa_is_internal(entry)))
 		return XA_ERROR(-EINVAL);
+	if (xa_track_free(xa) && !entry)
+		entry = XA_ZERO_ENTRY;
 
 	do {
 		curr = xas_store(&xas, entry);
-		if (xa_track_free(xa) && entry)
+		if (xa_track_free(xa))
 			xas_clear_mark(&xas, XA_FREE_MARK);
 	} while (__xas_nomem(&xas, gfp));
 
@@ -1407,45 +1396,33 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
 EXPORT_SYMBOL(__xa_store);
 
 /**
- * xa_cmpxchg() - Conditionally replace an entry in the XArray.
+ * xa_store() - Store this entry in the XArray.
  * @xa: XArray.
  * @index: Index into array.
- * @old: Old value to test against.
- * @entry: New value to place in array.
+ * @entry: New entry.
  * @gfp: Memory allocation flags.
  *
- * If the entry at @index is the same as @old, replace it with @entry.
- * If the return value is equal to @old, then the exchange was successful.
+ * After this function returns, loads from this index will return @entry.
+ * Storing into an existing multislot entry updates the entry of every index.
+ * The marks associated with @index are unaffected unless @entry is %NULL.
  *
- * Context: Process context. Takes and releases the xa_lock. May sleep
- * if the @gfp flags permit.
- * Return: The old value at this index or xa_err() if an error happened.
+ * Context: Any context. Takes and releases the xa_lock.
+ * May sleep if the @gfp flags permit.
+ * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
+ * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
+ * failed.
  */
-void *xa_cmpxchg(struct xarray *xa, unsigned long index,
-			void *old, void *entry, gfp_t gfp)
+void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
 {
-	XA_STATE(xas, xa, index);
 	void *curr;
 
-	if (WARN_ON_ONCE(xa_is_internal(entry)))
-		return XA_ERROR(-EINVAL);
-
-	do {
-		xas_lock(&xas);
-		curr = xas_load(&xas);
-		if (curr == XA_ZERO_ENTRY)
-			curr = NULL;
-		if (curr == old) {
-			xas_store(&xas, entry);
-			if (xa_track_free(xa) && entry)
-				xas_clear_mark(&xas, XA_FREE_MARK);
-		}
-		xas_unlock(&xas);
-	} while (xas_nomem(&xas, gfp));
+	xa_lock(xa);
+	curr = __xa_store(xa, index, entry, gfp);
+	xa_unlock(xa);
 
-	return xas_result(&xas, curr);
+	return curr;
 }
-EXPORT_SYMBOL(xa_cmpxchg);
+EXPORT_SYMBOL(xa_store);
 
 /**
  * __xa_cmpxchg() - Store this entry in the XArray.
@@ -1471,6 +1448,8 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
 
 	if (WARN_ON_ONCE(xa_is_internal(entry)))
 		return XA_ERROR(-EINVAL);
+	if (xa_track_free(xa) && !entry)
+		entry = XA_ZERO_ENTRY;
 
 	do {
 		curr = xas_load(&xas);
@@ -1478,7 +1457,7 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
 			curr = NULL;
 		if (curr == old) {
 			xas_store(&xas, entry);
-			if (xa_track_free(xa) && entry)
+			if (xa_track_free(xa))
 				xas_clear_mark(&xas, XA_FREE_MARK);
 		}
 	} while (__xas_nomem(&xas, gfp));
@@ -1488,7 +1467,7 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
 EXPORT_SYMBOL(__xa_cmpxchg);
 
 /**
- * xa_reserve() - Reserve this index in the XArray.
+ * __xa_reserve() - Reserve this index in the XArray.
  * @xa: XArray.
  * @index: Index into array.
  * @gfp: Memory allocation flags.
  *
@@ -1496,33 +1475,32 @@ EXPORT_SYMBOL(__xa_cmpxchg);
 * Ensures there is somewhere to store an entry at @index in the array.
 * If there is already something stored at @index, this function does
 * nothing. If there was nothing there, the entry is marked as reserved.
- * Loads from @index will continue to see a %NULL pointer until a
- * subsequent store to @index.
+ * Loading from a reserved entry returns a %NULL pointer.
 *
 * If you do not use the entry that you have reserved, call xa_release()
 * or xa_erase() to free any unnecessary memory.
 *
- * Context: Process context. Takes and releases the xa_lock, IRQ or BH safe
- * if specified in XArray flags. May sleep if the @gfp flags permit.
+ * Context: Any context. Expects the xa_lock to be held on entry. May
+ * release the lock, sleep and reacquire the lock if the @gfp flags permit.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
-int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
+int __xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
 	XA_STATE(xas, xa, index);
-	unsigned int lock_type = xa_lock_type(xa);
 	void *curr;
 
 	do {
-		xas_lock_type(&xas, lock_type);
 		curr = xas_load(&xas);
-		if (!curr)
+		if (!curr) {
 			xas_store(&xas, XA_ZERO_ENTRY);
-		xas_unlock_type(&xas, lock_type);
-	} while (xas_nomem(&xas, gfp));
+			if (xa_track_free(xa))
+				xas_clear_mark(&xas, XA_FREE_MARK);
+		}
+	} while (__xas_nomem(&xas, gfp));
 
 	return xas_error(&xas);
 }
-EXPORT_SYMBOL(xa_reserve);
+EXPORT_SYMBOL(__xa_reserve);
 
 #ifdef CONFIG_XARRAY_MULTI
 static void xas_set_range(struct xa_state *xas, unsigned long first,
@@ -1587,8 +1565,9 @@ void *xa_store_range(struct xarray *xa, unsigned long first,
 	do {
 		xas_lock(&xas);
 		if (entry) {
-			unsigned int order = (last == ~0UL) ? 64 :
-					ilog2(last + 1);
+			unsigned int order = BITS_PER_LONG;
+			if (last + 1)
+				order = __ffs(last + 1);
 			xas_set_order(&xas, last, order);
 			xas_create(&xas);
 			if (xas_error(&xas))
@@ -1662,7 +1641,7 @@ EXPORT_SYMBOL(__xa_alloc);
 * @index: Index of entry.
 * @mark: Mark number.
 *
- * Attempting to set a mark on a NULL entry does not succeed.
+ * Attempting to set a mark on a %NULL entry does not succeed.
 *
 * Context: Any context. Expects xa_lock to be held on entry.
 */
@@ -1674,7 +1653,7 @@ void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
 	if (entry)
 		xas_set_mark(&xas, mark);
 }
-EXPORT_SYMBOL_GPL(__xa_set_mark);
+EXPORT_SYMBOL(__xa_set_mark);
 
 /**
 * __xa_clear_mark() - Clear this mark on this entry while locked.
@@ -1692,7 +1671,7 @@ void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
 	if (entry)
 		xas_clear_mark(&xas, mark);
 }
-EXPORT_SYMBOL_GPL(__xa_clear_mark);
+EXPORT_SYMBOL(__xa_clear_mark);
 
 /**
 * xa_get_mark() - Inquire whether this mark is set on this entry.
@@ -1732,7 +1711,7 @@ EXPORT_SYMBOL(xa_get_mark);
 * @index: Index of entry.
 * @mark: Mark number.
 *
- * Attempting to set a mark on a NULL entry does not succeed.
+ * Attempting to set a mark on a %NULL entry does not succeed.
 *
 * Context: Process context. Takes and releases the xa_lock.
 */
@@ -1829,6 +1808,8 @@ void *xa_find_after(struct xarray *xa, unsigned long *indexp,
 			entry = xas_find_marked(&xas, max, filter);
 		else
 			entry = xas_find(&xas, max);
+		if (xas.xa_node == XAS_BOUNDS)
+			break;
 		if (xas.xa_shift) {
 			if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
 				continue;
@@ -1899,7 +1880,7 @@ static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
 *
 * The @filter may be an XArray mark value, in which case entries which are
 * marked with that mark will be copied. It may also be %XA_PRESENT, in
- * which case all entries which are not NULL will be copied.
+ * which case all entries which are not %NULL will be copied.
 *
 * The entries returned may not represent a snapshot of the XArray at a
 * moment in time. For example, if another thread stores to index 5, then
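The test changes at the top of this diff add rcu_read_lock()/rcu_read_unlock() around the advanced-API iterators, because xas_for_each() walks internal nodes that are freed under RCU. A minimal editorial sketch of that locking pattern (the helper name my_count_entries is hypothetical, not from the commit):

```c
#include <linux/rcupdate.h>
#include <linux/xarray.h>

/* Count the present entries in @xa using the advanced (xas_*) API. */
static unsigned long my_count_entries(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);		/* iteration cursor starting at index 0 */
	unsigned long seen = 0;
	void *entry;

	rcu_read_lock();		/* xas_for_each() must run under RCU or the xa_lock */
	xas_for_each(&xas, entry, ULONG_MAX)
		seen++;
	rcu_read_unlock();

	return seen;
}
```

A walk that also modifies entries with xas_store() must hold the xa_lock instead, which is why the check_multi_store_*() tests above switch to xas_lock()/xas_unlock() rather than RCU.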