author     Matthew Wilcox <willy@infradead.org>    2018-08-15 14:13:29 -0400
committer  Matthew Wilcox <willy@infradead.org>    2018-10-21 10:46:46 -0400
commit     0e9446c35a80931044b6d8d2d74a9cabd248539f (patch)
tree       22064e303ba555570acaefb65734a53be36d4d44 /lib
parent     4f06d6302da682157890f72c0573e12a73536814 (diff)
xarray: Add range store functionality
This version of xa_store_range() really only supports load and store. Our only user needs nothing more than basic load-and-store functionality, so there is no need yet to do the extra work required to support marking and overlapping stores correctly.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
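For orientation, here is a minimal caller-side sketch of the new API (assuming CONFIG_XARRAY_MULTI is enabled; the array and the stored value are illustrative and not taken from the patch):

#include <linux/xarray.h>

static DEFINE_XARRAY(example_xa);       /* hypothetical array, for illustration only */

static void store_range_example(void)
{
        /* Make indices 0-15 all return the same value entry. */
        xa_store_range(&example_xa, 0, 15, xa_mk_value(42), GFP_KERNEL);

        WARN_ON(xa_load(&example_xa, 7) != xa_mk_value(42));   /* inside the range */
        WARN_ON(xa_load(&example_xa, 16) != NULL);              /* just past the range */

        /* Storing NULL over the same range erases it again. */
        xa_store_range(&example_xa, 0, 15, NULL, GFP_KERNEL);
        WARN_ON(!xa_empty(&example_xa));
}

The new test below exercises exactly this store/load/erase pattern across many aligned and unaligned ranges.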
Diffstat (limited to 'lib')
-rw-r--r--    lib/test_xarray.c    34
-rw-r--r--    lib/xarray.c         97
2 files changed, 129 insertions, 2 deletions
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 0f06a93b4d0e..aa47754150ce 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -1039,6 +1039,39 @@ static noinline void check_create_range(struct xarray *xa)
check_create_range_3();
}
+static noinline void __check_store_range(struct xarray *xa, unsigned long first,
+ unsigned long last)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ xa_store_range(xa, first, last, xa_mk_value(first), GFP_KERNEL);
+
+ XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_value(first));
+ XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_value(first));
+ XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
+ XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);
+
+ xa_store_range(xa, first, last, NULL, GFP_KERNEL);
+#endif
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_store_range(struct xarray *xa)
+{
+ unsigned long i, j;
+
+ for (i = 0; i < 128; i++) {
+ for (j = i; j < 128; j++) {
+ __check_store_range(xa, i, j);
+ __check_store_range(xa, 128 + i, 128 + j);
+ __check_store_range(xa, 4095 + i, 4095 + j);
+ __check_store_range(xa, 4096 + i, 4096 + j);
+ __check_store_range(xa, 123456 + i, 123456 + j);
+ __check_store_range(xa, UINT_MAX + i, UINT_MAX + j);
+ }
+ }
+}
+
static LIST_HEAD(shadow_nodes);
static void test_update_node(struct xa_node *node)
@@ -1184,6 +1217,7 @@ static int xarray_checks(void)
check_destroy(&array);
check_move(&array);
check_create_range(&array);
+ check_store_range(&array);
check_store_iter(&array);
check_workingset(&array, 0);
diff --git a/lib/xarray.c b/lib/xarray.c
index 9a0d49d4b5f0..8b176f009c08 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -376,6 +376,14 @@ static void *xas_alloc(struct xa_state *xas, unsigned int shift)
return node;
}
+#ifdef CONFIG_XARRAY_MULTI
+/* Returns the number of indices covered by a given xa_state */
+static unsigned long xas_size(const struct xa_state *xas)
+{
+ return (xas->xa_sibs + 1UL) << xas->xa_shift;
+}
+#endif
+
/*
* Use this to calculate the maximum index that will need to be created
* in order to add the entry described by @xas. Because we cannot store a
@@ -388,8 +396,7 @@ static unsigned long xas_max(struct xa_state *xas)
#ifdef CONFIG_XARRAY_MULTI
if (xas->xa_shift || xas->xa_sibs) {
- unsigned long mask;
- mask = (((xas->xa_sibs + 1UL) << xas->xa_shift) - 1);
+ unsigned long mask = xas_size(xas) - 1;
max |= mask;
if (mask == max)
max++;
@@ -1517,6 +1524,92 @@ int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
}
EXPORT_SYMBOL(xa_reserve);
+#ifdef CONFIG_XARRAY_MULTI
+static void xas_set_range(struct xa_state *xas, unsigned long first,
+ unsigned long last)
+{
+ unsigned int shift = 0;
+ unsigned long sibs = last - first;
+ unsigned int offset = XA_CHUNK_MASK;
+
+ xas_set(xas, first);
+
+ while ((first & XA_CHUNK_MASK) == 0) {
+ if (sibs < XA_CHUNK_MASK)
+ break;
+ if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK))
+ break;
+ shift += XA_CHUNK_SHIFT;
+ if (offset == XA_CHUNK_MASK)
+ offset = sibs & XA_CHUNK_MASK;
+ sibs >>= XA_CHUNK_SHIFT;
+ first >>= XA_CHUNK_SHIFT;
+ }
+
+ offset = first & XA_CHUNK_MASK;
+ if (offset + sibs > XA_CHUNK_MASK)
+ sibs = XA_CHUNK_MASK - offset;
+ if ((((first + sibs + 1) << shift) - 1) > last)
+ sibs -= 1;
+
+ xas->xa_shift = shift;
+ xas->xa_sibs = sibs;
+}
+
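To make the decomposition above concrete, here is a standalone userspace sketch (not kernel code) that replays the same arithmetic as xas_set_range() and the storing loop in xa_store_range() below. It assumes the usual XA_CHUNK_SHIFT of 6, i.e. 64 slots per node; CHUNK_SHIFT, CHUNK_MASK, range_step() and decompose() are local stand-ins, not kernel symbols.

#include <stdio.h>

#define CHUNK_SHIFT     6UL                             /* 64 slots per node */
#define CHUNK_MASK      ((1UL << CHUNK_SHIFT) - 1)

/* One step of the storing loop: pick (shift, sibs) the way xas_set_range()
 * does, report what the step covers, and return how many indices it
 * consumed (the equivalent of xas_size()). */
static unsigned long range_step(unsigned long first, unsigned long last)
{
        unsigned long index = first;
        unsigned int shift = 0;
        unsigned long sibs = last - first;
        unsigned int offset = CHUNK_MASK;

        while ((first & CHUNK_MASK) == 0) {
                if (sibs < CHUNK_MASK)
                        break;
                if ((sibs == CHUNK_MASK) && (offset < CHUNK_MASK))
                        break;
                shift += CHUNK_SHIFT;
                if (offset == CHUNK_MASK)
                        offset = sibs & CHUNK_MASK;
                sibs >>= CHUNK_SHIFT;
                first >>= CHUNK_SHIFT;
        }

        offset = first & CHUNK_MASK;
        if (offset + sibs > CHUNK_MASK)
                sibs = CHUNK_MASK - offset;
        if ((((first + sibs + 1) << shift) - 1) > last)
                sibs -= 1;

        printf("  indices %lu-%lu: %lu slot(s) of %lu indices each (shift %u, %lu sibling(s))\n",
               index, index + ((sibs + 1UL) << shift) - 1,
               sibs + 1, 1UL << shift, shift, sibs);
        return (sibs + 1UL) << shift;
}

static void decompose(unsigned long first, unsigned long last)
{
        printf("range %lu-%lu:\n", first, last);
        while (first <= last)
                first += range_step(first, last);
}

int main(void)
{
        decompose(0, 4095);     /* aligned: a single large slot */
        decompose(1, 4096);     /* unaligned: several smaller steps */
        return 0;
}

With these assumptions, the aligned range 0-4095 becomes a single store of one shift-12 slot, while the unaligned range 1-4096 takes three steps: 63 single-index slots, 63 shift-6 slots covering 64 indices each, and one final single-index slot.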
+/**
+ * xa_store_range() - Store this entry at a range of indices in the XArray.
+ * @xa: XArray.
+ * @first: First index to affect.
+ * @last: Last index to affect.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * After this function returns, loads from any index between @first and @last,
+ * inclusive, will return @entry.
+ * Storing into an existing multislot entry updates the entry of every index.
+ * The marks associated with the affected indices are unaffected unless
+ * @entry is %NULL.
+ *
+ * Context: Process context. Takes and releases the xa_lock. May sleep
+ * if the @gfp flags permit.
+ * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in
+ * an XArray, or xa_err(-ENOMEM) if memory allocation failed.
+ */
+void *xa_store_range(struct xarray *xa, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp)
+{
+ XA_STATE(xas, xa, 0);
+
+ if (WARN_ON_ONCE(xa_is_internal(entry)))
+ return XA_ERROR(-EINVAL);
+ if (last < first)
+ return XA_ERROR(-EINVAL);
+
+ do {
+ xas_lock(&xas);
+ if (entry) {
+ unsigned int order = (last == ~0UL) ? 64 :
+ ilog2(last + 1);
+ xas_set_order(&xas, last, order);
+ xas_create(&xas);
+ if (xas_error(&xas))
+ goto unlock;
+ }
+ do {
+ xas_set_range(&xas, first, last);
+ xas_store(&xas, entry);
+ if (xas_error(&xas))
+ goto unlock;
+ first += xas_size(&xas);
+ } while (first <= last);
+unlock:
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, gfp));
+
+ return xas_result(&xas, NULL);
+}
+EXPORT_SYMBOL(xa_store_range);
+#endif /* CONFIG_XARRAY_MULTI */
+
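Because failures come back encoded in the returned pointer rather than as a bare errno, callers unwrap them with xa_err(). A hedged sketch of that pattern (the wrapper name is made up for illustration):

/* Returns 0 on success, or -EINVAL / -ENOMEM on failure. */
static int store_object_range(struct xarray *xa, unsigned long first,
                              unsigned long last, void *object)
{
        void *ret = xa_store_range(xa, first, last, object, GFP_KERNEL);

        return xa_err(ret);     /* xa_err() yields 0 for a non-error return */
}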
/**
* __xa_alloc() - Find somewhere to store this entry in the XArray.
* @xa: XArray.