path: root/lib
author    Linus Torvalds <torvalds@linux-foundation.org>  2023-10-30 11:09:38 -1000
committer Linus Torvalds <torvalds@linux-foundation.org>  2023-10-30 11:09:38 -1000
commit    9e87705289667a6c5185c619ea32f3d39314eb1b (patch)
tree      a5ac51396fa85c67dd37ec3f4d25429d1104a2bd /lib
parent    d5acbc60fafbe0fc94c552ce916dd592cd4c6371 (diff)
parent    b827ac419721a106ae2fccaa40576b0594edad92 (diff)
download  lwn-9e87705289667a6c5185c619ea32f3d39314eb1b.tar.gz
          lwn-9e87705289667a6c5185c619ea32f3d39314eb1b.zip
Merge tag 'bcachefs-2023-10-30' of https://evilpiepirate.org/git/bcachefs
Pull initial bcachefs updates from Kent Overstreet:
 "Here's the bcachefs filesystem pull request.

  One new patch since last week: the exportfs constants ended up conflicting
  with other filesystems that are also getting added to the global enum, so
  switched to new constants picked by Amir.

  The only new non fs/bcachefs/ patch is the objtool patch that adds bcachefs
  functions to the list of noreturns.

  The patch that exports osq_lock() has been dropped for now, per Ingo"

* tag 'bcachefs-2023-10-30' of https://evilpiepirate.org/git/bcachefs: (2781 commits)
  exportfs: Change bcachefs fid_type enum to avoid conflicts
  bcachefs: Refactor memcpy into direct assignment
  bcachefs: Fix drop_alloc_keys()
  bcachefs: snapshot_create_lock
  bcachefs: Fix snapshot skiplists during snapshot deletion
  bcachefs: bch2_sb_field_get() refactoring
  bcachefs: KEY_TYPE_error now counts towards i_sectors
  bcachefs: Fix handling of unknown bkey types
  bcachefs: Switch to unsafe_memcpy() in a few places
  bcachefs: Use struct_size()
  bcachefs: Correctly initialize new buckets on device resize
  bcachefs: Fix another smatch complaint
  bcachefs: Use strsep() in split_devs()
  bcachefs: Add iops fields to bch_member
  bcachefs: Rename bch_sb_field_members -> bch_sb_field_members_v1
  bcachefs: New superblock section members_v2
  bcachefs: Add new helper to retrieve bch_member from sb
  bcachefs: bucket_lock() is now a sleepable lock
  bcachefs: fix crc32c checksum merge byte order problem
  bcachefs: Fix bch2_inode_delete_keys()
  ...
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig              |   3
-rw-r--r--  lib/Kconfig.debug        |   9
-rw-r--r--  lib/Makefile             |   2
-rw-r--r--  lib/closure.c            | 205
-rw-r--r--  lib/errname.c            |   1
-rw-r--r--  lib/generic-radix-tree.c |  76
-rw-r--r--  lib/string_helpers.c     |  10
7 files changed, 299 insertions, 7 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 76fe64f933fc..2d90935d5a21 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -506,6 +506,9 @@ config ASSOCIATIVE_ARRAY
for more information.
+config CLOSURES
+ bool
+
config HAS_IOMEM
bool
depends on !NO_IOMEM
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index fa307f93fa2e..ce3a4abf40f8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1720,6 +1720,15 @@ config DEBUG_NOTIFIERS
This is a relatively cheap check but if you care about maximum
performance, say N.
+config DEBUG_CLOSURES
+ bool "Debug closures (bcache async widgits)"
+ depends on CLOSURES
+ select DEBUG_FS
+ help
+ Keeps all active closures in a linked list and provides a debugfs
+ interface to list them, which makes it possible to see asynchronous
+ operations that get stuck.
+
config DEBUG_MAPLE_TREE
bool "Debug maple trees"
depends on DEBUG_KERNEL
diff --git a/lib/Makefile b/lib/Makefile
index d0c116b706e6..1b311c7fd32b 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -255,6 +255,8 @@ obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
+obj-$(CONFIG_CLOSURES) += closure.o
+
obj-$(CONFIG_DQL) += dynamic_queue_limits.o
obj-$(CONFIG_GLOB) += glob.o
diff --git a/lib/closure.c b/lib/closure.c
new file mode 100644
index 000000000000..0855e698ced1
--- /dev/null
+++ b/lib/closure.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Asynchronous refcounty things
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include <linux/closure.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/rcupdate.h>
+#include <linux/seq_file.h>
+#include <linux/sched/debug.h>
+
+static inline void closure_put_after_sub(struct closure *cl, int flags)
+{
+ int r = flags & CLOSURE_REMAINING_MASK;
+
+ BUG_ON(flags & CLOSURE_GUARD_MASK);
+ BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
+
+ if (!r) {
+ if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
+ atomic_set(&cl->remaining,
+ CLOSURE_REMAINING_INITIALIZER);
+ closure_queue(cl);
+ } else {
+ struct closure *parent = cl->parent;
+ closure_fn *destructor = cl->fn;
+
+ closure_debug_destroy(cl);
+
+ if (destructor)
+ destructor(cl);
+
+ if (parent)
+ closure_put(parent);
+ }
+ }
+}
+
+/* For clearing flags with the same atomic op as a put */
+void closure_sub(struct closure *cl, int v)
+{
+ closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
+}
+EXPORT_SYMBOL(closure_sub);
+
+/*
+ * closure_put - decrement a closure's refcount
+ */
+void closure_put(struct closure *cl)
+{
+ closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
+}
+EXPORT_SYMBOL(closure_put);
+
+/*
+ * closure_wake_up - wake up all closures on a wait list, without memory barrier
+ */
+void __closure_wake_up(struct closure_waitlist *wait_list)
+{
+ struct llist_node *list;
+ struct closure *cl, *t;
+ struct llist_node *reverse = NULL;
+
+ list = llist_del_all(&wait_list->list);
+
+ /* We first reverse the list to preserve FIFO ordering and fairness */
+ reverse = llist_reverse_order(list);
+
+ /* Then do the wakeups */
+ llist_for_each_entry_safe(cl, t, reverse, list) {
+ closure_set_waiting(cl, 0);
+ closure_sub(cl, CLOSURE_WAITING + 1);
+ }
+}
+EXPORT_SYMBOL(__closure_wake_up);
+
+/**
+ * closure_wait - add a closure to a waitlist
+ * @waitlist: will own a ref on @cl, which will be released when
+ * closure_wake_up() is called on @waitlist.
+ * @cl: closure pointer.
+ *
+ */
+bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
+{
+ if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
+ return false;
+
+ closure_set_waiting(cl, _RET_IP_);
+ atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
+ llist_add(&cl->list, &waitlist->list);
+
+ return true;
+}
+EXPORT_SYMBOL(closure_wait);
+
+struct closure_syncer {
+ struct task_struct *task;
+ int done;
+};
+
+static void closure_sync_fn(struct closure *cl)
+{
+ struct closure_syncer *s = cl->s;
+ struct task_struct *p;
+
+ rcu_read_lock();
+ p = READ_ONCE(s->task);
+ s->done = 1;
+ wake_up_process(p);
+ rcu_read_unlock();
+}
+
+void __sched __closure_sync(struct closure *cl)
+{
+ struct closure_syncer s = { .task = current };
+
+ cl->s = &s;
+ continue_at(cl, closure_sync_fn, NULL);
+
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (s.done)
+ break;
+ schedule();
+ }
+
+ __set_current_state(TASK_RUNNING);
+}
+EXPORT_SYMBOL(__closure_sync);
+
+#ifdef CONFIG_DEBUG_CLOSURES
+
+static LIST_HEAD(closure_list);
+static DEFINE_SPINLOCK(closure_list_lock);
+
+void closure_debug_create(struct closure *cl)
+{
+ unsigned long flags;
+
+ BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
+ cl->magic = CLOSURE_MAGIC_ALIVE;
+
+ spin_lock_irqsave(&closure_list_lock, flags);
+ list_add(&cl->all, &closure_list);
+ spin_unlock_irqrestore(&closure_list_lock, flags);
+}
+EXPORT_SYMBOL(closure_debug_create);
+
+void closure_debug_destroy(struct closure *cl)
+{
+ unsigned long flags;
+
+ BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
+ cl->magic = CLOSURE_MAGIC_DEAD;
+
+ spin_lock_irqsave(&closure_list_lock, flags);
+ list_del(&cl->all);
+ spin_unlock_irqrestore(&closure_list_lock, flags);
+}
+EXPORT_SYMBOL(closure_debug_destroy);
+
+static int debug_show(struct seq_file *f, void *data)
+{
+ struct closure *cl;
+
+ spin_lock_irq(&closure_list_lock);
+
+ list_for_each_entry(cl, &closure_list, all) {
+ int r = atomic_read(&cl->remaining);
+
+ seq_printf(f, "%p: %pS -> %pS p %p r %i ",
+ cl, (void *) cl->ip, cl->fn, cl->parent,
+ r & CLOSURE_REMAINING_MASK);
+
+ seq_printf(f, "%s%s\n",
+ test_bit(WORK_STRUCT_PENDING_BIT,
+ work_data_bits(&cl->work)) ? "Q" : "",
+ r & CLOSURE_RUNNING ? "R" : "");
+
+ if (r & CLOSURE_WAITING)
+ seq_printf(f, " W %pS\n",
+ (void *) cl->waiting_on);
+
+ seq_puts(f, "\n");
+ }
+
+ spin_unlock_irq(&closure_list_lock);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(debug);
+
+static int __init closure_debug_init(void)
+{
+ debugfs_create_file("closures", 0400, NULL, NULL, &debug_fops);
+ return 0;
+}
+late_initcall(closure_debug_init)
+
+#endif
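
For readers unfamiliar with the API this file implements: a closure is a reference-counted continuation. Callers take references with closure_get(), asynchronous completions drop them with closure_put(), and closure_sync() (built on __closure_sync() above) sleeps until only the caller's own reference remains. A minimal usage sketch follows, assuming the helpers declared in include/linux/closure.h (closure_init_stack(), closure_get(), closure_put(), closure_sync()); the submit_*_io() calls are hypothetical placeholders, and the sketch is illustrative, not part of this commit.

/* Illustrative sketch only: wait for two hypothetical async operations. */
#include <linux/closure.h>

static void my_op_done(struct closure *cl)
{
	closure_put(cl);		/* drop the ref taken before submission */
}

static void run_two_ops(void)
{
	struct closure cl;

	closure_init_stack(&cl);	/* on-stack closure, one ref held by us */

	closure_get(&cl);		/* ref owned by the first async op */
	/* submit_first_io(..., my_op_done, &cl);   hypothetical */
	closure_get(&cl);		/* ref owned by the second async op */
	/* submit_second_io(..., my_op_done, &cl);  hypothetical */

	closure_sync(&cl);		/* sleep until both refs are dropped */
}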
diff --git a/lib/errname.c b/lib/errname.c
index 67739b174a8c..dd1b998552cd 100644
--- a/lib/errname.c
+++ b/lib/errname.c
@@ -228,3 +228,4 @@ const char *errname(int err)
return err > 0 ? name + 1 : name;
}
+EXPORT_SYMBOL(errname);
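
A note on the export above: errname() maps an errno value to its symbolic name and is the helper behind printk's %pe format; exporting it makes it usable from modules (bcachefs can be built modular, presumably the motivation here). A hedged sketch of typical use, illustrative and not from this series:

/* Illustrative only: printing symbolic error names from a module. */
#include <linux/err.h>
#include <linux/errname.h>
#include <linux/printk.h>

static void report_failure(int ret)
{
	/* errname(-EIO) returns "-EIO"; NULL for codes it does not know */
	pr_err("operation failed: %s (%d)\n",
	       errname(ret) ?: "(unknown error)", ret);

	/* the same information via the %pe pointer format */
	pr_err("operation failed: %pe\n", ERR_PTR(ret));
}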
diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
index f25eb111c051..41f1bcdc4488 100644
--- a/lib/generic-radix-tree.c
+++ b/lib/generic-radix-tree.c
@@ -1,4 +1,5 @@
+#include <linux/atomic.h>
#include <linux/export.h>
#include <linux/generic-radix-tree.h>
#include <linux/gfp.h>
@@ -166,6 +167,10 @@ void *__genradix_iter_peek(struct genradix_iter *iter,
struct genradix_root *r;
struct genradix_node *n;
unsigned level, i;
+
+ if (iter->offset == SIZE_MAX)
+ return NULL;
+
restart:
r = READ_ONCE(radix->root);
if (!r)
@@ -184,10 +189,17 @@ restart:
(GENRADIX_ARY - 1);
while (!n->children[i]) {
+ size_t objs_per_ptr = genradix_depth_size(level);
+
+ if (iter->offset + objs_per_ptr < iter->offset) {
+ iter->offset = SIZE_MAX;
+ iter->pos = SIZE_MAX;
+ return NULL;
+ }
+
i++;
- iter->offset = round_down(iter->offset +
- genradix_depth_size(level),
- genradix_depth_size(level));
+ iter->offset = round_down(iter->offset + objs_per_ptr,
+ objs_per_ptr);
iter->pos = (iter->offset >> PAGE_SHIFT) *
objs_per_page;
if (i == GENRADIX_ARY)
@@ -201,6 +213,64 @@ restart:
}
EXPORT_SYMBOL(__genradix_iter_peek);
+void *__genradix_iter_peek_prev(struct genradix_iter *iter,
+ struct __genradix *radix,
+ size_t objs_per_page,
+ size_t obj_size_plus_page_remainder)
+{
+ struct genradix_root *r;
+ struct genradix_node *n;
+ unsigned level, i;
+
+ if (iter->offset == SIZE_MAX)
+ return NULL;
+
+restart:
+ r = READ_ONCE(radix->root);
+ if (!r)
+ return NULL;
+
+ n = genradix_root_to_node(r);
+ level = genradix_root_to_depth(r);
+
+ if (ilog2(iter->offset) >= genradix_depth_shift(level)) {
+ iter->offset = genradix_depth_size(level);
+ iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page;
+
+ iter->offset -= obj_size_plus_page_remainder;
+ iter->pos--;
+ }
+
+ while (level) {
+ level--;
+
+ i = (iter->offset >> genradix_depth_shift(level)) &
+ (GENRADIX_ARY - 1);
+
+ while (!n->children[i]) {
+ size_t objs_per_ptr = genradix_depth_size(level);
+
+ iter->offset = round_down(iter->offset, objs_per_ptr);
+ iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page;
+
+ if (!iter->offset)
+ return NULL;
+
+ iter->offset -= obj_size_plus_page_remainder;
+ iter->pos--;
+
+ if (!i)
+ goto restart;
+ --i;
+ }
+
+ n = n->children[i];
+ }
+
+ return &n->data[iter->offset & (PAGE_SIZE - 1)];
+}
+EXPORT_SYMBOL(__genradix_iter_peek_prev);
+
static void genradix_free_recurse(struct genradix_node *n, unsigned level)
{
if (level) {
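
For context, __genradix_iter_peek_prev() is the backend for walking a generic radix tree in reverse: it mirrors __genradix_iter_peek() but rounds the iterator down and steps backwards, and both functions now stop at SIZE_MAX instead of letting the offset wrap. A rough usage sketch, assuming the wrappers from include/linux/generic-radix-tree.h (GENRADIX(), genradix_init(), genradix_ptr_alloc(), genradix_for_each(), genradix_free()); the reverse-iteration macro name genradix_for_each_reverse() is assumed from the matching header change and does not appear in this lib/ diff:

/* Illustrative only: a generic radix tree of a fixed-size type,
 * iterated forward and (with the new helper) in reverse.
 */
#include <linux/errno.h>
#include <linux/generic-radix-tree.h>
#include <linux/gfp.h>
#include <linux/printk.h>
#include <linux/types.h>

struct entry {
	u64	seq;
};

static GENRADIX(struct entry) table;		/* radix tree of struct entry */

static int fill_and_walk(void)
{
	struct genradix_iter iter;
	struct entry *e;
	size_t i;

	genradix_init(&table);

	for (i = 0; i < 1000; i++) {
		e = genradix_ptr_alloc(&table, i, GFP_KERNEL);
		if (!e)
			return -ENOMEM;
		e->seq = i;
	}

	/* forward walk, built on __genradix_iter_peek() */
	genradix_for_each(&table, iter, e)
		pr_info("fwd %llu\n", e->seq);

	/* reverse walk, built on the new __genradix_iter_peek_prev();
	 * macro name assumed from the corresponding header change */
	genradix_for_each_reverse(&table, iter, e)
		pr_info("rev %llu\n", e->seq);

	genradix_free(&table);
	return 0;
}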
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 9982344cca34..7713f73e66b0 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -31,9 +31,11 @@
* giving the size in the required units. @buf should have room for
* at least 9 bytes and will always be zero terminated.
*
+ * Return value: number of characters of output that would have been written
+ * (which may be greater than len, if output was truncated).
*/
-void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
- char *buf, int len)
+int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
+ char *buf, int len)
{
static const char *const units_10[] = {
"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"
@@ -126,8 +128,8 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
else
unit = units_str[units][i];
- snprintf(buf, len, "%u%s %s", (u32)size,
- tmp, unit);
+ return snprintf(buf, len, "%u%s %s", (u32)size,
+ tmp, unit);
}
EXPORT_SYMBOL(string_get_size);
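
The signature change gives string_get_size() an snprintf()-style return value, so callers can detect truncation. A small illustrative sketch of how a caller might use it (not taken from this series); STRING_UNITS_10 selects SI, base-10 units:

/* Illustrative only: using the new return value to detect truncation. */
#include <linux/printk.h>
#include <linux/string_helpers.h>
#include <linux/types.h>

static void show_capacity(u64 sectors)
{
	char buf[10];	/* the helper documents a 9-byte minimum */
	int n;

	/* 512-byte blocks, reported in SI units */
	n = string_get_size(sectors, 512, STRING_UNITS_10, buf, sizeof(buf));
	if (n >= (int)sizeof(buf))
		pr_warn("capacity string truncated (%d chars needed)\n", n);

	pr_info("capacity: %s\n", buf);
}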