| author | Linus Torvalds <torvalds@linux-foundation.org> | 2022-10-07 17:04:10 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2022-10-07 17:04:10 -0700 |
| commit | e8bc52cb8df80c31c73c726ab58ea9746e9ff734 (patch) | |
| tree | db3b83d22ba1bb4003e5736648782d838a3d6f5a /fs | |
| parent | d3dcbe24a0fc6373ce7e4a65acd5c785aa8e2396 (diff) | |
| parent | fda8c908bc2d523c0770ded667dcdad29c06ff52 (diff) | |
Merge tag 'driver-core-6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core
Pull driver core updates from Greg KH:
"Here is the big set of driver core and debug printk changes for
6.1-rc1. Included in here is:
- dynamic debug updates for the core and the drm subsystem. The drm
changes have all been acked by the relevant maintainers
- kernfs fixes for syzbot reported problems
- kernfs refactors and updates for cgroup requirements
- magic number cleanups and removals from the kernel tree (they were
not being used and they really did not actually do anything)
- other tiny cleanups
All of these have been in linux-next for a while with no reported
issues"
* tag 'driver-core-6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core: (74 commits)
docs: filesystems: sysfs: Make text and code for ->show() consistent
Documentation: NBD_REQUEST_MAGIC isn't a magic number
a.out: restore CMAGIC
device property: Add const qualifier to device_get_match_data() parameter
drm_print: add _ddebug descriptor to drm_*dbg prototypes
drm_print: prefer bare printk KERN_DEBUG on generic fn
drm_print: optimize drm_debug_enabled for jump-label
drm-print: add drm_dbg_driver to improve namespace symmetry
drm-print.h: include dyndbg header
drm_print: wrap drm_*_dbg in dyndbg descriptor factory macro
drm_print: interpose drm_*dbg with forwarding macros
drm: POC drm on dyndbg - use in core, 2 helpers, 3 drivers.
drm_print: condense enum drm_debug_category
debugfs: use DEFINE_SHOW_ATTRIBUTE to define debugfs_regset32_fops
driver core: use IS_ERR_OR_NULL() helper in device_create_groups_vargs()
Documentation: ENI155_MAGIC isn't a magic number
Documentation: NBD_REPLY_MAGIC isn't a magic number
nbd: remove define-only NBD_MAGIC, previously magic number
Documentation: FW_HEADER_MAGIC isn't a magic number
Documentation: EEPROM_MAGIC_VALUE isn't a magic number
...
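One of the changes listed above, "debugfs: use DEFINE_SHOW_ATTRIBUTE to define debugfs_regset32_fops", swaps an open-coded single_open()/file_operations pair for the seq_file helper macro; the corresponding hunk appears in the fs/debugfs/file.c diff below. As a rough illustration of the conventional pattern only (the foo_show()/foo_fops names are hypothetical, not taken from this tree):

```c
#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* Hypothetical read-only attribute: emit one value through seq_file. */
static int foo_show(struct seq_file *s, void *data)
{
        seq_printf(s, "%d\n", 42);
        return 0;
}

/*
 * DEFINE_SHOW_ATTRIBUTE(foo) generates foo_open() (a single_open()
 * wrapper around foo_show()) plus a const struct file_operations
 * named foo_fops, replacing the boilerplate the patch removes.
 */
DEFINE_SHOW_ATTRIBUTE(foo);

static void foo_debugfs_init(struct dentry *parent)
{
        debugfs_create_file("foo", 0444, parent, NULL, &foo_fops);
}
```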
Diffstat (limited to 'fs')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | fs/debugfs/file.c | 16 |
| -rw-r--r-- | fs/debugfs/inode.c | 37 |
| -rw-r--r-- | fs/kernfs/dir.c | 107 |
| -rw-r--r-- | fs/kernfs/file.c | 151 |
| -rw-r--r-- | fs/kernfs/kernfs-internal.h | 1 |
5 files changed, 179 insertions, 133 deletions
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 950c63fa4d0b..ddb3fc258df9 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -1121,7 +1121,7 @@ void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
 }
 EXPORT_SYMBOL_GPL(debugfs_print_regs32);
 
-static int debugfs_show_regset32(struct seq_file *s, void *data)
+static int debugfs_regset32_show(struct seq_file *s, void *data)
 {
         struct debugfs_regset32 *regset = s->private;
 
@@ -1136,17 +1136,7 @@ static int debugfs_show_regset32(struct seq_file *s, void *data)
         return 0;
 }
 
-static int debugfs_open_regset32(struct inode *inode, struct file *file)
-{
-        return single_open(file, debugfs_show_regset32, inode->i_private);
-}
-
-static const struct file_operations fops_regset32 = {
-        .open = debugfs_open_regset32,
-        .read = seq_read,
-        .llseek = seq_lseek,
-        .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(debugfs_regset32);
 
 /**
  * debugfs_create_regset32 - create a debugfs file that returns register values
@@ -1167,7 +1157,7 @@ void debugfs_create_regset32(const char *name, umode_t mode,
                              struct dentry *parent,
                              struct debugfs_regset32 *regset)
 {
-        debugfs_create_file(name, mode, parent, regset, &fops_regset32);
+        debugfs_create_file(name, mode, parent, regset, &debugfs_regset32_fops);
 }
 EXPORT_SYMBOL_GPL(debugfs_create_regset32);
 
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 232cfdf095ae..2e8e112b1993 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -82,6 +82,8 @@ struct debugfs_mount_opts {
         kuid_t uid;
         kgid_t gid;
         umode_t mode;
+        /* Opt_* bitfield. */
+        unsigned int opts;
 };
 
 enum {
@@ -111,6 +113,7 @@ static int debugfs_parse_options(char *data, struct debugfs_mount_opts *opts)
         kgid_t gid;
         char *p;
 
+        opts->opts = 0;
         opts->mode = DEBUGFS_DEFAULT_MODE;
 
         while ((p = strsep(&data, ",")) != NULL) {
@@ -145,24 +148,44 @@ static int debugfs_parse_options(char *data, struct debugfs_mount_opts *opts)
                          * but traditionally debugfs has ignored all mount options
                          */
                 }
+
+                opts->opts |= BIT(token);
         }
 
         return 0;
 }
 
-static int debugfs_apply_options(struct super_block *sb)
+static void _debugfs_apply_options(struct super_block *sb, bool remount)
 {
         struct debugfs_fs_info *fsi = sb->s_fs_info;
         struct inode *inode = d_inode(sb->s_root);
         struct debugfs_mount_opts *opts = &fsi->mount_opts;
 
-        inode->i_mode &= ~S_IALLUGO;
-        inode->i_mode |= opts->mode;
+        /*
+         * On remount, only reset mode/uid/gid if they were provided as mount
+         * options.
+         */
 
-        inode->i_uid = opts->uid;
-        inode->i_gid = opts->gid;
+        if (!remount || opts->opts & BIT(Opt_mode)) {
+                inode->i_mode &= ~S_IALLUGO;
+                inode->i_mode |= opts->mode;
+        }
 
-        return 0;
+        if (!remount || opts->opts & BIT(Opt_uid))
+                inode->i_uid = opts->uid;
+
+        if (!remount || opts->opts & BIT(Opt_gid))
+                inode->i_gid = opts->gid;
+}
+
+static void debugfs_apply_options(struct super_block *sb)
+{
+        _debugfs_apply_options(sb, false);
+}
+
+static void debugfs_apply_options_remount(struct super_block *sb)
+{
+        _debugfs_apply_options(sb, true);
 }
 
 static int debugfs_remount(struct super_block *sb, int *flags, char *data)
@@ -175,7 +198,7 @@ static int debugfs_remount(struct super_block *sb, int *flags, char *data)
         if (err)
                 goto fail;
 
-        debugfs_apply_options(sb);
+        debugfs_apply_options_remount(sb);
 
 fail:
         return err;
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 1cc88ba6de90..3990f3e270cb 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -472,6 +472,16 @@ static void kernfs_drain(struct kernfs_node *kn)
         lockdep_assert_held_write(&root->kernfs_rwsem);
         WARN_ON_ONCE(kernfs_active(kn));
 
+        /*
+         * Skip draining if already fully drained. This avoids draining and its
+         * lockdep annotations for nodes which have never been activated
+         * allowing embedding kernfs_remove() in create error paths without
+         * worrying about draining.
+         */
+        if (atomic_read(&kn->active) == KN_DEACTIVATED_BIAS &&
+            !kernfs_should_drain_open_files(kn))
+                return;
+
         up_write(&root->kernfs_rwsem);
 
         if (kernfs_lockdep(kn)) {
@@ -480,7 +490,6 @@ static void kernfs_drain(struct kernfs_node *kn)
                 lock_contended(&kn->dep_map, _RET_IP_);
         }
 
-        /* but everyone should wait for draining */
         wait_event(root->deactivate_waitq,
                    atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);
 
@@ -489,7 +498,8 @@ static void kernfs_drain(struct kernfs_node *kn)
                 rwsem_release(&kn->dep_map, _RET_IP_);
         }
 
-        kernfs_drain_open_files(kn);
+        if (kernfs_should_drain_open_files(kn))
+                kernfs_drain_open_files(kn);
 
         down_write(&root->kernfs_rwsem);
 }
@@ -695,13 +705,7 @@ struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
                 goto err_unlock;
         }
 
-        /*
-         * ACTIVATED is protected with kernfs_mutex but it was clear when
-         * @kn was added to idr and we just wanna see it set. No need to
-         * grab kernfs_mutex.
-         */
-        if (unlikely(!(kn->flags & KERNFS_ACTIVATED) ||
-                     !atomic_inc_not_zero(&kn->count)))
+        if (unlikely(!kernfs_active(kn) || !atomic_inc_not_zero(&kn->count)))
                 goto err_unlock;
 
         spin_unlock(&kernfs_idr_lock);
@@ -743,10 +747,7 @@ int kernfs_add_one(struct kernfs_node *kn)
                 goto out_unlock;
 
         ret = -ENOENT;
-        if (parent->flags & KERNFS_EMPTY_DIR)
-                goto out_unlock;
-
-        if ((parent->flags & KERNFS_ACTIVATED) && !kernfs_active(parent))
+        if (parent->flags & (KERNFS_REMOVING | KERNFS_EMPTY_DIR))
                 goto out_unlock;
 
         kn->hash = kernfs_name_hash(kn->name, kn->ns);
@@ -1304,6 +1305,21 @@ static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
         return pos->parent;
 }
 
+static void kernfs_activate_one(struct kernfs_node *kn)
+{
+        lockdep_assert_held_write(&kernfs_root(kn)->kernfs_rwsem);
+
+        kn->flags |= KERNFS_ACTIVATED;
+
+        if (kernfs_active(kn) || (kn->flags & (KERNFS_HIDDEN | KERNFS_REMOVING)))
+                return;
+
+        WARN_ON_ONCE(kn->parent && RB_EMPTY_NODE(&kn->rb));
+        WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);
+
+        atomic_sub(KN_DEACTIVATED_BIAS, &kn->active);
+}
+
 /**
  * kernfs_activate - activate a node which started deactivated
  * @kn: kernfs_node whose subtree is to be activated
@@ -1325,15 +1341,42 @@ void kernfs_activate(struct kernfs_node *kn)
         down_write(&root->kernfs_rwsem);
 
         pos = NULL;
-        while ((pos = kernfs_next_descendant_post(pos, kn))) {
-                if (pos->flags & KERNFS_ACTIVATED)
-                        continue;
+        while ((pos = kernfs_next_descendant_post(pos, kn)))
+                kernfs_activate_one(pos);
 
-                WARN_ON_ONCE(pos->parent && RB_EMPTY_NODE(&pos->rb));
-                WARN_ON_ONCE(atomic_read(&pos->active) != KN_DEACTIVATED_BIAS);
+        up_write(&root->kernfs_rwsem);
+}
 
-                atomic_sub(KN_DEACTIVATED_BIAS, &pos->active);
-                pos->flags |= KERNFS_ACTIVATED;
+/**
+ * kernfs_show - show or hide a node
+ * @kn: kernfs_node to show or hide
+ * @show: whether to show or hide
+ *
+ * If @show is %false, @kn is marked hidden and deactivated. A hidden node is
+ * ignored in future activaitons. If %true, the mark is removed and activation
+ * state is restored. This function won't implicitly activate a new node in a
+ * %KERNFS_ROOT_CREATE_DEACTIVATED root which hasn't been activated yet.
+ *
+ * To avoid recursion complexities, directories aren't supported for now.
+ */
+void kernfs_show(struct kernfs_node *kn, bool show)
+{
+        struct kernfs_root *root = kernfs_root(kn);
+
+        if (WARN_ON_ONCE(kernfs_type(kn) == KERNFS_DIR))
+                return;
+
+        down_write(&root->kernfs_rwsem);
+
+        if (show) {
+                kn->flags &= ~KERNFS_HIDDEN;
+                if (kn->flags & KERNFS_ACTIVATED)
+                        kernfs_activate_one(kn);
+        } else {
+                kn->flags |= KERNFS_HIDDEN;
+                if (kernfs_active(kn))
+                        atomic_add(KN_DEACTIVATED_BIAS, &kn->active);
+                kernfs_drain(kn);
         }
 
         up_write(&root->kernfs_rwsem);
@@ -1358,34 +1401,27 @@ static void __kernfs_remove(struct kernfs_node *kn)
 
         pr_debug("kernfs %s: removing\n", kn->name);
 
-        /* prevent any new usage under @kn by deactivating all nodes */
+        /* prevent new usage by marking all nodes removing and deactivating */
         pos = NULL;
-        while ((pos = kernfs_next_descendant_post(pos, kn)))
+        while ((pos = kernfs_next_descendant_post(pos, kn))) {
+                pos->flags |= KERNFS_REMOVING;
                 if (kernfs_active(pos))
                         atomic_add(KN_DEACTIVATED_BIAS, &pos->active);
+        }
 
         /* deactivate and unlink the subtree node-by-node */
         do {
                 pos = kernfs_leftmost_descendant(kn);
 
                 /*
-                 * kernfs_drain() drops kernfs_rwsem temporarily and @pos's
+                 * kernfs_drain() may drop kernfs_rwsem temporarily and @pos's
                  * base ref could have been put by someone else by the time
                  * the function returns. Make sure it doesn't go away
                  * underneath us.
                  */
                 kernfs_get(pos);
 
-                /*
-                 * Drain iff @kn was activated. This avoids draining and
-                 * its lockdep annotations for nodes which have never been
-                 * activated and allows embedding kernfs_remove() in create
-                 * error paths without worrying about draining.
-                 */
-                if (kn->flags & KERNFS_ACTIVATED)
-                        kernfs_drain(pos);
-                else
-                        WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);
+                kernfs_drain(pos);
 
                 /*
                  * kernfs_unlink_sibling() succeeds once per node. Use it
@@ -1585,8 +1621,11 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
         down_write(&root->kernfs_rwsem);
 
         kn = kernfs_find_ns(parent, name, ns);
-        if (kn)
+        if (kn) {
+                kernfs_get(kn);
                 __kernfs_remove(kn);
+                kernfs_put(kn);
+        }
 
         up_write(&root->kernfs_rwsem);
 
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index b3ec34386b43..9ab6c92e02da 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -23,6 +23,8 @@ struct kernfs_open_node {
         atomic_t event;
         wait_queue_head_t poll;
         struct list_head files; /* goes through kernfs_open_file.list */
+        unsigned int nr_mmapped;
+        unsigned int nr_to_release;
 };
 
 /*
@@ -57,31 +59,17 @@ static inline struct mutex *kernfs_open_file_mutex_lock(struct kernfs_node *kn)
 }
 
 /**
- * kernfs_deref_open_node - Get kernfs_open_node corresponding to @kn.
- *
- * @of: associated kernfs_open_file instance.
- * @kn: target kernfs_node.
- *
- * Fetch and return ->attr.open of @kn if @of->list is non empty.
- * If @of->list is not empty we can safely assume that @of is on
- * @kn->attr.open->files list and this guarantees that @kn->attr.open
- * will not vanish i.e. dereferencing outside RCU read-side critical
- * section is safe here.
- *
- * The caller needs to make sure that @of->list is not empty.
+ * of_on - Return the kernfs_open_node of the specified kernfs_open_file
+ * @of: taret kernfs_open_file
  */
-static struct kernfs_open_node *
-kernfs_deref_open_node(struct kernfs_open_file *of, struct kernfs_node *kn)
+static struct kernfs_open_node *of_on(struct kernfs_open_file *of)
 {
-        struct kernfs_open_node *on;
-
-        on = rcu_dereference_check(kn->attr.open, !list_empty(&of->list));
-
-        return on;
+        return rcu_dereference_protected(of->kn->attr.open,
                                          !list_empty(&of->list));
 }
 
 /**
- * kernfs_deref_open_node_protected - Get kernfs_open_node corresponding to @kn
+ * kernfs_deref_open_node_locked - Get kernfs_open_node corresponding to @kn
  *
  * @kn: target kernfs_node.
  *
@@ -96,7 +84,7 @@ kernfs_deref_open_node(struct kernfs_open_file *of, struct kernfs_node *kn)
  * The caller needs to make sure that kernfs_open_file_mutex is held.
  */
 static struct kernfs_open_node *
-kernfs_deref_open_node_protected(struct kernfs_node *kn)
+kernfs_deref_open_node_locked(struct kernfs_node *kn)
 {
         return rcu_dereference_protected(kn->attr.open,
                                          lockdep_is_held(kernfs_open_file_mutex_ptr(kn)));
@@ -207,12 +195,8 @@ static void kernfs_seq_stop(struct seq_file *sf, void *v)
 static int kernfs_seq_show(struct seq_file *sf, void *v)
 {
         struct kernfs_open_file *of = sf->private;
-        struct kernfs_open_node *on = kernfs_deref_open_node(of, of->kn);
-
-        if (!on)
-                return -EINVAL;
 
-        of->event = atomic_read(&on->event);
+        of->event = atomic_read(&of_on(of)->event);
 
         return of->kn->attr.ops->seq_show(sf, v);
 }
@@ -235,7 +219,6 @@ static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
         struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
         ssize_t len = min_t(size_t, iov_iter_count(iter), PAGE_SIZE);
         const struct kernfs_ops *ops;
-        struct kernfs_open_node *on;
         char *buf;
 
         buf = of->prealloc_buf;
@@ -257,14 +240,7 @@ static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
                 goto out_free;
         }
 
-        on = kernfs_deref_open_node(of, of->kn);
-        if (!on) {
-                len = -EINVAL;
-                mutex_unlock(&of->mutex);
-                goto out_free;
-        }
-
-        of->event = atomic_read(&on->event);
+        of->event = atomic_read(&of_on(of)->event);
 
         ops = kernfs_ops(of->kn);
         if (ops->read)
@@ -553,6 +529,7 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
 
         rc = 0;
         of->mmapped = true;
+        of_on(of)->nr_mmapped++;
         of->vm_ops = vma->vm_ops;
         vma->vm_ops = &kernfs_vm_ops;
 out_put:
@@ -580,31 +557,30 @@ out_unlock:
 static int kernfs_get_open_node(struct kernfs_node *kn,
                                 struct kernfs_open_file *of)
 {
-        struct kernfs_open_node *on, *new_on = NULL;
-        struct mutex *mutex = NULL;
+        struct kernfs_open_node *on;
+        struct mutex *mutex;
 
         mutex = kernfs_open_file_mutex_lock(kn);
-        on = kernfs_deref_open_node_protected(kn);
+        on = kernfs_deref_open_node_locked(kn);
 
-        if (on) {
-                list_add_tail(&of->list, &on->files);
-                mutex_unlock(mutex);
-                return 0;
-        } else {
+        if (!on) {
                 /* not there, initialize a new one */
-                new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
-                if (!new_on) {
+                on = kzalloc(sizeof(*on), GFP_KERNEL);
+                if (!on) {
                         mutex_unlock(mutex);
                         return -ENOMEM;
                 }
 
-                atomic_set(&new_on->event, 1);
-                init_waitqueue_head(&new_on->poll);
-                INIT_LIST_HEAD(&new_on->files);
-                list_add_tail(&of->list, &new_on->files);
-                rcu_assign_pointer(kn->attr.open, new_on);
+                atomic_set(&on->event, 1);
+                init_waitqueue_head(&on->poll);
+                INIT_LIST_HEAD(&on->files);
+                rcu_assign_pointer(kn->attr.open, on);
         }
 
-        mutex_unlock(mutex);
+        list_add_tail(&of->list, &on->files);
+        if (kn->flags & KERNFS_HAS_RELEASE)
+                on->nr_to_release++;
+
+        mutex_unlock(mutex);
         return 0;
 }
@@ -613,6 +589,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
  *
  * @kn: target kernfs_node
  * @of: associated kernfs_open_file
+ * @open_failed: ->open() failed, cancel ->release()
  *
  * Unlink @of from list of @kn's associated open files. If list of
  * associated open files becomes empty, disassociate and free
@@ -622,21 +599,30 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
  *      None.
  */
 static void kernfs_unlink_open_file(struct kernfs_node *kn,
-                                    struct kernfs_open_file *of)
+                                    struct kernfs_open_file *of,
+                                    bool open_failed)
 {
         struct kernfs_open_node *on;
-        struct mutex *mutex = NULL;
+        struct mutex *mutex;
 
         mutex = kernfs_open_file_mutex_lock(kn);
 
-        on = kernfs_deref_open_node_protected(kn);
+        on = kernfs_deref_open_node_locked(kn);
         if (!on) {
                 mutex_unlock(mutex);
                 return;
         }
 
-        if (of)
+        if (of) {
+                if (kn->flags & KERNFS_HAS_RELEASE) {
+                        WARN_ON_ONCE(of->released == open_failed);
+                        if (open_failed)
+                                on->nr_to_release--;
+                }
+                if (of->mmapped)
+                        on->nr_mmapped--;
                 list_del(&of->list);
+        }
 
         if (list_empty(&on->files)) {
                 rcu_assign_pointer(kn->attr.open, NULL);
@@ -763,7 +749,7 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
         return 0;
 
 err_put_node:
-        kernfs_unlink_open_file(kn, of);
+        kernfs_unlink_open_file(kn, of, true);
 err_seq_release:
         seq_release(inode, file);
 err_free:
@@ -795,6 +781,7 @@ static void kernfs_release_file(struct kernfs_node *kn,
                  */
                 kn->attr.ops->release(of);
                 of->released = true;
+                of_on(of)->nr_to_release--;
         }
 }
 
@@ -802,15 +789,16 @@ static int kernfs_fop_release(struct inode *inode, struct file *filp)
 {
         struct kernfs_node *kn = inode->i_private;
         struct kernfs_open_file *of = kernfs_of(filp);
-        struct mutex *mutex = NULL;
 
         if (kn->flags & KERNFS_HAS_RELEASE) {
+                struct mutex *mutex;
+
                 mutex = kernfs_open_file_mutex_lock(kn);
                 kernfs_release_file(kn, of);
                 mutex_unlock(mutex);
         }
 
-        kernfs_unlink_open_file(kn, of);
+        kernfs_unlink_open_file(kn, of, false);
         seq_release(inode, filp);
         kfree(of->prealloc_buf);
         kfree(of);
@@ -818,28 +806,33 @@ static int kernfs_fop_release(struct inode *inode, struct file *filp)
         return 0;
 }
 
-void kernfs_drain_open_files(struct kernfs_node *kn)
+bool kernfs_should_drain_open_files(struct kernfs_node *kn)
 {
         struct kernfs_open_node *on;
-        struct kernfs_open_file *of;
-        struct mutex *mutex = NULL;
-
-        if (!(kn->flags & (KERNFS_HAS_MMAP | KERNFS_HAS_RELEASE)))
-                return;
+        bool ret;
 
         /*
-         * lockless opportunistic check is safe below because no one is adding to
-         * ->attr.open at this point of time. This check allows early bail out
-         * if ->attr.open is already NULL. kernfs_unlink_open_file makes
-         * ->attr.open NULL only while holding kernfs_open_file_mutex so below
-         * check under kernfs_open_file_mutex_ptr(kn) will ensure bailing out if
-         * ->attr.open became NULL while waiting for the mutex.
+         * @kn being deactivated guarantees that @kn->attr.open can't change
+         * beneath us making the lockless test below safe.
          */
-        if (!rcu_access_pointer(kn->attr.open))
-                return;
+        WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);
+
+        rcu_read_lock();
+        on = rcu_dereference(kn->attr.open);
+        ret = on && (on->nr_mmapped || on->nr_to_release);
+        rcu_read_unlock();
+
+        return ret;
+}
+
+void kernfs_drain_open_files(struct kernfs_node *kn)
+{
+        struct kernfs_open_node *on;
+        struct kernfs_open_file *of;
+        struct mutex *mutex;
 
         mutex = kernfs_open_file_mutex_lock(kn);
-        on = kernfs_deref_open_node_protected(kn);
+        on = kernfs_deref_open_node_locked(kn);
         if (!on) {
                 mutex_unlock(mutex);
                 return;
@@ -848,13 +841,17 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
         list_for_each_entry(of, &on->files, list) {
                 struct inode *inode = file_inode(of->file);
 
-                if (kn->flags & KERNFS_HAS_MMAP)
+                if (of->mmapped) {
                         unmap_mapping_range(inode->i_mapping, 0, 0, 1);
+                        of->mmapped = false;
+                        on->nr_mmapped--;
+                }
 
                 if (kn->flags & KERNFS_HAS_RELEASE)
                         kernfs_release_file(kn, of);
         }
 
+        WARN_ON_ONCE(on->nr_mmapped || on->nr_to_release);
         mutex_unlock(mutex);
 }
 
@@ -874,11 +871,7 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
  */
 __poll_t kernfs_generic_poll(struct kernfs_open_file *of, poll_table *wait)
 {
-        struct kernfs_node *kn = kernfs_dentry_node(of->file->f_path.dentry);
-        struct kernfs_open_node *on = kernfs_deref_open_node(of, kn);
-
-        if (!on)
-                return EPOLLERR;
+        struct kernfs_open_node *on = of_on(of);
 
         poll_wait(of->file, &on->poll, wait);
 
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index 3ae214d02d44..fc5821effd97 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -157,6 +157,7 @@ struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
  */
 extern const struct file_operations kernfs_file_fops;
 
+bool kernfs_should_drain_open_files(struct kernfs_node *kn);
 void kernfs_drain_open_files(struct kernfs_node *kn);
 
 /*
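The kernfs side of the diff above adds kernfs_show(), which lets a caller hide an existing non-directory node and later re-expose it without removing and recreating it (its consumers live outside the 'fs'-limited diffstat shown here). A minimal caller-side sketch, with a hypothetical helper name around the new kernfs_show() call:

```c
#include <linux/kernfs.h>

/*
 * Hypothetical helper: toggle visibility of an already-created,
 * non-directory kernfs node. kernfs_show(kn, false) marks the node
 * hidden, deactivates it and drains existing openers; kernfs_show(kn,
 * true) clears the mark and restores the activation state if the node
 * had already been activated.
 */
static void example_set_node_visible(struct kernfs_node *kn, bool visible)
{
        if (WARN_ON_ONCE(!kn || kernfs_type(kn) == KERNFS_DIR))
                return;

        kernfs_show(kn, visible);
}
```

kernfs_show() itself warns and bails out for directories, so the extra type check in the helper is only illustrative.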