author | Jason Gunthorpe <jgg@mellanox.com> | 2019-08-06 20:15:38 -0300
committer | Jason Gunthorpe <jgg@mellanox.com> | 2019-08-16 11:44:48 -0300
commit | 56c57103db17db9ecdad0507a3f0e3eea747fabe (patch)
tree | 7615cdd6e0936d1739b42983f3ec19b81fac1ca0 /mm/mmu_notifier.c
parent | 9c240a7bb33787d4fed56508425ae0b2936b1674 (diff)
mm/mmu_notifiers: hoist do_mmu_notifier_register down_write to the caller
This simplifies the code by removing the one-line wrapper functions and the
extra take_mmap_sem logic. __mmu_notifier_register() becomes the entry point
that actually registers the notifier, and mmu_notifier_register() simply takes
the mmap_sem and calls it.
Also add a lockdep_assert_held_write() to check that callers of
__mmu_notifier_register() hold the mmap_sem in write mode as expected.
Link: https://lore.kernel.org/r/20190806231548.25242-2-jgg@ziepe.ca
Suggested-by: Christoph Hellwig <hch@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Tested-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Diffstat (limited to 'mm/mmu_notifier.c')
-rw-r--r-- | mm/mmu_notifier.c | 35
1 file changed, 14 insertions(+), 21 deletions(-)
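The net effect described in the commit message is that the
do_mmu_notifier_register()/take_mmap_sem indirection disappears and the two
exported entry points differ only in which side takes the mmap_sem. A
condensed sketch of the post-patch structure (not the full code: the
mmu_notifier_mm allocation, mm_take_all_locks() call and error paths are
elided here), before the full diff below:

```c
#include <linux/mmu_notifier.h>
#include <linux/mm.h>

/* Caller must already hold mmap_sem for write; lockdep now checks this. */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	lockdep_assert_held_write(&mm->mmap_sem);
	/* ... allocate mmu_notifier_mm, mm_take_all_locks(), link mn ... */
	return 0;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* Convenience entry point: takes the mmap_sem itself, then delegates. */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	int ret;

	down_write(&mm->mmap_sem);
	ret = __mmu_notifier_register(mn, mm);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
```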
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index b5670620aea0..218a6f108bc2 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -236,22 +236,22 @@ void __mmu_notifier_invalidate_range(struct mm_struct *mm,
 }
 EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);
 
-static int do_mmu_notifier_register(struct mmu_notifier *mn,
-				    struct mm_struct *mm,
-				    int take_mmap_sem)
+/*
+ * Same as mmu_notifier_register but here the caller must hold the
+ * mmap_sem in write mode.
+ */
+int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
 {
 	struct mmu_notifier_mm *mmu_notifier_mm;
 	int ret;
 
+	lockdep_assert_held_write(&mm->mmap_sem);
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 
-	ret = -ENOMEM;
 	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
 	if (unlikely(!mmu_notifier_mm))
-		goto out;
+		return -ENOMEM;
 
-	if (take_mmap_sem)
-		down_write(&mm->mmap_sem);
 	ret = mm_take_all_locks(mm);
 	if (unlikely(ret))
 		goto out_clean;
@@ -279,13 +279,11 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 	mm_drop_all_locks(mm);
 out_clean:
-	if (take_mmap_sem)
-		up_write(&mm->mmap_sem);
 	kfree(mmu_notifier_mm);
-out:
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(__mmu_notifier_register);
 
 /*
  * Must not hold mmap_sem nor any other VM related lock when calling
@@ -302,19 +300,14 @@ out:
  */
 int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
 {
-	return do_mmu_notifier_register(mn, mm, 1);
-}
-EXPORT_SYMBOL_GPL(mmu_notifier_register);
+	int ret;
 
-/*
- * Same as mmu_notifier_register but here the caller must hold the
- * mmap_sem in write mode.
- */
-int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
-{
-	return do_mmu_notifier_register(mn, mm, 0);
+	down_write(&mm->mmap_sem);
+	ret = __mmu_notifier_register(mn, mm);
+	up_write(&mm->mmap_sem);
+	return ret;
 }
-EXPORT_SYMBOL_GPL(__mmu_notifier_register);
+EXPORT_SYMBOL_GPL(mmu_notifier_register);
 
 /* this is called after the last mmu_notifier_unregister() returned */
 void __mmu_notifier_mm_destroy(struct mm_struct *mm)
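For context, a hedged sketch of how a hypothetical notifier user might pick
between the two entry points after this patch; my_release, my_notifier_ops,
my_attach and my_attach_locked are illustrative names only and are not part of
the patch:

```c
#include <linux/mmu_notifier.h>
#include <linux/mm.h>

/* Hypothetical callback: invoked when the address space is torn down. */
static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	/* drop any device mappings that reference this mm */
}

static const struct mmu_notifier_ops my_notifier_ops = {
	.release = my_release,
};

/* Path 1: caller holds no VM locks, so use the wrapper that takes mmap_sem. */
static int my_attach(struct mmu_notifier *mn, struct mm_struct *mm)
{
	mn->ops = &my_notifier_ops;
	return mmu_notifier_register(mn, mm);
}

/*
 * Path 2: caller already holds mmap_sem for write (e.g. inside an mmap
 * handler); __mmu_notifier_register() now lockdep-asserts that.
 */
static int my_attach_locked(struct mmu_notifier *mn, struct mm_struct *mm)
{
	mn->ops = &my_notifier_ops;
	return __mmu_notifier_register(mn, mm);
}
```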