author    Mike Kravetz <mike.kravetz@oracle.com>    2017-02-22 15:43:16 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2017-02-22 16:41:28 -0800
commit    21205bf8f77b23484966cb057ceaec860cc400b3 (patch)
tree      f7d5a23cf21e316e370ebe1a7ed704b8c9b078da /mm/userfaultfd.c
parent    87ffc118b54dcd4cc642723603d944673248152f (diff)
userfaultfd: hugetlbfs: reserve count on error in __mcopy_atomic_hugetlb
If __mcopy_atomic_hugetlb exits with an error, put_page will be called if a
huge page was allocated and needs to be freed.  If a reservation was
associated with the huge page, the PagePrivate flag will be set.  Clear
PagePrivate before calling put_page/free_huge_page so that the global
reservation count is not incremented.

Link: http://lkml.kernel.org/r/20161216144821.5183-26-aarcange@redhat.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Michael Rapoport <RAPOPORT@il.ibm.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/userfaultfd.c')
-rw-r--r--   mm/userfaultfd.c   17
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 09976745be23..31207b47ea92 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -301,8 +301,23 @@ retry:
 out_unlock:
 	up_read(&dst_mm->mmap_sem);
 out:
-	if (page)
+	if (page) {
+		/*
+		 * We encountered an error and are about to free a newly
+		 * allocated huge page. It is possible that there was a
+		 * reservation associated with the page that has been
+		 * consumed. See the routine restore_reserve_on_error
+		 * for details. Unfortunately, we can not call
+		 * restore_reserve_on_error now as it would require holding
+		 * mmap_sem. Clear the PagePrivate flag so that the global
+		 * reserve count will not be incremented in free_huge_page.
+		 * The reservation map will still indicate the reservation
+		 * was consumed and possibly prevent later page allocation.
+		 * This is better than leaking a global reservation.
+		 */
+		ClearPagePrivate(page);
 		put_page(page);
+	}
 	BUG_ON(copied < 0);
 	BUG_ON(err > 0);
 	BUG_ON(!copied && !err);
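
For context, the following is a minimal standalone C sketch of the accounting
the patch protects.  The names (resv_huge_pages, restore_reserve, alloc_hpage,
free_hpage) are hypothetical and only loosely mirror the kernel's hugetlb
code; this is an illustration of why the restore-reserve flag must be cleared
on this error path, not the kernel implementation itself.

/*
 * Simplified userspace model of hugetlb reservation accounting.
 * All identifiers here are illustrative stand-ins, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

static long resv_huge_pages;		/* models the global reservation count */

struct hpage {
	bool restore_reserve;		/* models PagePrivate on a huge page */
};

/* Models allocation consuming a reservation for the destination address. */
static struct hpage *alloc_hpage(void)
{
	static struct hpage page;

	resv_huge_pages--;		/* reservation consumed by the allocation */
	page.restore_reserve = true;	/* would be given back if the page is freed */
	return &page;
}

/* Models free_huge_page(): restores a reservation if the flag is still set. */
static void free_hpage(struct hpage *page)
{
	if (page->restore_reserve)
		resv_huge_pages++;
}

int main(void)
{
	struct hpage *page;

	resv_huge_pages = 1;		/* one huge page reserved up front */
	page = alloc_hpage();

	/*
	 * Error path: the reservation map (not modeled here) already records
	 * the reservation as consumed, so letting the free path increment the
	 * global count would leave it higher than the map says.  Clearing the
	 * flag first, as the patch does with ClearPagePrivate(), keeps the
	 * global count consistent with the map.
	 */
	page->restore_reserve = false;
	free_hpage(page);

	printf("resv_huge_pages = %ld\n", resv_huge_pages);	/* prints 0, not 1 */
	return 0;
}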