| author | Adam Litke <agl@us.ibm.com> | 2005-10-29 18:16:47 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-10-29 21:40:43 -0700 |
| commit | 2e9b367c2273ed21c9852a04d90944d472c4f3e6 (patch) | |
| tree | 75e802f07a8c4f0554547e8dd795f544c7e9d7e8 /fs | |
| parent | 4c887265977213985091476be40ab11dfdcb4caf (diff) | |
[PATCH] hugetlb: overcommit accounting check
Basic overcommit checking for hugetlbfs_file_mmap(), based on an implementation
used with demand faulting in SLES9.
Since demand faulting can't guarantee the availability of pages at mmap
time, this patch implements a basic sanity check to ensure that the huge
pages required to satisfy the mmap are currently available.
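For illustration, a minimal userspace sketch of what the check means for
callers (not part of the patch; the /mnt/huge mount point and the 2 MB huge
page size are assumptions): with the check in place, a mapping that needs
more huge pages than are currently free fails up front at mmap() time with
ENOMEM instead of failing later at fault time.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

#define HPAGE_SIZE	(2UL * 1024 * 1024)	/* assumed huge page size */
#define LENGTH		(4 * HPAGE_SIZE)	/* map four huge pages */

int main(void)
{
	/* /mnt/huge is an assumed hugetlbfs mount point */
	int fd = open("/mnt/huge/testfile", O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	void *addr = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED) {
		/* With this patch, ENOMEM here means the free huge page
		 * pool cannot cover the pages this mapping would need. */
		fprintf(stderr, "mmap: %s\n", strerror(errno));
		close(fd);
		return 1;
	}

	memset(addr, 0, LENGTH);	/* touch pages: demand faults happen here */
	munmap(addr, LENGTH);
	close(fd);
	return 0;
}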
Despite the obvious race (two mappings can pass the check before either has
instantiated its pages), I think it is a good start on doing proper
accounting. I'd like to work towards an accounting system that mimics the
semantics of normal pages (especially for the MAP_PRIVATE/COW case). That
work is underway and builds on what this patch starts.
Huge page shared memory segments are simpler and still maintain their
commit-on-shmget semantics.
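By contrast, an illustrative sketch of the SysV path (not from the patch; the
SHM_HUGETLB fallback value is copied from <linux/shm.h>, and a segment of one
2 MB huge page is assumed): because the segment is committed when it is
created, a huge page shortage surfaces as ENOMEM from shmget() itself rather
than from a later mapping.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

#ifndef SHM_HUGETLB
#define SHM_HUGETLB	04000	/* from <linux/shm.h> */
#endif

#define SEG_SIZE	(2UL * 1024 * 1024)	/* one huge page, size assumed */

int main(void)
{
	int shmid = shmget(IPC_PRIVATE, SEG_SIZE,
			   SHM_HUGETLB | IPC_CREAT | 0600);
	if (shmid < 0) {
		/* Commit happens here: shmget() fails if the pool is short. */
		fprintf(stderr, "shmget: %s\n", strerror(errno));
		return 1;
	}

	char *p = shmat(shmid, NULL, 0);
	if (p != (char *)-1) {
		memset(p, 0, SEG_SIZE);	/* pages were already reserved */
		shmdt(p);
	}
	shmctl(shmid, IPC_RMID, NULL);	/* clean up the segment */
	return 0;
}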
Signed-off-by: Adam Litke <agl@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/hugetlbfs/inode.c | 63 |
1 file changed, 53 insertions, 10 deletions
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 2627efe767cf..e026c807e6b3 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -45,9 +45,58 @@ static struct backing_dev_info hugetlbfs_backing_dev_info = {
 
 int sysctl_hugetlb_shm_group;
 
+static void huge_pagevec_release(struct pagevec *pvec)
+{
+	int i;
+
+	for (i = 0; i < pagevec_count(pvec); ++i)
+		put_page(pvec->pages[i]);
+
+	pagevec_reinit(pvec);
+}
+
+/*
+ * huge_pages_needed tries to determine the number of new huge pages that
+ * will be required to fully populate this VMA.  This will be equal to
+ * the size of the VMA in huge pages minus the number of huge pages
+ * (covered by this VMA) that are found in the page cache.
+ *
+ * Result is in bytes to be compatible with is_hugepage_mem_enough()
+ */
+unsigned long
+huge_pages_needed(struct address_space *mapping, struct vm_area_struct *vma)
+{
+	int i;
+	struct pagevec pvec;
+	unsigned long start = vma->vm_start;
+	unsigned long end = vma->vm_end;
+	unsigned long hugepages = (end - start) >> HPAGE_SHIFT;
+	pgoff_t next = vma->vm_pgoff;
+	pgoff_t endpg = next + ((end - start) >> PAGE_SHIFT);
+
+	pagevec_init(&pvec, 0);
+	while (next < endpg) {
+		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
+			break;
+		for (i = 0; i < pagevec_count(&pvec); i++) {
+			struct page *page = pvec.pages[i];
+			if (page->index > next)
+				next = page->index;
+			if (page->index >= endpg)
+				break;
+			next++;
+			hugepages--;
+		}
+		huge_pagevec_release(&pvec);
+	}
+	return hugepages << HPAGE_SHIFT;
+}
+
 static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct inode *inode = file->f_dentry->d_inode;
+	struct address_space *mapping = inode->i_mapping;
+	unsigned long bytes;
 	loff_t len, vma_len;
 	int ret;
 
@@ -66,6 +115,10 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
 		return -EINVAL;
 
+	bytes = huge_pages_needed(mapping, vma);
+	if (!is_hugepage_mem_enough(bytes))
+		return -ENOMEM;
+
 	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
 
 	down(&inode->i_sem);
@@ -168,16 +221,6 @@ static int hugetlbfs_commit_write(struct file *file,
 	return -EINVAL;
 }
 
-static void huge_pagevec_release(struct pagevec *pvec)
-{
-	int i;
-
-	for (i = 0; i < pagevec_count(pvec); ++i)
-		put_page(pvec->pages[i]);
-
-	pagevec_reinit(pvec);
-}
-
 static void truncate_huge_page(struct page *page)
 {
 	clear_page_dirty(page);
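A usage note on the page-cache subtraction in huge_pages_needed() above, as an
illustrative sketch (same assumptions as before: hugetlbfs mounted at
/mnt/huge, 2 MB huge pages): once a first mapping has faulted its pages into
the file's page cache, a second mapping of the same range needs no new huge
pages, so its overcommit check should pass even when the free pool is
exhausted.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

#define LENGTH	(2UL * 1024 * 1024)	/* one assumed 2 MB huge page */

int main(void)
{
	int fd = open("/mnt/huge/shared", O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	char *a = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (a == MAP_FAILED) {
		perror("first mmap");
		return 1;
	}
	memset(a, 0, LENGTH);	/* fault the page into the file's page cache */

	/* Second mapping of the same file and offset: huge_pages_needed()
	 * finds the page in the cache and reports zero new bytes needed,
	 * so the is_hugepage_mem_enough() check passes regardless of the
	 * state of the free pool. */
	char *b = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (b == MAP_FAILED) {
		perror("second mmap");
		return 1;
	}

	munmap(b, LENGTH);
	munmap(a, LENGTH);
	close(fd);
	return 0;
}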