author    | Arnd Bergmann <arnd@arndb.de> | 2012-01-09 17:06:36 +0000
committer | Arnd Bergmann <arnd@arndb.de> | 2012-01-09 17:06:36 +0000
commit    | 421b759b86eb8a914cbbd11f6d09a74f411762c6 (patch)
tree      | 505ca7f23987d8eaaa519a7e8506b854e2c0d030 /arch/arm/mm/fault.c
parent    | e067096c8d57d191f29d734cd5692695c95cc36e (diff)
parent    | a07613a54d700a974f3a4a657da78ef5d097315d (diff)
Merge branch 'samsung/cleanup' into next/boards
Conflicts:
arch/arm/mach-imx/mach-imx6q.c
arch/arm/mach-omap2/board-ti8168evm.c
arch/arm/mach-s3c64xx/Kconfig
arch/arm/mach-tegra/Makefile
arch/arm/mach-tegra/board-dt-tegra20.c
arch/arm/mach-tegra/common.c
Lots of relatively simple conflicts between the board
changes and stuff from the arm tree. This pulls in
the resolution from the samsung/cleanup tree, so we
don't get conflicting merges.
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/arm/mm/fault.c')
-rw-r--r-- | arch/arm/mm/fault.c | 58
1 file changed, 39 insertions(+), 19 deletions(-)
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index eb5520fc755f..bb7eac381a8e 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -220,7 +220,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 
 static int __kprobes
 __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
-		struct task_struct *tsk)
+		unsigned int flags, struct task_struct *tsk)
 {
 	struct vm_area_struct *vma;
 	int fault;
@@ -242,18 +242,7 @@ good_area:
 		goto out;
 	}
 
-	/*
-	 * If for any reason at all we couldn't handle the fault, make
-	 * sure we exit gracefully rather than endlessly redo the fault.
-	 */
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & FSR_WRITE) ? FAULT_FLAG_WRITE : 0);
-	if (unlikely(fault & VM_FAULT_ERROR))
-		return fault;
-	if (fault & VM_FAULT_MAJOR)
-		tsk->maj_flt++;
-	else
-		tsk->min_flt++;
-	return fault;
+	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
 
 check_stack:
 	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -268,6 +257,9 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
+	int write = fsr & FSR_WRITE;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+				(write ? FAULT_FLAG_WRITE : 0);
 
 	if (notify_page_fault(regs, fsr))
 		return 0;
@@ -294,6 +286,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (!down_read_trylock(&mm->mmap_sem)) {
 		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
 			goto no_context;
+retry:
 		down_read(&mm->mmap_sem);
 	} else {
 		/*
@@ -309,14 +302,41 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 #endif
 	}
 
-	fault = __do_page_fault(mm, addr, fsr, tsk);
-	up_read(&mm->mmap_sem);
+	fault = __do_page_fault(mm, addr, fsr, flags, tsk);
+
+	/* If we need to retry but a fatal signal is pending, handle the
+	 * signal first. We do not need to release the mmap_sem because
+	 * it would already be released in __lock_page_or_retry in
+	 * mm/filemap.c. */
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return 0;
+
+	/*
+	 * Major/minor page fault accounting is only done on the
+	 * initial attempt. If we go through a retry, it is extremely
+	 * likely that the page will be found in page cache at that point.
+	 */
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
-	if (fault & VM_FAULT_MAJOR)
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
-	else if (fault & VM_FAULT_MINOR)
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			tsk->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+					regs, addr);
+		} else {
+			tsk->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+					regs, addr);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+			 * of starvation. */
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			goto retry;
+		}
+	}
+
+	up_read(&mm->mmap_sem);
 
 	/*
 	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
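The hunks above convert the ARM fault handler to the mmap_sem retry protocol: the first attempt passes FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE, a VM_FAULT_RETRY result with a fatal signal pending bails out immediately, and an ordinary VM_FAULT_RETRY loops back to the retry: label exactly once with FAULT_FLAG_ALLOW_RETRY cleared so the fault cannot be retried indefinitely. The stand-alone sketch below mirrors that control flow in plain user-space C; fake_handle_fault(), the stubbed fatal_signal_pending(), and the flag values are invented for illustration and are not the kernel's implementation.

```c
/*
 * Minimal sketch of the retry flow added by this patch.  The flag and
 * result names mirror the kernel's, but the values and the fault
 * handler are made up; this is not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define FAULT_FLAG_WRITE        0x01
#define FAULT_FLAG_ALLOW_RETRY  0x02
#define FAULT_FLAG_KILLABLE     0x04

#define VM_FAULT_MAJOR          0x01
#define VM_FAULT_RETRY          0x02

/* Hypothetical stand-in for handle_mm_fault(): asks for a retry on the
 * first call, then reports a major fault. */
static unsigned int fake_handle_fault(unsigned int flags)
{
	static int calls;

	if (calls++ == 0 && (flags & FAULT_FLAG_ALLOW_RETRY))
		return VM_FAULT_RETRY;
	return VM_FAULT_MAJOR;
}

static bool fatal_signal_pending(void)
{
	return false;	/* no signal in this toy example */
}

int main(void)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	unsigned long maj_flt = 0, min_flt = 0;
	unsigned int fault;

retry:
	fault = fake_handle_fault(flags);

	/* A fatal signal takes priority over the retry: just bail out. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending())
		return 0;

	/* Account major/minor faults only on the initial attempt. */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			maj_flt++;
		else
			min_flt++;

		if (fault & VM_FAULT_RETRY) {
			/* Retry exactly once: clearing ALLOW_RETRY means the
			 * second attempt cannot ask for another retry. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			goto retry;
		}
	}

	printf("maj_flt=%lu min_flt=%lu\n", maj_flt, min_flt);
	return 0;
}
```

As in the patch, accounting runs only while FAULT_FLAG_ALLOW_RETRY is still set, matching the new comment that a retried fault is very likely to be satisfied from the page cache and should not be counted a second time.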