-rw-r--r--  fs/exec.c                  10
-rw-r--r--  include/asm-generic/tlb.h  51
-rw-r--r--  include/linux/mm.h          2
-rw-r--r--  include/linux/pagevec.h     2
-rw-r--r--  mm/bounce.c                 4
-rw-r--r--  mm/memory.c                27
-rw-r--r--  mm/mmap.c                  26
7 files changed, 64 insertions, 58 deletions
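This patch converts the mmu_gather from per-CPU storage handed out by tlb_gather_mmu() to a structure the caller provides, typically on its own stack, so a TLB shootdown section no longer pins the task to one CPU. A minimal sketch of the new calling convention, using only identifiers the hunks below introduce (assumes mm, start and end are in scope, as in the converted callers):

        /*
         * Sketch: tearing down a range with the reworked API.
         * The gather is caller-owned storage rather than a
         * get_cpu_var(mmu_gathers) reference, so the code between
         * gather and finish may be preempted or reschedule.
         */
        struct mmu_gather tlb;

        lru_add_drain();
        tlb_gather_mmu(&tlb, mm, 0);            /* initialize caller storage */
        /* ... unmap_vmas(&tlb, ...), free_pgtables(&tlb, ...) ... */
        tlb_finish_mmu(&tlb, start, end);       /* flush and free batch page */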
diff --git a/fs/exec.c b/fs/exec.c
index 8d5f4bd0d6e8..ddaebef9879d 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -502,7 +502,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
unsigned long length = old_end - old_start;
unsigned long new_start = old_start - shift;
unsigned long new_end = old_end - shift;
- struct mmu_gather *tlb;
+ struct mmu_gather tlb;
BUG_ON(new_start > new_end);
@@ -527,12 +527,12 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
return -ENOMEM;
lru_add_drain();
- tlb = tlb_gather_mmu(mm, 0);
+ tlb_gather_mmu(&tlb, mm, 0);
if (new_end > old_start) {
/*
* when the old and new regions overlap clear from new_end.
*/
- free_pgd_range(tlb, new_end, old_end, new_end,
+ free_pgd_range(&tlb, new_end, old_end, new_end,
vma->vm_next ? vma->vm_next->vm_start : 0);
} else {
/*
@@ -541,10 +541,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
* have constraints on va-space that make this illegal (IA64) -
* for the others its just a little faster.
*/
- free_pgd_range(tlb, old_start, old_end, new_end,
+ free_pgd_range(&tlb, old_start, old_end, new_end,
vma->vm_next ? vma->vm_next->vm_start : 0);
}
- tlb_finish_mmu(tlb, new_end, old_end);
+ tlb_finish_mmu(&tlb, new_end, old_end);
/*
* shrink the vma to just the new range.
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index e43f9766259f..30f998d8c5c9 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -22,14 +22,8 @@
* and page free order so much..
*/
#ifdef CONFIG_SMP
- #ifdef ARCH_FREE_PTR_NR
- #define FREE_PTR_NR ARCH_FREE_PTR_NR
- #else
- #define FREE_PTE_NR 506
- #endif
#define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
#else
- #define FREE_PTE_NR 1
#define tlb_fast_mode(tlb) 1
#endif
@@ -39,30 +33,48 @@
struct mmu_gather {
struct mm_struct *mm;
unsigned int nr; /* set to ~0U means fast mode */
+ unsigned int max; /* nr < max */
unsigned int need_flush;/* Really unmapped some ptes? */
unsigned int fullmm; /* non-zero means full mm flush */
- struct page * pages[FREE_PTE_NR];
+#ifdef HAVE_ARCH_MMU_GATHER
+ struct arch_mmu_gather arch;
+#endif
+ struct page ** pages;
+ struct page * local[8];
};
-/* Users of the generic TLB shootdown code must declare this storage space. */
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
+static inline void __tlb_alloc_pages(struct mmu_gather *tlb)
+{
+ unsigned long addr = __get_free_pages(GFP_ATOMIC, 0);
+
+ if (addr) {
+ tlb->pages = (void *)addr;
+ tlb->max = PAGE_SIZE / sizeof(struct page *);
+ }
+}
/* tlb_gather_mmu
* Return a pointer to an initialized struct mmu_gather.
*/
-static inline struct mmu_gather *
-tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+static inline void
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
{
- struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
-
tlb->mm = mm;
- /* Use fast mode if only one CPU is online */
- tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;
+ tlb->max = ARRAY_SIZE(tlb->local);
+ tlb->pages = tlb->local;
+
+ if (num_online_cpus() > 1) {
+ tlb->nr = 0;
+ __tlb_alloc_pages(tlb);
+ } else /* Use fast mode if only one CPU is online */
+ tlb->nr = ~0U;
tlb->fullmm = full_mm_flush;
- return tlb;
+#ifdef HAVE_ARCH_MMU_GATHER
+ tlb->arch = ARCH_MMU_GATHER_INIT;
+#endif
}
static inline void
@@ -75,6 +87,8 @@ tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
if (!tlb_fast_mode(tlb)) {
free_pages_and_swap_cache(tlb->pages, tlb->nr);
tlb->nr = 0;
+ if (tlb->pages == tlb->local)
+ __tlb_alloc_pages(tlb);
}
}
@@ -90,7 +104,8 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
/* keep the page table cache within bounds */
check_pgt_cache();
- put_cpu_var(mmu_gathers);
+ if (tlb->pages != tlb->local)
+ free_pages((unsigned long)tlb->pages, 0);
}
/* tlb_remove_page
@@ -106,7 +121,7 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
return;
}
tlb->pages[tlb->nr++] = page;
- if (tlb->nr >= FREE_PTE_NR)
+ if (tlb->nr >= tlb->max)
tlb_flush_mmu(tlb, 0, 0);
}
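In place of the fixed FREE_PTE_NR-sized per-CPU array, the gather now starts out on the small local[] batch and opportunistically upgrades to a full page of page pointers; if the GFP_ATOMIC allocation fails, tlb_remove_page() simply reaches tlb->max sooner and flushes more often, so there is no error path. tlb_flush_mmu() retries the upgrade each time it drains a gather still on the local array, and tlb_finish_mmu() frees the extra page if one was obtained. A worked capacity sketch, assuming 4 KiB pages and 64-bit pointers (figures not stated in the diff itself):

        /*
         * Fallback batch:  ARRAY_SIZE(tlb->local) = 8 entries.
         * After a successful __tlb_alloc_pages():
         *      tlb->max = PAGE_SIZE / sizeof(struct page *)
         *               = 4096 / 8 = 512 entries,
         * comparable to the old FREE_PTE_NR of 506, which sized the
         * per-CPU struct to roughly one page.
         */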
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ba3a7cb1eaa0..e52dfbbca053 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -759,7 +759,7 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *);
-unsigned long unmap_vmas(struct mmu_gather **tlb,
+unsigned long unmap_vmas(struct mmu_gather *tlb,
struct vm_area_struct *start_vma, unsigned long start_addr,
unsigned long end_addr, unsigned long *nr_accounted,
struct zap_details *);
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index bab82f4c571c..0af5218b93a8 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -9,7 +9,7 @@
#define _LINUX_PAGEVEC_H
/* 14 pointers + two long's align the pagevec structure to a power of two */
-#define PAGEVEC_SIZE 14
+#define PAGEVEC_SIZE 8
struct page;
struct address_space;
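Shrinking PAGEVEC_SIZE from 14 to 8 bounds the number of pages handled per pagevec drain, presumably in keeping with the latency focus visible elsewhere in this series. Note that the context comment above is left stale by this hunk: 14 pointers plus two unsigned longs is 16 words, a power of two, whereas 8 plus 2 is 10.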
diff --git a/mm/bounce.c b/mm/bounce.c
index a2b76a588e34..2fd099cb47ff 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -49,11 +49,11 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
unsigned long flags;
unsigned char *vto;
- local_irq_save(flags);
+ local_irq_save_nort(flags);
vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ);
memcpy(vto + to->bv_offset, vfrom, to->bv_len);
kunmap_atomic(vto, KM_BOUNCE_READ);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
}
#else /* CONFIG_HIGHMEM */
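The local_irq_save_nort()/local_irq_restore_nort() pair comes from the -rt tree: on a non-RT kernel they compile to the plain IRQ-disabling versions, while under CONFIG_PREEMPT_RT they skip hard interrupt disabling around the atomic kmap. A sketch of the usual -rt definitions (taken from the RT patchset, not from this diff, so the exact spelling is an assumption):

        #ifdef CONFIG_PREEMPT_RT
        # define local_irq_save_nort(flags)     local_save_flags(flags)
        # define local_irq_restore_nort(flags)  (void)(flags)
        #else
        # define local_irq_save_nort(flags)     local_irq_save(flags)
        # define local_irq_restore_nort(flags)  local_irq_restore(flags)
        #endif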
diff --git a/mm/memory.c b/mm/memory.c
index a6fb32fc67dc..c39396955651 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -959,17 +959,14 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
* ensure that any thus-far unmapped pages are flushed before unmap_vmas()
* drops the lock and schedules.
*/
-unsigned long unmap_vmas(struct mmu_gather **tlbp,
+unsigned long unmap_vmas(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start_addr,
unsigned long end_addr, unsigned long *nr_accounted,
struct zap_details *details)
{
long zap_work = ZAP_BLOCK_SIZE;
- unsigned long tlb_start = 0; /* For tlb_finish_mmu */
- int tlb_start_valid = 0;
unsigned long start = start_addr;
spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
- int fullmm = (*tlbp)->fullmm;
struct mm_struct *mm = vma->vm_mm;
mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
@@ -990,11 +987,6 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
untrack_pfn_vma(vma, 0, 0);
while (start != end) {
- if (!tlb_start_valid) {
- tlb_start = start;
- tlb_start_valid = 1;
- }
-
if (unlikely(is_vm_hugetlb_page(vma))) {
/*
* It is undesirable to test vma->vm_file as it
@@ -1015,7 +1007,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
start = end;
} else
- start = unmap_page_range(*tlbp, vma,
+ start = unmap_page_range(tlb, vma,
start, end, &zap_work, details);
if (zap_work > 0) {
@@ -1023,19 +1015,13 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
break;
}
- tlb_finish_mmu(*tlbp, tlb_start, start);
-
if (need_resched() ||
(i_mmap_lock && spin_needbreak(i_mmap_lock))) {
- if (i_mmap_lock) {
- *tlbp = NULL;
+ if (i_mmap_lock)
goto out;
- }
cond_resched();
}
- *tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
- tlb_start_valid = 0;
zap_work = ZAP_BLOCK_SIZE;
}
}
@@ -1055,16 +1041,15 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *details)
{
struct mm_struct *mm = vma->vm_mm;
- struct mmu_gather *tlb;
+ struct mmu_gather tlb;
unsigned long end = address + size;
unsigned long nr_accounted = 0;
lru_add_drain();
- tlb = tlb_gather_mmu(mm, 0);
+ tlb_gather_mmu(&tlb, mm, 0);
update_hiwater_rss(mm);
end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
- if (tlb)
- tlb_finish_mmu(tlb, address, end);
+ tlb_finish_mmu(&tlb, address, end);
return end;
}
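With the gather in caller-owned storage, unmap_vmas() no longer needs to finish the gather before every potential reschedule and restart it afterwards, and the tlb_start/tlb_start_valid bookkeeping disappears with it; zap_page_range() likewise loses its NULL check before tlb_finish_mmu(). The main loop reduces to the shape below (a consolidated view of the hunks above; the hugetlb branch and an elided check are abbreviated):

        while (start != end) {
                /* ... is_vm_hugetlb_page() special case ... */
                start = unmap_page_range(tlb, vma, start, end,
                                         &zap_work, details);
                if (zap_work > 0) {
                        /* ... */
                        break;
                }
                if (need_resched() ||
                    (i_mmap_lock && spin_needbreak(i_mmap_lock))) {
                        if (i_mmap_lock)
                                goto out;
                        cond_resched();         /* gather stays live */
                }
                zap_work = ZAP_BLOCK_SIZE;
        }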
diff --git a/mm/mmap.c b/mm/mmap.c
index 34579b23ebd5..c7c61d04aac3 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1775,17 +1775,17 @@ static void unmap_region(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
- struct mmu_gather *tlb;
+ struct mmu_gather tlb;
unsigned long nr_accounted = 0;
lru_add_drain();
- tlb = tlb_gather_mmu(mm, 0);
+ tlb_gather_mmu(&tlb, mm, 0);
update_hiwater_rss(mm);
unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
vm_unacct_memory(nr_accounted);
- free_pgtables(tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
+ free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
next? next->vm_start: 0);
- tlb_finish_mmu(tlb, start, end);
+ tlb_finish_mmu(&tlb, start, end);
}
/*
@@ -1967,10 +1967,16 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
static inline void verify_mm_writelocked(struct mm_struct *mm)
{
#ifdef CONFIG_DEBUG_VM
- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
+# ifdef CONFIG_PREEMPT_RT
+ if (unlikely(!rwsem_is_locked(&mm->mmap_sem))) {
WARN_ON(1);
- up_read(&mm->mmap_sem);
}
+# else
+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
+ WARN_ON(1);
+ up_read(&mm->mmap_sem);
+ }
+# endif
#endif
}
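The CONFIG_PREEMPT_RT branch of verify_mm_writelocked() replaces the down_read_trylock() trick, which relies on mainline rwsem semantics, with a plain rwsem_is_locked() test. This is a weaker assertion, since it cannot distinguish a read-locked from a write-locked mmap_sem, but it avoids taking the lock on RT's sleeping-lock substitution for rwsems.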
@@ -2084,7 +2090,7 @@ EXPORT_SYMBOL(do_brk);
/* Release all mmaps. */
void exit_mmap(struct mm_struct *mm)
{
- struct mmu_gather *tlb;
+ struct mmu_gather tlb;
struct vm_area_struct *vma;
unsigned long nr_accounted = 0;
unsigned long end;
@@ -2109,13 +2115,13 @@ void exit_mmap(struct mm_struct *mm)
lru_add_drain();
flush_cache_mm(mm);
- tlb = tlb_gather_mmu(mm, 1);
+ tlb_gather_mmu(&tlb, mm, 1);
/* update_hiwater_rss(mm) here? but nobody should be looking */
/* Use -1 here to ensure all VMAs in the mm are unmapped */
end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
vm_unacct_memory(nr_accounted);
- free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
- tlb_finish_mmu(tlb, 0, end);
+ free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
+ tlb_finish_mmu(&tlb, 0, end);
/*
* Walk the list again, actually closing and freeing it,