diff options
author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2010-02-25 12:43:52 +0100
committer Thomas Gleixner <tglx@linutronix.de>    2010-02-26 10:53:39 +0100
commit    e946864ec813ebab2f42752c99b080e1e012aaf6 (patch)
tree      ad50b8cdd5424ebd94c82513c831c65c6ccd0893 /arch
parent    dcde507ee290ee07509dc81c65eaec9e20da2279 (diff)
download  lwn-e946864ec813ebab2f42752c99b080e1e012aaf6.tar.gz
          lwn-e946864ec813ebab2f42752c99b080e1e012aaf6.zip
highmem, -rt: Implement pfn and prot kmaps
iomap_32 uses kmap_atomic_prot_pfn() for its maps, but on -rt we have
to use kmap() for such mappings, so teach kmap about pfn and prot
thingies.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/highmem.h |  9
-rw-r--r--  arch/x86/mm/highmem_32.c       | 27
-rw-r--r--  arch/x86/mm/iomap_32.c         | 32
3 files changed, 25 insertions(+), 43 deletions(-)
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h index 433ae1f02e95..827965748b96 100644 --- a/arch/x86/include/asm/highmem.h +++ b/arch/x86/include/asm/highmem.h @@ -55,14 +55,17 @@ extern unsigned long highstart_pfn, highend_pfn; #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) extern void *kmap_high(struct page *page); +extern void *kmap_pfn_prot(unsigned long pfn, pgprot_t prot); extern void kunmap_high(struct page *page); void *kmap(struct page *page); +void *kmap_page_prot(struct page *page, pgprot_t prot); extern void kunmap_virt(void *ptr); extern struct page *kmap_to_page(void *ptr); void kunmap(struct page *page); void *__kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot); +void *__kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); void *__kmap_atomic(struct page *page, enum km_type type); void *__kmap_atomic_direct(struct page *page, enum km_type type); void __kunmap_atomic(void *kvaddr, enum km_type type); @@ -85,15 +88,17 @@ extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn, * on PREEMPT_RT kmap_atomic() is a wrapper that uses kmap(): */ #ifdef CONFIG_PREEMPT_RT -# define kmap_atomic_prot(page, type, prot) ({ pagefault_disable(); kmap(page); }) +# define kmap_atomic_prot(page, type, prot) ({ pagefault_disable(); kmap_pfn_prot(page_to_pfn(page), prot); }) +# define kmap_atomic_prot_pfn(pfn, type, prot) ({ pagefault_disable(); kmap_pfn_prot(pfn, prot); }) # define kmap_atomic(page, type) ({ pagefault_disable(); kmap(page); }) # define kmap_atomic_pfn(pfn, type) kmap(pfn_to_page(pfn)) -# define kunmap_atomic(kvaddr, type) do { pagefault_enable(); kunmap_virt(kvaddr); } while(0) +# define kunmap_atomic(kvaddr, type) do { kunmap_virt(kvaddr); pagefault_enable(); } while(0) # define kmap_atomic_to_page(kvaddr) kmap_to_page(kvaddr) # define kmap_atomic_direct(page, type) __kmap_atomic_direct(page, type) # define kunmap_atomic_direct(kvaddr, type) 
__kunmap_atomic(kvaddr, type) #else # define kmap_atomic_prot(page, type, prot) __kmap_atomic_prot(page, type, prot) +# define kmap_atomic_prot_pfn(pfn, type, prot) __kmap_atomic_prot_pfn(pfn, type, prot) # define kmap_atomic(page, type) __kmap_atomic(page, type) # define kmap_atomic_pfn(pfn, type) __kmap_atomic_pfn(pfn, type) # define kunmap_atomic(kvaddr, type) __kunmap_atomic(kvaddr, type) diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index dcb1899bcff7..b4eb59a59ef4 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c @@ -19,16 +19,6 @@ void kunmap(struct page *page) kunmap_high(page); } -void kunmap_virt(void *ptr) -{ - struct page *page; - - if ((unsigned long)ptr < PKMAP_ADDR(0)) - return; - page = pte_page(pkmap_page_table[PKMAP_NR((unsigned long)ptr)]); - kunmap(page); -} - struct page *kmap_to_page(void *ptr) { struct page *page; @@ -70,6 +60,23 @@ void *__kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) return (void *)vaddr; } +void *__kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) +{ + enum fixed_addresses idx; + unsigned long vaddr; + + preempt_disable(); + pagefault_disable(); + + debug_kmap_atomic(type); + idx = type + KM_TYPE_NR * smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); + arch_flush_lazy_mmu_mode(); + + return (void *)vaddr; +} + void *__kmap_atomic_direct(struct page *page, enum km_type type) { return __kmap_atomic_prot(page, type, kmap_prot); diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index 715d822334b4..38a1a6845a73 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c @@ -55,23 +55,6 @@ iomap_free(resource_size_t base, unsigned long size) } EXPORT_SYMBOL_GPL(iomap_free); -void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) -{ - enum fixed_addresses idx; - unsigned long vaddr; - - preempt_disable(); - pagefault_disable(); - - 
debug_kmap_atomic(type); - idx = type + KM_TYPE_NR * smp_processor_id(); - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); - set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); - arch_flush_lazy_mmu_mode(); - - return (void *)vaddr; -} - /* * Map 'pfn' using fixed map 'type' and protections 'prot' */ @@ -94,19 +77,6 @@ EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn); void iounmap_atomic(void *kvaddr, enum km_type type) { - unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; - enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); - - /* - * Force other mappings to Oops if they'll try to access this pte - * without first remap it. Keeping stale mappings around is a bad idea - * also, in case the page changes cacheability attributes or becomes - * a protected page in a hypervisor. - */ - if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) - kpte_clear_flush(kmap_pte-idx, vaddr); - - pagefault_enable(); - preempt_enable(); + kunmap_atomic(kvaddr, type); } EXPORT_SYMBOL_GPL(iounmap_atomic); |