author     Linus Torvalds <torvalds@linux-foundation.org>   2009-09-23 09:23:45 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-09-23 09:23:45 -0700
commit     1f0918d03ff4b5c94540c71ce889672abdbc2f4a (patch)
tree       ecee710444fb3405da55933501e339e10e4ac880 /drivers/lguest
parent     4266c97a3ef4604561a22212eb0eab8a3c338971 (diff)
parent     ca60a42c9be41c07ebcc2ec8c43dd1be53f147bf (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus
* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
lguest: don't force VIRTIO_F_NOTIFY_ON_EMPTY
lguest: cleanup for map_switcher()
lguest: use PGDIR_SHIFT for PAE code to allow different PAGE_OFFSET
lguest: use set_pte/set_pmd uniformly for real page table entries
lguest: move panic notifier registration to its expected place.
virtio_blk: add support for cache flush
virtio: add virtio IDs file
virtio: get rid of redundant VIRTIO_ID_9P definition
virtio: make add_buf return capacity remaining
virtio_pci: minor MSI-X cleanups
Diffstat (limited to 'drivers/lguest')
-rw-r--r--  drivers/lguest/core.c        |  5
-rw-r--r--  drivers/lguest/page_tables.c | 45
2 files changed, 21 insertions, 29 deletions
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 1e2cb846b3c9..8744d24ac6e6 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -67,12 +67,11 @@ static __init int map_switcher(void)
 	 * so we make sure they're zeroed.
 	 */
 	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
-		unsigned long addr = get_zeroed_page(GFP_KERNEL);
-		if (!addr) {
+		switcher_page[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
+		if (!switcher_page[i]) {
 			err = -ENOMEM;
 			goto free_some_pages;
 		}
-		switcher_page[i] = virt_to_page(addr);
 	}
 
 	/*
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 8aaad65c3bb5..cf94326f1b59 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -380,7 +380,7 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 		 * And we copy the flags to the shadow PMD entry.  The page
 		 * number in the shadow PMD is the page we just allocated.
 		 */
-		native_set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd)));
+		set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd)));
 	}
 
 	/*
@@ -447,7 +447,7 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 		 * we will come back here when a write does actually occur, so
 		 * we can update the Guest's _PAGE_DIRTY flag.
 		 */
-		native_set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));
+		set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));
 
 	/*
 	 * Finally, we write the Guest PTE entry back: we've set the
@@ -528,7 +528,7 @@ static void release_pmd(pmd_t *spmd)
 		/* Now we can free the page of PTEs */
 		free_page((long)ptepage);
 		/* And zero out the PMD entry so we never release it twice. */
-		native_set_pmd(spmd, __pmd(0));
+		set_pmd(spmd, __pmd(0));
 	}
 }
 
@@ -833,15 +833,15 @@ static void do_set_pte(struct lg_cpu *cpu, int idx,
 	 */
 	if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
 		check_gpte(cpu, gpte);
-		native_set_pte(spte,
-				gpte_to_spte(cpu, gpte,
+		set_pte(spte,
+			gpte_to_spte(cpu, gpte,
 				pte_flags(gpte) & _PAGE_DIRTY));
 	} else {
 		/*
 		 * Otherwise kill it and we can demand_page()
 		 * it in later.
 		 */
-		native_set_pte(spte, __pte(0));
+		set_pte(spte, __pte(0));
 	}
 #ifdef CONFIG_X86_PAE
 }
@@ -983,25 +983,22 @@ static unsigned long setup_pagetables(struct lguest *lg,
 	 */
 	for (i = j = 0; i < mapped_pages && j < PTRS_PER_PMD;
 	     i += PTRS_PER_PTE, j++) {
-		/* FIXME: native_set_pmd is overkill here. */
-		native_set_pmd(&pmd, __pmd(((unsigned long)(linear + i)
-		- mem_base) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER));
+		pmd = pfn_pmd(((unsigned long)&linear[i] - mem_base)/PAGE_SIZE,
+			      __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER));
 
 		if (copy_to_user(&pmds[j], &pmd, sizeof(pmd)) != 0)
 			return -EFAULT;
 	}
 
 	/* One PGD entry, pointing to that PMD page. */
-	set_pgd(&pgd, __pgd(((u32)pmds - mem_base) | _PAGE_PRESENT));
+	pgd = __pgd(((unsigned long)pmds - mem_base) | _PAGE_PRESENT);
 	/* Copy it in as the first PGD entry (ie. addresses 0-1G). */
 	if (copy_to_user(&pgdir[0], &pgd, sizeof(pgd)) != 0)
 		return -EFAULT;
 	/*
-	 * And the third PGD entry (ie. addresses 3G-4G).
-	 *
-	 * FIXME: This assumes that PAGE_OFFSET for the Guest is 0xC0000000.
+	 * And the other PGD entry to make the linear mapping at PAGE_OFFSET
 	 */
-	if (copy_to_user(&pgdir[3], &pgd, sizeof(pgd)) != 0)
+	if (copy_to_user(&pgdir[KERNEL_PGD_BOUNDARY], &pgd, sizeof(pgd)))
 		return -EFAULT;
 #else
 	/*
@@ -1141,15 +1138,13 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
 {
 	pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
 	pte_t regs_pte;
-	unsigned long pfn;
 
 #ifdef CONFIG_X86_PAE
 	pmd_t switcher_pmd;
 	pmd_t *pmd_table;
 
-	/* FIXME: native_set_pmd is overkill here. */
-	native_set_pmd(&switcher_pmd, pfn_pmd(__pa(switcher_pte_page) >>
-		       PAGE_SHIFT, PAGE_KERNEL_EXEC));
+	switcher_pmd = pfn_pmd(__pa(switcher_pte_page) >> PAGE_SHIFT,
+			       PAGE_KERNEL_EXEC);
 
 	/* Figure out where the pmd page is, by reading the PGD, and converting
 	 * it to a virtual address. */
@@ -1157,7 +1152,7 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
 		pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX]) << PAGE_SHIFT);
 
 	/* Now write it into the shadow page table. */
-	native_set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd);
+	set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd);
 #else
 	pgd_t switcher_pgd;
 
@@ -1179,10 +1174,8 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
 	 * page is already mapped there, we don't have to copy them out
 	 * again.
 	 */
-	pfn = __pa(cpu->regs_page) >> PAGE_SHIFT;
-	native_set_pte(&regs_pte, pfn_pte(pfn, PAGE_KERNEL));
-	native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)],
-		       regs_pte);
+	regs_pte = pfn_pte(__pa(cpu->regs_page) >> PAGE_SHIFT, PAGE_KERNEL);
+	set_pte(&switcher_pte_page[pte_index((unsigned long)pages)], regs_pte);
 }
 /*:*/
 
@@ -1209,7 +1202,7 @@ static __init void populate_switcher_pte_page(unsigned int cpu,
 
 	/* The first entries are easy: they map the Switcher code. */
 	for (i = 0; i < pages; i++) {
-		native_set_pte(&pte[i], mk_pte(switcher_page[i],
+		set_pte(&pte[i], mk_pte(switcher_page[i],
 				__pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
 	}
 
@@ -1217,14 +1210,14 @@ static __init void populate_switcher_pte_page(unsigned int cpu,
 	i = pages + cpu*2;
 
 	/* First page (Guest registers) is writable from the Guest */
-	native_set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]),
+	set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]),
 			 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)));
 
 	/*
 	 * The second page contains the "struct lguest_ro_state", and is
 	 * read-only.
 	 */
-	native_set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]),
+	set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]),
 			__pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
 }
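
The core.c hunk replaces the get_zeroed_page()/virt_to_page() pair in map_switcher() with a single alloc_page(GFP_KERNEL|__GFP_ZERO) call. A minimal sketch of the two equivalent idioms, for illustration only (the helper names below are invented and are not part of the lguest code):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Old idiom: allocate a zeroed page by virtual address, then convert. */
static struct page *zeroed_page_via_virt(void)		/* hypothetical helper */
{
	unsigned long addr = get_zeroed_page(GFP_KERNEL);

	if (!addr)
		return NULL;
	return virt_to_page(addr);
}

/* New idiom: ask for the struct page directly; __GFP_ZERO clears it. */
static struct page *zeroed_page_direct(void)		/* hypothetical helper */
{
	return alloc_page(GFP_KERNEL | __GFP_ZERO);
}

Either form yields one zeroed page; the second skips the address-to-struct-page conversion, which is why the new loop can store the result straight into switcher_page[i].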