author    Jeremy Fitzhardinge <jeremy@xensource.com>    2007-07-17 18:37:05 -0700
committer Jeremy Fitzhardinge <jeremy@goop.org>         2007-07-18 08:47:43 -0700
commit    9a4029fd3409eb224eb62c32d9792071382694ec (patch)
tree      04a8f54abadf6a3c8afff3281b120108c4e33d45 /arch
parent    f4f97b3ea90130520afb478cbc2918be2b6587b8 (diff)
xen: ignore RW mapping of RO pages in pagetable_init
When setting up the initial pagetable, which includes mappings of all low
physical memory, ignore a mapping which tries to set the RW bit on an RO
pte. An RO pte indicates a page which is part of the current pagetable,
and so it cannot be allowed to become RW. Once xen_pagetable_setup_done
is called, set_pte reverts to its normal behaviour.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Acked-by: Chris Wright <chrisw@sous-sol.org>
Cc: ebiederm@xmission.com (Eric W. Biederman)
Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/xen/enlighten.c  27
1 file changed, 25 insertions(+), 2 deletions(-)
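
The rule the patch applies can be sketched outside the kernel: keep _PAGE_RW in a new pte value only if the pte already installed at that slot also had it, otherwise clear it. Below is a minimal standalone illustration; PAGE_PRESENT, PAGE_RW and mask_rw are simplified stand-ins for _PAGE_PRESENT, _PAGE_RW and the patch's mask_rw_pte(), not the kernel code itself.

#include <stdint.h>
#include <stdio.h>

#define PAGE_PRESENT 0x1ULL	/* stand-in for _PAGE_PRESENT */
#define PAGE_RW      0x2ULL	/* stand-in for _PAGE_RW */

/* Keep RW in the new value only if the existing, present pte had RW too. */
static uint64_t mask_rw(uint64_t old_pte, uint64_t new_pte)
{
	if (old_pte & PAGE_PRESENT)
		new_pte &= (old_pte & PAGE_RW) | ~PAGE_RW;
	return new_pte;
}

int main(void)
{
	/* Existing pte is present and read-only: RW is stripped from the update. */
	printf("%#llx\n", (unsigned long long)mask_rw(PAGE_PRESENT, PAGE_PRESENT | PAGE_RW));
	/* Existing pte is present and writable: the update keeps RW. */
	printf("%#llx\n", (unsigned long long)mask_rw(PAGE_PRESENT | PAGE_RW, PAGE_PRESENT | PAGE_RW));
	return 0;
}

This prints 0x1 and then 0x3: an attempted RW remap of an RO pagetable page is silently downgraded, while writable mappings pass through unchanged.
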
diff --git a/arch/i386/xen/enlighten.c b/arch/i386/xen/enlighten.c
index 86e68e680116..9550ae3b1fb1 100644
--- a/arch/i386/xen/enlighten.c
+++ b/arch/i386/xen/enlighten.c
@@ -505,7 +505,7 @@ static void xen_write_cr3(unsigned long cr3)
/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned. */
-static void xen_alloc_pt_init(struct mm_struct *mm, u32 pfn)
+static __init void xen_alloc_pt_init(struct mm_struct *mm, u32 pfn)
{
	BUG_ON(mem_map); /* should only be used early */
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
@@ -557,10 +557,32 @@ static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
}
#endif
+static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
+{
+	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
+	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
+		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
+			       pte_val_ma(pte));
+
+	return pte;
+}
+
+/* Init-time set_pte while constructing initial pagetables, which
+   doesn't allow RO pagetable pages to be remapped RW */
+static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
+{
+	pte = mask_rw_pte(ptep, pte);
+
+	xen_set_pte(ptep, pte);
+}
+
static __init void xen_pagetable_setup_start(pgd_t *base)
{
	pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base;
+	/* special set_pte for pagetable initialization */
+	paravirt_ops.set_pte = xen_set_pte_init;
+
	init_mm.pgd = base;
	/*
	 * copy top-level of Xen-supplied pagetable into place. For
@@ -607,6 +629,7 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
	/* This will work as long as patching hasn't happened yet
	   (which it hasn't) */
	paravirt_ops.alloc_pt = xen_alloc_pt;
+	paravirt_ops.set_pte = xen_set_pte;
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		/*
@@ -745,7 +768,7 @@ static const struct paravirt_ops xen_paravirt_ops __initdata = {
	.kmap_atomic_pte = xen_kmap_atomic_pte,
#endif
-	.set_pte = xen_set_pte,
+	.set_pte = NULL, /* see xen_pagetable_setup_* */
	.set_pte_at = xen_set_pte_at,
	.set_pmd = xen_set_pmd,
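
The .set_pte slot is left NULL in the ops table above because it is filled in at runtime: xen_pagetable_setup_start() points it at xen_set_pte_init(), and xen_pagetable_setup_done() switches it back to xen_set_pte(). A rough sketch of that boot-time hook swap follows, using hypothetical names rather than the real paravirt_ops structure.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pte_t;	/* simplified stand-in for the kernel's pte_t */

#define PRESENT 0x1ULL
#define RW      0x2ULL

/* A single function-pointer hook, standing in for paravirt_ops.set_pte. */
static void (*set_pte_hook)(pte_t *ptep, pte_t pte);

static void set_pte_normal(pte_t *ptep, pte_t pte)
{
	*ptep = pte;			/* normal behaviour after setup_done */
}

static void set_pte_boot(pte_t *ptep, pte_t pte)
{
	if (*ptep & PRESENT)		/* existing pte: never add RW */
		pte &= (*ptep & RW) | ~RW;
	*ptep = pte;
}

int main(void)
{
	pte_t slot = PRESENT;		/* an RO pagetable-page mapping */

	set_pte_hook = set_pte_boot;	/* as in xen_pagetable_setup_start */
	set_pte_hook(&slot, PRESENT | RW);
	printf("during setup: %#llx\n", (unsigned long long)slot);	/* stays 0x1 */

	set_pte_hook = set_pte_normal;	/* as in xen_pagetable_setup_done */
	set_pte_hook(&slot, PRESENT | RW);
	printf("after setup:  %#llx\n", (unsigned long long)slot);	/* now 0x3 */
	return 0;
}
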