author	Stefano Stabellini <stefano.stabellini@eu.citrix.com>	2011-06-03 09:51:34 +0000
committer	Greg Kroah-Hartman <gregkh@suse.de>	2011-07-13 05:31:23 +0200
commit	6961b18f7cdd1357b3e31a6b97807034204220c6 (patch)
tree	15ae3a8db84a322994bf7b80c599b490233a5f32 /arch
parent	c3ce2d3992b7028a545b5dd9d25f892ef066a62f (diff)
xen: partially revert "xen: set max_pfn_mapped to the last pfn mapped"
commit a91d92875ee94e4703fd017ccaadb48cfb344994 upstream.

We only need to set max_pfn_mapped to the last pfn mapped on x86_64 to
make sure that cleanup_highmap doesn't remove important mappings at _end.

We don't need to do this on x86_32 because cleanup_highmap is not called
on x86_32. Besides, lowering max_pfn_mapped on x86_32 has the unwanted
side effect of limiting the amount of memory available for the 1:1
kernel pagetable allocation.

This patch reverts the x86_32 part of the original patch.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
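For reference, the second hunk below seeds the x86_32 max_pfn_mapped estimate from the end of the page tables handed over by Xen plus 512 KiB of slack, instead of the location of the mfn_list. A minimal user-space sketch of that arithmetic, using made-up values for pt_base and nr_pt_frames (in the kernel both come from xen_start_info, and pt_base is a virtual address that __pa() converts to a physical one):

#include <stdio.h>
#include <stdint.h>

/* Page-size constants as used on x86. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
/* PFN_DOWN rounds a physical address down to a page frame number. */
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

int main(void)
{
	/* Hypothetical values, purely for illustration. */
	uint64_t pt_base_phys = 0x01000000;	/* 16 MiB, made up */
	uint64_t nr_pt_frames = 8;		/* page-table pages provided by Xen, made up */

	/* Mirror of the new x86_32 estimate in xen_setup_kernel_pagetable():
	 * end of the Xen-provided page tables plus 512 KiB of slack,
	 * converted to a page frame number. */
	uint64_t max_pfn_mapped = PFN_DOWN(pt_base_phys +
					   nr_pt_frames * PAGE_SIZE +
					   512 * 1024);

	printf("max_pfn_mapped estimate: %llu (covers up to %#llx)\n",
	       (unsigned long long)max_pfn_mapped,
	       (unsigned long long)(max_pfn_mapped << PAGE_SHIFT));
	return 0;
}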
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/xen/mmu.c	9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 356a79904b9b..3f90a2ceff81 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1658,6 +1658,11 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
 			pte_t pte;
 
+#ifdef CONFIG_X86_32
+			if (pfn > max_pfn_mapped)
+				max_pfn_mapped = pfn;
+#endif
+
 			if (!pte_none(pte_page[pteidx]))
 				continue;
 
@@ -1770,7 +1775,9 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 {
 	pmd_t *kernel_pmd;
 
-	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
+	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
+				  xen_start_info->nr_pt_frames * PAGE_SIZE +
+				  512*1024);
 
 	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
 	memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
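
The first hunk keeps max_pfn_mapped in step with the identity mappings actually created on x86_32; conceptually it is just a running maximum over the pfns the loop touches. A rough standalone sketch of that pattern, not the kernel function itself (which also fills in the PTEs); the function name and values here are illustrative only:

#include <stdio.h>

static unsigned long max_pfn_mapped;	/* a global in the kernel as well */

static void map_identity_range(unsigned long pfn, unsigned long end_pfn)
{
	for (; pfn < end_pfn; pfn++) {
		/* ... set up the 1:1 pte for this pfn ... */

		/* Equivalent of the CONFIG_X86_32 hunk: record the highest
		 * pfn that has been mapped so far. */
		if (pfn > max_pfn_mapped)
			max_pfn_mapped = pfn;
	}
}

int main(void)
{
	max_pfn_mapped = 100;		/* made-up starting estimate */
	map_identity_range(500, 600);	/* made-up pfn range */
	printf("max_pfn_mapped = %lu\n", max_pfn_mapped);
	return 0;
}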