Patch 2.6.32.35 to 2.6.32.36
arch/x86/xen/mmu.c
blob:350a3deedf25496dc12571dd950c6aca0f740ff6 -> blob:6ec047d827b7d3b1011012dcf38f9ca77182364f
--- arch/x86/xen/mmu.c
+++ arch/x86/xen/mmu.c
@@ -1658,9 +1658,6 @@ static __init void xen_map_identity_earl
 		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
 			pte_t pte;
 
-			if (pfn > max_pfn_mapped)
-				max_pfn_mapped = pfn;
-
 			if (!pte_none(pte_page[pteidx]))
 				continue;
 
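Note on the hunk above: it drops the pte-by-pte bookkeeping from xen_map_identity_early(), which bumped max_pfn_mapped while the early identity map was being built. Instead of growing the value incrementally, the patch sets it once in xen_setup_kernel_pagetable(), as the following hunks do.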
@@ -1704,6 +1701,12 @@ __init pgd_t *xen_setup_kernel_pagetable
 	pud_t *l3;
 	pmd_t *l2;
 
+	/* max_pfn_mapped is the last pfn mapped in the initial memory
+	 * mappings. Considering that on Xen after the kernel mappings we
+	 * have the mappings of some pages that don't exist in pfn space, we
+	 * set max_pfn_mapped to the last real pfn mapped. */
+	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
+
 	/* Zap identity mapping */
 	init_level4_pgt[0] = __pgd(0);
 
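Note on the hunk above: per the comment it adds, the tail of the initial mappings contains pages with no backing pfn, so the pfn backing the start of xen_start_info->mfn_list (the pfn-to-mfn table) is taken as a conservative bound for the last real pfn mapped. The next hunk applies the same value in the 32-bit variant of xen_setup_kernel_pagetable().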
@@ -1767,9 +1770,7 @@ __init pgd_t *xen_setup_kernel_pagetable
 {
 	pmd_t *kernel_pmd;
 
-	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
-				  xen_start_info->nr_pt_frames * PAGE_SIZE +
-				  512*1024);
+	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
 
 	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
 	memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
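
The replaced value was a heuristic: the end of the initial page tables (pt_base plus nr_pt_frames pages) padded by 512 KiB, which can overshoot into mappings with no backing pfn. Below is a minimal userspace sketch of the two computations; PAGE_OFFSET, pa() and the sample addresses are made-up stand-ins for the real kernel macros and Xen start-of-day layout, not the actual values.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PAGE_OFFSET	0xc0000000UL	/* stand-in for the direct-map base */

/* Stand-in for the kernel's __pa(): direct-map virtual -> physical. */
static unsigned long pa(unsigned long vaddr)
{
	return vaddr - PAGE_OFFSET;
}

int main(void)
{
	/* Hypothetical start-of-day layout (all values invented). */
	unsigned long mfn_list     = PAGE_OFFSET + 0x02000000UL; /* pfn->mfn table     */
	unsigned long pt_base      = PAGE_OFFSET + 0x02400000UL; /* initial pagetables */
	unsigned long nr_pt_frames = 8;

	/* Old bound: end of the initial pagetables plus 512 KiB of slack. */
	unsigned long old_bound = PFN_DOWN(pa(pt_base) +
					   nr_pt_frames * PAGE_SIZE + 512 * 1024);

	/* New bound: the pfn backing the start of the mfn list. */
	unsigned long new_bound = PFN_DOWN(pa(mfn_list));

	printf("old max_pfn_mapped = %#lx\n", old_bound); /* 0x2488 */
	printf("new max_pfn_mapped = %#lx\n", new_bound); /* 0x2000 */
	return 0;
}

In this made-up layout the old heuristic lands past the pfn-to-mfn table, inside mappings that need not correspond to real guest pfns; the new bound stops at the table itself.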