author     Baoquan He <bhe@redhat.com>                      2018-03-02 14:49:00 +0900
committer  Masaki Tachibana <mas-tachibana@vf.jp.nec.com>   2018-06-15 21:32:14 +0900
commit     acab2a2630f45c2733abf67235b3f72b4b3849f5 (patch)
tree       8538518bf2e13cb8e5b5ce83186fe9e33f71e00f /arch
parent     342fdab5b258d5c82aec392b44549883f8d44c42 (diff)
[PATCH 1/4] arch/x86_64: Clean up the address translation of the 4-level page tables
From: Dou Liyang <douly.fnst@cn.fujitsu.com>

Due to changes in the kernel's 4-level page table implementation, makedumpfile was left with some redundant macros. They make the address translation unclear and hard to extend to support 5-level page tables.

Remove the PML4* and PGDIR_* macros and unify the macros used to get the PGD index.

Signed-off-by: Dou Liyang <douly.fnst@cn.fujitsu.com>
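For orientation, below is a minimal, self-contained sketch (not taken from makedumpfile itself) of the architectural x86_64 4-level paging indices that the unified pgd_index()/pud_index()/pmd_index()/pte_index() naming in this patch corresponds to. The shift values and the 512-entry table size are fixed by the hardware; the other macro names and the example address are illustrative only, and the actual definitions in makedumpfile may differ.

#include <stdio.h>

/*
 * Illustrative sketch of x86_64 4-level paging with 4KB pages.
 * A 48-bit virtual address is split into four 9-bit table indices
 * plus a 12-bit page offset.
 */
#define PGDIR_SHIFT     39      /* bits 47..39 select the PGD entry (formerly "PML4") */
#define PUD_SHIFT       30      /* bits 38..30 select the PUD entry */
#define PMD_SHIFT       21      /* bits 29..21 select the PMD entry */
#define PAGE_SHIFT      12      /* bits 20..12 select the PTE; bits 11..0 are the offset */
#define PTRS_PER_TABLE  512     /* each table holds 512 8-byte entries */

#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_TABLE - 1))
#define pud_index(addr) (((addr) >> PUD_SHIFT)   & (PTRS_PER_TABLE - 1))
#define pmd_index(addr) (((addr) >> PMD_SHIFT)   & (PTRS_PER_TABLE - 1))
#define pte_index(addr) (((addr) >> PAGE_SHIFT)  & (PTRS_PER_TABLE - 1))

int main(void)
{
	unsigned long vaddr = 0xffff888012345678UL;	/* arbitrary example address */

	printf("PGD %lu  PUD %lu  PMD %lu  PTE %lu  offset %lu\n",
	       pgd_index(vaddr), pud_index(vaddr),
	       pmd_index(vaddr), pte_index(vaddr),
	       vaddr & ((1UL << PAGE_SHIFT) - 1));
	return 0;
}

With the four levels named uniformly like this, later support for 5-level page tables can presumably be added by inserting one more level between PGD and PUD (called p4d in the kernel) instead of renaming PML4-specific macros throughout the walk.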
Diffstat (limited to 'arch')
-rw-r--r--   arch/x86_64.c   59
1 file changed, 36 insertions(+), 23 deletions(-)
diff --git a/arch/x86_64.c b/arch/x86_64.c
index 1f24415..cbe45c2 100644
--- a/arch/x86_64.c
+++ b/arch/x86_64.c
@@ -257,7 +257,7 @@ get_versiondep_info_x86_64(void)
unsigned long long
__vtop4_x86_64(unsigned long vaddr, unsigned long pagetable)
{
- unsigned long page_dir, pml4, pgd_paddr, pgd_pte, pmd_paddr, pmd_pte;
+ unsigned long page_dir, pgd, pud_paddr, pud_pte, pmd_paddr, pmd_pte;
unsigned long pte_paddr, pte;
/*
@@ -269,43 +269,43 @@ __vtop4_x86_64(unsigned long vaddr, unsigned long pagetable)
if (page_dir == NOT_PADDR)
return NOT_PADDR;
}
- page_dir += pml4_index(vaddr) * sizeof(unsigned long);
- if (!readmem(PADDR, page_dir, &pml4, sizeof pml4)) {
- ERRMSG("Can't get pml4 (page_dir:%lx).\n", page_dir);
+ page_dir += pgd_index(vaddr) * sizeof(unsigned long);
+ if (!readmem(PADDR, page_dir, &pgd, sizeof pgd)) {
+ ERRMSG("Can't get pgd (page_dir:%lx).\n", page_dir);
return NOT_PADDR;
}
if (info->vaddr_for_vtop == vaddr)
- MSG(" PGD : %16lx => %16lx\n", page_dir, pml4);
+ MSG(" PGD : %16lx => %16lx\n", page_dir, pgd);
- if (!(pml4 & _PAGE_PRESENT)) {
- ERRMSG("Can't get a valid pml4.\n");
+ if (!(pgd & _PAGE_PRESENT)) {
+ ERRMSG("Can't get a valid pgd.\n");
return NOT_PADDR;
}
/*
* Get PUD.
*/
- pgd_paddr = pml4 & ENTRY_MASK;
- pgd_paddr += pgd_index(vaddr) * sizeof(unsigned long);
- if (!readmem(PADDR, pgd_paddr, &pgd_pte, sizeof pgd_pte)) {
- ERRMSG("Can't get pgd_pte (pgd_paddr:%lx).\n", pgd_paddr);
+ pud_paddr = pgd & ENTRY_MASK;
+ pud_paddr += pud_index(vaddr) * sizeof(unsigned long);
+ if (!readmem(PADDR, pud_paddr, &pud_pte, sizeof pud_pte)) {
+ ERRMSG("Can't get pud_pte (pud_paddr:%lx).\n", pud_paddr);
return NOT_PADDR;
}
if (info->vaddr_for_vtop == vaddr)
- MSG(" PUD : %16lx => %16lx\n", pgd_paddr, pgd_pte);
+ MSG(" PUD : %16lx => %16lx\n", pud_paddr, pud_pte);
- if (!(pgd_pte & _PAGE_PRESENT)) {
- ERRMSG("Can't get a valid pgd_pte.\n");
+ if (!(pud_pte & _PAGE_PRESENT)) {
+ ERRMSG("Can't get a valid pud_pte.\n");
return NOT_PADDR;
}
- if (pgd_pte & _PAGE_PSE) /* 1GB pages */
- return (pgd_pte & ENTRY_MASK & PGDIR_MASK) +
- (vaddr & ~PGDIR_MASK);
+ if (pud_pte & _PAGE_PSE) /* 1GB pages */
+ return (pud_pte & ENTRY_MASK & PUD_MASK) +
+ (vaddr & ~PUD_MASK);
/*
* Get PMD.
*/
- pmd_paddr = pgd_pte & ENTRY_MASK;
+ pmd_paddr = pud_pte & ENTRY_MASK;
pmd_paddr += pmd_index(vaddr) * sizeof(unsigned long);
if (!readmem(PADDR, pmd_paddr, &pmd_pte, sizeof pmd_pte)) {
ERRMSG("Can't get pmd_pte (pmd_paddr:%lx).\n", pmd_paddr);
@@ -391,15 +391,22 @@ kvtop_xen_x86_64(unsigned long kvaddr)
if ((dirp = kvtop_xen_x86_64(SYMBOL(pgd_l4))) == NOT_PADDR)
return NOT_PADDR;
- dirp += pml4_index(kvaddr) * sizeof(unsigned long long);
+
+ /*
+ * Get PGD.
+ */
+ dirp += pgd_index(kvaddr) * sizeof(unsigned long long);
if (!readmem(PADDR, dirp, &entry, sizeof(entry)))
return NOT_PADDR;
if (!(entry & _PAGE_PRESENT))
return NOT_PADDR;
+ /*
+ * Get PUD.
+ */
dirp = entry & ENTRY_MASK;
- dirp += pgd_index(kvaddr) * sizeof(unsigned long long);
+ dirp += pud_index(kvaddr) * sizeof(unsigned long long);
if (!readmem(PADDR, dirp, &entry, sizeof(entry)))
return NOT_PADDR;
@@ -407,9 +414,12 @@ kvtop_xen_x86_64(unsigned long kvaddr)
return NOT_PADDR;
if (entry & _PAGE_PSE) /* 1GB pages */
- return (entry & ENTRY_MASK & PGDIR_MASK) +
- (kvaddr & ~PGDIR_MASK);
+ return (entry & ENTRY_MASK & PUD_MASK) +
+ (kvaddr & ~PUD_MASK);
+ /*
+ * Get PMD.
+ */
dirp = entry & ENTRY_MASK;
dirp += pmd_index(kvaddr) * sizeof(unsigned long long);
if (!readmem(PADDR, dirp, &entry, sizeof(entry)))
@@ -422,6 +432,9 @@ kvtop_xen_x86_64(unsigned long kvaddr)
return (entry & ENTRY_MASK & PMD_MASK) +
(kvaddr & ~PMD_MASK);
+ /*
+ * Get PTE.
+ */
dirp = entry & ENTRY_MASK;
dirp += pte_index(kvaddr) * sizeof(unsigned long long);
if (!readmem(PADDR, dirp, &entry, sizeof(entry)))
@@ -596,7 +609,7 @@ find_vmemmap_x86_64()
* for max_paddr >> 12 page structures
*/
high_pfn = max_paddr >> 12;
- pgd_index = pgd4_index(vaddr_base);
+ pgd_index = pgd_index(vaddr_base);
pgd_addr = vaddr_to_paddr(init_level4_pgt); /* address of pgd */
pgd_addr += pgd_index * sizeof(unsigned long);
page_structs_per_pud = (PTRS_PER_PUD * PTRS_PER_PMD * info->page_size) /