Diffstat (limited to 'arch')
-rw-r--r--  arch/arm.c      169
-rw-r--r--  arch/ia64.c     377
-rw-r--r--  arch/ppc64.c     95
-rw-r--r--  arch/s390x.c    279
-rw-r--r--  arch/x86.c      279
-rw-r--r--  arch/x86_64.c   386
6 files changed, 1585 insertions, 0 deletions
diff --git a/arch/arm.c b/arch/arm.c
new file mode 100644
index 0000000..b65c2a0
--- /dev/null
+++ b/arch/arm.c
@@ -0,0 +1,169 @@
+/*
+ * arm.c
+ *
+ * Created by: Mika Westerberg <ext-mika.1.westerberg@nokia.com>
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifdef __arm__
+
+#include "../print_info.h"
+#include "../makedumpfile.h"
+
+#define PMD_TYPE_MASK 3
+#define PMD_TYPE_SECT 2
+#define PMD_TYPE_TABLE 1
+
+#define pgd_index(vaddr) ((vaddr) >> PGDIR_SHIFT)
+#define pte_index(vaddr) ((vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1))
+
+#define pgd_offset(pgdir, vaddr) \
+ ((pgdir) + pgd_index(vaddr) * 2 * sizeof(unsigned long))
+#define pmd_offset(dir, vaddr) (dir)
+#define pte_offset(pmd, vaddr) \
+ (pmd_page_vaddr(pmd) + pte_index(vaddr) * sizeof(unsigned long))
+
+/*
+ * These only work for kernel directly mapped addresses.
+ */
+#define __va(paddr) ((paddr) - info->phys_base + info->page_offset)
+#define __pa(vaddr) ((vaddr) - info->page_offset + info->phys_base)
+
+static inline unsigned long
+pmd_page_vaddr(unsigned long pmd)
+{
+ unsigned long ptr;
+
+ ptr = pmd & ~(PTRS_PER_PTE * sizeof(void *) - 1);
+ ptr += PTRS_PER_PTE * sizeof(void *);
+
+ return __va(ptr);
+}
+
+int
+get_phys_base_arm(void)
+{
+ unsigned long phys_base = ULONG_MAX;
+ int i;
+
+ /*
+ * We resolve phys_base from the PT_LOAD segments: the LMA holds the
+ * physical address of each segment, and we use the lowest one.
+ */
+ for (i = 0; i < info->num_load_memory; i++) {
+ const struct pt_load_segment *pls = &info->pt_load_segments[i];
+
+ if (pls->phys_start < phys_base)
+ phys_base = pls->phys_start;
+ }
+
+ if (phys_base == ULONG_MAX) {
+ ERRMSG("Can't determine phys_base.\n");
+ return FALSE;
+ }
+
+ info->phys_base = phys_base;
+ DEBUG_MSG("phys_base : %lx\n", phys_base);
+
+ return TRUE;
+}
+
+int
+get_machdep_info_arm(void)
+{
+ info->page_offset = SYMBOL(_stext) & 0xffff0000UL;
+ info->max_physmem_bits = _MAX_PHYSMEM_BITS;
+ info->kernel_start = SYMBOL(_stext);
+ info->section_size_bits = _SECTION_SIZE_BITS;
+
+ DEBUG_MSG("page_offset : %lx\n", info->page_offset);
+ DEBUG_MSG("kernel_start : %lx\n", info->kernel_start);
+
+ return TRUE;
+}
+
+/*
+ * vtop_arm() - translate arbitrary virtual address to physical
+ * @vaddr: virtual address to translate
+ *
+ * Translates @vaddr into a physical address using the page tables. The
+ * address can be any virtual address. Returns the physical address that
+ * corresponds to @vaddr, or %NOT_PADDR when there is no translation.
+ */
+static unsigned long long
+vtop_arm(unsigned long vaddr)
+{
+ unsigned long long paddr = NOT_PADDR;
+ unsigned long ptr, pgd, pte, pmd;
+
+ if (SYMBOL(swapper_pg_dir) == NOT_FOUND_SYMBOL) {
+ ERRMSG("Can't get the symbol of swapper_pg_dir.\n");
+ return NOT_PADDR;
+ }
+
+ ptr = pgd_offset(SYMBOL(swapper_pg_dir), vaddr);
+ if (!readmem(VADDR, ptr, &pgd, sizeof(pgd))) {
+ ERRMSG("Can't read pgd\n");
+ return NOT_PADDR;
+ }
+
+ if (info->vaddr_for_vtop == vaddr)
+ MSG(" PGD : %08lx => %08lx\n", ptr, pgd);
+
+ pmd = pmd_offset(pgd, vaddr);
+
+ switch (pmd & PMD_TYPE_MASK) {
+ case PMD_TYPE_TABLE: {
+ /* 4k small page */
+ ptr = pte_offset(pmd, vaddr);
+ if (!readmem(VADDR, ptr, &pte, sizeof(pte))) {
+ ERRMSG("Can't read pte\n");
+ return NOT_PADDR;
+ }
+
+ if (info->vaddr_for_vtop == vaddr)
+ MSG(" PTE : %08lx => %08lx\n", ptr, pte);
+
+ if (!(pte & _PAGE_PRESENT)) {
+ ERRMSG("Can't get a valid pte.\n");
+ return NOT_PADDR;
+ }
+
+ paddr = PAGEBASE(pte) + (vaddr & (PAGESIZE() - 1));
+ break;
+ }
+
+ case PMD_TYPE_SECT:
+ /* 1MB section */
+ pte = pmd & PMD_MASK;
+ paddr = pte + (vaddr & (PMD_SIZE - 1));
+ break;
+ }
+
+ return paddr;
+}
+
+unsigned long long
+vaddr_to_paddr_arm(unsigned long vaddr)
+{
+ /*
+ * Only walk the page tables when the user has explicitly requested a
+ * translation for the given address. Otherwise we assume that the address
+ * lies in the kernel direct-mapped region.
+ */
+ if (info->vaddr_for_vtop == vaddr)
+ return vtop_arm(vaddr);
+
+ return __pa(vaddr);
+}
+
+#endif /* __arm__ */
diff --git a/arch/ia64.c b/arch/ia64.c
new file mode 100644
index 0000000..79faa7a
--- /dev/null
+++ b/arch/ia64.c
@@ -0,0 +1,377 @@
+/*
+ * ia64.c
+ *
+ * Copyright (C) 2006, 2007, 2008 NEC Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifdef __ia64__
+
+#include "../print_info.h"
+#include "../makedumpfile.h"
+
+
+/*
+ * vmalloc() starting address is either the traditional 0xa000000000000000 or
+ * bumped up in 2.6 to 0xa000000200000000.
+ */
+int
+is_vmalloc_addr_ia64(unsigned long vaddr)
+{
+ return ((vaddr >= info->vmalloc_start) &&
+ (vaddr < (unsigned long)KERNEL_UNCACHED_BASE));
+}
+
+int
+get_phys_base_ia64(void)
+{
+ int i;
+ struct pt_load_segment *pls;
+
+ /*
+ * Default to 64MB.
+ */
+ info->phys_base = DEFAULT_PHYS_START;
+
+ for (i = 0; i < info->num_load_memory; i++) {
+ pls = &info->pt_load_segments[i];
+ if (VADDR_REGION(pls->virt_start) == KERNEL_VMALLOC_REGION) {
+
+ info->phys_base = pls->phys_start;
+ break;
+ }
+ }
+ return TRUE;
+}
+
+int
+get_machdep_info_ia64(void)
+{
+ /*
+ * Get kernel_start and vmalloc_start.
+ */
+ if (SYMBOL(_stext) == NOT_FOUND_SYMBOL)
+ return FALSE;
+
+ info->kernel_start = SYMBOL(_stext);
+
+ if (VADDR_REGION(info->kernel_start) == KERNEL_VMALLOC_REGION)
+ info->vmalloc_start = info->kernel_start + 4*1024UL*1024UL*1024UL;
+ else
+ info->vmalloc_start = KERNEL_VMALLOC_BASE;
+
+ /*
+ * Check whether the page table has 3 levels or 4 levels.
+ */
+ if ((vt.mem_flags & MEMORY_PAGETABLE_4L)
+ || !strncmp(SRCFILE(pud_t), STR_PUD_T_4L, strlen(STR_PUD_T_4L))) {
+ vt.mem_flags |= MEMORY_PAGETABLE_4L;
+ DEBUG_MSG("PAGETABLE_4L : ON\n");
+ } else if ((vt.mem_flags & MEMORY_PAGETABLE_3L)
+ || !strncmp(SRCFILE(pud_t), STR_PUD_T_3L, strlen(STR_PUD_T_3L))) {
+ vt.mem_flags |= MEMORY_PAGETABLE_3L;
+ DEBUG_MSG("PAGETABLE_3L : ON\n");
+ } else {
+ MSG("Can't distinguish the pgtable.\n");
+ }
+
+ info->section_size_bits = _SECTION_SIZE_BITS;
+ info->max_physmem_bits = _MAX_PHYSMEM_BITS;
+
+ return TRUE;
+}
+
+/*
+ * Translate a virtual address to a physical address using 3-level paging.
+ */
+unsigned long long
+vtop3_ia64(unsigned long vaddr)
+{
+ unsigned long long paddr, temp, page_dir, pgd_pte, page_middle, pmd_pte;
+ unsigned long long page_table, pte;
+
+ if (SYMBOL(swapper_pg_dir) == NOT_FOUND_SYMBOL) {
+ ERRMSG("Can't get the symbol of swapper_pg_dir.\n");
+ return NOT_PADDR;
+ }
+
+ /*
+ * Get PGD
+ */
+ temp = vaddr & MASK_PGD_3L;
+ temp = temp >> (PGDIR_SHIFT_3L - 3);
+ page_dir = SYMBOL(swapper_pg_dir) + temp;
+ if (!readmem(VADDR, page_dir, &pgd_pte, sizeof pgd_pte)) {
+ ERRMSG("Can't get pgd_pte (page_dir:%llx).\n", page_dir);
+ return NOT_PADDR;
+ }
+ if (info->vaddr_for_vtop == vaddr)
+ MSG(" PGD : %16llx => %16llx\n", page_dir, pgd_pte);
+
+ /*
+ * Get PMD
+ */
+ temp = vaddr & MASK_PMD;
+ temp = temp >> (PMD_SHIFT - 3);
+ page_middle = pgd_pte + temp;
+ if (!readmem(PADDR, page_middle, &pmd_pte, sizeof pmd_pte)) {
+ ERRMSG("Can't get pmd_pte (page_middle:%llx).\n", page_middle);
+ return NOT_PADDR;
+ }
+ if (info->vaddr_for_vtop == vaddr)
+ MSG(" PMD : %16llx => %16llx\n", page_middle, pmd_pte);
+
+ /*
+ * Get PTE
+ */
+ temp = vaddr & MASK_PTE;
+ temp = temp >> (PAGESHIFT() - 3);
+ page_table = pmd_pte + temp;
+ if (!readmem(PADDR, page_table, &pte, sizeof pte)) {
+ ERRMSG("Can't get pte (page_table:%llx).\n", page_table);
+ return NOT_PADDR;
+ }
+ if (info->vaddr_for_vtop == vaddr)
+ MSG(" PTE : %16llx => %16llx\n", page_table, pte);
+
+ /*
+ * Get physical address
+ */
+ temp = vaddr & MASK_POFFSET;
+ paddr = (pte & _PAGE_PPN_MASK) + temp;
+
+ return paddr;
+}
+
+/*
+ * Translate a virtual address to a physical address using 4-level paging.
+ */
+unsigned long long
+vtop4_ia64(unsigned long vaddr)
+{
+ unsigned long long paddr, temp, page_dir, pgd_pte, page_upper, pud_pte;
+ unsigned long long page_middle, pmd_pte, page_table, pte;
+
+ if (SYMBOL(swapper_pg_dir) == NOT_FOUND_SYMBOL) {
+ ERRMSG("Can't get the symbol of swapper_pg_dir.\n");
+ return NOT_PADDR;
+ }
+
+ /*
+ * Get PGD
+ */
+ temp = vaddr & MASK_PGD_4L;
+ temp = temp >> (PGDIR_SHIFT_4L - 3);
+ page_dir = SYMBOL(swapper_pg_dir) + temp;
+ if (!readmem(VADDR, page_dir, &pgd_pte, sizeof pgd_pte)) {
+ ERRMSG("Can't get pgd_pte (page_dir:%llx).\n", page_dir);
+ return NOT_PADDR;
+ }
+ if (info->vaddr_for_vtop == vaddr)
+ MSG(" PGD : %16llx => %16llx\n", page_dir, pgd_pte);
+
+ /*
+ * Get PUD
+ */
+ temp = vaddr & MASK_PUD;
+ temp = temp >> (PUD_SHIFT - 3);
+ page_upper = pgd_pte + temp;
+ if (!readmem(PADDR, page_upper, &pud_pte, sizeof pud_pte)) {
+ ERRMSG("Can't get pud_pte (page_upper:%llx).\n", page_upper);
+ return NOT_PADDR;
+ }
+ if (info->vaddr_for_vtop == vaddr)
+ MSG(" PUD : %16llx => %16llx\n", page_upper, pud_pte);
+
+ /*
+ * Get PMD
+ */
+ temp = vaddr & MASK_PMD;
+ temp = temp >> (PMD_SHIFT - 3);
+ page_middle = pud_pte + temp;
+ if (!readmem(PADDR, page_middle, &pmd_pte, sizeof pmd_pte)) {
+ ERRMSG("Can't get pmd_pte (page_middle:%llx).\n", page_middle);
+ return NOT_PADDR;
+ }
+ if (info->vaddr_for_vtop == vaddr)
+ MSG(" PMD : %16llx => %16llx\n", page_middle, pmd_pte);
+
+ /*
+ * Get PTE
+ */
+ temp = vaddr & MASK_PTE;
+ temp = temp >> (PAGESHIFT() - 3);
+ page_table = pmd_pte + temp;
+ if (!readmem(PADDR, page_table, &pte, sizeof pte)) {
+ ERRMSG("Can't get pte (page_table:%llx).\n", page_table);
+ return NOT_PADDR;
+ }
+ if (info->vaddr_for_vtop == vaddr)
+ MSG(" PTE : %16llx => %16llx\n", page_table, pte);
+
+ /*
+ * Get physical address
+ */
+ temp = vaddr & MASK_POFFSET;
+ paddr = (pte & _PAGE_PPN_MASK) + temp;
+
+ return paddr;
+}
+
+unsigned long long
+vtop_ia64(unsigned long vaddr)
+{
+ unsigned long long paddr;
+
+ if (VADDR_REGION(vaddr) != KERNEL_VMALLOC_REGION) {
+ ERRMSG("vaddr(%lx) is not KERNEL_VMALLOC_REGION.\n", vaddr);
+ return NOT_PADDR;
+ }
+ paddr = vaddr_to_paddr_general(vaddr);
+ if (paddr != NOT_PADDR)
+ return paddr;
+
+ if (!is_vmalloc_addr_ia64(vaddr)) {
+ paddr = vaddr - info->kernel_start +
+ (info->phys_base & KERNEL_TR_PAGE_MASK);
+ return paddr;
+ }
+
+ if (vt.mem_flags & MEMORY_PAGETABLE_4L)
+ return vtop4_ia64(vaddr);
+ else
+ return vtop3_ia64(vaddr);
+}
+
+/*
+ * Translate a virtual address to a physical address.
+ */
+unsigned long long
+vaddr_to_paddr_ia64(unsigned long vaddr)
+{
+ unsigned long long paddr;
+
+ switch (VADDR_REGION(vaddr)) {
+ case KERNEL_CACHED_REGION:
+ paddr = vaddr - (ulong)(KERNEL_CACHED_BASE);
+ break;
+
+ case KERNEL_UNCACHED_REGION:
+ paddr = vaddr - (ulong)(KERNEL_UNCACHED_BASE);
+ break;
+
+ case KERNEL_VMALLOC_REGION:
+ paddr = vtop_ia64(vaddr);
+ break;
+
+ default:
+ ERRMSG("Unknown region (%ld)\n", VADDR_REGION(vaddr));
+ return 0x0;
+ }
+ return paddr;
+}
+
+/*
+ * for Xen extraction
+ */
+unsigned long long
+kvtop_xen_ia64(unsigned long kvaddr)
+{
+ unsigned long long addr, dirp, entry;
+
+ if (!is_xen_vaddr(kvaddr))
+ return NOT_PADDR;
+
+ if (is_direct(kvaddr))
+ return (unsigned long)kvaddr - DIRECTMAP_VIRT_START;
+
+ if (!is_frame_table_vaddr(kvaddr))
+ return NOT_PADDR;
+
+ addr = kvaddr - VIRT_FRAME_TABLE_ADDR;
+
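+ /* First level: index frametable_pg_dir (PGD). */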
+ dirp = SYMBOL(frametable_pg_dir) - DIRECTMAP_VIRT_START;
+ dirp += ((addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) * sizeof(unsigned long long);
+ if (!readmem(MADDR_XEN, dirp, &entry, sizeof(entry)))
+ return NOT_PADDR;
+
+ dirp = entry & _PFN_MASK;
+ if (!dirp)
+ return NOT_PADDR;
+
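+ /* Second level: index the PMD. */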
+ dirp += ((addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) * sizeof(unsigned long long);
+ if (!readmem(MADDR_XEN, dirp, &entry, sizeof(entry)))
+ return NOT_PADDR;
+
+ dirp = entry & _PFN_MASK;
+ if (!dirp)
+ return NOT_PADDR;
+
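+ /* Third level: index the PTE; the _PAGE_P bit must be set. */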
+ dirp += ((addr >> PAGESHIFT()) & (PTRS_PER_PTE - 1)) * sizeof(unsigned long long);
+ if (!readmem(MADDR_XEN, dirp, &entry, sizeof(entry)))
+ return NOT_PADDR;
+
+ if (!(entry & _PAGE_P))
+ return NOT_PADDR;
+
+ entry = (entry & _PFN_MASK) + (addr & ((1UL << PAGESHIFT()) - 1));
+
+ return entry;
+}
+
+int
+get_xen_info_ia64(void)
+{
+ unsigned long xen_start, xen_end, xen_heap_start;
+ int i;
+
+ info->frame_table_vaddr = VIRT_FRAME_TABLE_ADDR; /* the "frame_table" symbol holds the same value */
+
+ if (SYMBOL(xenheap_phys_end) == NOT_FOUND_SYMBOL) {
+ ERRMSG("Can't get the symbol of xenheap_phys_end.\n");
+ return FALSE;
+ }
+ if (!readmem(VADDR_XEN, SYMBOL(xenheap_phys_end), &xen_end,
+ sizeof(xen_end))) {
+ ERRMSG("Can't get the value of xenheap_phys_end.\n");
+ return FALSE;
+ }
+ if (SYMBOL(xen_pstart) == NOT_FOUND_SYMBOL) {
+ ERRMSG("Can't get the symbol of xen_pstart.\n");
+ return FALSE;
+ }
+ if (!readmem(VADDR_XEN, SYMBOL(xen_pstart), &xen_start,
+ sizeof(xen_start))) {
+ ERRMSG("Can't get the value of xen_pstart.\n");
+ return FALSE;
+ }
+ info->xen_heap_start = paddr_to_pfn(xen_start);
+ info->xen_heap_end = paddr_to_pfn(xen_end);
+
+ if (SYMBOL(xen_heap_start) == NOT_FOUND_SYMBOL) {
+ ERRMSG("Can't get the symbol of xen_heap_start.\n");
+ return FALSE;
+ }
+ if (!readmem(VADDR_XEN, SYMBOL(xen_heap_start), &xen_heap_start,
+ sizeof(xen_heap_start))) {
+ ERRMSG("Can't get the value of xen_heap_start.\n");
+ return FALSE;
+ }
+ for (i = 0; i < info->num_domain; i++) {
+ info->domain_list[i].pickled_id = (unsigned int)
+ (info->domain_list[i].domain_addr - xen_heap_start);
+ }
+
+ return TRUE;
+}
+
+#endif /* ia64 */
+
diff --git a/arch/ppc64.c b/arch/ppc64.c
new file mode 100644
index 0000000..f8fd8d2
--- /dev/null
+++ b/arch/ppc64.c
@@ -0,0 +1,95 @@
+/*
+ * ppc64.c
+ *
+ * Created by: Sachin Sant (sachinp@in.ibm.com)
+ * Copyright (C) IBM Corporation, 2006. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation (version 2 of the License).
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifdef __powerpc__
+
+#include "../print_info.h"
+#include "../makedumpfile.h"
+
+int
+get_machdep_info_ppc64(void)
+{
+ unsigned long vmlist, vmalloc_start;
+
+ info->section_size_bits = _SECTION_SIZE_BITS;
+ info->max_physmem_bits = _MAX_PHYSMEM_BITS;
+ info->page_offset = __PAGE_OFFSET;
+
+ if (SYMBOL(_stext) == NOT_FOUND_SYMBOL) {
+ ERRMSG("Can't get the symbol of _stext.\n");
+ return FALSE;
+ }
+ info->kernel_start = SYMBOL(_stext);
+ DEBUG_MSG("kernel_start : %lx\n", info->kernel_start);
+
+ /*
+ * For compatibility, makedumpfile should still run without the symbol
+ * vmlist and the offset of vm_struct.addr when they are not needed.
+ */
+ if ((SYMBOL(vmlist) == NOT_FOUND_SYMBOL)
+ || (OFFSET(vm_struct.addr) == NOT_FOUND_STRUCTURE)) {
+ return TRUE;
+ }
+ if (!readmem(VADDR, SYMBOL(vmlist), &vmlist, sizeof(vmlist))) {
+ ERRMSG("Can't get vmlist.\n");
+ return FALSE;
+ }
+ if (!readmem(VADDR, vmlist + OFFSET(vm_struct.addr), &vmalloc_start,
+ sizeof(vmalloc_start))) {
+ ERRMSG("Can't get vmalloc_start.\n");
+ return FALSE;
+ }
+ info->vmalloc_start = vmalloc_start;
+ DEBUG_MSG("vmalloc_start: %lx\n", vmalloc_start);
+
+ return TRUE;
+}
+
+int
+is_vmalloc_addr_ppc64(unsigned long vaddr)
+{
+ return (info->vmalloc_start && vaddr >= info->vmalloc_start);
+}
+
+unsigned long long
+vaddr_to_paddr_ppc64(unsigned long vaddr)
+{
+ unsigned long long paddr;
+
+ paddr = vaddr_to_paddr_general(vaddr);
+ if (paddr != NOT_PADDR)
+ return paddr;
+
+ if ((SYMBOL(vmlist) == NOT_FOUND_SYMBOL)
+ || (OFFSET(vm_struct.addr) == NOT_FOUND_STRUCTURE)) {
+ ERRMSG("Can't get necessary information for vmalloc translation.\n");
+ return NOT_PADDR;
+ }
+ if (!is_vmalloc_addr_ppc64(vaddr))
+ return (vaddr - info->kernel_start);
+
+ /*
+ * TODO: Support vmalloc translation.
+ */
+ ERRMSG("This makedumpfile does not support vmalloc translation.\n");
+ return NOT_PADDR;
+}
+
+#endif /* powerpc */
diff --git a/arch/s390x.c b/arch/s390x.c
new file mode 100644
index 0000000..9dd613f
--- /dev/null
+++ b/arch/s390x.c
@@ -0,0 +1,279 @@
+/*
+ * s390x.c
+ *
+ * Created by: Michael Holzheu (holzheu@de.ibm.com)
+ * Copyright IBM Corp. 2010
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation (version 2 of the License).
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifdef __s390x__
+
+#include "../print_info.h"
+#include "../makedumpfile.h"
+
+#define TABLE_SIZE 4096
+
+/*
+ * Bits in the virtual address
+ *
+ * |<----- RX ---------->|
+ * | RFX | RSX | RTX | SX | PX | BX |
+ * 0 11 22 33 44 52 63
+ *
+ * RX: Region Index
+ * RFX: Region first index
+ * RSX: Region second index
+ * RTX: Region third index
+ * SX: Segment index
+ * PX: Page index
+ * BX: Byte index
+ *
+ * The RX part of the vaddr is divided into three fields, RFX, RSX and
+ * RTX, each 11 bits in size.
+ */
+#define _REGION_INDEX_SHIFT 11
+#define _PAGE_INDEX_MASK 0xff000UL /* page index (PX) mask */
+#define _BYTE_INDEX_MASK 0x00fffUL /* Byte index (BX) mask */
+#define _PAGE_BYTE_INDEX_MASK (_PAGE_INDEX_MASK | _BYTE_INDEX_MASK)
+
+/* Region/segment table index */
+#define rsg_index(x, y) \
+ (((x) >> ((_REGION_INDEX_SHIFT * y) + _SEGMENT_INDEX_SHIFT)) \
+ & _REGION_OFFSET_MASK)
+/* Page table index */
+#define pte_index(x) (((x) >> _PAGE_INDEX_SHIFT) & _PAGE_OFFSET_MASK)
+
+#define rsg_offset(x, y) (rsg_index(x, y) * sizeof(unsigned long))
+#define pte_offset(x) (pte_index(x) * sizeof(unsigned long))
+
+int
+get_machdep_info_s390x(void)
+{
+ unsigned long vmlist, vmalloc_start;
+
+ info->section_size_bits = _SECTION_SIZE_BITS;
+ info->max_physmem_bits = _MAX_PHYSMEM_BITS;
+ info->page_offset = __PAGE_OFFSET;
+
+ if (SYMBOL(_stext) == NOT_FOUND_SYMBOL) {
+ ERRMSG("Can't get the symbol of _stext.\n");
+ return FALSE;
+ }
+ info->kernel_start = SYMBOL(_stext);
+ DEBUG_MSG("kernel_start : %lx\n", info->kernel_start);
+
+ /*
+ * For compatibility, makedumpfile should still run without the symbol
+ * vmlist and the offset of vm_struct.addr when they are not needed.
+ */
+ if ((SYMBOL(vmlist) == NOT_FOUND_SYMBOL)
+ || (OFFSET(vm_struct.addr) == NOT_FOUND_STRUCTURE)) {
+ return TRUE;
+ }
+ if (!readmem(VADDR, SYMBOL(vmlist), &vmlist, sizeof(vmlist))) {
+ ERRMSG("Can't get vmlist.\n");
+ return FALSE;
+ }
+ if (!readmem(VADDR, vmlist + OFFSET(vm_struct.addr), &vmalloc_start,
+ sizeof(vmalloc_start))) {
+ ERRMSG("Can't get vmalloc_start.\n");
+ return FALSE;
+ }
+ info->vmalloc_start = vmalloc_start;
+ DEBUG_MSG("vmalloc_start: %lx\n", vmalloc_start);
+
+ return TRUE;
+}
+
+static int
+is_vmalloc_addr_s390x(unsigned long vaddr)
+{
+ return (info->vmalloc_start && vaddr >= info->vmalloc_start);
+}
+
+static int
+rsg_table_entry_bad(unsigned long entry, int level)
+{
+ unsigned long mask = ~_REGION_ENTRY_INVALID
+ & ~_REGION_ENTRY_TYPE_MASK
+ & ~_REGION_ENTRY_LENGTH;
+
+ if (level)
+ mask &= ~_REGION_ENTRY_ORIGIN;
+ else
+ mask &= ~_SEGMENT_ENTRY_ORIGIN;
+
+ return (entry & mask) != 0;
+}
+
+/* Region or segment table traversal function */
+static unsigned long
+_kl_rsg_table_deref_s390x(unsigned long vaddr, unsigned long table,
+ int len, int level)
+{
+ unsigned long offset, entry;
+
+ offset = rsg_offset(vaddr, level);
+
+ /* Check whether the offset exceeds the table limit. */
+ if (offset >= ((len + 1) * TABLE_SIZE)) {
+ ERRMSG("offset is over the table limit.\n");
+ return 0;
+ }
+
+ if (!readmem(VADDR, table + offset, &entry, sizeof(entry))) {
+ if (level)
+ ERRMSG("Can't read region table %d entry\n", level);
+ else
+ ERRMSG("Can't read segment table entry\n");
+ return 0;
+ }
+ /*
+ * Check that the region/segment table entry was read correctly and does
+ * not have any of the reserved bits set.
+ */
+ if (rsg_table_entry_bad(entry, level)) {
+ ERRMSG("Bad region/segment table entry.\n");
+ return 0;
+ }
+ /*
+ * Check that the region/segment table entry has a valid level and is
+ * not marked invalid.
+ */
+ if ((RSG_TABLE_LEVEL(entry) != level)
+ && (entry & _REGION_ENTRY_INVALID)) {
+ ERRMSG("Invalid region/segment table level or entry.\n");
+ return 0;
+ }
+
+ return entry;
+}
+
+/* Page table traversal function */
+static ulong _kl_pg_table_deref_s390x(unsigned long vaddr, unsigned long table)
+{
+ unsigned long offset, entry;
+
+ offset = pte_offset(vaddr);
+ if (!readmem(VADDR, table + offset, &entry, sizeof(entry))) {
+ ERRMSG("Can't read page table entry.\n");
+ return 0;
+ }
+ /*
+ * Check that the page table entry was read correctly and does not have
+ * any of the reserved bits or the invalid bit set.
+ */
+ if (entry & (_PAGE_CO | _PAGE_ZERO | _PAGE_INVALID)) {
+ ERRMSG("Invalid page table entry.\n");
+ return 0;
+ }
+
+ return entry;
+}
+
+/* vtop_s390x() - translate virtual address to physical
+ * @vaddr: virtual address to translate
+ *
+ * Function converts the @vaddr into physical address using page tables.
+ *
+ * Return:
+ * Physical address or NOT_PADDR if translation fails.
+ */
+static unsigned long long
+vtop_s390x(unsigned long vaddr)
+{
+ unsigned long long paddr = NOT_PADDR;
+ unsigned long table, entry;
+ int level, len;
+
+ if (SYMBOL(swapper_pg_dir) == NOT_FOUND_SYMBOL) {
+ ERRMSG("Can't get the symbol of swapper_pg_dir.\n");
+ return NOT_PADDR;
+ }
+ table = SYMBOL(swapper_pg_dir);
+
+ /* Read the first entry to find the number of page table levels. */
+ if (!readmem(VADDR, table, &entry, sizeof(entry))) {
+ ERRMSG("Can't read swapper_pg_dir entry.\n");
+ return NOT_PADDR;
+ }
+ level = TABLE_LEVEL(entry);
+ len = TABLE_LENGTH(entry);
+
+ if ((vaddr >> (_SEGMENT_PAGE_SHIFT + (_REGION_INDEX_SHIFT * level)))) {
+ ERRMSG("Address too big for the number of page table " \
+ "levels.\n");
+ return NOT_PADDR;
+ }
+
+ /*
+ * Walk the region and segment tables.
+ */
+ while (level >= 0) {
+ entry = _kl_rsg_table_deref_s390x(vaddr, table, len, level);
+ if (!entry) {
+ return NOT_PADDR;
+ }
+ table = entry & _REGION_ENTRY_ORIGIN;
+ len = RSG_TABLE_LENGTH(entry);
+ level--;
+ }
+
+ /*
+ * Check whether this is a large page.
+ * If so, add the 1MB page offset (PX + BX) and return the result.
+ * Otherwise, look up the page table entry using the PX index.
+ */
+ if (entry & _SEGMENT_ENTRY_LARGE) {
+ paddr = table + (vaddr & _PAGE_BYTE_INDEX_MASK);
+ } else {
+ entry = _kl_pg_table_deref_s390x(vaddr,
+ entry & _SEGMENT_ENTRY_ORIGIN);
+ if (!entry)
+ return NOT_PADDR;
+
+ /*
+ * Isolate the page origin from the page table entry.
+ * Add the page offset (BX).
+ */
+ paddr = (entry & _REGION_ENTRY_ORIGIN)
+ + (vaddr & _BYTE_INDEX_MASK);
+ }
+
+ return paddr;
+}
+
+unsigned long long
+vaddr_to_paddr_s390x(unsigned long vaddr)
+{
+ unsigned long long paddr;
+
+ paddr = vaddr_to_paddr_general(vaddr);
+ if (paddr != NOT_PADDR)
+ return paddr;
+
+ if ((SYMBOL(vmlist) == NOT_FOUND_SYMBOL)
+ || (OFFSET(vm_struct.addr) == NOT_FOUND_STRUCTURE)) {
+ ERRMSG("Can't get necessary information for vmalloc "
+ "translation.\n");
+ return NOT_PADDR;
+ }
+
+ if (is_vmalloc_addr_s390x(vaddr)) {
+ paddr = vtop_s390x(vaddr);
+ }
+ else {
+ paddr = vaddr - KVBASE;
+ }
+
+ return paddr;
+}
+
+#endif /* __s390x__ */
diff --git a/arch/x86.c b/arch/x86.c
new file mode 100644
index 0000000..f0cd172
--- /dev/null
+++ b/arch/x86.c
@@ -0,0 +1,279 @@
+/*
+ * x86.c
+ *
+ * Copyright (C) 2006, 2007, 2008 NEC Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifdef __x86__
+
+#include "../print_info.h"
+#include "../makedumpfile.h"
+
+int
+get_machdep_info_x86(void)
+{
+ unsigned long vmlist, vmalloc_start;
+
+ /* PAE */
+ if ((vt.mem_flags & MEMORY_X86_PAE)
+ || ((SYMBOL(pkmap_count) != NOT_FOUND_SYMBOL)
+ && (SYMBOL(pkmap_count_next) != NOT_FOUND_SYMBOL)
+ && ((SYMBOL(pkmap_count_next)-SYMBOL(pkmap_count))/sizeof(int))
+ == 512)) {
+ DEBUG_MSG("\n");
+ DEBUG_MSG("PAE : ON\n");
+ vt.mem_flags |= MEMORY_X86_PAE;
+ info->max_physmem_bits = _MAX_PHYSMEM_BITS_PAE;
+ } else {
+ DEBUG_MSG("\n");
+ DEBUG_MSG("PAE : OFF\n");
+ info->max_physmem_bits = _MAX_PHYSMEM_BITS;
+ }
+ info->page_offset = __PAGE_OFFSET;
+
+ if (SYMBOL(_stext) == NOT_FOUND_SYMBOL) {
+ ERRMSG("Can't get the symbol of _stext.\n");
+ return FALSE;
+ }
+ info->kernel_start = SYMBOL(_stext) & ~KVBASE_MASK;
+ DEBUG_MSG("kernel_start : %lx\n", info->kernel_start);
+
+ /*
+ * For compatibility, makedumpfile should still run without the symbol
+ * vmlist and the offset of vm_struct.addr when they are not needed.
+ */
+ if ((SYMBOL(vmlist) == NOT_FOUND_SYMBOL)
+ || (OFFSET(vm_struct.addr) == NOT_FOUND_STRUCTURE)) {
+ return TRUE;
+ }
+ if (!readmem(VADDR, SYMBOL(vmlist), &vmlist, sizeof(vmlist))) {
+ ERRMSG("Can't get vmlist.\n");
+ return FALSE;
+ }
+ if (!readmem(VADDR, vmlist + OFFSET(vm_struct.addr), &vmalloc_start,
+ sizeof(vmalloc_start))) {
+ ERRMSG("Can't get vmalloc_start.\n");
+ return FALSE;
+ }
+ info->vmalloc_start = vmalloc_start;
+ DEBUG_MSG("vmalloc_start: %lx\n", vmalloc_start);
+
+ return TRUE;
+}
+
+int
+get_versiondep_info_x86(void)
+{
+ /*
+ * For PAE, SECTION_SIZE_BITS was changed from 30 to 29 in
+ * linux-2.6.26.
+ */
+ if (vt.mem_flags & MEMORY_X86_PAE) {
+ if (info->kernel_version < KERNEL_VERSION(2, 6, 26))
+ info->section_size_bits = _SECTION_SIZE_BITS_PAE_ORIG;
+ else
+ info->section_size_bits = _SECTION_SIZE_BITS_PAE_2_6_26;
+ } else
+ info->section_size_bits = _SECTION_SIZE_BITS;
+
+ return TRUE;
+}
+
+unsigned long long
+vtop_x86_PAE(unsigned long vaddr)
+{
+ unsigned long long page_dir, pgd_pte, pmd_paddr, pmd_pte;
+ unsigned long long pte_paddr, pte;
+
+ if (SYMBOL(swapper_pg_dir) == NOT_FOUND_SYMBOL) {
+ ERRMSG("Can't get the symbol of swapper_pg_dir.\n");
+ return NOT_PADDR;
+ }
+
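+ /* PGD level: index swapper_pg_dir with pgd_index_PAE(). */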
+ page_dir = SYMBOL(swapper_pg_dir);
+ page_dir += pgd_index_PAE(vaddr) * sizeof(unsigned long long);
+ if (!readmem(VADDR, page_dir, &pgd_pte, sizeof(pgd_pte))) {
+ ERRMSG("Can't get pgd_pte (page_dir:%llx).\n", page_dir);
+ return NOT_PADDR;
+ }
+ if (!(pgd_pte & _PAGE_PRESENT))
+ return NOT_PADDR;
+
+ if (info->vaddr_for_vtop == vaddr)
+ MSG(" PGD : %16llx => %16llx\n", page_dir, pgd_pte);
+
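+ /* PMD level: a _PAGE_PSE entry maps a 2MB page. */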
+ pmd_paddr = pgd_pte & ENTRY_MASK;
+ pmd_paddr += pmd_index(vaddr) * sizeof(unsigned long long);
+ if (!readmem(PADDR, pmd_paddr, &pmd_pte, sizeof(pmd_pte))) {
+ ERRMSG("Can't get pmd_pte (pmd_paddr:%llx).\n", pmd_paddr);
+ return NOT_PADDR;
+ }
+ if (!(pmd_pte & _PAGE_PRESENT))
+ return NOT_PADDR;
+
+ if (info->vaddr_for_vtop == vaddr)
+ MSG(" PMD : %16llx => %16llx\n", pmd_paddr, pmd_pte);
+
+ if (pmd_pte & _PAGE_PSE)
+ return (pmd_pte & ENTRY_MASK) + (vaddr & ((1UL << PMD_SHIFT) - 1));
+
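+ /* PTE level: final level; add the in-page offset. */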
+ pte_paddr = pmd_pte & ENTRY_MASK;
+ pte_paddr += pte_index(vaddr) * sizeof(unsigned long long);
+ if (!readmem(PADDR, pte_paddr, &pte, sizeof(pte)))
+ return NOT_PADDR;
+
+ if (!(pte & _PAGE_PRESENT))
+ return NOT_PADDR;
+
+ if (info->vaddr_for_vtop == vaddr)
+ MSG(" PTE : %16llx => %16llx\n", pte_paddr, pte);
+
+ return (pte & ENTRY_MASK) + (vaddr & ((1UL << PTE_SHIFT) - 1));
+}
+
+int
+is_vmalloc_addr_x86(unsigned long vaddr)
+{
+ return (info->vmalloc_start && vaddr >= info->vmalloc_start);
+}
+
+unsigned long long
+vaddr_to_paddr_x86(unsigned long vaddr)
+{
+ unsigned long long paddr;
+
+ if ((paddr = vaddr_to_paddr_general(vaddr)) != NOT_PADDR)
+ return paddr;
+
+ if ((SYMBOL(vmlist) == NOT_FOUND_SYMBOL)
+ || (OFFSET(vm_struct.addr) == NOT_FOUND_STRUCTURE)) {
+ ERRMSG("Can't get necessary information for vmalloc translation.\n");
+ return NOT_PADDR;
+ }
+ if (!is_vmalloc_addr_x86(vaddr))
+ return (vaddr - info->kernel_start);
+
+ if (vt.mem_flags & MEMORY_X86_PAE) {
+ paddr = vtop_x86_PAE(vaddr);
+ } else {
+ /*
+ * TODO: Support vmalloc translation for non-PAE kernels.
+ */
+ ERRMSG("This makedumpfile does not support vmalloc translation of not-PAE kernel.\n");
+ return NOT_PADDR;
+ }
+
+ return paddr;
+}
+
+/*
+ * for Xen extraction
+ */
+unsigned long long
+kvtop_xen_x86(unsigned long kvaddr)
+{
+ unsigned long long dirp, entry;
+
+ if (!is_xen_vaddr(kvaddr))
+ return NOT_PADDR;
+
+ if (is_direct(kvaddr))
+ return (unsigned long)kvaddr - DIRECTMAP_VIRT_START;
+
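+ /* Top level: index the PAE page-directory-pointer table (pgd_l3). */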
+ if ((dirp = kvtop_xen_x86(SYMBOL(pgd_l3))) == NOT_PADDR)
+ return NOT_PADDR;
+ dirp += pgd_index_PAE(kvaddr) * sizeof(unsigned long long);
+ if (!readmem(MADDR_XEN, dirp, &entry, sizeof(entry)))
+ return NOT_PADDR;
+
+ if (!(entry & _PAGE_PRESENT))
+ return NOT_PADDR;
+
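+ /* Middle level: index the page directory; a _PAGE_PSE entry maps a 2MB page. */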
+ dirp = entry & ENTRY_MASK;
+ dirp += pmd_index(kvaddr) * sizeof(unsigned long long);
+ if (!readmem(MADDR_XEN, dirp, &entry, sizeof(entry)))
+ return NOT_PADDR;
+
+ if (!(entry & _PAGE_PRESENT))
+ return NOT_PADDR;
+
+ if (entry & _PAGE_PSE) {
+ entry = (entry & ENTRY_MASK) + (kvaddr & ((1UL << PMD_SHIFT) - 1));
+ return entry;
+ }
+
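+ /* Bottom level: index the page table and add the in-page offset. */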
+ dirp = entry & ENTRY_MASK;
+ dirp += pte_index(kvaddr) * sizeof(unsigned long long);
+ if (!readmem(MADDR_XEN, dirp, &entry, sizeof(entry)))
+ return NOT_PADDR;
+
+ if (!(entry & _PAGE_PRESENT)) {
+ return NOT_PADDR;
+ }
+
+ entry = (entry & ENTRY_MASK) + (kvaddr & ((1UL << PTE_SHIFT) - 1));
+
+ return entry;
+}
+
+int get_xen_info_x86(void)
+{
+ unsigned long frame_table_vaddr;
+ unsigned long xen_end;
+ int i;
+
+ if (SYMBOL(pgd_l2) == NOT_FOUND_SYMBOL &&
+ SYMBOL(pgd_l3) == NOT_FOUND_SYMBOL) {
+ ERRMSG("Can't get pgd.\n");
+ return FALSE;
+ }
+
+ if (SYMBOL(pgd_l3) == NOT_FOUND_SYMBOL) {
+ ERRMSG("non-PAE not support right now.\n");
+ return FALSE;
+ }
+
+ if (SYMBOL(frame_table) == NOT_FOUND_SYMBOL) {
+ ERRMSG("Can't get the symbol of frame_table.\n");
+ return FALSE;
+ }
+ if (!readmem(VADDR_XEN, SYMBOL(frame_table), &frame_table_vaddr,
+ sizeof(frame_table_vaddr))) {
+ ERRMSG("Can't get the value of frame_table.\n");
+ return FALSE;
+ }
+ info->frame_table_vaddr = frame_table_vaddr;
+
+ if (SYMBOL(xenheap_phys_end) == NOT_FOUND_SYMBOL) {
+ ERRMSG("Can't get the symbol of xenheap_phys_end.\n");
+ return FALSE;
+ }
+ if (!readmem(VADDR_XEN, SYMBOL(xenheap_phys_end), &xen_end,
+ sizeof(xen_end))) {
+ ERRMSG("Can't get the value of xenheap_phys_end.\n");
+ return FALSE;
+ }
+ info->xen_heap_start = 0;
+ info->xen_heap_end = paddr_to_pfn(xen_end);
+
+ /*
+ * pickled_id == domain addr for x86
+ */
+ for (i = 0; i < info->num_domain; i++) {
+ info->domain_list[i].pickled_id =
+ info->domain_list[i].domain_addr;
+ }
+
+ return TRUE;
+}
+#endif /* x86 */
+
diff --git a/arch/x86_64.c b/arch/x86_64.c
new file mode 100644
index 0000000..5e33ab2
--- /dev/null
+++ b/arch/x86_64.c
@@ -0,0 +1,386 @@
+/*
+ * x86_64.c
+ *
+ * Copyright (C) 2006, 2007, 2008 NEC Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifdef __x86_64__
+
+#include "../print_info.h"
+#include "../makedumpfile.h"
+
+int
+is_vmalloc_addr(ulong vaddr)
+{
+ /*
+ * Treat the vmalloc, virtual memmap, and module address ranges as VMALLOC space.
+ */
+ return ((vaddr >= VMALLOC_START && vaddr <= VMALLOC_END)
+ || (vaddr >= VMEMMAP_START && vaddr <= VMEMMAP_END)
+ || (vaddr >= MODULES_VADDR && vaddr <= MODULES_END));
+}
+
+int
+get_phys_base_x86_64(void)
+{
+ int i;
+ struct pt_load_segment *pls;
+
+ /*
+ * Get the relocatable offset
+ */
+ info->phys_base = 0; /* default/traditional */
+
+ for (i = 0; i < info->num_load_memory; i++) {
+ pls = &info->pt_load_segments[i];
+ if ((pls->virt_start >= __START_KERNEL_map) &&
+ !(is_vmalloc_addr(pls->virt_start))) {
+
+ info->phys_base = pls->phys_start -
+ (pls->virt_start & ~(__START_KERNEL_map));
+
+ break;
+ }
+ }
+
+ return TRUE;
+}
+
+int
+get_machdep_info_x86_64(void)
+{
+ int i, j, mfns[MAX_X86_64_FRAMES];
+ unsigned long frame_mfn[MAX_X86_64_FRAMES];
+ unsigned long buf[MFNS_PER_FRAME];
+
+ info->section_size_bits = _SECTION_SIZE_BITS;
+
+ if (!(vt.mem_flags & MEMORY_XEN))
+ return TRUE;
+
+ /*
+ * Get the information needed to translate domain-0's physical
+ * addresses into machine addresses.
+ */
+ if (!readmem(MADDR_XEN, pfn_to_paddr(info->p2m_mfn),
+ &frame_mfn, PAGESIZE())) {
+ ERRMSG("Can't get p2m_mfn.\n");
+ return FALSE;
+ }
+
+ /*
+ * Count the number of p2m frames.
+ */
+ for (i = 0; i < MAX_X86_64_FRAMES; i++) {
+ mfns[i] = 0;
+ if (!frame_mfn[i])
+ break;
+
+ if (!readmem(MADDR_XEN, pfn_to_paddr(frame_mfn[i]), &buf,
+ PAGESIZE())) {
+ ERRMSG("Can't get frame_mfn[%d].\n", i);
+ return FALSE;
+ }
+ for (j = 0; j < MFNS_PER_FRAME; j++) {
+ if (!buf[j])
+ break;
+
+ mfns[i]++;
+ }
+ info->p2m_frames += mfns[i];
+ }
+ info->p2m_mfn_frame_list
+ = malloc(sizeof(unsigned long) * info->p2m_frames);
+ if (info->p2m_mfn_frame_list == NULL) {
+ ERRMSG("Can't allocate memory for p2m_mfn_frame_list. %s\n",
+ strerror(errno));
+ return FALSE;
+ }
+
+ /*
+ * Get p2m_mfn_frame_list.
+ */
+ for (i = 0; i < MAX_X86_64_FRAMES; i++) {
+ if (!frame_mfn[i])
+ break;
+
+ if (!readmem(MADDR_XEN, pfn_to_paddr(frame_mfn[i]),
+ &info->p2m_mfn_frame_list[i * MFNS_PER_FRAME],
+ mfns[i] * sizeof(unsigned long))) {
+ ERRMSG("Can't get p2m_mfn_frame_list.\n");
+ return FALSE;
+ }
+ if (mfns[i] != MFNS_PER_FRAME)
+ break;
+ }
+ return TRUE;
+}
+
+int
+get_versiondep_info_x86_64(void)
+{
+ /*
+ * In linux-2.6.26, MAX_PHYSMEM_BITS was changed from 40 to 44.
+ */
+ if (info->kernel_version < KERNEL_VERSION(2, 6, 26))
+ info->max_physmem_bits = _MAX_PHYSMEM_BITS_ORIG;
+ else if (info->kernel_version < KERNEL_VERSION(2, 6, 31))
+ info->max_physmem_bits = _MAX_PHYSMEM_BITS_2_6_26;
+ else
+ info->max_physmem_bits = _MAX_PHYSMEM_BITS_2_6_31;
+
+ if (info->kernel_version < KERNEL_VERSION(2, 6, 27))
+ info->page_offset = __PAGE_OFFSET_ORIG;
+ else
+ info->page_offset = __PAGE_OFFSET_2_6_27;
+
+ if (info->kernel_version < KERNEL_VERSION(2, 6, 31)) {
+ info->vmalloc_start = VMALLOC_START_ORIG;
+ info->vmalloc_end = VMALLOC_END_ORIG;
+ info->vmemmap_start = VMEMMAP_START_ORIG;
+ info->vmemmap_end = VMEMMAP_END_ORIG;
+ } else {
+ info->vmalloc_start = VMALLOC_START_2_6_31;
+ info->vmalloc_end = VMALLOC_END_2_6_31;
+ info->vmemmap_start = VMEMMAP_START_2_6_31;
+ info->vmemmap_end = VMEMMAP_END_2_6_31;
+ }
+
+ return TRUE;
+}
+
+/*
+ * Translate a virtual address to a physical address using 4-level paging.
+ */
+unsigned long long
+vtop4_x86_64(unsigned long vaddr)
+{
+ unsigned long page_dir, pml4, pgd_paddr, pgd_pte, pmd_paddr, pmd_pte;
+ unsigned long pte_paddr, pte;
+
+ if (SYMBOL(init_level4_pgt) == NOT_FOUND_SYMBOL) {
+ ERRMSG("Can't get the symbol of init_level4_pgt.\n");
+ return NOT_PADDR;
+ }
+
+ /*
+ * Get PGD.
+ */
+ page_dir = SYMBOL(init_level4_pgt);
+ page_dir += pml4_index(vaddr) * sizeof(unsigned long);
+ if (!readmem(VADDR, page_dir, &pml4, sizeof pml4)) {
+ ERRMSG("Can't get pml4 (page_dir:%lx).\n", page_dir);
+ return NOT_PADDR;
+ }
+ if (info->vaddr_for_vtop == vaddr)
+ MSG(" PGD : %16lx => %16lx\n", page_dir, pml4);
+
+ if (!(pml4 & _PAGE_PRESENT)) {
+ ERRMSG("Can't get a valid pml4.\n");
+ return NOT_PADDR;
+ }
+
+ /*
+ * Get PUD.
+ */
+ pgd_paddr = pml4 & PHYSICAL_PAGE_MASK;
+ pgd_paddr += pgd_index(vaddr) * sizeof(unsigned long);
+ if (!readmem(PADDR, pgd_paddr, &pgd_pte, sizeof pgd_pte)) {
+ ERRMSG("Can't get pgd_pte (pgd_paddr:%lx).\n", pgd_paddr);
+ return NOT_PADDR;
+ }
+ if (info->vaddr_for_vtop == vaddr)
+ MSG(" PUD : %16lx => %16lx\n", pgd_paddr, pgd_pte);
+
+ if (!(pgd_pte & _PAGE_PRESENT)) {
+ ERRMSG("Can't get a valid pgd_pte.\n");
+ return NOT_PADDR;
+ }
+
+ /*
+ * Get PMD.
+ */
+ pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK;
+ pmd_paddr += pmd_index(vaddr) * sizeof(unsigned long);
+ if (!readmem(PADDR, pmd_paddr, &pmd_pte, sizeof pmd_pte)) {
+ ERRMSG("Can't get pmd_pte (pmd_paddr:%lx).\n", pmd_paddr);
+ return NOT_PADDR;
+ }
+ if (info->vaddr_for_vtop == vaddr)
+ MSG(" PMD : %16lx => %16lx\n", pmd_paddr, pmd_pte);
+
+ if (!(pmd_pte & _PAGE_PRESENT)) {
+ ERRMSG("Can't get a valid pmd_pte.\n");
+ return NOT_PADDR;
+ }
+ if (pmd_pte & _PAGE_PSE)
+ return (PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK)
+ + (vaddr & ~_2MB_PAGE_MASK);
+
+ /*
+ * Get PTE.
+ */
+ pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK;
+ pte_paddr += pte_index(vaddr) * sizeof(unsigned long);
+ if (!readmem(PADDR, pte_paddr, &pte, sizeof pte)) {
+ ERRMSG("Can't get pte (pte_paddr:%lx).\n", pte_paddr);
+ return NOT_PADDR;
+ }
+ if (info->vaddr_for_vtop == vaddr)
+ MSG(" PTE : %16lx => %16lx\n", pte_paddr, pte);
+
+ if (!(pte & _PAGE_PRESENT)) {
+ ERRMSG("Can't get a valid pte.\n");
+ return NOT_PADDR;
+ }
+ return (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(vaddr);
+}
+
+unsigned long long
+vaddr_to_paddr_x86_64(unsigned long vaddr)
+{
+ unsigned long phys_base;
+ unsigned long long paddr;
+
+ /*
+ * Check for a relocatable kernel.
+ */
+ if (SYMBOL(phys_base) != NOT_FOUND_SYMBOL)
+ phys_base = info->phys_base;
+ else
+ phys_base = 0;
+
+ if (is_vmalloc_addr(vaddr)) {
+ if ((paddr = vtop4_x86_64(vaddr)) == NOT_PADDR) {
+ ERRMSG("Can't convert a virtual address(%lx) to " \
+ "physical address.\n", vaddr);
+ return NOT_PADDR;
+ }
+ } else if (vaddr >= __START_KERNEL_map) {
+ paddr = vaddr - __START_KERNEL_map + phys_base;
+
+ } else {
+ if (vt.mem_flags & MEMORY_XEN)
+ paddr = vaddr - PAGE_OFFSET_XEN_DOM0;
+ else
+ paddr = vaddr - PAGE_OFFSET;
+ }
+ return paddr;
+}
+
+/*
+ * for Xen extraction
+ */
+unsigned long long
+kvtop_xen_x86_64(unsigned long kvaddr)
+{
+ unsigned long long dirp, entry;
+
+ if (!is_xen_vaddr(kvaddr))
+ return NOT_PADDR;
+
+ if (is_xen_text(kvaddr))
+ return (unsigned long)kvaddr - XEN_VIRT_START + info->xen_phys_start;
+
+ if (is_direct(kvaddr))
+ return (unsigned long)kvaddr - DIRECTMAP_VIRT_START;
+
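+ /* Level 4: index the PML4 (pgd_l4). */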
+ if ((dirp = kvtop_xen_x86_64(SYMBOL(pgd_l4))) == NOT_PADDR)
+ return NOT_PADDR;
+ dirp += pml4_index(kvaddr) * sizeof(unsigned long long);
+ if (!readmem(MADDR_XEN, dirp, &entry, sizeof(entry)))
+ return NOT_PADDR;
+
+ if (!(entry & _PAGE_PRESENT))
+ return NOT_PADDR;
+
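+ /* Level 3: index the PUD (page-directory-pointer table). */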
+ dirp = entry & ENTRY_MASK;
+ dirp += pgd_index(kvaddr) * sizeof(unsigned long long);
+ if (!readmem(MADDR_XEN, dirp, &entry, sizeof(entry)))
+ return NOT_PADDR;
+
+ if (!(entry & _PAGE_PRESENT))
+ return NOT_PADDR;
+
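+ /* Level 2: index the PMD; a _PAGE_PSE entry maps a 2MB page. */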
+ dirp = entry & ENTRY_MASK;
+ dirp += pmd_index(kvaddr) * sizeof(unsigned long long);
+ if (!readmem(MADDR_XEN, dirp, &entry, sizeof(entry)))
+ return NOT_PADDR;
+
+ if (!(entry & _PAGE_PRESENT))
+ return NOT_PADDR;
+
+ if (entry & _PAGE_PSE) {
+ entry = (entry & ENTRY_MASK) + (kvaddr & ((1UL << PMD_SHIFT) - 1));
+ return entry;
+ }
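+ /* Level 1: index the PTE and add the in-page offset. */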
+ dirp = entry & ENTRY_MASK;
+ dirp += pte_index(kvaddr) * sizeof(unsigned long long);
+ if (!readmem(MADDR_XEN, dirp, &entry, sizeof(entry)))
+ return NOT_PADDR;
+
+ if (!(entry & _PAGE_PRESENT)) {
+ return NOT_PADDR;
+ }
+
+ entry = (entry & ENTRY_MASK) + (kvaddr & ((1UL << PTE_SHIFT) - 1));
+
+ return entry;
+}
+
+int get_xen_info_x86_64(void)
+{
+ unsigned long frame_table_vaddr;
+ unsigned long xen_end;
+ int i;
+
+ if (SYMBOL(pgd_l4) == NOT_FOUND_SYMBOL) {
+ ERRMSG("Can't get pml4.\n");
+ return FALSE;
+ }
+
+ if (SYMBOL(frame_table) == NOT_FOUND_SYMBOL) {
+ ERRMSG("Can't get the symbol of frame_table.\n");
+ return FALSE;
+ }
+ if (!readmem(VADDR_XEN, SYMBOL(frame_table), &frame_table_vaddr,
+ sizeof(frame_table_vaddr))) {
+ ERRMSG("Can't get the value of frame_table.\n");
+ return FALSE;
+ }
+ info->frame_table_vaddr = frame_table_vaddr;
+
+ if (SYMBOL(xenheap_phys_end) == NOT_FOUND_SYMBOL) {
+ ERRMSG("Can't get the symbol of xenheap_phys_end.\n");
+ return FALSE;
+ }
+ if (!readmem(VADDR_XEN, SYMBOL(xenheap_phys_end), &xen_end,
+ sizeof(xen_end))) {
+ ERRMSG("Can't get the value of xenheap_phys_end.\n");
+ return FALSE;
+ }
+ info->xen_heap_start = 0;
+ info->xen_heap_end = paddr_to_pfn(xen_end);
+
+ /*
+ * pickled_id == domain addr for x86_64
+ */
+ for (i = 0; i < info->num_domain; i++) {
+ info->domain_list[i].pickled_id =
+ info->domain_list[i].domain_addr;
+ }
+
+ return TRUE;
+}
+
+#endif /* x86_64 */
+