summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLouis Bouchard <louis.bouchard@canonical.com>2017-01-05 10:55:10 +0100
committerLouis Bouchard <louis.bouchard@canonical.com>2017-01-05 10:55:10 +0100
commitaba8155bbba9f6b0019c9cfdf7204c7b06bf34aa (patch)
treeb0b387e44c27effce8154ad8bc0d1b964b261a67
parent7f3a6f30a79d2a27269111624bfb06f9bc50e48f (diff)
parent0820a55bf9a0d1f6769398b686a328e5979542b5 (diff)
Merge remote-tracking branch 'upstream/master' into debian/1.6.1-1
-rw-r--r--Makefile4
-rw-r--r--README3
-rw-r--r--arch/arm64.c245
-rw-r--r--arch/ppc64.c231
-rw-r--r--arch/x86_64.c90
-rw-r--r--dwarf_info.c6
-rw-r--r--makedumpfile.84
-rw-r--r--makedumpfile.c204
-rw-r--r--makedumpfile.conf.52
-rw-r--r--makedumpfile.h155
-rw-r--r--makedumpfile.spec2
-rw-r--r--print_info.c2
-rw-r--r--sadump_info.c40
13 files changed, 631 insertions, 357 deletions
diff --git a/Makefile b/Makefile
index 7dde3e4..8b0fd24 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# makedumpfile
-VERSION=1.6.0
-DATE=9 Jun 2016
+VERSION=1.6.1
+DATE=27 Dec 2016
# Honour the environment variable CC
ifeq ($(strip $CC),)
diff --git a/README b/README
index 39fba15..820912d 100644
--- a/README
+++ b/README
@@ -113,6 +113,9 @@
4.3 | OK | ** | | | | ** | | -- | OK | OK | | |
4.4 | OK | ** | | | | ** | | -- | OK | OK | | |
4.5 | OK | ** | | | | ** | | -- | OK | OK | | |
+ 4.6 | OK | ** | | | | ** | | -- | OK | OK | | |
+ 4.7 | OK | ** | | | | ** | | -- | OK | OK | | |
+ 4.8 | OK | ** | | | | ** | | -- | OK | OK | | |
OK : Support.
-- : Not support.
diff --git a/arch/arm64.c b/arch/arm64.c
index f754026..958f57f 100644
--- a/arch/arm64.c
+++ b/arch/arm64.c
@@ -35,100 +35,85 @@ typedef struct {
pud_t pud;
} pmd_t;
-#define pud_offset(pgd, vaddr) ((pud_t *)pgd)
+typedef struct {
+ unsigned long pte;
+} pte_t;
+
+static int pgtable_level;
+static int va_bits;
+static unsigned long kimage_voffset;
+
+#define SZ_4K (4 * 1024)
+#define SZ_16K (16 * 1024)
+#define SZ_64K (64 * 1024)
+#define SZ_128M (128 * 1024 * 1024)
#define pgd_val(x) ((x).pgd)
#define pud_val(x) (pgd_val((x).pgd))
#define pmd_val(x) (pud_val((x).pud))
-
-#define PUD_SHIFT PGDIR_SHIFT
-#define PUD_SIZE (1UL << PUD_SHIFT)
-
-typedef struct {
- unsigned long pte;
-} pte_t;
#define pte_val(x) ((x).pte)
-#define PAGE_SIZE (1UL << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE - 1))
-#define PGDIR_SHIFT ((PAGE_SHIFT - 3) * ARM64_PGTABLE_LEVELS + 3)
-#define PTRS_PER_PGD (1 << (VA_BITS - PGDIR_SHIFT))
-#define PMD_SHIFT ((PAGE_SHIFT - 3) * 2 + 3)
-#define PTRS_PER_PTE (1 << (PAGE_SHIFT - 3))
-#define PMD_SHIFT ((PAGE_SHIFT - 3) * 2 + 3)
+#define PAGE_MASK (~(PAGESIZE() - 1))
+#define PGDIR_SHIFT ((PAGESHIFT() - 3) * pgtable_level + 3)
+#define PTRS_PER_PGD (1 << (va_bits - PGDIR_SHIFT))
+#define PUD_SHIFT get_pud_shift_arm64()
+#define PTRS_PER_PTE (1 << (PAGESHIFT() - 3))
+#define PTRS_PER_PUD PTRS_PER_PTE
+#define PMD_SHIFT ((PAGESHIFT() - 3) * 2 + 3)
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE - 1))
#define PTRS_PER_PMD PTRS_PER_PTE
#define PAGE_PRESENT (1 << 0)
#define SECTIONS_SIZE_BITS 30
-/*
-
-* Highest possible physical address supported.
-*/
+/* Highest possible physical address supported */
#define PHYS_MASK_SHIFT 48
#define PHYS_MASK ((1UL << PHYS_MASK_SHIFT) - 1)
/*
* Remove the highest order bits that are not a part of the
* physical address in a section
*/
-#define PMD_SECTION_MASK ((1UL << 40) - 1)
+#define PMD_SECTION_MASK ((1UL << 40) - 1)
#define PMD_TYPE_MASK 3
#define PMD_TYPE_SECT 1
#define PMD_TYPE_TABLE 3
-#define __va(paddr) ((paddr) - info->phys_base + PAGE_OFFSET)
-#define __pa(vaddr) ((vaddr) - PAGE_OFFSET + info->phys_base)
-
#define pgd_index(vaddr) (((vaddr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(pgdir, vaddr) ((pgd_t *)(pgdir) + pgd_index(vaddr))
-#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pmd_page_vaddr(pmd) (__va(pmd_val(pmd) & PHYS_MASK & (int32_t)PAGE_MASK))
-#define pte_offset(dir, vaddr) ((pte_t*)pmd_page_vaddr((*dir)) + pte_index(vaddr))
-
-
-#define pmd_offset_pgtbl_lvl_2(pud, vaddr) ((pmd_t *)pud)
+#define pte_index(vaddr) (((vaddr) >> PAGESHIFT()) & (PTRS_PER_PTE - 1))
+#define pmd_page_paddr(pmd) (pmd_val(pmd) & PHYS_MASK & (int32_t)PAGE_MASK)
+#define pte_offset(dir, vaddr) ((pte_t*)pmd_page_paddr((*dir)) + pte_index(vaddr))
#define pmd_index(vaddr) (((vaddr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-#define pud_page_vaddr(pud) (__va(pud_val(pud) & PHYS_MASK & (int32_t)PAGE_MASK))
-#define pmd_offset_pgtbl_lvl_3(pud, vaddr) ((pmd_t *)pud_page_vaddr((*pud)) + pmd_index(vaddr))
-
-/* kernel struct page size can be kernel version dependent, currently
- * keep it constant.
- */
-#define KERN_STRUCT_PAGE_SIZE get_structure_size("page", DWARF_INFO_GET_STRUCT_SIZE)
-
-#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
-#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
-#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * KERN_STRUCT_PAGE_SIZE, PUD_SIZE)
-#define MODULES_END PAGE_OFFSET
-#define MODULES_VADDR (MODULES_END - 0x4000000)
-
-static int pgtable_level;
-static int va_bits;
-static int page_shift;
+#define pud_page_paddr(pud) (pud_val(pud) & PHYS_MASK & (int32_t)PAGE_MASK)
+#define pmd_offset_pgtbl_lvl_2(pud, vaddr) ((pmd_t *)pud)
+#define pmd_offset_pgtbl_lvl_3(pud, vaddr) ((pmd_t *)pud_page_paddr((*pud)) + pmd_index(vaddr))
-int
-get_pgtable_level_arm64(void)
-{
- return pgtable_level;
-}
+#define pud_index(vaddr) (((vaddr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+#define pgd_page_paddr(pgd) (pgd_val(pgd) & PHYS_MASK & (int32_t)PAGE_MASK)
-int
-get_va_bits_arm64(void)
+static unsigned long long
+__pa(unsigned long vaddr)
{
- return va_bits;
+ if (kimage_voffset == NOT_FOUND_NUMBER ||
+ (vaddr >= PAGE_OFFSET))
+ return (vaddr - PAGE_OFFSET + info->phys_base);
+ else
+ return (vaddr - kimage_voffset);
}
-int
-get_page_shift_arm64(void)
+static int
+get_pud_shift_arm64(void)
{
- return page_shift;
+ if (pgtable_level == 4)
+ return ((PAGESHIFT() - 3) * 3 + 3);
+ else
+ return PGDIR_SHIFT;
}
-pmd_t *
+static pmd_t *
pmd_offset(pud_t *puda, pud_t *pudv, unsigned long vaddr)
{
if (pgtable_level == 2) {
@@ -138,81 +123,50 @@ pmd_offset(pud_t *puda, pud_t *pudv, unsigned long vaddr)
}
}
-#define PAGE_OFFSET_39 (0xffffffffffffffffUL << 39)
-#define PAGE_OFFSET_42 (0xffffffffffffffffUL << 42)
+static pud_t *
+pud_offset(pgd_t *pgda, pgd_t *pgdv, unsigned long vaddr)
+{
+ if (pgtable_level == 4)
+ return ((pud_t *)pgd_page_paddr((*pgdv)) + pud_index(vaddr));
+ else
+ return (pud_t *)(pgda);
+}
+
static int calculate_plat_config(void)
{
- unsigned long long stext;
-
- /* Currently we assume that there are only two possible
- * configuration supported by kernel.
- * 1) Page Table Level:2, Page Size 64K and VA Bits 42
- * 1) Page Table Level:3, Page Size 4K and VA Bits 39
- * Ideally, we should have some mechanism to decide these values
- * from kernel symbols, but we have limited symbols in vmcore,
- * and we can not do much. So until some one comes with a better
- * way, we use following.
- */
- stext = SYMBOL(_stext);
-
- /* condition for minimum VA bits must be checked first and so on */
- if ((stext & PAGE_OFFSET_39) == PAGE_OFFSET_39) {
- pgtable_level = 3;
- va_bits = 39;
- page_shift = 12;
- } else if ((stext & PAGE_OFFSET_42) == PAGE_OFFSET_42) {
+ va_bits = NUMBER(VA_BITS);
+
+ /* derive pgtable_level as per arch/arm64/Kconfig */
+ if ((PAGESIZE() == SZ_16K && va_bits == 36) ||
+ (PAGESIZE() == SZ_64K && va_bits == 42)) {
pgtable_level = 2;
- va_bits = 42;
- page_shift = 16;
+ } else if ((PAGESIZE() == SZ_64K && va_bits == 48) ||
+ (PAGESIZE() == SZ_4K && va_bits == 39) ||
+ (PAGESIZE() == SZ_16K && va_bits == 47)) {
+ pgtable_level = 3;
+ } else if ((PAGESIZE() != SZ_64K && va_bits == 48)) {
+ pgtable_level = 4;
} else {
- ERRMSG("Kernel Configuration not supported\n");
+ ERRMSG("PAGE SIZE %#lx and VA Bits %d not supported\n",
+ PAGESIZE(), va_bits);
return FALSE;
}
return TRUE;
}
-static int
-is_vtop_from_page_table_arm64(unsigned long vaddr)
+unsigned long
+get_kvbase_arm64(void)
{
- /* If virtual address lies in vmalloc, vmemmap or module space
- * region then, get the physical address from page table.
- */
- return ((vaddr >= VMALLOC_START && vaddr <= VMALLOC_END)
- || (vaddr >= VMEMMAP_START && vaddr <= VMEMMAP_END)
- || (vaddr >= MODULES_VADDR && vaddr <= MODULES_END));
+ return (0xffffffffffffffffUL << va_bits);
}
int
get_phys_base_arm64(void)
{
- unsigned long phys_base = ULONG_MAX;
- unsigned long long phys_start;
- int i;
-
- if (!calculate_plat_config()) {
- ERRMSG("Can't determine platform config values\n");
- return FALSE;
- }
-
- /*
- * We resolve phys_base from PT_LOAD segments. LMA contains physical
- * address of the segment, and we use the lowest start as
- * phys_base.
- */
- for (i = 0; get_pt_load(i, &phys_start, NULL, NULL, NULL); i++) {
- if (phys_start < phys_base)
- phys_base = phys_start;
- }
-
- if (phys_base == ULONG_MAX) {
- ERRMSG("Can't determine phys_base\n");
- return FALSE;
- }
+ info->phys_base = NUMBER(PHYS_OFFSET);
- info->phys_base = phys_base;
-
- DEBUG_MSG("phys_base : %lx\n", phys_base);
+ DEBUG_MSG("phys_base : %lx\n", info->phys_base);
return TRUE;
}
@@ -220,24 +174,20 @@ get_phys_base_arm64(void)
int
get_machdep_info_arm64(void)
{
+ if (!calculate_plat_config()) {
+ ERRMSG("Can't determine platform config values\n");
+ return FALSE;
+ }
+
+ kimage_voffset = NUMBER(kimage_voffset);
info->max_physmem_bits = PHYS_MASK_SHIFT;
info->section_size_bits = SECTIONS_SIZE_BITS;
- info->page_offset = SYMBOL(_stext)
- & (0xffffffffffffffffUL << (VA_BITS - 1));
- info->vmalloc_start = 0xffffffffffffffffUL << VA_BITS;
- info->vmalloc_end = PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - 0x10000;
- info->vmemmap_start = VMALLOC_END + 0x10000;
- info->vmemmap_end = VMEMMAP_START + VMEMMAP_SIZE;
+ info->page_offset = 0xffffffffffffffffUL << (va_bits - 1);
+ DEBUG_MSG("kimage_voffset : %lx\n", kimage_voffset);
DEBUG_MSG("max_physmem_bits : %lx\n", info->max_physmem_bits);
DEBUG_MSG("section_size_bits: %lx\n", info->section_size_bits);
DEBUG_MSG("page_offset : %lx\n", info->page_offset);
- DEBUG_MSG("vmalloc_start : %lx\n", info->vmalloc_start);
- DEBUG_MSG("vmalloc_end : %lx\n", info->vmalloc_end);
- DEBUG_MSG("vmemmap_start : %lx\n", info->vmemmap_start);
- DEBUG_MSG("vmemmap_end : %lx\n", info->vmemmap_end);
- DEBUG_MSG("modules_start : %lx\n", MODULES_VADDR);
- DEBUG_MSG("modules_end : %lx\n", MODULES_END);
return TRUE;
}
@@ -267,17 +217,18 @@ get_versiondep_info_arm64(void)
}
/*
- * vtop_arm64() - translate arbitrary virtual address to physical
+ * vaddr_to_paddr_arm64() - translate arbitrary virtual address to physical
* @vaddr: virtual address to translate
*
* Function translates @vaddr into physical address using page tables. This
* address can be any virtual address. Returns physical address of the
* corresponding virtual address or %NOT_PADDR when there is no translation.
*/
-static unsigned long long
-vtop_arm64(unsigned long vaddr)
+unsigned long long
+vaddr_to_paddr_arm64(unsigned long vaddr)
{
unsigned long long paddr = NOT_PADDR;
+ unsigned long long swapper_phys;
pgd_t *pgda, pgdv;
pud_t *puda, pudv;
pmd_t *pmda, pmdv;
@@ -288,17 +239,22 @@ vtop_arm64(unsigned long vaddr)
return NOT_PADDR;
}
- pgda = pgd_offset(SYMBOL(swapper_pg_dir), vaddr);
- if (!readmem(VADDR, (unsigned long long)pgda, &pgdv, sizeof(pgdv))) {
+ swapper_phys = __pa(SYMBOL(swapper_pg_dir));
+
+ pgda = pgd_offset(swapper_phys, vaddr);
+ if (!readmem(PADDR, (unsigned long long)pgda, &pgdv, sizeof(pgdv))) {
ERRMSG("Can't read pgd\n");
return NOT_PADDR;
}
- pudv.pgd = pgdv;
- puda = (pud_t *)pgda;
+ puda = pud_offset(pgda, &pgdv, vaddr);
+ if (!readmem(PADDR, (unsigned long long)puda, &pudv, sizeof(pudv))) {
+ ERRMSG("Can't read pud\n");
+ return NOT_PADDR;
+ }
pmda = pmd_offset(puda, &pudv, vaddr);
- if (!readmem(VADDR, (unsigned long long)pmda, &pmdv, sizeof(pmdv))) {
+ if (!readmem(PADDR, (unsigned long long)pmda, &pmdv, sizeof(pmdv))) {
ERRMSG("Can't read pmd\n");
return NOT_PADDR;
}
@@ -307,7 +263,7 @@ vtop_arm64(unsigned long vaddr)
case PMD_TYPE_TABLE:
ptea = pte_offset(&pmdv, vaddr);
/* 64k page */
- if (!readmem(VADDR, (unsigned long long)ptea, &ptev, sizeof(ptev))) {
+ if (!readmem(PADDR, (unsigned long long)ptea, &ptev, sizeof(ptev))) {
ERRMSG("Can't read pte\n");
return NOT_PADDR;
}
@@ -331,19 +287,4 @@ vtop_arm64(unsigned long vaddr)
return paddr;
}
-unsigned long long
-vaddr_to_paddr_arm64(unsigned long vaddr)
-{
- /*
- * use translation tables when a) user has explicitly requested us to
- * perform translation for a given address. b) virtual address lies in
- * vmalloc, vmemmap or modules memory region. Otherwise we assume that
- * the translation is done within the kernel direct mapped region.
- */
- if ((info->vaddr_for_vtop == vaddr) ||
- is_vtop_from_page_table_arm64(vaddr))
- return vtop_arm64(vaddr);
-
- return __pa(vaddr);
-}
#endif /* __aarch64__ */
diff --git a/arch/ppc64.c b/arch/ppc64.c
index 89a7f05..6aeab7e 100644
--- a/arch/ppc64.c
+++ b/arch/ppc64.c
@@ -23,6 +23,87 @@
#include "../print_info.h"
#include "../elf_info.h"
#include "../makedumpfile.h"
+#include <endian.h>
+
+/*
+ * Swaps a 8 byte value
+ */
+static ulong swap64(ulong val, uint swap)
+{
+ if (swap)
+ return (((val & 0x00000000000000ffULL) << 56) |
+ ((val & 0x000000000000ff00ULL) << 40) |
+ ((val & 0x0000000000ff0000ULL) << 24) |
+ ((val & 0x00000000ff000000ULL) << 8) |
+ ((val & 0x000000ff00000000ULL) >> 8) |
+ ((val & 0x0000ff0000000000ULL) >> 24) |
+ ((val & 0x00ff000000000000ULL) >> 40) |
+ ((val & 0xff00000000000000ULL) >> 56));
+ else
+ return val;
+}
+
+/*
+ * Convert physical address to kernel virtual address
+ */
+static inline ulong paddr_to_vaddr_ppc64(ulong paddr)
+{
+ return (paddr + info->kernel_start);
+}
+
+/*
+ * Convert the raw pgd entry to next pgtable address
+ */
+static inline ulong pgd_page_vaddr_l4(ulong pgd)
+{
+ ulong pgd_val;
+
+ pgd_val = (pgd & ~info->pgd_masked_bits);
+ if (info->kernel_version >= KERNEL_VERSION(4, 6, 0)) {
+ /*
+ * physical address is stored starting from kernel v4.6
+ */
+ pgd_val = paddr_to_vaddr_ppc64(pgd_val);
+ }
+
+ return pgd_val;
+}
+
+/*
+ * Convert the raw pud entry to next pgtable address
+ */
+static inline ulong pud_page_vaddr_l4(ulong pud)
+{
+ ulong pud_val;
+
+ pud_val = (pud & ~info->pud_masked_bits);
+ if (info->kernel_version >= KERNEL_VERSION(4, 6, 0)) {
+ /*
+ * physical address is stored starting from kernel v4.6
+ */
+ pud_val = paddr_to_vaddr_ppc64(pud_val);
+ }
+
+ return pud_val;
+}
+
+/*
+ * Convert the raw pmd entry to next pgtable address
+ */
+static inline ulong pmd_page_vaddr_l4(ulong pmd)
+{
+ ulong pmd_val;
+
+ pmd_val = (pmd & ~info->pmd_masked_bits);
+ if (info->kernel_version >= KERNEL_VERSION(4, 6, 0)) {
+ /*
+ * physical address is stored starting from kernel v4.6
+ */
+ pmd_val = paddr_to_vaddr_ppc64(pmd_val);
+ }
+
+ return pmd_val;
+}
/*
* This function traverses vmemmap list to get the count of vmemmap regions
@@ -156,29 +237,79 @@ ppc64_vmalloc_init(void)
/*
* 64K pagesize
*/
- if (info->kernel_version >= KERNEL_VERSION(3, 10, 0)) {
+ if (info->cur_mmu_type & RADIX_MMU) {
+ info->l1_index_size = PTE_INDEX_SIZE_RADIX_64K;
+ info->l2_index_size = PMD_INDEX_SIZE_RADIX_64K;
+ info->l3_index_size = PUD_INDEX_SIZE_RADIX_64K;
+ info->l4_index_size = PGD_INDEX_SIZE_RADIX_64K;
+
+ } else if (info->kernel_version >= KERNEL_VERSION(4, 6, 0)) {
+ info->l1_index_size = PTE_INDEX_SIZE_L4_64K_3_10;
+ info->l2_index_size = PMD_INDEX_SIZE_L4_64K_4_6;
+ info->l3_index_size = PUD_INDEX_SIZE_L4_64K_4_6;
+ info->l4_index_size = PGD_INDEX_SIZE_L4_64K_3_10;
+
+ } else if (info->kernel_version >= KERNEL_VERSION(3, 10, 0)) {
info->l1_index_size = PTE_INDEX_SIZE_L4_64K_3_10;
info->l2_index_size = PMD_INDEX_SIZE_L4_64K_3_10;
info->l3_index_size = PUD_INDEX_SIZE_L4_64K;
+ info->l4_index_size = PGD_INDEX_SIZE_L4_64K_3_10;
} else {
info->l1_index_size = PTE_INDEX_SIZE_L4_64K;
info->l2_index_size = PMD_INDEX_SIZE_L4_64K;
info->l3_index_size = PUD_INDEX_SIZE_L4_64K;
+ info->l4_index_size = PGD_INDEX_SIZE_L4_64K;
}
- info->pte_shift = SYMBOL(demote_segment_4k) ?
- PTE_SHIFT_L4_64K_V2 : PTE_SHIFT_L4_64K_V1;
- info->l2_masked_bits = PMD_MASKED_BITS_64K;
+ info->pte_rpn_shift = (SYMBOL(demote_segment_4k) ?
+ PTE_RPN_SHIFT_L4_64K_V2 : PTE_RPN_SHIFT_L4_64K_V1);
+
+ if (info->kernel_version >= KERNEL_VERSION(4, 6, 0)) {
+ info->pgd_masked_bits = PGD_MASKED_BITS_64K_4_6;
+ info->pud_masked_bits = PUD_MASKED_BITS_64K_4_6;
+ info->pmd_masked_bits = PMD_MASKED_BITS_64K_4_6;
+ } else {
+ info->pgd_masked_bits = PGD_MASKED_BITS_64K;
+ info->pud_masked_bits = PUD_MASKED_BITS_64K;
+ info->pmd_masked_bits = (info->kernel_version >= KERNEL_VERSION(3, 11, 0) ?
+ PMD_MASKED_BITS_64K_3_11 : PMD_MASKED_BITS_64K);
+ }
} else {
/*
* 4K pagesize
*/
- info->l1_index_size = PTE_INDEX_SIZE_L4_4K;
- info->l2_index_size = PMD_INDEX_SIZE_L4_4K;
- info->l3_index_size = PUD_INDEX_SIZE_L4_4K;
+ if (info->cur_mmu_type & RADIX_MMU) {
+ info->l1_index_size = PTE_INDEX_SIZE_RADIX_4K;
+ info->l2_index_size = PMD_INDEX_SIZE_RADIX_4K;
+ info->l3_index_size = PUD_INDEX_SIZE_RADIX_4K;
+ info->l4_index_size = PGD_INDEX_SIZE_RADIX_4K;
+
+ } else {
+ info->l1_index_size = PTE_INDEX_SIZE_L4_4K;
+ info->l2_index_size = PMD_INDEX_SIZE_L4_4K;
+ info->l3_index_size = (info->kernel_version >= KERNEL_VERSION(3, 7, 0) ?
+ PUD_INDEX_SIZE_L4_4K_3_7 : PUD_INDEX_SIZE_L4_4K);
+ info->l4_index_size = PGD_INDEX_SIZE_L4_4K;
+ }
- info->pte_shift = PTE_SHIFT_L4_4K;
- info->l2_masked_bits = PMD_MASKED_BITS_4K;
+ info->pte_rpn_shift = (info->kernel_version >= KERNEL_VERSION(4, 5, 0) ?
+ PTE_RPN_SHIFT_L4_4K_4_5 : PTE_RPN_SHIFT_L4_4K);
+
+ info->pgd_masked_bits = PGD_MASKED_BITS_4K;
+ info->pud_masked_bits = PUD_MASKED_BITS_4K;
+ info->pmd_masked_bits = PMD_MASKED_BITS_4K;
+ }
+
+ if (info->kernel_version >= KERNEL_VERSION(4, 7, 0)) {
+ info->pgd_masked_bits = PGD_MASKED_BITS_4_7;
+ info->pud_masked_bits = PUD_MASKED_BITS_4_7;
+ info->pmd_masked_bits = PMD_MASKED_BITS_4_7;
+ }
+
+ info->pte_rpn_mask = PTE_RPN_MASK_DEFAULT;
+ if (info->kernel_version >= KERNEL_VERSION(4, 6, 0)) {
+ info->pte_rpn_mask = PTE_RPN_MASK_L4_4_6;
+ info->pte_rpn_shift = PTE_RPN_SHIFT_L4_4_6;
}
/*
@@ -188,8 +319,8 @@ ppc64_vmalloc_init(void)
info->ptrs_per_l1 = (1 << info->l1_index_size);
info->ptrs_per_l2 = (1 << info->l2_index_size);
info->ptrs_per_l3 = (1 << info->l3_index_size);
-
- info->ptrs_per_pgd = info->ptrs_per_l3;
+ info->ptrs_per_l4 = (1 << info->l4_index_size);
+ info->ptrs_per_pgd = info->ptrs_per_l4;
/*
* Compute shifts
@@ -227,12 +358,13 @@ ppc64_vmemmap_to_phys(unsigned long vaddr)
static unsigned long long
ppc64_vtop_level4(unsigned long vaddr)
{
- ulong *level4, *level4_dir;
- ulong *page_dir, *page_middle;
- ulong *page_table;
- unsigned long long level4_pte, pgd_pte;
+ ulong *level4;
+ ulong *pgdir, *page_upper;
+ ulong *page_middle, *page_table;
+ unsigned long long pgd_pte, pud_pte;
unsigned long long pmd_pte, pte;
unsigned long long paddr = NOT_PADDR;
+ uint swap = 0;
if (info->page_buf == NULL) {
/*
@@ -246,48 +378,61 @@ ppc64_vtop_level4(unsigned long vaddr)
}
}
+ if (info->kernel_version >= KERNEL_VERSION(4, 7, 0)) {
+ /*
+ * Starting with kernel v4.7, page table entries are always
+ * big endian on server processors. Set this flag if
+ * kernel is not big endian.
+ */
+ if (__BYTE_ORDER == __LITTLE_ENDIAN)
+ swap = 1;
+ }
+
level4 = (ulong *)info->kernel_pgd;
- level4_dir = (ulong *)((ulong *)level4 + L4_OFFSET(vaddr));
+ pgdir = (ulong *)((ulong *)level4 + PGD_OFFSET_L4(vaddr));
if (!readmem(VADDR, PAGEBASE(level4), info->page_buf, PAGESIZE())) {
- ERRMSG("Can't read level4 page: 0x%llx\n", PAGEBASE(level4));
+ ERRMSG("Can't read PGD page: 0x%llx\n", PAGEBASE(level4));
return NOT_PADDR;
}
- level4_pte = ULONG((info->page_buf + PAGEOFFSET(level4_dir)));
- if (!level4_pte)
+ pgd_pte = swap64(ULONG((info->page_buf + PAGEOFFSET(pgdir))), swap);
+ if (!pgd_pte)
return NOT_PADDR;
/*
* Sometimes we don't have level3 pagetable entries
*/
if (info->l3_index_size != 0) {
- page_dir = (ulong *)((ulong *)level4_pte + PGD_OFFSET_L4(vaddr));
- if (!readmem(VADDR, PAGEBASE(level4_pte), info->page_buf, PAGESIZE())) {
- ERRMSG("Can't read PGD page: 0x%llx\n", PAGEBASE(level4_pte));
+ pgd_pte = pgd_page_vaddr_l4(pgd_pte);
+ page_upper = (ulong *)((ulong *)pgd_pte + PUD_OFFSET_L4(vaddr));
+ if (!readmem(VADDR, PAGEBASE(pgd_pte), info->page_buf, PAGESIZE())) {
+ ERRMSG("Can't read PUD page: 0x%llx\n", PAGEBASE(pgd_pte));
return NOT_PADDR;
}
- pgd_pte = ULONG((info->page_buf + PAGEOFFSET(page_dir)));
- if (!pgd_pte)
+ pud_pte = swap64(ULONG((info->page_buf + PAGEOFFSET(page_upper))), swap);
+ if (!pud_pte)
return NOT_PADDR;
} else {
- pgd_pte = level4_pte;
+ pud_pte = pgd_pte;
}
- page_middle = (ulong *)((ulong *)pgd_pte + PMD_OFFSET_L4(vaddr));
- if (!readmem(VADDR, PAGEBASE(pgd_pte), info->page_buf, PAGESIZE())) {
- ERRMSG("Can't read PMD page: 0x%llx\n", PAGEBASE(pgd_pte));
+ pud_pte = pud_page_vaddr_l4(pud_pte);
+ page_middle = (ulong *)((ulong *)pud_pte + PMD_OFFSET_L4(vaddr));
+ if (!readmem(VADDR, PAGEBASE(pud_pte), info->page_buf, PAGESIZE())) {
+ ERRMSG("Can't read PMD page: 0x%llx\n", PAGEBASE(pud_pte));
return NOT_PADDR;
}
- pmd_pte = ULONG((info->page_buf + PAGEOFFSET(page_middle)));
+ pmd_pte = swap64(ULONG((info->page_buf + PAGEOFFSET(page_middle))), swap);
if (!(pmd_pte))
return NOT_PADDR;
- page_table = (ulong *)(pmd_pte & ~(info->l2_masked_bits))
+ pmd_pte = pmd_page_vaddr_l4(pmd_pte);
+ page_table = (ulong *)(pmd_pte)
+ (BTOP(vaddr) & (info->ptrs_per_l1 - 1));
if (!readmem(VADDR, PAGEBASE(pmd_pte), info->page_buf, PAGESIZE())) {
ERRMSG("Can't read page table: 0x%llx\n", PAGEBASE(pmd_pte));
return NOT_PADDR;
}
- pte = ULONG((info->page_buf + PAGEOFFSET(page_table)));
+ pte = swap64(ULONG((info->page_buf + PAGEOFFSET(page_table))), swap);
if (!(pte & _PAGE_PRESENT)) {
ERRMSG("Page not present!\n");
return NOT_PADDR;
@@ -296,7 +441,8 @@ ppc64_vtop_level4(unsigned long vaddr)
if (!pte)
return NOT_PADDR;
- paddr = PAGEBASE(PTOB(pte >> info->pte_shift)) + PAGEOFFSET(vaddr);
+ paddr = PAGEBASE(PTOB((pte & info->pte_rpn_mask) >> info->pte_rpn_shift))
+ + PAGEOFFSET(vaddr);
return paddr;
}
@@ -405,6 +551,27 @@ get_machdep_info_ppc64(void)
int
get_versiondep_info_ppc64()
{
+ unsigned long cur_cpu_spec;
+ uint mmu_features;
+
+ /*
+ * On PowerISA 3.0 based server processors, a kernel can run with
+ * radix MMU or standard MMU. Get the current MMU type.
+ */
+ info->cur_mmu_type = STD_MMU;
+ if ((SYMBOL(cur_cpu_spec) != NOT_FOUND_SYMBOL)
+ && (OFFSET(cpu_spec.mmu_features) != NOT_FOUND_STRUCTURE)) {
+ if (readmem(VADDR, SYMBOL(cur_cpu_spec), &cur_cpu_spec,
+ sizeof(cur_cpu_spec))) {
+ if (readmem(VADDR, cur_cpu_spec + OFFSET(cpu_spec.mmu_features),
+ &mmu_features, sizeof(mmu_features)))
+ info->cur_mmu_type = mmu_features & RADIX_MMU;
+ }
+ }
+
+ /*
+ * Initialize Linux page table info
+ */
if (ppc64_vmalloc_init() == FALSE) {
ERRMSG("Can't initialize for vmalloc translation\n");
return FALSE;
diff --git a/arch/x86_64.c b/arch/x86_64.c
index ddf7be6..893cd51 100644
--- a/arch/x86_64.c
+++ b/arch/x86_64.c
@@ -21,17 +21,6 @@
extern struct vmap_pfns *gvmem_pfns;
extern int nr_gvmem_pfns;
-int
-is_vmalloc_addr_x86_64(ulong vaddr)
-{
- /*
- * vmalloc, virtual memmap, and module space as VMALLOC space.
- */
- return ((vaddr >= VMALLOC_START && vaddr <= VMALLOC_END)
- || (vaddr >= VMEMMAP_START && vaddr <= VMEMMAP_END)
- || (vaddr >= MODULES_VADDR && vaddr <= MODULES_END));
-}
-
static unsigned long
get_xen_p2m_mfn(void)
{
@@ -44,6 +33,24 @@ get_xen_p2m_mfn(void)
return NOT_FOUND_LONG_VALUE;
}
+static int
+get_page_offset_x86_64(void)
+{
+ int i;
+ unsigned long long phys_start;
+ unsigned long long virt_start;
+
+ for (i = 0; get_pt_load(i, &phys_start, NULL, &virt_start, NULL); i++) {
+ if (virt_start < __START_KERNEL_map) {
+ info->page_offset = virt_start - phys_start;
+ return TRUE;
+ }
+ }
+
+ ERRMSG("Can't get any pt_load to calculate page offset.\n");
+ return FALSE;
+}
+
int
get_phys_base_x86_64(void)
{
@@ -55,10 +62,21 @@ get_phys_base_x86_64(void)
* Get the relocatable offset
*/
info->phys_base = 0; /* default/traditional */
+ if (NUMBER(phys_base) != NOT_FOUND_NUMBER) {
+ info->phys_base = NUMBER(phys_base);
+ return TRUE;
+ }
+
+ /* linux-2.6.21 or older don't have phys_base, should be set to 0. */
+ if (!has_vmcoreinfo()) {
+ SYMBOL_INIT(phys_base, "phys_base");
+ if (SYMBOL(phys_base) == NOT_FOUND_SYMBOL) {
+ return TRUE;
+ }
+ }
for (i = 0; get_pt_load(i, &phys_start, NULL, &virt_start, NULL); i++) {
- if ((virt_start >= __START_KERNEL_map) &&
- !(is_vmalloc_addr_x86_64(virt_start))) {
+ if (virt_start >= __START_KERNEL_map) {
info->phys_base = phys_start -
(virt_start & ~(__START_KERNEL_map));
@@ -159,19 +177,13 @@ get_versiondep_info_x86_64(void)
else
info->max_physmem_bits = _MAX_PHYSMEM_BITS_2_6_31;
- if (info->kernel_version < KERNEL_VERSION(2, 6, 27))
- info->page_offset = __PAGE_OFFSET_ORIG;
- else
- info->page_offset = __PAGE_OFFSET_2_6_27;
+ if (!get_page_offset_x86_64())
+ return FALSE;
if (info->kernel_version < KERNEL_VERSION(2, 6, 31)) {
- info->vmalloc_start = VMALLOC_START_ORIG;
- info->vmalloc_end = VMALLOC_END_ORIG;
info->vmemmap_start = VMEMMAP_START_ORIG;
info->vmemmap_end = VMEMMAP_END_ORIG;
} else {
- info->vmalloc_start = VMALLOC_START_2_6_31;
- info->vmalloc_end = VMALLOC_END_2_6_31;
info->vmemmap_start = VMEMMAP_START_2_6_31;
info->vmemmap_end = VMEMMAP_END_2_6_31;
}
@@ -196,9 +208,9 @@ vtop4_x86_64(unsigned long vaddr)
/*
* Get PGD.
*/
- page_dir = SYMBOL(init_level4_pgt);
+ page_dir = SYMBOL(init_level4_pgt) - __START_KERNEL_map + info->phys_base;
page_dir += pml4_index(vaddr) * sizeof(unsigned long);
- if (!readmem(VADDR, page_dir, &pml4, sizeof pml4)) {
+ if (!readmem(PADDR, page_dir, &pml4, sizeof pml4)) {
ERRMSG("Can't get pml4 (page_dir:%lx).\n", page_dir);
return NOT_PADDR;
}
@@ -269,38 +281,6 @@ vtop4_x86_64(unsigned long vaddr)
return (pte & ENTRY_MASK) + PAGEOFFSET(vaddr);
}
-unsigned long long
-vaddr_to_paddr_x86_64(unsigned long vaddr)
-{
- unsigned long phys_base;
- unsigned long long paddr;
-
- /*
- * Check the relocatable kernel.
- */
- if (SYMBOL(phys_base) != NOT_FOUND_SYMBOL)
- phys_base = info->phys_base;
- else
- phys_base = 0;
-
- if (is_vmalloc_addr_x86_64(vaddr)) {
- if ((paddr = vtop4_x86_64(vaddr)) == NOT_PADDR) {
- ERRMSG("Can't convert a virtual address(%lx) to " \
- "physical address.\n", vaddr);
- return NOT_PADDR;
- }
- } else if (vaddr >= __START_KERNEL_map) {
- paddr = vaddr - __START_KERNEL_map + phys_base;
-
- } else {
- if (is_xen_memory())
- paddr = vaddr - PAGE_OFFSET_XEN_DOM0;
- else
- paddr = vaddr - PAGE_OFFSET;
- }
- return paddr;
-}
-
/*
* for Xen extraction
*/
diff --git a/dwarf_info.c b/dwarf_info.c
index 8c491d3..4f9ad12 100644
--- a/dwarf_info.c
+++ b/dwarf_info.c
@@ -53,7 +53,9 @@ struct dwarf_info {
char src_name[LEN_SRCFILE]; /* OUT */
Dwarf_Off die_offset; /* OUT */
};
-static struct dwarf_info dwarf_info;
+static struct dwarf_info dwarf_info = {
+ .fd_debuginfo = -1,
+};
/*
@@ -1595,7 +1597,7 @@ set_dwarf_debuginfo(char *mod_name, char *os_release,
if (dwarf_info.module_name
&& strcmp(dwarf_info.module_name, "vmlinux")
&& strcmp(dwarf_info.module_name, "xen-syms")) {
- if (dwarf_info.fd_debuginfo > 0)
+ if (dwarf_info.fd_debuginfo >= 0)
close(dwarf_info.fd_debuginfo);
if (dwarf_info.name_debuginfo)
free(dwarf_info.name_debuginfo);
diff --git a/makedumpfile.8 b/makedumpfile.8
index 3b6f7e2..9069fb1 100644
--- a/makedumpfile.8
+++ b/makedumpfile.8
@@ -1,4 +1,4 @@
-.TH MAKEDUMPFILE 8 "9 Jun 2016" "makedumpfile v1.6.0" "Linux System Administrator's Manual"
+.TH MAKEDUMPFILE 8 "27 Dec 2016" "makedumpfile v1.6.1" "Linux System Administrator's Manual"
.SH NAME
makedumpfile \- make a small dumpfile of kdump
.SH SYNOPSIS
@@ -376,6 +376,8 @@ the kdump\-compressed format.
\fB\-\-num\-threads\fR \fITHREADNUM\fR
Using multiple threads to read and compress data of each page in parallel.
And it will reduces time for saving \fIDUMPFILE\fR.
+Note that if the number of usable CPUs is less than the number of threads, it
+may lead to significant performance degradation.
This feature only supports creating \fIDUMPFILE\fR in kdump\-comressed
format from \fIVMCORE\fR in kdump\-compressed format or elf format.
.br
diff --git a/makedumpfile.c b/makedumpfile.c
index 853b999..e69b6df 100644
--- a/makedumpfile.c
+++ b/makedumpfile.c
@@ -1365,7 +1365,7 @@ open_dump_bitmap(void)
/* Unnecessary to open */
if (!info->working_dir && !info->flag_reassemble && !info->flag_refiltering
- && !info->flag_sadump && !info->flag_mem_usage)
+ && !info->flag_sadump && !info->flag_mem_usage && info->flag_cyclic)
return TRUE;
tmpname = getenv("TMPDIR");
@@ -1480,9 +1480,6 @@ open_files_for_creating_dumpfile(void)
if (!open_dump_memory())
return FALSE;
- if (!open_dump_bitmap())
- return FALSE;
-
return TRUE;
}
@@ -1510,7 +1507,6 @@ get_symbol_info(void)
SYMBOL_INIT(init_level4_pgt, "init_level4_pgt");
SYMBOL_INIT(vmlist, "vmlist");
SYMBOL_INIT(vmap_area_list, "vmap_area_list");
- SYMBOL_INIT(phys_base, "phys_base");
SYMBOL_INIT(node_online_map, "node_online_map");
SYMBOL_INIT(node_states, "node_states");
SYMBOL_INIT(node_memblk, "node_memblk");
@@ -1567,6 +1563,7 @@ get_symbol_info(void)
SYMBOL_INIT(cpu_pgd, "cpu_pgd");
SYMBOL_INIT(demote_segment_4k, "demote_segment_4k");
+ SYMBOL_INIT(cur_cpu_spec, "cur_cpu_spec");
return TRUE;
}
@@ -1579,7 +1576,14 @@ get_structure_info(void)
*/
SIZE_INIT(page, "page");
OFFSET_INIT(page.flags, "page", "flags");
- OFFSET_INIT(page._count, "page", "_count");
+ OFFSET_INIT(page._refcount, "page", "_refcount");
+ if (OFFSET(page._refcount) == NOT_FOUND_STRUCTURE) {
+ info->flag_use_count = TRUE;
+ OFFSET_INIT(page._refcount, "page", "_count");
+ } else {
+ info->flag_use_count = FALSE;
+ }
+
OFFSET_INIT(page.mapping, "page", "mapping");
OFFSET_INIT(page._mapcount, "page", "_mapcount");
OFFSET_INIT(page.private, "page", "private");
@@ -1689,7 +1693,25 @@ get_structure_info(void)
OFFSET(module.core_size) += core_layout;
}
OFFSET_INIT(module.module_init, "module", "module_init");
+ if (OFFSET(module.module_init) == NOT_FOUND_STRUCTURE) {
+ /* for kernel version 4.5 and above */
+ long init_layout;
+
+ OFFSET_INIT(module.module_init, "module", "init_layout");
+ init_layout = OFFSET(module.module_init);
+ OFFSET_INIT(module.module_init, "module_layout", "base");
+ OFFSET(module.module_init) += init_layout;
+ }
OFFSET_INIT(module.init_size, "module", "init_size");
+ if (OFFSET(module.init_size) == NOT_FOUND_STRUCTURE) {
+ /* for kernel version 4.5 and above */
+ long init_layout;
+
+ OFFSET_INIT(module.init_size, "module", "init_layout");
+ init_layout = OFFSET(module.init_size);
+ OFFSET_INIT(module.init_size, "module_layout", "size");
+ OFFSET(module.init_size) += init_layout;
+ }
ENUM_NUMBER_INIT(NR_FREE_PAGES, "NR_FREE_PAGES");
ENUM_NUMBER_INIT(N_ONLINE, "N_ONLINE");
@@ -1924,6 +1946,12 @@ get_structure_info(void)
SIZE_INIT(mmu_psize_def, "mmu_psize_def");
OFFSET_INIT(mmu_psize_def.shift, "mmu_psize_def", "shift");
+ /*
+ * Get offsets of the cpu_spec's members.
+ */
+ SIZE_INIT(cpu_spec, "cpu_spec");
+ OFFSET_INIT(cpu_spec.mmu_features, "cpu_spec", "mmu_features");
+
return TRUE;
}
@@ -1966,14 +1994,6 @@ get_value_for_old_linux(void)
NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE) =
PAGE_BUDDY_MAPCOUNT_VALUE_v2_6_39_to_latest_version;
}
-#ifdef __x86_64__
- if (NUMBER(KERNEL_IMAGE_SIZE) == NOT_FOUND_NUMBER) {
- if (info->kernel_version < KERNEL_VERSION(2, 6, 26))
- NUMBER(KERNEL_IMAGE_SIZE) = KERNEL_IMAGE_SIZE_ORIG;
- else
- NUMBER(KERNEL_IMAGE_SIZE) = KERNEL_IMAGE_SIZE_2_6_26;
- }
-#endif
if (SIZE(pageflags) == NOT_FOUND_STRUCTURE) {
if (info->kernel_version >= KERNEL_VERSION(2, 6, 27))
SIZE(pageflags) =
@@ -2044,7 +2064,7 @@ get_mem_type(void)
if ((SIZE(page) == NOT_FOUND_STRUCTURE)
|| (OFFSET(page.flags) == NOT_FOUND_STRUCTURE)
- || (OFFSET(page._count) == NOT_FOUND_STRUCTURE)
+ || (OFFSET(page._refcount) == NOT_FOUND_STRUCTURE)
|| (OFFSET(page.mapping) == NOT_FOUND_STRUCTURE)) {
ret = NOT_FOUND_MEMTYPE;
} else if ((((SYMBOL(node_data) != NOT_FOUND_SYMBOL)
@@ -2105,7 +2125,6 @@ write_vmcoreinfo_data(void)
WRITE_SYMBOL("init_level4_pgt", init_level4_pgt);
WRITE_SYMBOL("vmlist", vmlist);
WRITE_SYMBOL("vmap_area_list", vmap_area_list);
- WRITE_SYMBOL("phys_base", phys_base);
WRITE_SYMBOL("node_online_map", node_online_map);
WRITE_SYMBOL("node_states", node_states);
WRITE_SYMBOL("node_data", node_data);
@@ -2126,6 +2145,7 @@ write_vmcoreinfo_data(void)
WRITE_SYMBOL("mmu_vmemmap_psize", mmu_vmemmap_psize);
WRITE_SYMBOL("cpu_pgd", cpu_pgd);
WRITE_SYMBOL("demote_segment_4k", demote_segment_4k);
+ WRITE_SYMBOL("cur_cpu_spec", cur_cpu_spec);
WRITE_SYMBOL("free_huge_page", free_huge_page);
/*
@@ -2151,7 +2171,10 @@ write_vmcoreinfo_data(void)
* write the member offset of 1st kernel
*/
WRITE_MEMBER_OFFSET("page.flags", page.flags);
- WRITE_MEMBER_OFFSET("page._count", page._count);
+ if (info->flag_use_count)
+ WRITE_MEMBER_OFFSET("page._count", page._refcount);
+ else
+ WRITE_MEMBER_OFFSET("page._refcount", page._refcount);
WRITE_MEMBER_OFFSET("page.mapping", page.mapping);
WRITE_MEMBER_OFFSET("page.lru", page.lru);
WRITE_MEMBER_OFFSET("page._mapcount", page._mapcount);
@@ -2198,6 +2221,7 @@ write_vmcoreinfo_data(void)
vmemmap_backing.virt_addr);
WRITE_MEMBER_OFFSET("vmemmap_backing.list", vmemmap_backing.list);
WRITE_MEMBER_OFFSET("mmu_psize_def.shift", mmu_psize_def.shift);
+ WRITE_MEMBER_OFFSET("cpu_spec.mmu_features", cpu_spec.mmu_features);
if (SYMBOL(node_data) != NOT_FOUND_SYMBOL)
WRITE_ARRAY_LENGTH("node_data", node_data);
@@ -2226,9 +2250,14 @@ write_vmcoreinfo_data(void)
WRITE_NUMBER("PG_hwpoison", PG_hwpoison);
WRITE_NUMBER("PAGE_BUDDY_MAPCOUNT_VALUE", PAGE_BUDDY_MAPCOUNT_VALUE);
- WRITE_NUMBER("KERNEL_IMAGE_SIZE", KERNEL_IMAGE_SIZE);
+ WRITE_NUMBER("phys_base", phys_base);
WRITE_NUMBER("HUGETLB_PAGE_DTOR", HUGETLB_PAGE_DTOR);
+#ifdef __aarch64__
+ WRITE_NUMBER("VA_BITS", VA_BITS);
+ WRITE_NUMBER_UNSIGNED("PHYS_OFFSET", PHYS_OFFSET);
+ WRITE_NUMBER_UNSIGNED("kimage_voffset", kimage_voffset);
+#endif
/*
* write the source file of 1st kernel
@@ -2378,6 +2407,40 @@ read_vmcoreinfo_symbol(char *str_symbol)
return symbol;
}
+unsigned long
+read_vmcoreinfo_ulong(char *str_structure)
+{
+ long data = NOT_FOUND_LONG_VALUE;
+ char buf[BUFSIZE_FGETS], *endp;
+ unsigned int i;
+
+ if (fseek(info->file_vmcoreinfo, 0, SEEK_SET) < 0) {
+ ERRMSG("Can't seek the vmcoreinfo file(%s). %s\n",
+ info->name_vmcoreinfo, strerror(errno));
+ return INVALID_STRUCTURE_DATA;
+ }
+
+ while (fgets(buf, BUFSIZE_FGETS, info->file_vmcoreinfo)) {
+ i = strlen(buf);
+ if (!i)
+ break;
+ if (buf[i - 1] == '\n')
+ buf[i - 1] = '\0';
+ if (strncmp(buf, str_structure, strlen(str_structure)) == 0) {
+ data = strtoul(buf + strlen(str_structure), &endp, 10);
+ if (strlen(endp) != 0)
+ data = strtoul(buf + strlen(str_structure), &endp, 16);
+ if ((data == LONG_MAX) || strlen(endp) != 0) {
+ ERRMSG("Invalid data in %s: %s",
+ info->name_vmcoreinfo, buf);
+ return INVALID_STRUCTURE_DATA;
+ }
+ break;
+ }
+ }
+ return data;
+}
+
long
read_vmcoreinfo_long(char *str_structure)
{
@@ -2399,6 +2462,8 @@ read_vmcoreinfo_long(char *str_structure)
buf[i - 1] = '\0';
if (strncmp(buf, str_structure, strlen(str_structure)) == 0) {
data = strtol(buf + strlen(str_structure), &endp, 10);
+ if (strlen(endp) != 0)
+ data = strtol(buf + strlen(str_structure), &endp, 16);
if ((data == LONG_MAX) || strlen(endp) != 0) {
ERRMSG("Invalid data in %s: %s",
info->name_vmcoreinfo, buf);
@@ -2454,7 +2519,6 @@ read_vmcoreinfo(void)
READ_SYMBOL("init_level4_pgt", init_level4_pgt);
READ_SYMBOL("vmlist", vmlist);
READ_SYMBOL("vmap_area_list", vmap_area_list);
- READ_SYMBOL("phys_base", phys_base);
READ_SYMBOL("node_online_map", node_online_map);
READ_SYMBOL("node_states", node_states);
READ_SYMBOL("node_data", node_data);
@@ -2475,6 +2539,7 @@ read_vmcoreinfo(void)
READ_SYMBOL("mmu_vmemmap_psize", mmu_vmemmap_psize);
READ_SYMBOL("cpu_pgd", cpu_pgd);
READ_SYMBOL("demote_segment_4k", demote_segment_4k);
+ READ_SYMBOL("cur_cpu_spec", cur_cpu_spec);
READ_SYMBOL("free_huge_page", free_huge_page);
READ_STRUCTURE_SIZE("page", page);
@@ -2491,7 +2556,13 @@ read_vmcoreinfo(void)
READ_MEMBER_OFFSET("page.flags", page.flags);
- READ_MEMBER_OFFSET("page._count", page._count);
+ READ_MEMBER_OFFSET("page._refcount", page._refcount);
+ if (OFFSET(page._refcount) == NOT_FOUND_STRUCTURE) {
+ info->flag_use_count = TRUE;
+ READ_MEMBER_OFFSET("page._count", page._refcount);
+ } else {
+ info->flag_use_count = FALSE;
+ }
READ_MEMBER_OFFSET("page.mapping", page.mapping);
READ_MEMBER_OFFSET("page.lru", page.lru);
READ_MEMBER_OFFSET("page._mapcount", page._mapcount);
@@ -2527,6 +2598,7 @@ read_vmcoreinfo(void)
vmemmap_backing.virt_addr);
READ_MEMBER_OFFSET("vmemmap_backing.list", vmemmap_backing.list);
READ_MEMBER_OFFSET("mmu_psize_def.shift", mmu_psize_def.shift);
+ READ_MEMBER_OFFSET("cpu_spec.mmu_features", cpu_spec.mmu_features);
READ_STRUCTURE_SIZE("printk_log", printk_log);
if (SIZE(printk_log) != NOT_FOUND_STRUCTURE) {
@@ -2566,7 +2638,12 @@ read_vmcoreinfo(void)
READ_SRCFILE("pud_t", pud_t);
READ_NUMBER("PAGE_BUDDY_MAPCOUNT_VALUE", PAGE_BUDDY_MAPCOUNT_VALUE);
- READ_NUMBER("KERNEL_IMAGE_SIZE", KERNEL_IMAGE_SIZE);
+ READ_NUMBER("phys_base", phys_base);
+#ifdef __aarch64__
+ READ_NUMBER("VA_BITS", VA_BITS);
+ READ_NUMBER_UNSIGNED("PHYS_OFFSET", PHYS_OFFSET);
+ READ_NUMBER_UNSIGNED("kimage_voffset", kimage_voffset);
+#endif
READ_NUMBER("HUGETLB_PAGE_DTOR", HUGETLB_PAGE_DTOR);
@@ -3696,10 +3773,10 @@ free_for_parallel()
return;
for (i = 0; i < info->num_threads; i++) {
- if (FD_MEMORY_PARALLEL(i) > 0)
+ if (FD_MEMORY_PARALLEL(i) >= 0)
close(FD_MEMORY_PARALLEL(i));
- if (FD_BITMAP_MEMORY_PARALLEL(i) > 0)
+ if (FD_BITMAP_MEMORY_PARALLEL(i) >= 0)
close(FD_BITMAP_MEMORY_PARALLEL(i));
}
}
@@ -4004,13 +4081,13 @@ out:
void
initialize_bitmap(struct dump_bitmap *bitmap)
{
- if (info->fd_bitmap) {
+ if (info->fd_bitmap >= 0) {
bitmap->fd = info->fd_bitmap;
bitmap->file_name = info->name_bitmap;
bitmap->no_block = -1;
memset(bitmap->buf, 0, BUFSIZE_BITMAP);
} else {
- bitmap->fd = 0;
+ bitmap->fd = -1;
bitmap->file_name = NULL;
bitmap->no_block = -1;
memset(bitmap->buf, 0, info->bufsize_cyclic);
@@ -4120,7 +4197,7 @@ set_bitmap_buffer(struct dump_bitmap *bitmap, mdf_pfn_t pfn, int val, struct cyc
int
set_bitmap(struct dump_bitmap *bitmap, mdf_pfn_t pfn, int val, struct cycle *cycle)
{
- if (bitmap->fd) {
+ if (bitmap->fd >= 0) {
return set_bitmap_file(bitmap, pfn, val);
} else {
return set_bitmap_buffer(bitmap, pfn, val, cycle);
@@ -4136,7 +4213,7 @@ sync_bitmap(struct dump_bitmap *bitmap)
/*
* The bitmap doesn't have the fd, it's a on-memory bitmap.
*/
- if (bitmap->fd == 0)
+ if (bitmap->fd < 0)
return TRUE;
/*
* The bitmap buffer is not dirty, and it is not necessary
@@ -5369,7 +5446,7 @@ create_1st_bitmap_buffer(struct cycle *cycle)
int
create_1st_bitmap(struct cycle *cycle)
{
- if (info->bitmap1->fd) {
+ if (info->bitmap1->fd >= 0) {
return create_1st_bitmap_file();
} else {
return create_1st_bitmap_buffer(cycle);
@@ -5380,7 +5457,7 @@ static inline int
is_in_segs(unsigned long long paddr)
{
if (info->flag_refiltering || info->flag_sadump) {
- if (info->bitmap1->fd == 0) {
+ if (info->bitmap1->fd < 0) {
initialize_1st_bitmap(info->bitmap1);
create_1st_bitmap_file();
}
@@ -5615,7 +5692,7 @@ __exclude_unnecessary_pages(unsigned long mem_map,
pcache = page_cache + (index_pg * SIZE(page));
flags = ULONG(pcache + OFFSET(page.flags));
- _count = UINT(pcache + OFFSET(page._count));
+ _count = UINT(pcache + OFFSET(page._refcount));
mapping = ULONG(pcache + OFFSET(page.mapping));
if (OFFSET(page.compound_order) != NOT_FOUND_STRUCTURE) {
@@ -5838,7 +5915,7 @@ copy_bitmap_file(void)
int
copy_bitmap(void)
{
- if (info->fd_bitmap) {
+ if (info->fd_bitmap >= 0) {
return copy_bitmap_file();
} else {
return copy_bitmap_buffer();
@@ -6279,7 +6356,7 @@ prepare_bitmap1_buffer(void)
return FALSE;
}
- if (info->fd_bitmap) {
+ if (info->fd_bitmap >= 0) {
if ((info->bitmap1->buf = (char *)malloc(BUFSIZE_BITMAP)) == NULL) {
ERRMSG("Can't allocate memory for the 1st bitmaps's buffer. %s\n",
strerror(errno));
@@ -6318,7 +6395,7 @@ prepare_bitmap2_buffer(void)
strerror(errno));
return FALSE;
}
- if (info->fd_bitmap) {
+ if (info->fd_bitmap >= 0) {
if ((info->bitmap2->buf = (char *)malloc(BUFSIZE_BITMAP)) == NULL) {
ERRMSG("Can't allocate memory for the 2nd bitmaps's buffer. %s\n",
strerror(errno));
@@ -7548,7 +7625,7 @@ kdump_thread_function_cyclic(void *arg) {
fd_memory = FD_MEMORY_PARALLEL(kdump_thread_args->thread_num);
- if (info->fd_bitmap) {
+ if (info->fd_bitmap >= 0) {
bitmap_parallel.buf = malloc(BUFSIZE_BITMAP);
if (bitmap_parallel.buf == NULL){
ERRMSG("Can't allocate memory for bitmap_parallel.buf. %s\n",
@@ -7594,7 +7671,7 @@ kdump_thread_function_cyclic(void *arg) {
pthread_mutex_lock(&info->current_pfn_mutex);
for (pfn = info->current_pfn; pfn < cycle->end_pfn; pfn++) {
dumpable = is_dumpable(
- info->fd_bitmap ? &bitmap_parallel : info->bitmap2,
+ info->fd_bitmap >= 0 ? &bitmap_parallel : info->bitmap2,
pfn,
cycle);
if (dumpable)
@@ -7689,7 +7766,7 @@ next:
retval = NULL;
fail:
- if (bitmap_memory_parallel.fd > 0)
+ if (bitmap_memory_parallel.fd >= 0)
close(bitmap_memory_parallel.fd);
if (bitmap_parallel.buf != NULL)
free(bitmap_parallel.buf);
@@ -8427,7 +8504,7 @@ out:
int
write_kdump_bitmap1(struct cycle *cycle) {
- if (info->bitmap1->fd) {
+ if (info->bitmap1->fd >= 0) {
return write_kdump_bitmap1_file();
} else {
return write_kdump_bitmap1_buffer(cycle);
@@ -8436,7 +8513,7 @@ write_kdump_bitmap1(struct cycle *cycle) {
int
write_kdump_bitmap2(struct cycle *cycle) {
- if (info->bitmap2->fd) {
+ if (info->bitmap2->fd >= 0) {
return write_kdump_bitmap2_file();
} else {
return write_kdump_bitmap2_buffer(cycle);
@@ -8563,9 +8640,10 @@ close_vmcoreinfo(void)
void
close_dump_memory(void)
{
- if ((info->fd_memory = close(info->fd_memory)) < 0)
+ if (close(info->fd_memory) < 0)
ERRMSG("Can't close the dump memory(%s). %s\n",
info->name_memory, strerror(errno));
+ info->fd_memory = -1;
}
void
@@ -8574,21 +8652,22 @@ close_dump_file(void)
if (info->flag_flatten)
return;
- if ((info->fd_dumpfile = close(info->fd_dumpfile)) < 0)
+ if (close(info->fd_dumpfile) < 0)
ERRMSG("Can't close the dump file(%s). %s\n",
info->name_dumpfile, strerror(errno));
+ info->fd_dumpfile = -1;
}
void
close_dump_bitmap(void)
{
- if (!info->working_dir && !info->flag_reassemble && !info->flag_refiltering
- && !info->flag_sadump && !info->flag_mem_usage)
+ if (info->fd_bitmap < 0)
return;
- if ((info->fd_bitmap = close(info->fd_bitmap)) < 0)
+ if (close(info->fd_bitmap) < 0)
ERRMSG("Can't close the bitmap file(%s). %s\n",
info->name_bitmap, strerror(errno));
+ info->fd_bitmap = -1;
free(info->name_bitmap);
info->name_bitmap = NULL;
}
@@ -8597,16 +8676,18 @@ void
close_kernel_file(void)
{
if (info->name_vmlinux) {
- if ((info->fd_vmlinux = close(info->fd_vmlinux)) < 0) {
+ if (close(info->fd_vmlinux) < 0) {
ERRMSG("Can't close the kernel file(%s). %s\n",
info->name_vmlinux, strerror(errno));
}
+ info->fd_vmlinux = -1;
}
if (info->name_xen_syms) {
- if ((info->fd_xen_syms = close(info->fd_xen_syms)) < 0) {
+ if (close(info->fd_xen_syms) < 0) {
ERRMSG("Can't close the kernel file(%s). %s\n",
info->name_xen_syms, strerror(errno));
}
+ info->fd_xen_syms = -1;
}
}
@@ -9708,6 +9789,9 @@ create_dumpfile(void)
if (!initial())
return FALSE;
+ if (!open_dump_bitmap())
+ return FALSE;
+
/* create an array of translations from pfn to vmemmap pages */
if (info->flag_excludevm) {
if (find_vmemmap() == FAILED) {
@@ -10168,7 +10252,7 @@ reassemble_kdump_header(void)
ret = TRUE;
out:
- if (fd > 0)
+ if (fd >= 0)
close(fd);
free(buf_bitmap);
@@ -10178,7 +10262,7 @@ out:
int
reassemble_kdump_pages(void)
{
- int i, fd = 0, ret = FALSE;
+ int i, fd = -1, ret = FALSE;
off_t offset_first_ph, offset_ph_org, offset_eraseinfo;
off_t offset_data_new, offset_zero_page = 0;
mdf_pfn_t pfn, start_pfn, end_pfn;
@@ -10302,7 +10386,7 @@ reassemble_kdump_pages(void)
offset_data_new += pd.size;
}
close(fd);
- fd = 0;
+ fd = -1;
}
if (!write_cache_bufsz(&cd_pd))
goto out;
@@ -10345,7 +10429,7 @@ reassemble_kdump_pages(void)
size_eraseinfo += SPLITTING_SIZE_EI(i);
close(fd);
- fd = 0;
+ fd = -1;
}
if (size_eraseinfo) {
if (!write_cache_bufsz(&cd_data))
@@ -10366,7 +10450,7 @@ out:
if (data)
free(data);
- if (fd > 0)
+ if (fd >= 0)
close(fd);
return ret;
@@ -10878,6 +10962,9 @@ int show_mem_usage(void)
if (!initial())
return FALSE;
+ if (!open_dump_bitmap())
+ return FALSE;
+
if (!prepare_bitmap_buffer())
return FALSE;
@@ -10939,6 +11026,11 @@ main(int argc, char *argv[])
strerror(errno));
goto out;
}
+ info->fd_vmlinux = -1;
+ info->fd_xen_syms = -1;
+ info->fd_memory = -1;
+ info->fd_dumpfile = -1;
+ info->fd_bitmap = -1;
initialize_tables();
/*
@@ -11188,6 +11280,7 @@ main(int argc, char *argv[])
}
if (info->flag_split) {
for (i = 0; i < info->num_dumpfile; i++) {
+ SPLITTING_FD_BITMAP(i) = -1;
if (!check_dump_file(SPLITTING_DUMPFILE(i)))
goto out;
}
@@ -11229,13 +11322,16 @@ out:
free(info->kh_memory);
if (info->valid_pages)
free(info->valid_pages);
- if (info->bitmap_memory)
+ if (info->bitmap_memory) {
+ if (info->bitmap_memory->buf)
+ free(info->bitmap_memory->buf);
free(info->bitmap_memory);
- if (info->fd_memory)
+ }
+ if (info->fd_memory >= 0)
close(info->fd_memory);
- if (info->fd_dumpfile)
+ if (info->fd_dumpfile >= 0)
close(info->fd_dumpfile);
- if (info->fd_bitmap)
+ if (info->fd_bitmap >= 0)
close(info->fd_bitmap);
if (vt.node_online_map != NULL)
free(vt.node_online_map);
diff --git a/makedumpfile.conf.5 b/makedumpfile.conf.5
index f65f9ca..05dc117 100644
--- a/makedumpfile.conf.5
+++ b/makedumpfile.conf.5
@@ -1,4 +1,4 @@
-.TH MAKEDUMPFILE.CONF 5 "9 Jun 2016" "makedumpfile v1.6.0" "Linux System Administrator's Manual"
+.TH MAKEDUMPFILE.CONF 5 "27 Dec 2016" "makedumpfile v1.6.1" "Linux System Administrator's Manual"
.SH NAME
makedumpfile.conf \- The filter configuration file for makedumpfile(8).
.SH DESCRIPTION
diff --git a/makedumpfile.h b/makedumpfile.h
index 251d4bf..e32e567 100644
--- a/makedumpfile.h
+++ b/makedumpfile.h
@@ -393,6 +393,22 @@ do { \
return FALSE; \
} \
} while (0)
+#define WRITE_NUMBER_UNSIGNED(str_number, number) \
+do { \
+ if (NUMBER(number) != NOT_FOUND_NUMBER) { \
+ fprintf(info->file_vmcoreinfo, "%s%lu\n", \
+ STR_NUMBER(str_number), NUMBER(number)); \
+ } \
+} while (0)
+#define READ_NUMBER_UNSIGNED(str_number, number) \
+do { \
+ if (NUMBER(number) == NOT_FOUND_NUMBER) { \
+ NUMBER(number) = read_vmcoreinfo_ulong(STR_NUMBER(str_number)); \
+ if (NUMBER(number) == INVALID_STRUCTURE_DATA) \
+ return FALSE; \
+ } \
+} while (0)
+
/*
* for source file name
@@ -456,7 +472,7 @@ do { \
#define KVER_MIN_SHIFT 16
#define KERNEL_VERSION(x,y,z) (((x) << KVER_MAJ_SHIFT) | ((y) << KVER_MIN_SHIFT) | (z))
#define OLDEST_VERSION KERNEL_VERSION(2, 6, 15)/* linux-2.6.15 */
-#define LATEST_VERSION KERNEL_VERSION(4, 5, 3)/* linux-4.5.3 */
+#define LATEST_VERSION KERNEL_VERSION(4, 8, 1)/* linux-4.8.1 */
/*
* vmcoreinfo in /proc/vmcore
@@ -500,11 +516,8 @@ do { \
#define PMASK (0x7ffffffffffff000UL)
#ifdef __aarch64__
-int get_va_bits_arm64(void);
-#define ARM64_PGTABLE_LEVELS get_pgtable_level_arm64()
-#define VA_BITS get_va_bits_arm64()
-#define PAGE_SHIFT get_page_shift_arm64()
-#define KVBASE VMALLOC_START
+unsigned long get_kvbase_arm64(void);
+#define KVBASE get_kvbase_arm64()
#endif /* aarch64 */
#ifdef __arm__
@@ -573,10 +586,6 @@ int get_va_bits_arm64(void);
#define VMEMMAP_END_2_6_31 (0xffffeaffffffffff) /* 2.6.31, or later */
#define __START_KERNEL_map (0xffffffff80000000)
-#define KERNEL_IMAGE_SIZE_ORIG (0x0000000008000000) /* 2.6.25, or former */
-#define KERNEL_IMAGE_SIZE_2_6_26 (0x0000000020000000) /* 2.6.26, or later */
-#define MODULES_VADDR (__START_KERNEL_map + NUMBER(KERNEL_IMAGE_SIZE))
-#define MODULES_END (0xfffffffffff00000)
#define KVBASE PAGE_OFFSET
#define _SECTION_SIZE_BITS (27)
#define _MAX_PHYSMEM_BITS_ORIG (40)
@@ -625,24 +634,6 @@ int get_va_bits_arm64(void);
#define REGION_SHIFT (60UL)
#define VMEMMAP_REGION_ID (0xfUL)
-#define PGDIR_SHIFT \
- (PAGESHIFT() + (PAGESHIFT() - 3) + (PAGESHIFT() - 2))
-#define PMD_SHIFT (PAGESHIFT() + (PAGESHIFT() - 3))
-
-/* shift to put page number into pte */
-#define PTE_SHIFT 16
-
-#define PTE_INDEX_SIZE 9
-#define PMD_INDEX_SIZE 10
-#define PGD_INDEX_SIZE 10
-
-#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
-#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
-#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
-
-#define PGD_OFFSET(vaddr) ((vaddr >> PGDIR_SHIFT) & 0x7ff)
-#define PMD_OFFSET(vaddr) ((vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-
/* 4-level page table support */
/* 4K pagesize */
@@ -650,7 +641,15 @@ int get_va_bits_arm64(void);
#define PMD_INDEX_SIZE_L4_4K 7
#define PUD_INDEX_SIZE_L4_4K 7
#define PGD_INDEX_SIZE_L4_4K 9
-#define PTE_SHIFT_L4_4K 17
+#define PUD_INDEX_SIZE_L4_4K_3_7 9
+#define PTE_INDEX_SIZE_RADIX_4K 9
+#define PMD_INDEX_SIZE_RADIX_4K 9
+#define PUD_INDEX_SIZE_RADIX_4K 9
+#define PGD_INDEX_SIZE_RADIX_4K 13
+#define PTE_RPN_SHIFT_L4_4K 17
+#define PTE_RPN_SHIFT_L4_4K_4_5 18
+#define PGD_MASKED_BITS_4K 0
+#define PUD_MASKED_BITS_4K 0
#define PMD_MASKED_BITS_4K 0
/* 64K pagesize */
@@ -661,21 +660,57 @@ int get_va_bits_arm64(void);
#define PTE_INDEX_SIZE_L4_64K_3_10 8
#define PMD_INDEX_SIZE_L4_64K_3_10 10
#define PGD_INDEX_SIZE_L4_64K_3_10 12
-#define PTE_SHIFT_L4_64K_V1 32
-#define PTE_SHIFT_L4_64K_V2 30
+#define PMD_INDEX_SIZE_L4_64K_4_6 5
+#define PUD_INDEX_SIZE_L4_64K_4_6 5
+#define PTE_INDEX_SIZE_RADIX_64K 5
+#define PMD_INDEX_SIZE_RADIX_64K 9
+#define PUD_INDEX_SIZE_RADIX_64K 9
+#define PGD_INDEX_SIZE_RADIX_64K 13
+#define PTE_RPN_SHIFT_L4_64K_V1 32
+#define PTE_RPN_SHIFT_L4_64K_V2 30
+#define PGD_MASKED_BITS_64K 0
+#define PUD_MASKED_BITS_64K 0x1ff
#define PMD_MASKED_BITS_64K 0x1ff
+#define PMD_MASKED_BITS_64K_3_11 0xfff
+#define PGD_MASKED_BITS_64K_4_6 0xc0000000000000ffUL
+#define PUD_MASKED_BITS_64K_4_6 0xc0000000000000ffUL
+#define PMD_MASKED_BITS_64K_4_6 0xc0000000000000ffUL
+
+#define PTE_RPN_MASK_DEFAULT 0xffffffffffffffffUL
+#define PTE_RPN_SIZE_L4_4_6 (info->page_size == 65536 ? 41 : 45)
+#define PTE_RPN_MASK_L4_4_6 (((1UL << PTE_RPN_SIZE_L4_4_6) - 1) << info->page_shift)
+#define PTE_RPN_SHIFT_L4_4_6 info->page_shift
+
+#define PGD_MASKED_BITS_4_7 0xc0000000000000ffUL
+#define PUD_MASKED_BITS_4_7 0xc0000000000000ffUL
+#define PMD_MASKED_BITS_4_7 0xc0000000000000ffUL
+
+/*
+ * Supported MMU types
+ */
+#define STD_MMU 0x0
+/*
+ * The flag bit for radix MMU in cpu_spec.mmu_features
+ * in the kernel. Use the same flag here.
+ */
+#define RADIX_MMU 0x40
-#define L4_MASK \
- (info->kernel_version >= KERNEL_VERSION(3, 10, 0) ? 0xfff : 0x1ff)
-#define L4_OFFSET(vaddr) ((vaddr >> (info->l4_shift)) & L4_MASK)
-#define PGD_OFFSET_L4(vaddr) \
+#define PGD_MASK_L4 \
+ (info->kernel_version >= KERNEL_VERSION(3, 10, 0) ? (info->ptrs_per_pgd - 1) : 0x1ff)
+#define PGD_OFFSET_L4(vaddr) ((vaddr >> (info->l4_shift)) & PGD_MASK_L4)
+
+#define PUD_OFFSET_L4(vaddr) \
((vaddr >> (info->l3_shift)) & (info->ptrs_per_l3 - 1))
#define PMD_OFFSET_L4(vaddr) \
((vaddr >> (info->l2_shift)) & (info->ptrs_per_l2 - 1))
-#define _PAGE_PRESENT 0x1UL
+#define _PAGE_PRESENT \
+ (info->kernel_version >= KERNEL_VERSION(4, 6, 0) ? \
+ (0x1UL << 63) : (info->kernel_version >= KERNEL_VERSION(4, 5, 0) ? \
+ 0x2UL : 0x1UL))
+
#endif
#ifdef __powerpc32__
@@ -833,17 +868,16 @@ unsigned long long vaddr_to_paddr_x86(unsigned long vaddr);
#endif /* x86 */
#ifdef __x86_64__
-int is_vmalloc_addr_x86_64(ulong vaddr);
int get_phys_base_x86_64(void);
int get_machdep_info_x86_64(void);
int get_versiondep_info_x86_64(void);
-unsigned long long vaddr_to_paddr_x86_64(unsigned long vaddr);
+unsigned long long vtop4_x86_64(unsigned long vaddr);
#define find_vmemmap() find_vmemmap_x86_64()
#define get_phys_base() get_phys_base_x86_64()
#define get_machdep_info() get_machdep_info_x86_64()
#define get_versiondep_info() get_versiondep_info_x86_64()
-#define vaddr_to_paddr(X) vaddr_to_paddr_x86_64(X)
-#define is_phys_addr(X) (!is_vmalloc_addr_x86_64(X))
+#define vaddr_to_paddr(X) vtop4_x86_64(X)
+#define is_phys_addr(X) stub_true_ul(X)
#endif /* x86_64 */
#ifdef __powerpc64__ /* powerpc64 */
@@ -1100,6 +1134,7 @@ struct DumpInfo {
int flag_nospace; /* the flag of "No space on device" error */
int flag_vmemmap; /* kernel supports vmemmap address space */
int flag_excludevm; /* -e - excluding unused vmemmap pages */
+ int flag_use_count; /* _refcount is named _count in struct page */
unsigned long vaddr_for_vtop; /* virtual address for debugging */
long page_size; /* size of page */
long page_shift;
@@ -1121,10 +1156,13 @@ struct DumpInfo {
/*
* page table info for ppc64
*/
+ int cur_mmu_type;
int ptrs_per_pgd;
+ uint l4_index_size;
uint l3_index_size;
uint l2_index_size;
uint l1_index_size;
+ uint ptrs_per_l4;
uint ptrs_per_l3;
uint ptrs_per_l2;
uint ptrs_per_l1;
@@ -1132,8 +1170,11 @@ struct DumpInfo {
uint l3_shift;
uint l2_shift;
uint l1_shift;
- uint pte_shift;
- uint l2_masked_bits;
+ uint pte_rpn_shift;
+ ulong pte_rpn_mask;
+ ulong pgd_masked_bits;
+ ulong pud_masked_bits;
+ ulong pmd_masked_bits;
ulong kernel_pgd;
char *page_buf; /* Page buffer to read page tables */
@@ -1425,17 +1466,14 @@ struct symbol_table {
unsigned long long kexec_crash_image;
/*
- * vmemmap symbols on ppc64 arch
+ * symbols on ppc64 arch
*/
unsigned long long vmemmap_list;
unsigned long long mmu_vmemmap_psize;
unsigned long long mmu_psize_defs;
-
- /*
- * vm related symbols for ppc64 arch
- */
unsigned long long cpu_pgd;
unsigned long long demote_segment_4k;
+ unsigned long long cur_cpu_spec;
};
struct size_table {
@@ -1472,10 +1510,11 @@ struct size_table {
long elf64_hdr;
/*
- * vmemmap symbols on ppc64 arch
+ * symbols on ppc64 arch
*/
long vmemmap_backing;
long mmu_psize_def;
+ long cpu_spec;
long pageflags;
};
@@ -1483,7 +1522,7 @@ struct size_table {
struct offset_table {
struct page {
long flags;
- long _count;
+ long _refcount;
long mapping;
long lru;
long _mapcount;
@@ -1624,18 +1663,21 @@ struct offset_table {
} printk_log;
/*
- * vmemmap symbols on ppc64 arch
+ * symbols on ppc64 arch
*/
- struct mmu_psize_def {
+ struct mmu_psize_def_s {
long shift;
} mmu_psize_def;
- struct vmemmap_backing {
+ struct vmemmap_backing_s {
long phys;
long virt_addr;
long list;
} vmemmap_backing;
+ struct cpu_spec_s {
+ long mmu_features;
+ } cpu_spec;
};
/*
@@ -1683,10 +1725,15 @@ struct number_table {
long PG_hwpoison;
long PAGE_BUDDY_MAPCOUNT_VALUE;
- long KERNEL_IMAGE_SIZE;
long SECTION_SIZE_BITS;
long MAX_PHYSMEM_BITS;
long HUGETLB_PAGE_DTOR;
+ long phys_base;
+#ifdef __aarch64__
+ long VA_BITS;
+ unsigned long PHYS_OFFSET;
+ unsigned long kimage_voffset;
+#endif
};
struct srcfile_table {
@@ -1954,7 +2001,7 @@ is_dumpable_file(struct dump_bitmap *bitmap, mdf_pfn_t pfn)
static inline int
is_dumpable(struct dump_bitmap *bitmap, mdf_pfn_t pfn, struct cycle *cycle)
{
- if (bitmap->fd == 0) {
+ if (bitmap->fd < 0) {
return is_dumpable_buffer(bitmap, pfn, cycle);
} else {
return is_dumpable_file(bitmap, pfn);
diff --git a/makedumpfile.spec b/makedumpfile.spec
index 55b0f10..219fa99 100644
--- a/makedumpfile.spec
+++ b/makedumpfile.spec
@@ -1,6 +1,6 @@
Name: makedumpfile
Summary: makedumpfile package
-Version: 1.6.0
+Version: 1.6.1
Release: 1
Group: Applications/Text
License: GPL
diff --git a/print_info.c b/print_info.c
index 1a9f70c..392d863 100644
--- a/print_info.c
+++ b/print_info.c
@@ -200,6 +200,8 @@ print_usage(void)
MSG(" [--num-threads THREADNUM]:\n");
MSG(" Using multiple threads to read and compress data of each page in parallel.\n");
MSG(" And it will reduces time for saving DUMPFILE.\n");
+ MSG(" Note that if the usable cpu number is less than the thread number, it may\n");
+ MSG(" lead to great performance degradation.\n");
MSG(" This feature only supports creating DUMPFILE in kdump-comressed format from\n");
MSG(" VMCORE in kdump-compressed format or elf format.\n");
MSG("\n");
diff --git a/sadump_info.c b/sadump_info.c
index 20376f0..f77a020 100644
--- a/sadump_info.c
+++ b/sadump_info.c
@@ -213,6 +213,8 @@ sadump_copy_1st_bitmap_from_memory(void)
char buf[si->sh_memory->block_size];
off_t offset_page;
unsigned long bitmap_offset, bitmap_len;
+ mdf_pfn_t pfn, pfn_bitmap1;
+ extern mdf_pfn_t pfn_memhole;
bitmap_offset = si->sub_hdr_offset + sh->block_size*sh->sub_hdr_size;
bitmap_len = sh->block_size * sh->bitmap_blocks;
@@ -250,6 +252,13 @@ sadump_copy_1st_bitmap_from_memory(void)
offset_page += sizeof(buf);
}
+ pfn_bitmap1 = 0;
+ for (pfn = 0; pfn < info->max_mapnr; ++pfn) {
+ if (sadump_is_ram(pfn))
+ pfn_bitmap1++;
+ }
+ pfn_memhole = info->max_mapnr - pfn_bitmap1;
+
/*
* kdump uses the first 640kB on the 2nd kernel. But both
* bitmaps should reflect the 1st kernel memory situation. We
@@ -832,18 +841,28 @@ sadump_initialize_bitmap_memory(void)
strerror(errno));
return FALSE;
}
+
bmp->fd = info->fd_memory;
bmp->file_name = info->name_memory;
bmp->no_block = -1;
- memset(bmp->buf, 0, BUFSIZE_BITMAP);
bmp->offset = dumpable_bitmap_offset;
+ bmp->buf = malloc(BUFSIZE_BITMAP);
+ if (!bmp->buf) {
+ ERRMSG("Can't allocate memory for the memory-bitmap's buffer. %s\n",
+ strerror(errno));
+ free(bmp);
+ return FALSE;
+ }
+ memset(bmp->buf, 0, BUFSIZE_BITMAP);
+
max_section = divideup(si->max_mapnr, SADUMP_PF_SECTION_NUM);
block_table = calloc(sizeof(unsigned long long), max_section);
if (block_table == NULL) {
ERRMSG("Can't allocate memory for the block_table. %s\n",
strerror(errno));
+ free(bmp->buf);
free(bmp);
return FALSE;
}
@@ -870,8 +889,17 @@ sadump_initialize_bitmap_memory(void)
bmp->fd = info->fd_memory;
bmp->file_name = info->name_memory;
bmp->no_block = -1;
- memset(bmp->buf, 0, BUFSIZE_BITMAP);
bmp->offset = si->sub_hdr_offset + sh->block_size * sh->sub_hdr_size;
+
+ bmp->buf = malloc(BUFSIZE_BITMAP);
+ if (!bmp->buf) {
+ ERRMSG("Can't allocate memory for the memory-bitmap's buffer. %s\n",
+ strerror(errno));
+ free(bmp);
+ return FALSE;
+ }
+ memset(bmp->buf, 0, BUFSIZE_BITMAP);
+
si->ram_bitmap = bmp;
/*
@@ -1825,6 +1853,7 @@ sadump_add_diskset_info(char *name_memory)
}
si->diskset_info[si->num_disks - 1].name_memory = name_memory;
+ si->diskset_info[si->num_disks - 1].fd_memory = -1;
return TRUE;
}
@@ -1889,7 +1918,7 @@ free_sadump_info(void)
int i;
for (i = 1; i < si->num_disks; ++i) {
- if (si->diskset_info[i].fd_memory)
+ if (si->diskset_info[i].fd_memory >= 0)
close(si->diskset_info[i].fd_memory);
if (si->diskset_info[i].sph_memory)
free(si->diskset_info[i].sph_memory);
@@ -1904,6 +1933,11 @@ free_sadump_info(void)
fclose(si->file_elf_note);
if (si->cpu_online_mask_buf)
free(si->cpu_online_mask_buf);
+ if (si->ram_bitmap) {
+ if (si->ram_bitmap->buf)
+ free(si->ram_bitmap->buf);
+ free(si->ram_bitmap);
+ }
}
void