I have no reason to believe there would be a problem, but I couldn't
verify the translation of module address space because the 4.6 sample
vmcore doesn't have any modules installed. Any chance you can test that?
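
(For instance, after loading a module on the target kernel before taking
the dump, something like "mod -s <module>" followed by "vtop" on a module
text address should show whether module-space translation behaves -- just
a suggestion, not a required recipe.)
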
And again, nice work -- and truly appreciated!
Thanks,
Dave
----- Original Message -----
> A quick update.
>
> changes in v6:
> * arm64_kdump_phys_base() now falls through to the default case,
>   arm_kdump_phys_base(), anyway.
>
> changes in v5:
> * Calculates PHYS_OFFSET by reading VMCOREINFO, "NUMBER(PHYS_OFFSET)".
>   The "memstart_addr"-based routine was also moved into
>   arm64_kdump_phys_base().
>
> changes in v4:
> * Fixed VA_BITS calculation for v4.5 or earlier
> * Added 4-level address translation with 4KB page size
> * Removed "fix a renaming of a member of struct page, _count to
_refcount"
>
> Changes in v3:
> * Refined KASLR handling
>   Hopefully the tool works even on a live system if CONFIG_RANDOMIZE_BASE
>   is not configured
> * Fixed a renaming of a member of struct page
> * Removed a note in the commit message regarding an issue of backtracing a
>   panicked task, because it is a bug not in this tool but in my kdump patch
> * Reported the "kmem <vmalloc addr>" issue in the commit message
>
> changes in v2:
> * Fixed build warnings
> * Moved ARM64_NEW_VMEMMAP to machdep->flags
> * Show additional kaslr-related parameters in arm64_dump_machdep_table()
> * Handle a VMCOREINFO, "NUMBER(kimage_voffset)"
>
> ======8<======
> From 666cdf305aa246ce6b30282d8e89e950dc828f70 Mon Sep 17 00:00:00 2001
> From: AKASHI Takahiro <takahiro.akashi@linaro.org>
> Date: Mon, 16 May 2016 17:31:55 +0900
> Subject: [PATCH v6] arm64: fix kernel memory map handling for kaslr-enabled
> kernel
>
> In kernel v4.6, Kernel ASLR (KASLR) is supported on arm64, and the start
> address of the kernel image can be randomized if CONFIG_RANDOMIZE_BASE is
> enabled.
> Even worse, the kernel image is no longer mapped in the linear mapping,
> but in the vmalloc area (i.e. below PAGE_OFFSET).
>
> Now, according to the kernel's memory.h, converting a virtual address to
> a physical address is done as follows:
>
>     #define __virt_to_phys(x) ({                                       \
>         phys_addr_t __x = (phys_addr_t)(x);                            \
>         __x & BIT(VA_BITS - 1) ? (__x & ~PAGE_OFFSET) + PHYS_OFFSET :  \
>                                  (__x - kimage_voffset); })
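>
> To make the two cases concrete, here is a minimal standalone sketch of
> that logic (not from the kernel or this patch; VA_BITS, PHYS_OFFSET and
> kimage_voffset below are made-up example values):
>
>     #include <stdio.h>
>
>     #define VA_BITS        48
>     #define PAGE_OFFSET    (0xffffffffffffffffUL << (VA_BITS - 1))
>     #define PHYS_OFFSET    0x40000000UL             /* hypothetical */
>     #define KIMAGE_VOFFSET 0xffff2557e9000000UL     /* hypothetical */
>
>     static unsigned long virt_to_phys(unsigned long x)
>     {
>         /* bit (VA_BITS - 1) set: linear-map address;
>          * clear: kernel-image address mapped via kimage_voffset */
>         return (x & (1UL << (VA_BITS - 1))) ?
>                 (x & ~PAGE_OFFSET) + PHYS_OFFSET :
>                 (x - KIMAGE_VOFFSET);
>     }
>
>     int main(void)
>     {
>         /* linear-map address: prints 40001000 */
>         printf("%lx\n", virt_to_phys(PAGE_OFFSET + 0x1000));
>         /* kernel-image address: prints 40080000 */
>         printf("%lx\n", virt_to_phys(KIMAGE_VOFFSET + 0x40080000));
>         return 0;
>     }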
>
> Please note that PHYS_OFFSET is no longer equal to the start address of
> the first usable memory block in System RAM, due to the change mentioned
> above.
>
> This patch addresses these changes and allows the crash utility to access
> memory contents at the correct addresses.
>
> * On a live system, crash with this patch won't work, especially
> with CONFIG_RANDOMIZE_BASE configured, because we currently have no way
> to know kimage_voffset.
>
> * For a core dump file, we can simply do:
>     $ crash <vmlinux> <vmcore>
>   as long as the file has "NUMBER(kimage_voffset)" in VMCOREINFO
>   (RELOC_AUTO|KASLR is set automatically).
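>
> * For the live-system case above, a possible (hypothetical, untested)
>   workaround is to pass the values by hand, since this patch also
>   accepts kimage_voffset as a --machdep option; the numbers below are
>   made up:
>     $ crash --machdep phys_offset=0x40000000 \
>             --machdep kimage_voffset=0xffff2557e9000000 <vmlinux>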
>
> Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
> ---
> arm64.c | 227 ++++++++++++++++++++++++++++++++++++++++++++++++--------------
> defs.h | 26 ++++---
> main.c | 7 +-
> symbols.c | 12 ++--
> 4 files changed, 206 insertions(+), 66 deletions(-)
>
> diff --git a/arm64.c b/arm64.c
> index 86ec348..21e3d8e 100644
> --- a/arm64.c
> +++ b/arm64.c
> @@ -73,6 +73,23 @@ static int arm64_get_crash_notes(void);
> static void arm64_calc_VA_BITS(void);
> static int arm64_is_uvaddr(ulong, struct task_context *);
>
> +ulong
> +arm64_VTOP(ulong addr)
> +{
> + if (machdep->flags & NEW_VMEMMAP) {
> + if (addr >= machdep->machspec->page_offset)
> + return machdep->machspec->phys_offset
> + + (addr - machdep->machspec->page_offset);
> + else if (machdep->machspec->kimage_voffset)
> + return addr - machdep->machspec->kimage_voffset;
> + else /* no randomness */
> + return machdep->machspec->phys_offset
> + + (addr - machdep->machspec->vmalloc_start_addr);
> + } else {
> + return machdep->machspec->phys_offset
> + + (addr - machdep->machspec->page_offset);
> + }
> +}
>
> /*
> * Do all necessary machine-specific setup here. This is called several times
> @@ -82,6 +99,7 @@ void
> arm64_init(int when)
> {
> ulong value;
> + char *string;
> struct machine_specific *ms;
>
> #if defined(__x86_64__)
> @@ -103,9 +121,32 @@ arm64_init(int when)
> if (machdep->cmdline_args[0])
> arm64_parse_cmdline_args();
> machdep->flags |= MACHDEP_BT_TEXT;
> +
> + ms = machdep->machspec;
> + if (!ms->kimage_voffset &&
> + (string = pc->read_vmcoreinfo("NUMBER(kimage_voffset)"))) {
> + ms->kimage_voffset = htol(string, QUIET, NULL);
> + free(string);
> + }
> +
> + if (ms->kimage_voffset) {
> + machdep->flags |= NEW_VMEMMAP;
> +
> + /*
> + * Even if CONFIG_RANDOMIZE_BASE is not configured,
> + * derive_kaslr_offset() should work and set
> + * kt->relocate to 0
> + */
> + if (!kt->relocate && !(kt->flags2 & (RELOC_AUTO|KASLR)))
> + kt->flags2 |= (RELOC_AUTO|KASLR);
> + }
> +
> break;
>
> case PRE_GDB:
> + if (kernel_symbol_exists("kimage_voffset"))
> + machdep->flags |= NEW_VMEMMAP;
> +
> if (!machdep->pagesize) {
> /*
> * Kerneldoc Documentation/arm64/booting.txt describes
> @@ -161,16 +202,34 @@ arm64_init(int when)
> machdep->pagemask = ~((ulonglong)machdep->pageoffset);
>
> arm64_calc_VA_BITS();
> - machdep->machspec->page_offset = ARM64_PAGE_OFFSET;
> + ms = machdep->machspec;
> + ms->page_offset = ARM64_PAGE_OFFSET;
> machdep->identity_map_base = ARM64_PAGE_OFFSET;
> - machdep->machspec->userspace_top = ARM64_USERSPACE_TOP;
> - machdep->machspec->modules_vaddr = ARM64_MODULES_VADDR;
> - machdep->machspec->modules_end = ARM64_MODULES_END;
> - machdep->machspec->vmalloc_start_addr = ARM64_VMALLOC_START;
> - machdep->machspec->vmalloc_end = ARM64_VMALLOC_END;
> - machdep->kvbase = ARM64_VMALLOC_START;
> - machdep->machspec->vmemmap_vaddr = ARM64_VMEMMAP_VADDR;
> - machdep->machspec->vmemmap_end = ARM64_VMEMMAP_END;
> + machdep->kvbase = ARM64_VA_START;
> + ms->userspace_top = ARM64_USERSPACE_TOP;
> + if (machdep->flags & NEW_VMEMMAP) {
> + struct syment *sp;
> +
> + sp = kernel_symbol_search("_text");
> + ms->kimage_text = (sp ? sp->value : 0);
> + sp = kernel_symbol_search("_end");
> + ms->kimage_end = (sp ? sp->value : 0);
> +
> + ms->modules_vaddr = ARM64_VA_START;
> + if (kernel_symbol_exists("kasan_init"))
> + ms->modules_vaddr += ARM64_KASAN_SHADOW_SIZE;
> + ms->modules_end = ms->modules_vaddr
> + + ARM64_MODULES_VSIZE -1;
> +
> + ms->vmalloc_start_addr = ms->modules_end + 1;
> + } else {
> + ms->modules_vaddr = ARM64_PAGE_OFFSET - MEGABYTES(64);
> + ms->modules_end = ARM64_PAGE_OFFSET - 1;
> + ms->vmalloc_start_addr = ARM64_VA_START;
> + }
> + ms->vmalloc_end = ARM64_VMALLOC_END;
> + ms->vmemmap_vaddr = ARM64_VMEMMAP_VADDR;
> + ms->vmemmap_end = ARM64_VMEMMAP_END;
>
> switch (machdep->pagesize)
> {
> @@ -241,8 +300,6 @@ arm64_init(int when)
> machdep->stacksize = ARM64_STACK_SIZE;
> machdep->flags |= VMEMMAP;
>
> - arm64_calc_phys_offset();
> -
> machdep->uvtop = arm64_uvtop;
> machdep->kvtop = arm64_kvtop;
> machdep->is_kvaddr = generic_is_kvaddr;
> @@ -271,6 +328,10 @@ arm64_init(int when)
> machdep->dumpfile_init = NULL;
> machdep->verify_line_number = NULL;
> machdep->init_kernel_pgd = arm64_init_kernel_pgd;
> +
> + /* use machdep parameters */
> + arm64_calc_phys_offset();
> +
> break;
>
> case POST_GDB:
> @@ -420,6 +481,8 @@ arm64_dump_machdep_table(ulong arg)
> fprintf(fp, "%sIRQ_STACKS", others++ ? "|" : "");
> if (machdep->flags & MACHDEP_BT_TEXT)
> fprintf(fp, "%sMACHDEP_BT_TEXT", others++ ? "|" :
"");
> + if (machdep->flags & NEW_VMEMMAP)
> + fprintf(fp, "%sNEW_VMEMMAP", others++ ? "|" : "");
> fprintf(fp, ")\n");
>
> fprintf(fp, " kvbase: %lx\n", machdep->kvbase);
> @@ -524,6 +587,11 @@ arm64_dump_machdep_table(ulong arg)
> fprintf(fp, " modules_end: %016lx\n", ms->modules_end);
> fprintf(fp, " vmemmap_vaddr: %016lx\n", ms->vmemmap_vaddr);
> fprintf(fp, " vmemmap_end: %016lx\n", ms->vmemmap_end);
> + if (machdep->flags & NEW_VMEMMAP) {
> + fprintf(fp, " kimage_text: %016lx\n", ms->kimage_text);
> + fprintf(fp, " kimage_end: %016lx\n", ms->kimage_end);
> + fprintf(fp, " kimage_voffset: %016lx\n",
ms->kimage_voffset);
> + }
> fprintf(fp, " phys_offset: %lx\n", ms->phys_offset);
> fprintf(fp, "__exception_text_start: %lx\n",
ms->__exception_text_start);
> fprintf(fp, " __exception_text_end: %lx\n",
ms->__exception_text_end);
> @@ -566,6 +634,42 @@ arm64_dump_machdep_table(ulong arg)
> }
> }
>
> +static int
> +arm64_parse_machdep_arg_l(char *argstring, char *param, ulong *value)
> +{
> + int len;
> + int megabytes = FALSE;
> + char *p;
> +
> + len = strlen(param);
> + if (!STRNEQ(argstring, param) || (argstring[len] != '='))
> + return FALSE;
> +
> + if ((LASTCHAR(argstring) == 'm') ||
> + (LASTCHAR(argstring) == 'M')) {
> + LASTCHAR(argstring) = NULLCHAR;
> + megabytes = TRUE;
> + }
> +
> + p = argstring + len + 1;
> + if (strlen(p)) {
> + int flags = RETURN_ON_ERROR | QUIET;
> + int err = 0;
> +
> + if (megabytes) {
> + *value = dtol(p, flags, &err);
> + if (!err)
> + *value = MEGABYTES(*value);
> + } else {
> + *value = htol(p, flags, &err);
> + }
> +
> + if (!err)
> + return TRUE;
> + }
> +
> + return FALSE;
> +}
>
> /*
> * Parse machine dependent command line arguments.
> @@ -577,11 +681,10 @@ arm64_dump_machdep_table(ulong arg)
> static void
> arm64_parse_cmdline_args(void)
> {
> - int index, i, c, err;
> + int index, i, c;
> char *arglist[MAXARGS];
> char buf[BUFSIZE];
> char *p;
> - ulong value = 0;
>
> for (index = 0; index < MAX_MACHDEP_ARGS; index++) {
> if (!machdep->cmdline_args[index])
> @@ -603,39 +706,23 @@ arm64_parse_cmdline_args(void)
> c = parse_line(buf, arglist);
>
> for (i = 0; i < c; i++) {
> - err = 0;
> -
> - if (STRNEQ(arglist[i], "phys_offset=")) {
> - int megabytes = FALSE;
> - int flags = RETURN_ON_ERROR | QUIET;
> -
> - if ((LASTCHAR(arglist[i]) == 'm') ||
> - (LASTCHAR(arglist[i]) == 'M')) {
> - LASTCHAR(arglist[i]) = NULLCHAR;
> - megabytes = TRUE;
> - }
> -
> - p = arglist[i] + strlen("phys_offset=");
> - if (strlen(p)) {
> - if (megabytes)
> - value = dtol(p, flags, &err);
> - else
> - value = htol(p, flags, &err);
> - }
> -
> - if (!err) {
> - if (megabytes)
> - value = MEGABYTES(value);
> -
> - machdep->machspec->phys_offset = value;
> -
> - error(NOTE,
> - "setting phys_offset to: 0x%lx\n\n",
> - machdep->machspec->phys_offset);
> + if (arm64_parse_machdep_arg_l(arglist[i],
> + "phys_offset",
> + &machdep->machspec->phys_offset)) {
> + error(NOTE,
> + "setting phys_offset to: 0x%lx\n\n",
> + machdep->machspec->phys_offset);
> +
> + machdep->flags |= PHYS_OFFSET;
> + continue;
> + } else if (arm64_parse_machdep_arg_l(arglist[i],
> + "kimage_voffset",
> + &machdep->machspec->kimage_voffset)) {
> + error(NOTE,
> + "setting kimage_voffset to: 0x%lx\n\n",
> + machdep->machspec->kimage_voffset);
>
> - machdep->flags |= PHYS_OFFSET;
> - continue;
> - }
> + continue;
> }
>
> error(WARNING, "ignoring --machdep option: %s\n",
> @@ -715,11 +802,31 @@ arm64_calc_phys_offset(void)
>
>
> /*
> - * Borrow the 32-bit ARM functionality.
> + * Determine PHYS_OFFSET either by reading VMCOREINFO or the kernel
> + * symbol, otherwise borrow the 32-bit ARM functionality.
> */
> static int
> arm64_kdump_phys_base(ulong *phys_offset)
> {
> + char *string;
> + struct syment *sp;
> + physaddr_t paddr;
> +
> + if ((string = pc->read_vmcoreinfo("NUMBER(PHYS_OFFSET)"))) {
> + *phys_offset = htol(string, QUIET, NULL);
> + free(string);
> + return TRUE;
> + }
> +
> + if (machdep->flags & NEW_VMEMMAP &&
> + machdep->machspec->kimage_voffset &&
> + (sp = kernel_symbol_search("memstart_addr"))) {
> + paddr = sp->value - machdep->machspec->kimage_voffset;
> + if (READMEM(-1, phys_offset, sizeof(*phys_offset),
> + sp->value, paddr) > 0)
> + return TRUE;
> + }
> +
> return arm_kdump_phys_base(phys_offset);
> }
>
> @@ -2509,6 +2616,11 @@ arm64_IS_VMALLOC_ADDR(ulong vaddr)
> {
> struct machine_specific *ms = machdep->machspec;
>
> + if ((machdep->flags & NEW_VMEMMAP) &&
> + (vaddr >= machdep->machspec->kimage_text) &&
> + (vaddr <= machdep->machspec->kimage_end))
> + return FALSE;
> +
> return ((vaddr >= ms->vmalloc_start_addr && vaddr <= ms->vmalloc_end) ||
>     ((machdep->flags & VMEMMAP) &&
>      (vaddr >= ms->vmemmap_vaddr && vaddr <= ms->vmemmap_end)) ||
> @@ -2539,7 +2651,10 @@ arm64_calc_VA_BITS(void)
>
> for (bitval = highest_bit_long(value); bitval; bitval--) {
> if ((value & (1UL << bitval)) == 0) {
> - machdep->machspec->VA_BITS = bitval + 2;
> + if (machdep->flags & NEW_VMEMMAP)
> + machdep->machspec->VA_BITS = bitval + 1;
> + else
> + machdep->machspec->VA_BITS = bitval + 2;
> break;
> }
> }
> @@ -2593,10 +2708,22 @@ arm64_calc_virtual_memory_ranges(void)
> break;
> }
>
> - vmemmap_size = ALIGN((1UL << (ms->VA_BITS - machdep->pageshift)) * SIZE(page), PUD_SIZE);
> + if (machdep->flags & NEW_VMEMMAP)
> +#define STRUCT_PAGE_MAX_SHIFT 6
> + vmemmap_size = 1UL << (ms->VA_BITS - machdep->pageshift - 1
> + + STRUCT_PAGE_MAX_SHIFT);
> + else
> + vmemmap_size = ALIGN((1UL << (ms->VA_BITS - machdep->pageshift)) * SIZE(page), PUD_SIZE);
> +
> vmalloc_end = (ms->page_offset - PUD_SIZE - vmemmap_size - SZ_64K);
> - vmemmap_start = vmalloc_end + SZ_64K;
> - vmemmap_end = vmemmap_start + vmemmap_size;
> +
> + if (machdep->flags & NEW_VMEMMAP) {
> + vmemmap_start = ms->page_offset - vmemmap_size;
> + vmemmap_end = ms->page_offset;
> + } else {
> + vmemmap_start = vmalloc_end + SZ_64K;
> + vmemmap_end = vmemmap_start + vmemmap_size;
> + }
>
> ms->vmalloc_end = vmalloc_end - 1;
> ms->vmemmap_vaddr = vmemmap_start;
> diff --git a/defs.h b/defs.h
> index 8eb601b..d6f719c 100644
> --- a/defs.h
> +++ b/defs.h
> @@ -2846,8 +2846,8 @@ typedef u64 pte_t;
>
> #define PTOV(X) \
> ((unsigned long)(X)-(machdep->machspec->phys_offset)+(machdep->machspec->page_offset))
> -#define VTOP(X) \
> - ((unsigned long)(X)-(machdep->machspec->page_offset)+(machdep->machspec->phys_offset))
> +
> +#define VTOP(X) arm64_VTOP((ulong)(X))
>
> #define USERSPACE_TOP (machdep->machspec->userspace_top)
> #define PAGE_OFFSET (machdep->machspec->page_offset)
> @@ -2962,19 +2962,24 @@ typedef signed int s32;
> #define VM_L3_4K (0x10)
> #define KDUMP_ENABLED (0x20)
> #define IRQ_STACKS (0x40)
> -#define VM_L4_4K (0x80)
> +#define NEW_VMEMMAP (0x80)
> +#define VM_L4_4K (0x100)
>
> /*
> * sources: Documentation/arm64/memory.txt
> * arch/arm64/include/asm/memory.h
> * arch/arm64/include/asm/pgtable.h
> */
> -
> -#define ARM64_PAGE_OFFSET ((0xffffffffffffffffUL) << (machdep->machspec->VA_BITS - 1))
> +#define ARM64_VA_START ((0xffffffffffffffffUL) \
> + << machdep->machspec->VA_BITS)
> +#define ARM64_PAGE_OFFSET ((0xffffffffffffffffUL) \
> + << (machdep->machspec->VA_BITS - 1))
> #define ARM64_USERSPACE_TOP ((1UL) << machdep->machspec->VA_BITS)
> -#define ARM64_MODULES_VADDR (ARM64_PAGE_OFFSET - MEGABYTES(64))
> -#define ARM64_MODULES_END (ARM64_PAGE_OFFSET - 1)
> -#define ARM64_VMALLOC_START ((0xffffffffffffffffUL) << machdep->machspec->VA_BITS)
> +
> +/* only used for v4.6 or later */
> +#define ARM64_MODULES_VSIZE MEGABYTES(128)
> +#define ARM64_KASAN_SHADOW_SIZE (1UL << (machdep->machspec->VA_BITS - 3))
> +
> /*
> * The following 3 definitions are the original values, but are obsolete
> * for 3.17 and later kernels because they are now build-time calculations.
> @@ -3055,6 +3060,10 @@ struct machine_specific {
> ulong *irq_stacks;
> ulong __irqentry_text_start;
> ulong __irqentry_text_end;
> + /* only needed for v4.6 or later kernel */
> + ulong kimage_voffset;
> + ulong kimage_text;
> + ulong kimage_end;
> };
>
> struct arm64_stackframe {
> @@ -5412,6 +5421,7 @@ void unwind_backtrace(struct bt_info *);
> #ifdef ARM64
> void arm64_init(int);
> void arm64_dump_machdep_table(ulong);
> +ulong arm64_VTOP(ulong);
> int arm64_IS_VMALLOC_ADDR(ulong);
> ulong arm64_swp_type(ulong);
> ulong arm64_swp_offset(ulong);
> diff --git a/main.c b/main.c
> index 05787f0..4065e9a 100644
> --- a/main.c
> +++ b/main.c
> @@ -227,9 +227,10 @@ main(int argc, char **argv)
> optarg);
> }
> } else if (STREQ(long_options[option_index].name, "kaslr")) {
> - if (!machine_type("X86_64"))
> - error(INFO, "--kaslr only valid "
> - "with X86_64 machine type.\n");
> + if (!machine_type("X86_64") &&
> + !machine_type("ARM64"))
> + error(INFO, "--kaslr not valid "
> + "with this machine type.\n");
> else if (STREQ(optarg, "auto"))
> kt->flags2 |= (RELOC_AUTO|KASLR);
> else {
> diff --git a/symbols.c b/symbols.c
> index a8d3563..b0a6461 100644
> --- a/symbols.c
> +++ b/symbols.c
> @@ -593,7 +593,8 @@ kaslr_init(void)
> {
> char *string;
>
> - if (!machine_type("X86_64") || (kt->flags & RELOC_SET))
> + if ((!machine_type("X86_64") &&
!machine_type("ARM64")) ||
> + (kt->flags & RELOC_SET))
> return;
>
> /*
> @@ -712,7 +713,7 @@ store_symbols(bfd *abfd, int dynamic, void *minisyms,
> long symcount,
> if (machine_type("X86")) {
> if (!(kt->flags & RELOC_SET))
> kt->flags |= RELOC_FORCE;
> - } else if (machine_type("X86_64")) {
> + } else if (machine_type("X86_64") || machine_type("ARM64")) {
> if ((kt->flags2 & RELOC_AUTO) && !(kt->flags & RELOC_SET))
> derive_kaslr_offset(abfd, dynamic, from,
> fromend, size, store);
> @@ -783,7 +784,8 @@ store_sysmap_symbols(void)
> error(FATAL, "symbol table namespace malloc: %s\n",
> strerror(errno));
>
> - if (!machine_type("X86") && !machine_type("X86_64"))
> + if (!machine_type("X86") && !machine_type("X86_64")
&&
> + !machine_type("ARM64"))
> kt->flags &= ~RELOC_SET;
>
> first = 0;
> @@ -833,7 +835,7 @@ store_sysmap_symbols(void)
> }
>
> /*
> - * Handle x86 kernels configured such that the vmlinux symbols
> + * Handle x86/arm64 kernels configured such that the vmlinux symbols
> * are not as loaded into the kernel (not unity-mapped).
> */
> static ulong
> @@ -4681,7 +4683,7 @@ value_search(ulong value, ulong *offset)
> if ((sp = machdep->value_to_symbol(value, offset)))
> return sp;
>
> - if (IS_VMALLOC_ADDR(value))
> + if (IS_VMALLOC_ADDR(value))
> goto check_modules;
>
> if ((sp = symval_hash_search(value)) == NULL)
> --
> 2.8.1
>
--
Crash-utility mailing list
Crash-utility@redhat.com
https://www.redhat.com/mailman/listinfo/crash-utility