[PATCH v4 0/3] arm64: more improvement of bt -f
by AKASHI Takahiro
Changes in v4:
* use arm64_on_irq_stack() to check whether or not we are on IRQ stack.
mistakenly used 'flags & IRQ_STACKS'.
* fix a bug at critical timing around switching stacks in arm64_unwind_frame().
We need to take care of the possibility that we get back to the process stack,
but fp still points to IRQ stack.
* add patch[2/3]
This should be applied even without patch[1/3]
* add patch[3/3]
whether to apply this patch is open to discussion.
AKASHI Takahiro (3):
arm64: more improvement of bt -f
arm64: find a correct starting stackframe at bt
arm64: correct a PC shown in bt
arm64.c | 559 +++++++++++++++++++++++++++++++++++++++++++---------------------
defs.h | 6 +
2 files changed, 379 insertions(+), 186 deletions(-)
--
2.9.0
8 years, 5 months
[PATCH v3] arm64: more improvement of bt -f
by AKASHI Takahiro
Dave,
This patch addresses all the issues that I mentioned in [1], and
re-factored arm64_back_trace_cmd() to make it simpler, while
arm64_unwind_frame() gets a bit complicated. But those changes,
I believe, make the code more readable and easily maintainable.
(The only ugly part is arm64_in_exp_entry(). I have no better ideas.)
Please pick up this patch if you like.
It is to be applied on top of your current master.
[1] https://www.redhat.com/archives/crash-utility/2016-June/msg00040.html
Thanks,
-Takahiro AKASHI
======8<======
>From c1e06fdd21bb70d247babd43cf2762e0cdf6979c Mon Sep 17 00:00:00 2001
From: AKASHI Takahiro <takahiro.akashi(a)linaro.org>
Date: Thu, 16 Jun 2016 09:29:52 +0900
Subject: [PATCH v3] arm64: more improvement of bt -f
Signed-off-by: AKASHI Takahiro <takahiro.akashi(a)linaro.org>
---
arm64.c | 486 +++++++++++++++++++++++++++++++++++++++++++---------------------
defs.h | 6 +
2 files changed, 337 insertions(+), 155 deletions(-)
diff --git a/arm64.c b/arm64.c
index 06676d1..9d42fe6 100644
--- a/arm64.c
+++ b/arm64.c
@@ -43,17 +43,18 @@ static void arm64_stackframe_init(void);
static int arm64_eframe_search(struct bt_info *);
static int arm64_is_kernel_exception_frame(struct bt_info *, ulong);
static int arm64_in_exception_text(ulong);
+static int arm64_in_exp_entry(ulong);
static void arm64_back_trace_cmd(struct bt_info *);
static void arm64_print_text_symbols(struct bt_info *, struct arm64_stackframe *, FILE *);
static int arm64_print_stackframe_entry(struct bt_info *, int, struct arm64_stackframe *, FILE *);
-static void arm64_display_full_frame(struct bt_info *, ulong);
-static int arm64_unwind_frame(struct bt_info *, struct arm64_stackframe *);
+static void arm64_display_full_frame(struct bt_info *, struct arm64_stackframe *, struct arm64_stackframe *);
+static int arm64_unwind_frame(struct bt_info *, struct arm64_stackframe *, FILE *);
static int arm64_get_dumpfile_stackframe(struct bt_info *, struct arm64_stackframe *);
static int arm64_in_kdump_text(struct bt_info *, struct arm64_stackframe *);
static int arm64_in_kdump_text_on_irq_stack(struct bt_info *);
-static int arm64_switch_stack(struct bt_info *, struct arm64_stackframe *, FILE *);
static int arm64_get_stackframe(struct bt_info *, struct arm64_stackframe *);
static void arm64_get_stack_frame(struct bt_info *, ulong *, ulong *);
+static void arm64_gen_hidden_frame(struct bt_info *bt, ulong, struct arm64_stackframe *);
static void arm64_print_exception_frame(struct bt_info *, ulong, int, FILE *);
static void arm64_do_bt_reference_check(struct bt_info *, ulong, char *);
static int arm64_translate_pte(ulong, void *, ulonglong);
@@ -580,6 +581,10 @@ arm64_dump_machdep_table(ulong arg)
fprintf(fp, " __exception_text_end: %lx\n", ms->__exception_text_end);
fprintf(fp, " __irqentry_text_start: %lx\n", ms->__irqentry_text_start);
fprintf(fp, " __irqentry_text_end: %lx\n", ms->__irqentry_text_end);
+ fprintf(fp, " exp_entry1_start: %lx\n", ms->exp_entry1_start);
+ fprintf(fp, " exp_entry1_end: %lx\n", ms->exp_entry1_end);
+ fprintf(fp, " exp_entry2_start: %lx\n", ms->exp_entry2_start);
+ fprintf(fp, " exp_entry2_end: %lx\n", ms->exp_entry2_end);
fprintf(fp, " panic_task_regs: %lx\n", (ulong)ms->panic_task_regs);
fprintf(fp, " PTE_PROT_NONE: %lx\n", ms->PTE_PROT_NONE);
fprintf(fp, " PTE_FILE: ");
@@ -1286,6 +1291,15 @@ arm64_stackframe_init(void)
machdep->machspec->__irqentry_text_start = sp1->value;
machdep->machspec->__irqentry_text_end = sp2->value;
}
+ if ((sp1 = kernel_symbol_search("vectors")) &&
+ (sp1n = kernel_symbol_search("cpu_switch_to")) &&
+ (sp2 = kernel_symbol_search("ret_fast_syscall")) &&
+ (sp2n = kernel_symbol_search("sys_rt_sigreturn_wrapper"))) {
+ machdep->machspec->exp_entry1_start = sp1->value;
+ machdep->machspec->exp_entry1_end = sp1n->value;
+ machdep->machspec->exp_entry2_start = sp2->value;
+ machdep->machspec->exp_entry2_end = sp2n->value;
+ }
if ((sp1 = kernel_symbol_search("crash_kexec")) &&
(sp1n = next_symbol(NULL, sp1)) &&
@@ -1488,9 +1502,21 @@ arm64_in_exception_text(ulong ptr)
return FALSE;
}
+static int
+arm64_in_exp_entry(ulong addr)
+{
+ struct machine_specific *ms;
+
+ ms = machdep->machspec;
+ if ((ms->exp_entry1_start <= addr) && (addr < ms->exp_entry1_end))
+ return TRUE;
+ if ((ms->exp_entry2_start <= addr) && (addr < ms->exp_entry2_end))
+ return TRUE;
+ return FALSE;
+}
+
#define BACKTRACE_CONTINUE (1)
#define BACKTRACE_COMPLETE_KERNEL (2)
-#define BACKTRACE_COMPLETE_USER (3)
static int
arm64_print_stackframe_entry(struct bt_info *bt, int level, struct arm64_stackframe *frame, FILE *ofp)
@@ -1511,11 +1537,6 @@ arm64_print_stackframe_entry(struct bt_info *bt, int level, struct arm64_stackfr
value_to_symstr(frame->pc, buf, bt->radix);
}
- if ((bt->flags & BT_FULL) && level) {
- arm64_display_full_frame(bt, frame->fp);
- bt->frameptr = frame->fp;
- }
-
fprintf(ofp, "%s#%d [%8lx] %s at %lx", level < 10 ? " " : "", level,
frame->fp ? frame->fp : bt->stacktop - USER_EFRAME_OFFSET,
name_plus_offset ? name_plus_offset : name, frame->pc);
@@ -1534,7 +1555,8 @@ arm64_print_stackframe_entry(struct bt_info *bt, int level, struct arm64_stackfr
fprintf(ofp, " %s\n", buf);
}
- if (STREQ(name, "start_kernel") || STREQ(name, "secondary_start_kernel") ||
+ if (STREQ(name, "start_kernel") ||
+ STREQ(name, "secondary_start_kernel") ||
STREQ(name, "kthread") || STREQ(name, "kthreadd"))
return BACKTRACE_COMPLETE_KERNEL;
@@ -1542,46 +1564,169 @@ arm64_print_stackframe_entry(struct bt_info *bt, int level, struct arm64_stackfr
}
static void
-arm64_display_full_frame(struct bt_info *bt, ulong sp)
+arm64_display_full_frame(struct bt_info *bt, struct arm64_stackframe *cur,
+ struct arm64_stackframe *next)
{
+ struct machine_specific *ms;
+ ulong next_fp, stackbase;
+ char *stackbuf;
int i, u_idx;
ulong *up;
ulong words, addr;
char buf[BUFSIZE];
- if (bt->frameptr == sp)
- return;
+ stackbase = bt->stackbase;
+ stackbuf = bt->stackbuf;
+ ms = machdep->machspec;
- if (!INSTACK(sp, bt) || !INSTACK(bt->frameptr, bt)) {
- if (sp == 0)
- sp = bt->stacktop - USER_EFRAME_OFFSET;
- else
- return;
- }
+ /* Calc next fp for dump */
+ if (next->fp == 0)
+ /* last stackframe on kernel stack */
+ next_fp = bt->stacktop - 0x10;
+ else if (!INSTACK(cur->sp, bt)) {
+ /* We have just switched over stacks */
+ next_fp = ms->irq_stacks[bt->tc->processor]
+ + ms->irq_stack_size - 0x10;
+
+ /*
+ * We are already buffering a process stack.
+ * So use an old buffer for IRQ stack.
+ */
+ stackbase = ms->irq_stacks[bt->tc->processor];
+ stackbuf = ms->irq_stackbuf;
+ } else
+ next_fp = next->fp;
+
+ if (CRASHDEBUG(1))
+ fprintf(fp, " frame <%016lx:%016lx>\n", cur->fp, next_fp);
- words = (sp - bt->frameptr) / sizeof(ulong);
+ /* Check here because we want to see a debug message above. */
+ if (!(bt->flags & BT_FULL))
+ return;
- addr = bt->frameptr;
- u_idx = (bt->frameptr - bt->stackbase)/sizeof(ulong);
+ /* Dump */
+ words = (next_fp - cur->fp) / sizeof(ulong);
+ addr = cur->fp;
+ u_idx = (cur->fp - stackbase)/sizeof(ulong);
for (i = 0; i < words; i++, u_idx++) {
if (!(i & 1))
fprintf(fp, "%s %lx: ", i ? "\n" : "", addr);
- up = (ulong *)(&bt->stackbuf[u_idx*sizeof(ulong)]);
+ up = (ulong *)(&stackbuf[u_idx*sizeof(ulong)]);
fprintf(fp, "%s ", format_stack_entry(bt, buf, *up, 0));
addr += sizeof(ulong);
}
fprintf(fp, "\n");
+
+ if (stackbuf == ms->irq_stackbuf)
+ FREEBUF(stackbuf);
}
-static int
-arm64_unwind_frame(struct bt_info *bt, struct arm64_stackframe *frame)
+/*
+ * (1)Normal frame:
+ * +------+
+ * | pfp |
+ * | cpc |
+ * psp + +
+ * | |
+ * | |
+ * pfp +------+ <--- :prev stackframe = <pfp, psp, ppc>
+ * | cfp |
+ * | npc |
+ * csp + +
+ * | |
+ * | |
+ * cfp +------+ <--- :curr stackframe = <cfp, csp, cpc>
+ * | nfp | | cfp = *pfp
+ * | Npc | | csp = pfp + 0x10
+ * nsp + + real stackframe
+ * | | | at cpc
+ * | | |
+ * nfp +------+ <--- :next stackframe = <nfp, nsp, npc>
+ * | |
+ *
+ * (2)Exception:
+ * +------+
+ * | pfp |
+ * | cpc |
+ * psp + +
+ * | |
+ * | |
+ * pfp +------+ <--- :prev stackframe = <pfp, psp, ppc>
+ * | cfp |
+ * | npc |
+ * csp + +
+ * | |
+ * | |
+ * cfp +------+ <--- :stackframe = <cfp, csp, cpc>
+ * | nfp |
+ * | epc |
+ * + +
+ * | |
+ * | | calced dummy
+ * esp +------+ <--- :exp stackframe = <---, esp, epc>
+ * | | esp = nsp - sizeof(pt_regs)
+ * | |
+ * | Npc |
+ * | nfp |
+ * | nsp |
+ * | npc |
+ * nsp + +
+ * | | calced missing
+ * nfp +------+ <--- :task stackframe = <nfp, nsp, npc>
+ * | Nfp |
+ * | NNpc |
+ * Nsp + +
+ * | |
+ * Nfp +------+ <--- :task stackframe = <Nfp, Nsp, Npc>
+ * | NNfp |
+ *
+ * (3)At interrupt:
+ * +------+
+ * | pfp |
+ * | cpc |
+ * psp + +
+ * | |
+ * | |
+ * pfp +------+ <--- :prev stackframe = <pfp, psp, ppc>
+ * | cfp |
+ * | epc |
+ * csp + +
+ * | |
+ * | | calced dummy
+ * cfp +------+ <--- :irq stackframe = <cfp, csp, epc>
+ * | nfp | | if (cfp == IRQ_STACK_PTR)
+ * | esp | V
+ * top +------+ <---, esp, epc>
+ * IRQ stack
+ *
+ * calced dummy
+ * esp +------+ <--- :exp stackframe = <---, esp, epc>
+ * | | esp = nsp - sizeof(pt_regs)
+ * | |
+ * | Npc |
+ * | nfp |
+ * | nsp |
+ * | npc | calced missing
+ * nfp +------+ <--- :task stackframe = <nfp, nsp, npc>
+ * | Nfp |
+ * | NNpc |
+ * Nsp + +
+ * | |
+ * Nfp +------+ <--- :task stackframe = <Nfp, Nsp, Npc>
+ * | NNfp |
+ */
+
+static struct arm64_stackframe ext_frame;
+
+static int
+arm64_unwind_frame(struct bt_info *bt, struct arm64_stackframe *frame,
+ FILE *ofp)
{
unsigned long high, low, fp;
unsigned long stack_mask;
unsigned long irq_stack_ptr, orig_sp;
- struct arm64_pt_regs *ptregs;
struct machine_specific *ms;
stack_mask = (unsigned long)(ARM64_STACK_SIZE) - 1;
@@ -1593,54 +1738,101 @@ arm64_unwind_frame(struct bt_info *bt, struct arm64_stackframe *frame)
if (fp < low || fp > high || fp & 0xf)
return FALSE;
- frame->sp = fp + 0x10;
- frame->fp = GET_STACK_ULONG(fp);
+ if (CRASHDEBUG(1))
+ fprintf(ofp, " cur fp:%016lx sp:%016lx pc:%016lx\n",
+ frame->fp, frame->sp, frame->pc);
+
+ if (ext_frame.pc) {
+ /*
+ * Previous frame was a dummy for exception entry.
+ * So insert a hidden (real) stackframe.
+ */
+ frame->fp = ext_frame.fp;
+ frame->sp = ext_frame.sp;
+ frame->pc = ext_frame.pc;
+
+ ext_frame.pc = 0; /* back to normal unwinding */
+
+ goto unwind_done;
+ }
+
frame->pc = GET_STACK_ULONG(fp + 8);
+ if (!arm64_in_exp_entry(frame->pc)) {
+ /* Normal stack frame */
- /*
- * The kernel's manner of determining the end of the IRQ stack:
- *
- * #define THREAD_SIZE 16384
- * #define THREAD_START_SP (THREAD_SIZE - 16)
- * #define IRQ_STACK_START_SP THREAD_START_SP
- * #define IRQ_STACK_PTR(cpu) ((unsigned long)per_cpu(irq_stack, cpu) + IRQ_STACK_START_SP)
- * #define IRQ_STACK_TO_TASK_STACK(ptr) (*((unsigned long *)((ptr) - 0x08)))
- *
- * irq_stack_ptr = IRQ_STACK_PTR(raw_smp_processor_id());
- * orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr); (pt_regs pointer on process stack)
- */
- if (machdep->flags & IRQ_STACKS) {
- ms = machdep->machspec;
- irq_stack_ptr = ms->irq_stacks[bt->tc->processor] + ms->irq_stack_size - 16;
-
- if (frame->sp == irq_stack_ptr) {
- orig_sp = GET_STACK_ULONG(irq_stack_ptr - 8);
- arm64_set_process_stack(bt);
- if (INSTACK(orig_sp, bt) && (INSTACK(frame->fp, bt) || (frame->fp == 0))) {
- ptregs = (struct arm64_pt_regs *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(orig_sp))];
- frame->sp = orig_sp;
- frame->pc = ptregs->pc;
- bt->bptr = fp;
- if (CRASHDEBUG(1))
- error(INFO,
- "arm64_unwind_frame: switch stacks: fp: %lx sp: %lx pc: %lx\n",
- frame->fp, frame->sp, frame->pc);
+ frame->sp = fp + 0x10;
+ frame->fp = GET_STACK_ULONG(fp);
+ } else {
+ /*
+ * We are in exception entry code, and so need to
+ * - fake a dummy frame for exception frame, and
+ * - complement a stackframe hidden by exception
+ */
+
+ ext_frame.fp = GET_STACK_ULONG(fp);
+ if (ext_frame.fp == 0) {
+ /*
+ * Either on process stack or on IRQ stack,
+ * the next frame is the last one on process stack.
+ */
+
+ frame->sp = bt->stacktop
+ - sizeof(struct arm64_pt_regs) - 0x10;
+ frame->fp = frame->sp;
+ } else if (!(machdep->flags & IRQ_STACKS)) {
+ /*
+ * We are on process stack. Just fake a dummy frame
+ */
+
+ frame->sp = ext_frame.fp
+ - sizeof(struct arm64_pt_regs);
+ frame->fp = frame->sp;
+ } else {
+ /* We are on IRQ stack */
+
+ ms = machdep->machspec;
+ irq_stack_ptr = ms->irq_stacks[bt->tc->processor]
+ + ms->irq_stack_size - 0x20;
+ if (ext_frame.fp != irq_stack_ptr) {
+ /* Just fake a dummy frame */
+
+ frame->sp = ext_frame.fp
+ - sizeof(struct arm64_pt_regs);
+ frame->fp = frame->sp;
} else {
- error(WARNING,
- "arm64_unwind_frame: on IRQ stack: oriq_sp: %lx%s fp: %lx%s\n",
- orig_sp, INSTACK(orig_sp, bt) ? "" : " (?)",
- frame->fp, INSTACK(frame->fp, bt) ? "" : " (?)");
- return FALSE;
+ /*
+ * switch from IRQ stack to process stack
+ */
+
+ frame->sp = GET_STACK_ULONG(irq_stack_ptr + 8);
+ frame->fp = frame->sp;
+
+ /*
+ * Keep a buffer for a while until
+ * displaying the last frame on IRQ stack.
+ * Ugly?
+ */
+ if (bt->flags & BT_FULL)
+ ms->irq_stackbuf = bt->stackbuf;
+
+ arm64_set_process_stack(bt);
}
}
+
+ arm64_gen_hidden_frame(bt, frame->sp, &ext_frame);
}
+unwind_done:
+ if (CRASHDEBUG(1))
+ fprintf(ofp, " nxt fp:%016lx sp:%016lx pc:%016lx\n",
+ frame->fp, frame->sp, frame->pc);
+
return TRUE;
}
-/*
+/*
* A layout of a stack frame in a function looks like:
- *
+ *
* stack grows to lower addresses.
* /|\
* |
@@ -1658,7 +1850,7 @@ arm64_unwind_frame(struct bt_info *bt, struct arm64_stackframe *frame)
* | vars |
* old fp +------+
* | |
- *
+ *
* - On function entry, sp is decremented down to new fp.
*
* - and old fp and sp are saved into this stack frame.
@@ -1680,13 +1872,13 @@ arm64_unwind_frame(struct bt_info *bt, struct arm64_stackframe *frame)
* sp shows "callee's static local variables", old fp and sp.
*
* Diagram and explanation courtesy of Takahiro Akashi
- */
+ */
static void
arm64_back_trace_cmd(struct bt_info *bt)
{
- struct arm64_stackframe stackframe;
- int level;
+ struct arm64_stackframe stackframe, cur_frame;
+ int level, mode;
ulong exception_frame;
FILE *ofp;
@@ -1708,17 +1900,7 @@ arm64_back_trace_cmd(struct bt_info *bt)
stackframe.fp = GET_STACK_ULONG(bt->bptr - 8);
stackframe.pc = GET_STACK_ULONG(bt->bptr);
stackframe.sp = bt->bptr + 8;
- bt->frameptr = stackframe.sp;
- } else if (bt->hp && bt->hp->esp) {
- if (arm64_on_irq_stack(bt->tc->processor, bt->hp->esp)) {
- arm64_set_irq_stack(bt);
- bt->flags |= BT_IRQSTACK;
- }
- stackframe.fp = GET_STACK_ULONG(bt->hp->esp - 8);
- stackframe.pc = bt->hp->eip ?
- bt->hp->eip : GET_STACK_ULONG(bt->hp->esp);
- stackframe.sp = bt->hp->esp + 8;
- bt->flags &= ~BT_REGS_NOT_FOUND;
+ bt->frameptr = stackframe.fp;
} else {
stackframe.sp = bt->stkptr;
stackframe.pc = bt->instptr;
@@ -1739,8 +1921,14 @@ arm64_back_trace_cmd(struct bt_info *bt)
return;
if (!(bt->flags & BT_KDUMP_ADJUST)) {
- if (bt->flags & BT_USER_SPACE)
- goto complete_user;
+ if (bt->flags & BT_USER_SPACE) {
+ exception_frame = bt->stacktop - USER_EFRAME_OFFSET;
+ arm64_print_exception_frame(bt, exception_frame,
+ USER_MODE, ofp);
+ fprintf(ofp, " #0 [user space]\n");
+
+ return;
+ }
if (DUMPFILE() && is_task_active(bt->task)) {
exception_frame = stackframe.fp - SIZE(pt_regs);
@@ -1750,53 +1938,55 @@ arm64_back_trace_cmd(struct bt_info *bt)
}
}
- level = exception_frame = 0;
- while (1) {
+ for (level = 0;; level++) {
bt->instptr = stackframe.pc;
- switch (arm64_print_stackframe_entry(bt, level, &stackframe, ofp))
- {
- case BACKTRACE_COMPLETE_KERNEL:
- return;
- case BACKTRACE_COMPLETE_USER:
- goto complete_user;
- case BACKTRACE_CONTINUE:
+ /*
+ * Show one-line stackframe info
+ */
+ if (arm64_print_stackframe_entry(bt, level, &stackframe, ofp)
+ == BACKTRACE_COMPLETE_KERNEL)
break;
- }
-
- if (exception_frame) {
- arm64_print_exception_frame(bt, exception_frame, KERNEL_MODE, ofp);
- exception_frame = 0;
- }
- if (!arm64_unwind_frame(bt, &stackframe))
+ cur_frame = stackframe;
+ if (!arm64_unwind_frame(bt, &stackframe, ofp))
break;
- if (arm64_in_exception_text(bt->instptr) && INSTACK(stackframe.fp, bt)) {
- if (!(bt->flags & BT_IRQSTACK) ||
- (((stackframe.sp + SIZE(pt_regs)) < bt->stacktop)))
- exception_frame = stackframe.fp - SIZE(pt_regs);
- }
+ /*
+ * Dump the contents of the current stackframe.
+ * We need to know the next stackframe to determine
+ * the dump range:
+ * <cur_frame.fp:stackframe.fp>
+ */
+ arm64_display_full_frame(bt, &cur_frame, &stackframe);
- if ((bt->flags & BT_IRQSTACK) &&
- !arm64_on_irq_stack(bt->tc->processor, stackframe.sp)) {
- bt->flags &= ~BT_IRQSTACK;
- if (arm64_switch_stack(bt, &stackframe, ofp) == USER_MODE)
- break;
- }
+ /*
+ * If we are in a normal stackframe, just continue,
+ * otherwise show an exception frame.
+ * Since exception entry code doesn't have a real
+ * stackframe, we fake a dummy frame here.
+ */
+ if (!arm64_in_exp_entry(stackframe.pc))
+ continue;
+ if (!INSTACK(cur_frame.sp, bt))
+ fprintf(ofp, "--- <IRQ stack> ---\n");
- level++;
- }
+ arm64_print_stackframe_entry(bt, ++level, &stackframe, ofp);
+ cur_frame = stackframe;
+ arm64_unwind_frame(bt, &stackframe, ofp);
- if (is_kernel_thread(bt->tc->task))
- return;
+ /*
+ * and don't show the contents. Instead,
+ * show an exception frame below
+ */
+ mode = (stackframe.pc < machdep->machspec->userspace_top) ?
+ USER_MODE : KERNEL_MODE;
+ arm64_print_exception_frame(bt, cur_frame.sp, mode, ofp);
-complete_user:
- exception_frame = bt->stacktop - USER_EFRAME_OFFSET;
- arm64_print_exception_frame(bt, exception_frame, USER_MODE, ofp);
- if ((bt->flags & (BT_USER_SPACE|BT_KDUMP_ADJUST)) == BT_USER_SPACE)
- fprintf(ofp, " #0 [user space]\n");
+ if (mode == USER_MODE)
+ break;
+ }
}
static void
@@ -1932,41 +2122,6 @@ arm64_in_kdump_text_on_irq_stack(struct bt_info *bt)
return FALSE;
}
-static int
-arm64_switch_stack(struct bt_info *bt, struct arm64_stackframe *frame, FILE *ofp)
-{
- int i;
- ulong stacktop, words, addr;
- ulong *stackbuf;
- char buf[BUFSIZE];
- struct machine_specific *ms = machdep->machspec;
-
- if (bt->flags & BT_FULL) {
- stacktop = ms->irq_stacks[bt->tc->processor] + ms->irq_stack_size;
- words = (stacktop - bt->bptr) / sizeof(ulong);
- stackbuf = (ulong *)GETBUF(words * sizeof(ulong));
- readmem(bt->bptr, KVADDR, stackbuf, words * sizeof(long),
- "top of IRQ stack", FAULT_ON_ERROR);
-
- addr = bt->bptr;
- for (i = 0; i < words; i++) {
- if (!(i & 1))
- fprintf(ofp, "%s %lx: ", i ? "\n" : "", addr);
- fprintf(ofp, "%s ", format_stack_entry(bt, buf, stackbuf[i], 0));
- addr += sizeof(ulong);
- }
- fprintf(ofp, "\n");
- FREEBUF(stackbuf);
- }
- fprintf(ofp, "--- <IRQ stack> ---\n");
-
- if (frame->fp == 0)
- return USER_MODE;
-
- arm64_print_exception_frame(bt, frame->sp, KERNEL_MODE, ofp);
- return KERNEL_MODE;
-}
-
static int
arm64_get_dumpfile_stackframe(struct bt_info *bt, struct arm64_stackframe *frame)
{
@@ -2047,6 +2202,20 @@ arm64_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp)
}
static void
+arm64_gen_hidden_frame(struct bt_info *bt, ulong sp,
+ struct arm64_stackframe *frame)
+{
+ struct arm64_pt_regs *ptregs;
+
+ ptregs = (struct arm64_pt_regs *)
+ &bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(sp))];
+
+ frame->pc = ptregs->pc;
+ frame->fp = ptregs->regs[29];
+ frame->sp = ptregs->sp;
+}
+
+static void
arm64_print_exception_frame(struct bt_info *bt, ulong pt_regs, int mode, FILE *ofp)
{
int i, r, rows, top_reg, is_64_bit;
@@ -2055,10 +2224,16 @@ arm64_print_exception_frame(struct bt_info *bt, ulong pt_regs, int mode, FILE *o
ulong LR, SP, offset;
char buf[BUFSIZE];
+ if (mode == KERNEL_MODE)
+ fprintf(ofp, "--- <Exception in kernel> ---\n");
+ else
+ fprintf(ofp, "--- <Exception in user> ---\n");
+
if (CRASHDEBUG(1))
fprintf(ofp, "pt_regs: %lx\n", pt_regs);
- regs = (struct arm64_pt_regs *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(pt_regs))];
+ regs = (struct arm64_pt_regs *)
+ &bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(pt_regs))];
if ((mode == USER_MODE) && (regs->pstate & PSR_MODE32_BIT)) {
LR = regs->regs[14];
@@ -2130,10 +2305,11 @@ arm64_print_exception_frame(struct bt_info *bt, ulong pt_regs, int mode, FILE *o
}
if (is_64_bit) {
- fprintf(ofp, "ORIG_X0: %016lx SYSCALLNO: %lx",
- (ulong)regs->orig_x0, (ulong)regs->syscallno);
- if (mode == USER_MODE)
+ if (mode == USER_MODE) {
+ fprintf(ofp, "ORIG_X0: %016lx SYSCALLNO: %lx",
+ (ulong)regs->orig_x0, (ulong)regs->syscallno);
fprintf(ofp, " PSTATE: %08lx", (ulong)regs->pstate);
+ }
fprintf(ofp, "\n");
}
diff --git a/defs.h b/defs.h
index d6f719c..f7ea5a0 100644
--- a/defs.h
+++ b/defs.h
@@ -3058,8 +3058,14 @@ struct machine_specific {
ulong kernel_flags;
ulong irq_stack_size;
ulong *irq_stacks;
+ char *irq_stackbuf;
ulong __irqentry_text_start;
ulong __irqentry_text_end;
+ /* for exception vector code */
+ ulong exp_entry1_start;
+ ulong exp_entry1_end;
+ ulong exp_entry2_start;
+ ulong exp_entry2_end;
/* only needed for v4.6 or later kernel */
ulong kimage_voffset;
ulong kimage_text;
--
2.9.0
8 years, 5 months
arm64: "bt -f" output
by AKASHI Takahiro
Dave,
When I looked at the output from "bt -f" command, I found that stack dump
starts from frame.sp in arm64_print_stackframe_entry().
Usage of stack frames on arm64 is a bit different from that on x86, and
using frame.fp is, I believe, much useful (and accurate) for crash users.
See my patch attached below.
Details:
A layout of a stack frame in a function looks like:
stack grows to lower addresses.
/|\
|
| |
new sp +------+ <---
|dyn | |
| vars | |
new fp +- - - + |
|old fp| | a function's stack frame
|old lr| |
|static| |
| vars| |
old sp +------+ <---
|dyn |
| vars |
old fp +------+
| |
* On function entry, sp is decremented down to new fp.
* and old fp and sp are saved into this stack frame.
"Static" local variables are allocated at the same time.
* Later on, "dynamic" local variables may be allocated on a stack.
But those dynamic variables are rarely used in the kernel image,
and, as a matter of fact, sp is equal to fp in almost all functions.
(not 100% though.)
* Currently, sp is determined in arm64_unwind_frame() by
sp = a callee's fp + 0x10
where 0x10 stands for a saved area for fp and sp
* As you can see, however, this calculated sp still points to the top of
callee's static local variables and doesn't match with a *real* sp.
* So, generally, dumping a stack from this calculated sp to the next frame's
sp shows "callee's static local variables", old fp and sp.
Confused?
Probably you will be able to understand more easily by seeing an example
from my vmlinux/vmcore cases:
=== crash with my patch ===
crash> bt -f 1324
PID: 1324 TASK: ffff80002018be80 CPU: 2 COMMAND: "dhry"
ffff800022f6ae08: ffff00000812ae44 (crash_save_cpu on IRQ stack)
ffff800022f6ae10: ffff800022f74e00 ffff800020107ed0
ffff800022f6ae20: 0000000000000000 ffff800020107ed0
ffff800022f6ae30: ffff000008a26800 0000000000000003
ffff800022f6ae40: 0000018800000005 0000000000000001
#0 [ffff800022f6ae50] crash_save_cpu at ffff00000812ae44
ffff800022f6ae50: ffff800022f6b010 ffff00000808e718
#1's fp #1's lr (=handle_IPI)
ffff800022f6ae60: ffff000008bce000 0000000000000002 -----
ffff800022f6ae70: ffff800022f6ae90 0000000000000000 |
ffff800022f6ae80: ffff800000000000 0000000000000000 |
ffff800022f6ae90: 0000000000000000 0000000000000000 |local variables
... | including a big
ffff800022f6afd0: 0000000000000000 0000000000000000 |"struct elf_prsatus"
ffff800022f6afe0: ffff800022f6aff0 ffff00000808a820 |
ffff800022f6aff0: ffff800022f6b010 ffff00000808e758 |
ffff800022f6b000: ffff000008bce000 0000000000000000 -----
#1 [ffff800022f6b010] handle_IPI at ffff00000808e718
ffff800022f6b010: ffff800022f6b040 ffff0000080815f8
ffff800022f6b020: 0000000000000003 0000000000001fff
ffff800022f6b030: ffff800020107ed0 ffff000008e60c54
#2 [ffff800022f6b040] gic_handle_irq at ffff0000080815f8
ffff800022f6b040: ffff800022f6b080 ffff000008084c4c
== crash(master branch) ===
crash.dave> bt -f 1324
PID: 1324 TASK: ffff80002018be80 CPU: 2 COMMAND: "dhry"
ffff800022f6ae08: ffff00000812ae44 (crash_save_cpu on IRQ stack)
#0 [ffff800022f6ae10] crash_save_cpu at ffff00000812ae44
ffff800022f6ae10: ffff800022f74e00 ffff800020107ed0 -----
ffff800022f6ae20: 0000000000000000 ffff800020107ed0 |local variables
ffff800022f6ae30: ffff000008a26800 0000000000000003 | of callee(*)
ffff800022f6ae40: 0000018800000005 0000000000000001 -----
ffff800022f6ae50: ffff800022f6b010 ffff00000808e718 <= *real* stack frame
#1 [ffff800022f6ae60] handle_IPI at ffff00000808e718 of crash_save_cpu()
ffff800022f6ae60: ffff000008bce000 0000000000000002 starts here.
ffff800022f6ae70: ffff800022f6ae90 0000000000000000
ffff800022f6ae80: ffff800000000000 0000000000000000
ffff800022f6ae90: 0000000000000000 0000000000000000
ffff800022f6aea0: 0000000000000000 000000000000052c
ffff800022f6aeb0: 0000000000000000 0000000000000000
...
ffff800022f6afa0: ffff800022f6afc0 0000000000000000
ffff800022f6afb0: ffff800022f6afe0 ffff000008675850
ffff800022f6afc0: 0000000000402138 0000000000000000
ffff800022f6afd0: 0000000000000000 0000000000000000
ffff800022f6afe0: ffff800022f6aff0 ffff00000808a820
ffff800022f6aff0: ffff800022f6b010 ffff00000808e758
ffff800022f6b000: ffff000008bce000 0000000000000000
ffff800022f6b010: ffff800022f6b040 ffff0000080815f8
#2 [ffff800022f6b020] gic_handle_irq at ffff0000080815f8
ffff800022f6b020: 0000000000000003 0000000000001fff
ffff800022f6b030: ffff800020107ed0 ffff000008e60c54
ffff800022f6b040: ffff800022f6b080 ffff000008084c4c
=== END ===
(*) append_elf_note()
Thanks,
-Takahiro AKASHI
======8<======
>From b3ca69ace916a5c233ce937954da887ba5487e50 Mon Sep 17 00:00:00 2001
From: AKASHI Takahiro <takahiro.akashi(a)linaro.org>
Date: Wed, 8 Jun 2016 11:14:22 +0900
Subject: [PATCH] arm64: dump a stack frame based on fp
Signed-off-by: AKASHI Takahiro <takahiro.akashi(a)linaro.org>
---
arm64.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/arm64.c b/arm64.c
index bdea79c..22f93a2 100644
--- a/arm64.c
+++ b/arm64.c
@@ -1508,12 +1508,12 @@ arm64_print_stackframe_entry(struct bt_info *bt, int level, struct arm64_stackfr
}
if (bt->flags & BT_FULL) {
- arm64_display_full_frame(bt, frame->sp);
- bt->frameptr = frame->sp;
+ arm64_display_full_frame(bt, frame->fp);
+ bt->frameptr = frame->fp;
}
fprintf(ofp, "%s#%d [%8lx] %s at %lx", level < 10 ? " " : "", level,
- frame->sp, name_plus_offset ? name_plus_offset : name, frame->pc);
+ frame->fp, name_plus_offset ? name_plus_offset : name, frame->pc);
if (BT_REFERENCE_CHECK(bt))
arm64_do_bt_reference_check(bt, frame->pc, name);
@@ -1571,12 +1571,10 @@ arm64_unwind_frame(struct bt_info *bt, struct arm64_stackframe *frame)
{
unsigned long high, low, fp;
unsigned long stack_mask;
- unsigned long irq_stack_ptr, orig_sp, sp_in;
+ unsigned long irq_stack_ptr, orig_sp;
struct arm64_pt_regs *ptregs;
struct machine_specific *ms;
- sp_in = frame->sp;
-
stack_mask = (unsigned long)(ARM64_STACK_SIZE) - 1;
fp = frame->fp;
@@ -1613,7 +1611,7 @@ arm64_unwind_frame(struct bt_info *bt, struct arm64_stackframe *frame)
ptregs = (struct arm64_pt_regs *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(orig_sp))];
frame->sp = orig_sp;
frame->pc = ptregs->pc;
- bt->bptr = sp_in;
+ bt->bptr = fp;
if (CRASHDEBUG(1))
error(INFO,
"arm64_unwind_frame: switch stacks: fp: %lx sp: %lx pc: %lx\n",
@@ -2004,8 +2002,11 @@ arm64_print_exception_frame(struct bt_info *bt, ulong pt_regs, int mode, FILE *o
ulong LR, SP, offset;
char buf[BUFSIZE];
+#if 0 /* FIXME? */
if (bt->flags & BT_FULL)
arm64_display_full_frame(bt, pt_regs);
+}
+#endif
if (CRASHDEBUG(1))
fprintf(ofp, "pt_regs: %lx\n", pt_regs);
--
2.8.1
8 years, 5 months
[PATCH v5] arm64: fix kernel memory map handling for kaslr-enabled
by AKASHI Takahiro
In my next version of kdump patch, the following VMCOREINFO will be
added:
NUMBER(VA_BITS)
NUMBER(kimage_voffset)
NUMBER(PHYS_OFFSET)
KERNELOFFSET
I think that those will also satisfy mkdumpfile requirements.
-> Pratyush
Thanks,
-Takahiro AKASHI
changes in v5:
* Calcs PHYS_OFFSET by reading VMCOREINFO, "NUMBER(PHYS_OFFSET)"
"memstart_addr"-based routine was also moved into arm64_calc_phys_base().
changes in v4:
* Fixed VA_BITS calculation for v4.5 or earlier
* Added 4-level address translation with 4KB page size
* Removed "fix a renaming of a member of struct page, _count to _refcount"
Chnages in v3:
* Refined KASLR handling
hopefully the tool works even on a live system if CONFIG_RANDOMIZE_BASE is
not configured
* Fixed a renaming of a member of struct page
* Removed a commit message regarding an issue of backtracing a panic'ed task
because this is not a bug in this tool, but my kdump patch's.
* Reported "kmem <vmalloc addr>" issue in a commit message
changes in v2:
* Fixed build warnings
* Moved ARM64_NEW_VMEMMAP to machdep->flags
* Show additional kaslr-related parameters in arm64_dump_machdep_table()
* Handle a VMCOREINFO, "NUMBER(kimage_voffset)"
======8<======
>From a6dd9d73120dcc3f2d68dbe9a8f2d16a128c4002 Mon Sep 17 00:00:00 2001
From: AKASHI Takahiro <takahiro.akashi(a)linaro.org>
Date: Mon, 16 May 2016 17:31:55 +0900
Subject: [PATCH v5] arm64: fix kernel memory map handling for kaslr-enabled
kernel
In kernel v4.6, Kernel ASLR (KASLR) is supported on arm64, and the start
address of the kernel image can be randomized if CONFIG_RANDOMIZE_BASE is
enabled.
Even worse, the kernel image is no more mapped in the linear mapping, but
in vmalloc area (i.e. below PAGE_OFFSET).
Now, according to the kernel's memory.h, converting a virtual address to
a physical address should be done like below:
phys_addr_t __x = (phys_addr_t)(x); \
__x & BIT(VA_BITS - 1) ? (__x & ~PAGE_OFFSET) + PHYS_OFFSET : \
(__x - kimage_voffset); })
Please note that PHYS_OFFSET is no more equal to the start address of
the first usable memory block in SYSTEM RAM due to the fact mentioned
above.
This patch addresses this change and allows the crash utility to access
memory contents with correct addresses.
* On a live system, crash with this patch won't work, especially
with CONFIG_RANDOMIZE_BASE configured, because we currently have no way
to know kimage_voffset.
* For a core dump file, we can do simply:
$ crash <vmlinux> <vmcore>
as long as the file has "NUMBER(kimage_voffset)"
(RELOC_AUTO|KASLR is automatically set.)
Signed-off-by: AKASHI Takahiro <takahiro.akashi(a)linaro.org>
---
arm64.c | 232 ++++++++++++++++++++++++++++++++++++++++++++++++--------------
defs.h | 26 ++++---
main.c | 7 +-
symbols.c | 12 ++--
4 files changed, 211 insertions(+), 66 deletions(-)
diff --git a/arm64.c b/arm64.c
index 86ec348..37524c8 100644
--- a/arm64.c
+++ b/arm64.c
@@ -73,6 +73,23 @@ static int arm64_get_crash_notes(void);
static void arm64_calc_VA_BITS(void);
static int arm64_is_uvaddr(ulong, struct task_context *);
+ulong
+arm64_VTOP(ulong addr)
+{
+ if (machdep->flags & NEW_VMEMMAP) {
+ if (addr >= machdep->machspec->page_offset)
+ return machdep->machspec->phys_offset
+ + (addr - machdep->machspec->page_offset);
+ else if (machdep->machspec->kimage_voffset)
+ return addr - machdep->machspec->kimage_voffset;
+ else /* no randomness */
+ return machdep->machspec->phys_offset
+ + (addr - machdep->machspec->vmalloc_start_addr);
+ } else {
+ return machdep->machspec->phys_offset
+ + (addr - machdep->machspec->page_offset);
+ }
+}
/*
* Do all necessary machine-specific setup here. This is called several times
@@ -82,6 +99,7 @@ void
arm64_init(int when)
{
ulong value;
+ char *string;
struct machine_specific *ms;
#if defined(__x86_64__)
@@ -103,9 +121,32 @@ arm64_init(int when)
if (machdep->cmdline_args[0])
arm64_parse_cmdline_args();
machdep->flags |= MACHDEP_BT_TEXT;
+
+ ms = machdep->machspec;
+ if (!ms->kimage_voffset &&
+ (string = pc->read_vmcoreinfo("NUMBER(kimage_voffset)"))) {
+ ms->kimage_voffset = htol(string, QUIET, NULL);
+ free(string);
+ }
+
+ if (ms->kimage_voffset) {
+ machdep->flags |= NEW_VMEMMAP;
+
+ /*
+ * Even if CONFIG_RANDOMIZE_BASE is not configured,
+ * derive_kaslr_offset() should work and set
+ * kt->relocate to 0
+ */
+ if (!kt->relocate && !(kt->flags2 & (RELOC_AUTO|KASLR)))
+ kt->flags2 |= (RELOC_AUTO|KASLR);
+ }
+
break;
case PRE_GDB:
+ if (kernel_symbol_exists("kimage_voffset"))
+ machdep->flags |= NEW_VMEMMAP;
+
if (!machdep->pagesize) {
/*
* Kerneldoc Documentation/arm64/booting.txt describes
@@ -161,16 +202,34 @@ arm64_init(int when)
machdep->pagemask = ~((ulonglong)machdep->pageoffset);
arm64_calc_VA_BITS();
- machdep->machspec->page_offset = ARM64_PAGE_OFFSET;
+ ms = machdep->machspec;
+ ms->page_offset = ARM64_PAGE_OFFSET;
machdep->identity_map_base = ARM64_PAGE_OFFSET;
- machdep->machspec->userspace_top = ARM64_USERSPACE_TOP;
- machdep->machspec->modules_vaddr = ARM64_MODULES_VADDR;
- machdep->machspec->modules_end = ARM64_MODULES_END;
- machdep->machspec->vmalloc_start_addr = ARM64_VMALLOC_START;
- machdep->machspec->vmalloc_end = ARM64_VMALLOC_END;
- machdep->kvbase = ARM64_VMALLOC_START;
- machdep->machspec->vmemmap_vaddr = ARM64_VMEMMAP_VADDR;
- machdep->machspec->vmemmap_end = ARM64_VMEMMAP_END;
+ machdep->kvbase = ARM64_VA_START;
+ ms->userspace_top = ARM64_USERSPACE_TOP;
+ if (machdep->flags & NEW_VMEMMAP) {
+ struct syment *sp;
+
+ sp = kernel_symbol_search("_text");
+ ms->kimage_text = (sp ? sp->value : 0);
+ sp = kernel_symbol_search("_end");
+ ms->kimage_end = (sp ? sp->value : 0);
+
+ ms->modules_vaddr = ARM64_VA_START;
+ if (kernel_symbol_exists("kasan_init"))
+ ms->modules_vaddr += ARM64_KASAN_SHADOW_SIZE;
+ ms->modules_end = ms->modules_vaddr
+ + ARM64_MODULES_VSIZE -1;
+
+ ms->vmalloc_start_addr = ms->modules_end + 1;
+ } else {
+ ms->modules_vaddr = ARM64_PAGE_OFFSET - MEGABYTES(64);
+ ms->modules_end = ARM64_PAGE_OFFSET - 1;
+ ms->vmalloc_start_addr = ARM64_VA_START;
+ }
+ ms->vmalloc_end = ARM64_VMALLOC_END;
+ ms->vmemmap_vaddr = ARM64_VMEMMAP_VADDR;
+ ms->vmemmap_end = ARM64_VMEMMAP_END;
switch (machdep->pagesize)
{
@@ -241,8 +300,6 @@ arm64_init(int when)
machdep->stacksize = ARM64_STACK_SIZE;
machdep->flags |= VMEMMAP;
- arm64_calc_phys_offset();
-
machdep->uvtop = arm64_uvtop;
machdep->kvtop = arm64_kvtop;
machdep->is_kvaddr = generic_is_kvaddr;
@@ -271,6 +328,10 @@ arm64_init(int when)
machdep->dumpfile_init = NULL;
machdep->verify_line_number = NULL;
machdep->init_kernel_pgd = arm64_init_kernel_pgd;
+
+ /* use machdep parameters */
+ arm64_calc_phys_offset();
+
break;
case POST_GDB:
@@ -420,6 +481,8 @@ arm64_dump_machdep_table(ulong arg)
fprintf(fp, "%sIRQ_STACKS", others++ ? "|" : "");
if (machdep->flags & MACHDEP_BT_TEXT)
fprintf(fp, "%sMACHDEP_BT_TEXT", others++ ? "|" : "");
+ if (machdep->flags & NEW_VMEMMAP)
+ fprintf(fp, "%sNEW_VMEMMAP", others++ ? "|" : "");
fprintf(fp, ")\n");
fprintf(fp, " kvbase: %lx\n", machdep->kvbase);
@@ -524,6 +587,11 @@ arm64_dump_machdep_table(ulong arg)
fprintf(fp, " modules_end: %016lx\n", ms->modules_end);
fprintf(fp, " vmemmap_vaddr: %016lx\n", ms->vmemmap_vaddr);
fprintf(fp, " vmemmap_end: %016lx\n", ms->vmemmap_end);
+ if (machdep->flags & NEW_VMEMMAP) {
+ fprintf(fp, " kimage_text: %016lx\n", ms->kimage_text);
+ fprintf(fp, " kimage_end: %016lx\n", ms->kimage_end);
+ fprintf(fp, " kimage_voffset: %016lx\n", ms->kimage_voffset);
+ }
fprintf(fp, " phys_offset: %lx\n", ms->phys_offset);
fprintf(fp, "__exception_text_start: %lx\n", ms->__exception_text_start);
fprintf(fp, " __exception_text_end: %lx\n", ms->__exception_text_end);
@@ -566,6 +634,42 @@ arm64_dump_machdep_table(ulong arg)
}
}
+static int
+arm64_parse_machdep_arg_l(char *argstring, char *param, ulong *value)
+{
+ int len;
+ int megabytes = FALSE;
+ char *p;
+
+ len = strlen(param);
+ if (!STRNEQ(argstring, param) || (argstring[len] != '='))
+ return FALSE;
+
+ if ((LASTCHAR(argstring) == 'm') ||
+ (LASTCHAR(argstring) == 'M')) {
+ LASTCHAR(argstring) = NULLCHAR;
+ megabytes = TRUE;
+ }
+
+ p = argstring + len + 1;
+ if (strlen(p)) {
+ int flags = RETURN_ON_ERROR | QUIET;
+ int err = 0;
+
+ if (megabytes) {
+ *value = dtol(p, flags, &err);
+ if (!err)
+ *value = MEGABYTES(*value);
+ } else {
+ *value = htol(p, flags, &err);
+ }
+
+ if (!err)
+ return TRUE;
+ }
+
+ return FALSE;
+}
/*
* Parse machine dependent command line arguments.
@@ -577,11 +681,10 @@ arm64_dump_machdep_table(ulong arg)
static void
arm64_parse_cmdline_args(void)
{
- int index, i, c, err;
+ int index, i, c;
char *arglist[MAXARGS];
char buf[BUFSIZE];
char *p;
- ulong value = 0;
for (index = 0; index < MAX_MACHDEP_ARGS; index++) {
if (!machdep->cmdline_args[index])
@@ -603,39 +706,23 @@ arm64_parse_cmdline_args(void)
c = parse_line(buf, arglist);
for (i = 0; i < c; i++) {
- err = 0;
-
- if (STRNEQ(arglist[i], "phys_offset=")) {
- int megabytes = FALSE;
- int flags = RETURN_ON_ERROR | QUIET;
-
- if ((LASTCHAR(arglist[i]) == 'm') ||
- (LASTCHAR(arglist[i]) == 'M')) {
- LASTCHAR(arglist[i]) = NULLCHAR;
- megabytes = TRUE;
- }
-
- p = arglist[i] + strlen("phys_offset=");
- if (strlen(p)) {
- if (megabytes)
- value = dtol(p, flags, &err);
- else
- value = htol(p, flags, &err);
- }
-
- if (!err) {
- if (megabytes)
- value = MEGABYTES(value);
-
- machdep->machspec->phys_offset = value;
-
- error(NOTE,
- "setting phys_offset to: 0x%lx\n\n",
- machdep->machspec->phys_offset);
+ if (arm64_parse_machdep_arg_l(arglist[i],
+ "phys_offset",
+ &machdep->machspec->phys_offset)) {
+ error(NOTE,
+ "setting phys_offset to: 0x%lx\n\n",
+ machdep->machspec->phys_offset);
+
+ machdep->flags |= PHYS_OFFSET;
+ continue;
+ } else if (arm64_parse_machdep_arg_l(arglist[i],
+ "kimage_voffset",
+ &machdep->machspec->kimage_voffset)) {
+ error(NOTE,
+ "setting kimage_voffset to: 0x%lx\n\n",
+ machdep->machspec->kimage_voffset);
- machdep->flags |= PHYS_OFFSET;
- continue;
- }
+ continue;
}
error(WARNING, "ignoring --machdep option: %s\n",
@@ -720,7 +807,32 @@ arm64_calc_phys_offset(void)
static int
arm64_kdump_phys_base(ulong *phys_offset)
{
- return arm_kdump_phys_base(phys_offset);
+ char *string;
+ struct syment *sp;
+ physaddr_t paddr;
+
+ if (string = pc->read_vmcoreinfo("NUMBER(PHYS_OFFSET)")) {
+ *phys_offset = htol(string, QUIET, NULL);
+ free(string);
+ return TRUE;
+ }
+
+ if (machdep->flags & NEW_VMEMMAP) {
+ if (!(sp = kernel_symbol_search("memstart_addr")))
+ return FALSE;
+
+ /* sanity check */
+ if (!machdep->machspec->kimage_voffset)
+ return FALSE;
+
+ paddr = sp->value - machdep->machspec->kimage_voffset;
+ if (READMEM(-1, phys_offset, sizeof(*phys_offset),
+ sp->value, paddr) > 0)
+ return TRUE;
+ else
+ return FALSE;
+ } else
+ return arm_kdump_phys_base(phys_offset);
}
static void
@@ -2509,6 +2621,11 @@ arm64_IS_VMALLOC_ADDR(ulong vaddr)
{
struct machine_specific *ms = machdep->machspec;
+ if ((machdep->flags & NEW_VMEMMAP) &&
+ (vaddr >= machdep->machspec->kimage_text) &&
+ (vaddr <= machdep->machspec->kimage_end))
+ return FALSE;
+
return ((vaddr >= ms->vmalloc_start_addr && vaddr <= ms->vmalloc_end) ||
((machdep->flags & VMEMMAP) &&
(vaddr >= ms->vmemmap_vaddr && vaddr <= ms->vmemmap_end)) ||
@@ -2539,7 +2656,10 @@ arm64_calc_VA_BITS(void)
for (bitval = highest_bit_long(value); bitval; bitval--) {
if ((value & (1UL << bitval)) == 0) {
- machdep->machspec->VA_BITS = bitval + 2;
+ if (machdep->flags & NEW_VMEMMAP)
+ machdep->machspec->VA_BITS = bitval + 1;
+ else
+ machdep->machspec->VA_BITS = bitval + 2;
break;
}
}
@@ -2593,10 +2713,22 @@ arm64_calc_virtual_memory_ranges(void)
break;
}
- vmemmap_size = ALIGN((1UL << (ms->VA_BITS - machdep->pageshift)) * SIZE(page), PUD_SIZE);
+ if (machdep->flags & NEW_VMEMMAP)
+#define STRUCT_PAGE_MAX_SHIFT 6
+ vmemmap_size = 1UL << (ms->VA_BITS - machdep->pageshift - 1
+ + STRUCT_PAGE_MAX_SHIFT);
+ else
+ vmemmap_size = ALIGN((1UL << (ms->VA_BITS - machdep->pageshift)) * SIZE(page), PUD_SIZE);
+
vmalloc_end = (ms->page_offset - PUD_SIZE - vmemmap_size - SZ_64K);
- vmemmap_start = vmalloc_end + SZ_64K;
- vmemmap_end = vmemmap_start + vmemmap_size;
+
+ if (machdep->flags & NEW_VMEMMAP) {
+ vmemmap_start = ms->page_offset - vmemmap_size;
+ vmemmap_end = ms->page_offset;
+ } else {
+ vmemmap_start = vmalloc_end + SZ_64K;
+ vmemmap_end = vmemmap_start + vmemmap_size;
+ }
ms->vmalloc_end = vmalloc_end - 1;
ms->vmemmap_vaddr = vmemmap_start;
diff --git a/defs.h b/defs.h
index 8eb601b..d6f719c 100644
--- a/defs.h
+++ b/defs.h
@@ -2846,8 +2846,8 @@ typedef u64 pte_t;
#define PTOV(X) \
((unsigned long)(X)-(machdep->machspec->phys_offset)+(machdep->machspec->page_offset))
-#define VTOP(X) \
- ((unsigned long)(X)-(machdep->machspec->page_offset)+(machdep->machspec->phys_offset))
+
+#define VTOP(X) arm64_VTOP((ulong)(X))
#define USERSPACE_TOP (machdep->machspec->userspace_top)
#define PAGE_OFFSET (machdep->machspec->page_offset)
@@ -2962,19 +2962,24 @@ typedef signed int s32;
#define VM_L3_4K (0x10)
#define KDUMP_ENABLED (0x20)
#define IRQ_STACKS (0x40)
-#define VM_L4_4K (0x80)
+#define NEW_VMEMMAP (0x80)
+#define VM_L4_4K (0x100)
/*
* sources: Documentation/arm64/memory.txt
* arch/arm64/include/asm/memory.h
* arch/arm64/include/asm/pgtable.h
*/
-
-#define ARM64_PAGE_OFFSET ((0xffffffffffffffffUL) << (machdep->machspec->VA_BITS - 1))
+#define ARM64_VA_START ((0xffffffffffffffffUL) \
+ << machdep->machspec->VA_BITS)
+#define ARM64_PAGE_OFFSET ((0xffffffffffffffffUL) \
+ << (machdep->machspec->VA_BITS - 1))
#define ARM64_USERSPACE_TOP ((1UL) << machdep->machspec->VA_BITS)
-#define ARM64_MODULES_VADDR (ARM64_PAGE_OFFSET - MEGABYTES(64))
-#define ARM64_MODULES_END (ARM64_PAGE_OFFSET - 1)
-#define ARM64_VMALLOC_START ((0xffffffffffffffffUL) << machdep->machspec->VA_BITS)
+
+/* only used for v4.6 or later */
+#define ARM64_MODULES_VSIZE MEGABYTES(128)
+#define ARM64_KASAN_SHADOW_SIZE (1UL << (machdep->machspec->VA_BITS - 3))
+
/*
* The following 3 definitions are the original values, but are obsolete
* for 3.17 and later kernels because they are now build-time calculations.
@@ -3055,6 +3060,10 @@ struct machine_specific {
ulong *irq_stacks;
ulong __irqentry_text_start;
ulong __irqentry_text_end;
+ /* only needed for v4.6 or later kernel */
+ ulong kimage_voffset;
+ ulong kimage_text;
+ ulong kimage_end;
};
struct arm64_stackframe {
@@ -5412,6 +5421,7 @@ void unwind_backtrace(struct bt_info *);
#ifdef ARM64
void arm64_init(int);
void arm64_dump_machdep_table(ulong);
+ulong arm64_VTOP(ulong);
int arm64_IS_VMALLOC_ADDR(ulong);
ulong arm64_swp_type(ulong);
ulong arm64_swp_offset(ulong);
diff --git a/main.c b/main.c
index 05787f0..4065e9a 100644
--- a/main.c
+++ b/main.c
@@ -227,9 +227,10 @@ main(int argc, char **argv)
optarg);
}
} else if (STREQ(long_options[option_index].name, "kaslr")) {
- if (!machine_type("X86_64"))
- error(INFO, "--kaslr only valid "
- "with X86_64 machine type.\n");
+ if (!machine_type("X86_64") &&
+ !machine_type("ARM64"))
+ error(INFO, "--kaslr not valid "
+ "with this machine type.\n");
else if (STREQ(optarg, "auto"))
kt->flags2 |= (RELOC_AUTO|KASLR);
else {
diff --git a/symbols.c b/symbols.c
index a8d3563..b0a6461 100644
--- a/symbols.c
+++ b/symbols.c
@@ -593,7 +593,8 @@ kaslr_init(void)
{
char *string;
- if (!machine_type("X86_64") || (kt->flags & RELOC_SET))
+ if ((!machine_type("X86_64") && !machine_type("ARM64")) ||
+ (kt->flags & RELOC_SET))
return;
/*
@@ -712,7 +713,7 @@ store_symbols(bfd *abfd, int dynamic, void *minisyms, long symcount,
if (machine_type("X86")) {
if (!(kt->flags & RELOC_SET))
kt->flags |= RELOC_FORCE;
- } else if (machine_type("X86_64")) {
+ } else if (machine_type("X86_64") || machine_type("ARM64")) {
if ((kt->flags2 & RELOC_AUTO) && !(kt->flags & RELOC_SET))
derive_kaslr_offset(abfd, dynamic, from,
fromend, size, store);
@@ -783,7 +784,8 @@ store_sysmap_symbols(void)
error(FATAL, "symbol table namespace malloc: %s\n",
strerror(errno));
- if (!machine_type("X86") && !machine_type("X86_64"))
+ if (!machine_type("X86") && !machine_type("X86_64") &&
+ !machine_type("ARM64"))
kt->flags &= ~RELOC_SET;
first = 0;
@@ -833,7 +835,7 @@ store_sysmap_symbols(void)
}
/*
- * Handle x86 kernels configured such that the vmlinux symbols
+ * Handle x86/arm64 kernels configured such that the vmlinux symbols
* are not as loaded into the kernel (not unity-mapped).
*/
static ulong
@@ -4681,7 +4683,7 @@ value_search(ulong value, ulong *offset)
if ((sp = machdep->value_to_symbol(value, offset)))
return sp;
- if (IS_VMALLOC_ADDR(value))
+ if (IS_VMALLOC_ADDR(value))
goto check_modules;
if ((sp = symval_hash_search(value)) == NULL)
--
2.8.1
8 years, 5 months
[PATCH v6] arm64: fix kernel memory map handling for kaslr-enabled kernel
by AKASHI Takahiro
A quick update.
changes in v6:
* arm64_kdump_phys_offset() now falls through the default case,
arm_kdump_phys_offset(), anyway.
changes in v5:
* Calcs PHYS_OFFSET by reading VMCOREINFO, "NUMBER(PHYS_OFFSET)"
"memstart_addr"-based routine was also moved into arm64_kdump_phys_base().
changes in v4:
* Fixed VA_BITS calculation for v4.5 or earlier
* Added 4-level address translation with 4KB page size
* Removed "fix a renaming of a member of struct page, _count to _refcount"
Chnages in v3:
* Refined KASLR handling
hopefully the tool works even on a live system if CONFIG_RANDOMIZE_BASE is
not configured
* Fixed a renaming of a member of struct page
* Removed a commit message regarding an issue of backtracing a panic'ed task
because this is not a bug in this tool, but my kdump patch's.
* Reported "kmem <vmalloc addr>" issue in a commit message
changes in v2:
* Fixed build warnings
* Moved ARM64_NEW_VMEMMAP to machdep->flags
* Show additional kaslr-related parameters in arm64_dump_machdep_table()
* Handle a VMCOREINFO, "NUMBER(kimage_voffset)"
======8<======
>From 666cdf305aa246ce6b30282d8e89e950dc828f70 Mon Sep 17 00:00:00 2001
From: AKASHI Takahiro <takahiro.akashi(a)linaro.org>
Date: Mon, 16 May 2016 17:31:55 +0900
Subject: [PATCH v6] arm64: fix kernel memory map handling for kaslr-enabled
kernel
In kernel v4.6, Kernel ASLR (KASLR) is supported on arm64, and the start
address of the kernel image can be randomized if CONFIG_RANDOMIZE_BASE is
enabled.
Even worse, the kernel image is no longer mapped in the linear mapping, but
in the vmalloc area (i.e. below PAGE_OFFSET).
Now, according to the kernel's memory.h, converting a virtual address to
a physical address should be done like below:
phys_addr_t __x = (phys_addr_t)(x); \
__x & BIT(VA_BITS - 1) ? (__x & ~PAGE_OFFSET) + PHYS_OFFSET : \
(__x - kimage_voffset); })
Please note that PHYS_OFFSET is no longer equal to the start address of
the first usable memory block in SYSTEM RAM due to the fact mentioned
above.
This patch addresses this change and allows the crash utility to access
memory contents with correct addresses.
* On a live system, crash with this patch won't work, especially
with CONFIG_RANDOMIZE_BASE configured, because we currently have no way
to know kimage_voffset.
* For a core dump file, we can do simply:
$ crash <vmlinux> <vmcore>
as long as the file has "NUMBER(kimage_voffset)"
(RELOC_AUTO|KASLR is automatically set.)
Signed-off-by: AKASHI Takahiro <takahiro.akashi(a)linaro.org>
---
arm64.c | 227 ++++++++++++++++++++++++++++++++++++++++++++++++--------------
defs.h | 26 ++++---
main.c | 7 +-
symbols.c | 12 ++--
4 files changed, 206 insertions(+), 66 deletions(-)
diff --git a/arm64.c b/arm64.c
index 86ec348..21e3d8e 100644
--- a/arm64.c
+++ b/arm64.c
@@ -73,6 +73,23 @@ static int arm64_get_crash_notes(void);
static void arm64_calc_VA_BITS(void);
static int arm64_is_uvaddr(ulong, struct task_context *);
+ulong
+arm64_VTOP(ulong addr)
+{
+ if (machdep->flags & NEW_VMEMMAP) {
+ if (addr >= machdep->machspec->page_offset)
+ return machdep->machspec->phys_offset
+ + (addr - machdep->machspec->page_offset);
+ else if (machdep->machspec->kimage_voffset)
+ return addr - machdep->machspec->kimage_voffset;
+ else /* no randomness */
+ return machdep->machspec->phys_offset
+ + (addr - machdep->machspec->vmalloc_start_addr);
+ } else {
+ return machdep->machspec->phys_offset
+ + (addr - machdep->machspec->page_offset);
+ }
+}
/*
* Do all necessary machine-specific setup here. This is called several times
@@ -82,6 +99,7 @@ void
arm64_init(int when)
{
ulong value;
+ char *string;
struct machine_specific *ms;
#if defined(__x86_64__)
@@ -103,9 +121,32 @@ arm64_init(int when)
if (machdep->cmdline_args[0])
arm64_parse_cmdline_args();
machdep->flags |= MACHDEP_BT_TEXT;
+
+ ms = machdep->machspec;
+ if (!ms->kimage_voffset &&
+ (string = pc->read_vmcoreinfo("NUMBER(kimage_voffset)"))) {
+ ms->kimage_voffset = htol(string, QUIET, NULL);
+ free(string);
+ }
+
+ if (ms->kimage_voffset) {
+ machdep->flags |= NEW_VMEMMAP;
+
+ /*
+ * Even if CONFIG_RANDOMIZE_BASE is not configured,
+ * derive_kaslr_offset() should work and set
+ * kt->relocate to 0
+ */
+ if (!kt->relocate && !(kt->flags2 & (RELOC_AUTO|KASLR)))
+ kt->flags2 |= (RELOC_AUTO|KASLR);
+ }
+
break;
case PRE_GDB:
+ if (kernel_symbol_exists("kimage_voffset"))
+ machdep->flags |= NEW_VMEMMAP;
+
if (!machdep->pagesize) {
/*
* Kerneldoc Documentation/arm64/booting.txt describes
@@ -161,16 +202,34 @@ arm64_init(int when)
machdep->pagemask = ~((ulonglong)machdep->pageoffset);
arm64_calc_VA_BITS();
- machdep->machspec->page_offset = ARM64_PAGE_OFFSET;
+ ms = machdep->machspec;
+ ms->page_offset = ARM64_PAGE_OFFSET;
machdep->identity_map_base = ARM64_PAGE_OFFSET;
- machdep->machspec->userspace_top = ARM64_USERSPACE_TOP;
- machdep->machspec->modules_vaddr = ARM64_MODULES_VADDR;
- machdep->machspec->modules_end = ARM64_MODULES_END;
- machdep->machspec->vmalloc_start_addr = ARM64_VMALLOC_START;
- machdep->machspec->vmalloc_end = ARM64_VMALLOC_END;
- machdep->kvbase = ARM64_VMALLOC_START;
- machdep->machspec->vmemmap_vaddr = ARM64_VMEMMAP_VADDR;
- machdep->machspec->vmemmap_end = ARM64_VMEMMAP_END;
+ machdep->kvbase = ARM64_VA_START;
+ ms->userspace_top = ARM64_USERSPACE_TOP;
+ if (machdep->flags & NEW_VMEMMAP) {
+ struct syment *sp;
+
+ sp = kernel_symbol_search("_text");
+ ms->kimage_text = (sp ? sp->value : 0);
+ sp = kernel_symbol_search("_end");
+ ms->kimage_end = (sp ? sp->value : 0);
+
+ ms->modules_vaddr = ARM64_VA_START;
+ if (kernel_symbol_exists("kasan_init"))
+ ms->modules_vaddr += ARM64_KASAN_SHADOW_SIZE;
+ ms->modules_end = ms->modules_vaddr
+ + ARM64_MODULES_VSIZE -1;
+
+ ms->vmalloc_start_addr = ms->modules_end + 1;
+ } else {
+ ms->modules_vaddr = ARM64_PAGE_OFFSET - MEGABYTES(64);
+ ms->modules_end = ARM64_PAGE_OFFSET - 1;
+ ms->vmalloc_start_addr = ARM64_VA_START;
+ }
+ ms->vmalloc_end = ARM64_VMALLOC_END;
+ ms->vmemmap_vaddr = ARM64_VMEMMAP_VADDR;
+ ms->vmemmap_end = ARM64_VMEMMAP_END;
switch (machdep->pagesize)
{
@@ -241,8 +300,6 @@ arm64_init(int when)
machdep->stacksize = ARM64_STACK_SIZE;
machdep->flags |= VMEMMAP;
- arm64_calc_phys_offset();
-
machdep->uvtop = arm64_uvtop;
machdep->kvtop = arm64_kvtop;
machdep->is_kvaddr = generic_is_kvaddr;
@@ -271,6 +328,10 @@ arm64_init(int when)
machdep->dumpfile_init = NULL;
machdep->verify_line_number = NULL;
machdep->init_kernel_pgd = arm64_init_kernel_pgd;
+
+ /* use machdep parameters */
+ arm64_calc_phys_offset();
+
break;
case POST_GDB:
@@ -420,6 +481,8 @@ arm64_dump_machdep_table(ulong arg)
fprintf(fp, "%sIRQ_STACKS", others++ ? "|" : "");
if (machdep->flags & MACHDEP_BT_TEXT)
fprintf(fp, "%sMACHDEP_BT_TEXT", others++ ? "|" : "");
+ if (machdep->flags & NEW_VMEMMAP)
+ fprintf(fp, "%sNEW_VMEMMAP", others++ ? "|" : "");
fprintf(fp, ")\n");
fprintf(fp, " kvbase: %lx\n", machdep->kvbase);
@@ -524,6 +587,11 @@ arm64_dump_machdep_table(ulong arg)
fprintf(fp, " modules_end: %016lx\n", ms->modules_end);
fprintf(fp, " vmemmap_vaddr: %016lx\n", ms->vmemmap_vaddr);
fprintf(fp, " vmemmap_end: %016lx\n", ms->vmemmap_end);
+ if (machdep->flags & NEW_VMEMMAP) {
+ fprintf(fp, " kimage_text: %016lx\n", ms->kimage_text);
+ fprintf(fp, " kimage_end: %016lx\n", ms->kimage_end);
+ fprintf(fp, " kimage_voffset: %016lx\n", ms->kimage_voffset);
+ }
fprintf(fp, " phys_offset: %lx\n", ms->phys_offset);
fprintf(fp, "__exception_text_start: %lx\n", ms->__exception_text_start);
fprintf(fp, " __exception_text_end: %lx\n", ms->__exception_text_end);
@@ -566,6 +634,42 @@ arm64_dump_machdep_table(ulong arg)
}
}
+static int
+arm64_parse_machdep_arg_l(char *argstring, char *param, ulong *value)
+{
+ int len;
+ int megabytes = FALSE;
+ char *p;
+
+ len = strlen(param);
+ if (!STRNEQ(argstring, param) || (argstring[len] != '='))
+ return FALSE;
+
+ if ((LASTCHAR(argstring) == 'm') ||
+ (LASTCHAR(argstring) == 'M')) {
+ LASTCHAR(argstring) = NULLCHAR;
+ megabytes = TRUE;
+ }
+
+ p = argstring + len + 1;
+ if (strlen(p)) {
+ int flags = RETURN_ON_ERROR | QUIET;
+ int err = 0;
+
+ if (megabytes) {
+ *value = dtol(p, flags, &err);
+ if (!err)
+ *value = MEGABYTES(*value);
+ } else {
+ *value = htol(p, flags, &err);
+ }
+
+ if (!err)
+ return TRUE;
+ }
+
+ return FALSE;
+}
/*
* Parse machine dependent command line arguments.
@@ -577,11 +681,10 @@ arm64_dump_machdep_table(ulong arg)
static void
arm64_parse_cmdline_args(void)
{
- int index, i, c, err;
+ int index, i, c;
char *arglist[MAXARGS];
char buf[BUFSIZE];
char *p;
- ulong value = 0;
for (index = 0; index < MAX_MACHDEP_ARGS; index++) {
if (!machdep->cmdline_args[index])
@@ -603,39 +706,23 @@ arm64_parse_cmdline_args(void)
c = parse_line(buf, arglist);
for (i = 0; i < c; i++) {
- err = 0;
-
- if (STRNEQ(arglist[i], "phys_offset=")) {
- int megabytes = FALSE;
- int flags = RETURN_ON_ERROR | QUIET;
-
- if ((LASTCHAR(arglist[i]) == 'm') ||
- (LASTCHAR(arglist[i]) == 'M')) {
- LASTCHAR(arglist[i]) = NULLCHAR;
- megabytes = TRUE;
- }
-
- p = arglist[i] + strlen("phys_offset=");
- if (strlen(p)) {
- if (megabytes)
- value = dtol(p, flags, &err);
- else
- value = htol(p, flags, &err);
- }
-
- if (!err) {
- if (megabytes)
- value = MEGABYTES(value);
-
- machdep->machspec->phys_offset = value;
-
- error(NOTE,
- "setting phys_offset to: 0x%lx\n\n",
- machdep->machspec->phys_offset);
+ if (arm64_parse_machdep_arg_l(arglist[i],
+ "phys_offset",
+ &machdep->machspec->phys_offset)) {
+ error(NOTE,
+ "setting phys_offset to: 0x%lx\n\n",
+ machdep->machspec->phys_offset);
+
+ machdep->flags |= PHYS_OFFSET;
+ continue;
+ } else if (arm64_parse_machdep_arg_l(arglist[i],
+ "kimage_voffset",
+ &machdep->machspec->kimage_voffset)) {
+ error(NOTE,
+ "setting kimage_voffset to: 0x%lx\n\n",
+ machdep->machspec->kimage_voffset);
- machdep->flags |= PHYS_OFFSET;
- continue;
- }
+ continue;
}
error(WARNING, "ignoring --machdep option: %s\n",
@@ -715,11 +802,31 @@ arm64_calc_phys_offset(void)
/*
- * Borrow the 32-bit ARM functionality.
+ * Determine PHYS_OFFSET either by reading VMCOREINFO or the kernel
+ * symbol, otherwise borrow the 32-bit ARM functionality.
*/
static int
arm64_kdump_phys_base(ulong *phys_offset)
{
+ char *string;
+ struct syment *sp;
+ physaddr_t paddr;
+
+ if ((string = pc->read_vmcoreinfo("NUMBER(PHYS_OFFSET)"))) {
+ *phys_offset = htol(string, QUIET, NULL);
+ free(string);
+ return TRUE;
+ }
+
+ if (machdep->flags & NEW_VMEMMAP &&
+ machdep->machspec->kimage_voffset &&
+ (sp = kernel_symbol_search("memstart_addr"))) {
+ paddr = sp->value - machdep->machspec->kimage_voffset;
+ if (READMEM(-1, phys_offset, sizeof(*phys_offset),
+ sp->value, paddr) > 0)
+ return TRUE;
+ }
+
return arm_kdump_phys_base(phys_offset);
}
@@ -2509,6 +2616,11 @@ arm64_IS_VMALLOC_ADDR(ulong vaddr)
{
struct machine_specific *ms = machdep->machspec;
+ if ((machdep->flags & NEW_VMEMMAP) &&
+ (vaddr >= machdep->machspec->kimage_text) &&
+ (vaddr <= machdep->machspec->kimage_end))
+ return FALSE;
+
return ((vaddr >= ms->vmalloc_start_addr && vaddr <= ms->vmalloc_end) ||
((machdep->flags & VMEMMAP) &&
(vaddr >= ms->vmemmap_vaddr && vaddr <= ms->vmemmap_end)) ||
@@ -2539,7 +2651,10 @@ arm64_calc_VA_BITS(void)
for (bitval = highest_bit_long(value); bitval; bitval--) {
if ((value & (1UL << bitval)) == 0) {
- machdep->machspec->VA_BITS = bitval + 2;
+ if (machdep->flags & NEW_VMEMMAP)
+ machdep->machspec->VA_BITS = bitval + 1;
+ else
+ machdep->machspec->VA_BITS = bitval + 2;
break;
}
}
@@ -2593,10 +2708,22 @@ arm64_calc_virtual_memory_ranges(void)
break;
}
- vmemmap_size = ALIGN((1UL << (ms->VA_BITS - machdep->pageshift)) * SIZE(page), PUD_SIZE);
+ if (machdep->flags & NEW_VMEMMAP)
+#define STRUCT_PAGE_MAX_SHIFT 6
+ vmemmap_size = 1UL << (ms->VA_BITS - machdep->pageshift - 1
+ + STRUCT_PAGE_MAX_SHIFT);
+ else
+ vmemmap_size = ALIGN((1UL << (ms->VA_BITS - machdep->pageshift)) * SIZE(page), PUD_SIZE);
+
vmalloc_end = (ms->page_offset - PUD_SIZE - vmemmap_size - SZ_64K);
- vmemmap_start = vmalloc_end + SZ_64K;
- vmemmap_end = vmemmap_start + vmemmap_size;
+
+ if (machdep->flags & NEW_VMEMMAP) {
+ vmemmap_start = ms->page_offset - vmemmap_size;
+ vmemmap_end = ms->page_offset;
+ } else {
+ vmemmap_start = vmalloc_end + SZ_64K;
+ vmemmap_end = vmemmap_start + vmemmap_size;
+ }
ms->vmalloc_end = vmalloc_end - 1;
ms->vmemmap_vaddr = vmemmap_start;
diff --git a/defs.h b/defs.h
index 8eb601b..d6f719c 100644
--- a/defs.h
+++ b/defs.h
@@ -2846,8 +2846,8 @@ typedef u64 pte_t;
#define PTOV(X) \
((unsigned long)(X)-(machdep->machspec->phys_offset)+(machdep->machspec->page_offset))
-#define VTOP(X) \
- ((unsigned long)(X)-(machdep->machspec->page_offset)+(machdep->machspec->phys_offset))
+
+#define VTOP(X) arm64_VTOP((ulong)(X))
#define USERSPACE_TOP (machdep->machspec->userspace_top)
#define PAGE_OFFSET (machdep->machspec->page_offset)
@@ -2962,19 +2962,24 @@ typedef signed int s32;
#define VM_L3_4K (0x10)
#define KDUMP_ENABLED (0x20)
#define IRQ_STACKS (0x40)
-#define VM_L4_4K (0x80)
+#define NEW_VMEMMAP (0x80)
+#define VM_L4_4K (0x100)
/*
* sources: Documentation/arm64/memory.txt
* arch/arm64/include/asm/memory.h
* arch/arm64/include/asm/pgtable.h
*/
-
-#define ARM64_PAGE_OFFSET ((0xffffffffffffffffUL) << (machdep->machspec->VA_BITS - 1))
+#define ARM64_VA_START ((0xffffffffffffffffUL) \
+ << machdep->machspec->VA_BITS)
+#define ARM64_PAGE_OFFSET ((0xffffffffffffffffUL) \
+ << (machdep->machspec->VA_BITS - 1))
#define ARM64_USERSPACE_TOP ((1UL) << machdep->machspec->VA_BITS)
-#define ARM64_MODULES_VADDR (ARM64_PAGE_OFFSET - MEGABYTES(64))
-#define ARM64_MODULES_END (ARM64_PAGE_OFFSET - 1)
-#define ARM64_VMALLOC_START ((0xffffffffffffffffUL) << machdep->machspec->VA_BITS)
+
+/* only used for v4.6 or later */
+#define ARM64_MODULES_VSIZE MEGABYTES(128)
+#define ARM64_KASAN_SHADOW_SIZE (1UL << (machdep->machspec->VA_BITS - 3))
+
/*
* The following 3 definitions are the original values, but are obsolete
* for 3.17 and later kernels because they are now build-time calculations.
@@ -3055,6 +3060,10 @@ struct machine_specific {
ulong *irq_stacks;
ulong __irqentry_text_start;
ulong __irqentry_text_end;
+ /* only needed for v4.6 or later kernel */
+ ulong kimage_voffset;
+ ulong kimage_text;
+ ulong kimage_end;
};
struct arm64_stackframe {
@@ -5412,6 +5421,7 @@ void unwind_backtrace(struct bt_info *);
#ifdef ARM64
void arm64_init(int);
void arm64_dump_machdep_table(ulong);
+ulong arm64_VTOP(ulong);
int arm64_IS_VMALLOC_ADDR(ulong);
ulong arm64_swp_type(ulong);
ulong arm64_swp_offset(ulong);
diff --git a/main.c b/main.c
index 05787f0..4065e9a 100644
--- a/main.c
+++ b/main.c
@@ -227,9 +227,10 @@ main(int argc, char **argv)
optarg);
}
} else if (STREQ(long_options[option_index].name, "kaslr")) {
- if (!machine_type("X86_64"))
- error(INFO, "--kaslr only valid "
- "with X86_64 machine type.\n");
+ if (!machine_type("X86_64") &&
+ !machine_type("ARM64"))
+ error(INFO, "--kaslr not valid "
+ "with this machine type.\n");
else if (STREQ(optarg, "auto"))
kt->flags2 |= (RELOC_AUTO|KASLR);
else {
diff --git a/symbols.c b/symbols.c
index a8d3563..b0a6461 100644
--- a/symbols.c
+++ b/symbols.c
@@ -593,7 +593,8 @@ kaslr_init(void)
{
char *string;
- if (!machine_type("X86_64") || (kt->flags & RELOC_SET))
+ if ((!machine_type("X86_64") && !machine_type("ARM64")) ||
+ (kt->flags & RELOC_SET))
return;
/*
@@ -712,7 +713,7 @@ store_symbols(bfd *abfd, int dynamic, void *minisyms, long symcount,
if (machine_type("X86")) {
if (!(kt->flags & RELOC_SET))
kt->flags |= RELOC_FORCE;
- } else if (machine_type("X86_64")) {
+ } else if (machine_type("X86_64") || machine_type("ARM64")) {
if ((kt->flags2 & RELOC_AUTO) && !(kt->flags & RELOC_SET))
derive_kaslr_offset(abfd, dynamic, from,
fromend, size, store);
@@ -783,7 +784,8 @@ store_sysmap_symbols(void)
error(FATAL, "symbol table namespace malloc: %s\n",
strerror(errno));
- if (!machine_type("X86") && !machine_type("X86_64"))
+ if (!machine_type("X86") && !machine_type("X86_64") &&
+ !machine_type("ARM64"))
kt->flags &= ~RELOC_SET;
first = 0;
@@ -833,7 +835,7 @@ store_sysmap_symbols(void)
}
/*
- * Handle x86 kernels configured such that the vmlinux symbols
+ * Handle x86/arm64 kernels configured such that the vmlinux symbols
* are not as loaded into the kernel (not unity-mapped).
*/
static ulong
@@ -4681,7 +4683,7 @@ value_search(ulong value, ulong *offset)
if ((sp = machdep->value_to_symbol(value, offset)))
return sp;
- if (IS_VMALLOC_ADDR(value))
+ if (IS_VMALLOC_ADDR(value))
goto check_modules;
if ((sp = symval_hash_search(value)) == NULL)
--
2.8.1
8 years, 5 months
[PATCH v2] arm64: dump a stack frame based on fp
by AKASHI Takahiro
Changes in v2:
* Basically moved arm64_print_stackframe_entry() after arm64_unwind_frame()
in arm64_back_trace_cmd() to make the output consistent with the format
on x86
Then, we had to calculate the end address of a stack frame, frame_top,
for particular cases.
-Takahiro AKASHI
======8<======
>From d8683645599a238578a0f586af563bc9f847d52c Mon Sep 17 00:00:00 2001
From: AKASHI Takahiro <takahiro.akashi(a)linaro.org>
Date: Wed, 8 Jun 2016 11:14:22 +0900
Subject: [PATCH v2] arm64: dump a stack frame based on fp
Signed-off-by: AKASHI Takahiro <takahiro.akashi(a)linaro.org>
---
arm64.c | 140 ++++++++++++++++++++++++++++++++++++++++++++++++----------------
defs.h | 5 +++
2 files changed, 110 insertions(+), 35 deletions(-)
diff --git a/arm64.c b/arm64.c
index bdea79c..7b5b394 100644
--- a/arm64.c
+++ b/arm64.c
@@ -43,9 +43,10 @@ static void arm64_stackframe_init(void);
static int arm64_eframe_search(struct bt_info *);
static int arm64_is_kernel_exception_frame(struct bt_info *, ulong);
static int arm64_in_exception_text(ulong);
+static int arm64_in_exp_entry(ulong);
static void arm64_back_trace_cmd(struct bt_info *);
static void arm64_print_text_symbols(struct bt_info *, struct arm64_stackframe *, FILE *);
-static int arm64_print_stackframe_entry(struct bt_info *, int, struct arm64_stackframe *, FILE *);
+static int arm64_print_stackframe_entry(struct bt_info *, int, struct arm64_stackframe *, struct arm64_stackframe *, FILE *);
static void arm64_display_full_frame(struct bt_info *, ulong);
static int arm64_unwind_frame(struct bt_info *, struct arm64_stackframe *);
static int arm64_get_dumpfile_stackframe(struct bt_info *, struct arm64_stackframe *);
@@ -597,6 +598,10 @@ arm64_dump_machdep_table(ulong arg)
fprintf(fp, " __exception_text_end: %lx\n", ms->__exception_text_end);
fprintf(fp, " __irqentry_text_start: %lx\n", ms->__irqentry_text_start);
fprintf(fp, " __irqentry_text_end: %lx\n", ms->__irqentry_text_end);
+ fprintf(fp, " exp_entry1_start: %lx\n", ms->exp_entry1_start);
+ fprintf(fp, " exp_entry1_end: %lx\n", ms->exp_entry1_end);
+ fprintf(fp, " exp_entry2_start: %lx\n", ms->exp_entry2_start);
+ fprintf(fp, " exp_entry2_end: %lx\n", ms->exp_entry2_end);
fprintf(fp, " panic_task_regs: %lx\n", (ulong)ms->panic_task_regs);
fprintf(fp, " PTE_PROT_NONE: %lx\n", ms->PTE_PROT_NONE);
fprintf(fp, " PTE_FILE: ");
@@ -1283,6 +1288,16 @@ arm64_stackframe_init(void)
machdep->machspec->__irqentry_text_end = sp2->value;
}
+ if ((sp1 = kernel_symbol_search("vectors")) &&
+ (sp1n = kernel_symbol_search("cpu_switch_to")) &&
+ (sp2 = kernel_symbol_search("ret_fast_syscall")) &&
+ (sp2n = kernel_symbol_search("sys_rt_sigreturn_wrapper"))) {
+ machdep->machspec->exp_entry1_start = sp1->value;
+ machdep->machspec->exp_entry1_end = sp1n->value;
+ machdep->machspec->exp_entry2_start = sp2->value;
+ machdep->machspec->exp_entry2_end = sp2n->value;
+ }
+
if ((sp1 = kernel_symbol_search("crash_kexec")) &&
(sp1n = next_symbol(NULL, sp1)) &&
(sp2 = kernel_symbol_search("crash_save_cpu")) &&
@@ -1484,18 +1499,54 @@ arm64_in_exception_text(ulong ptr)
return FALSE;
}
+static int
+arm64_in_exp_entry(ulong addr)
+{
+ struct machine_specific *ms;
+
+ ms = machdep->machspec;
+ if ((ms->exp_entry1_start <= addr) && (addr < ms->exp_entry1_end))
+ return TRUE;
+ if ((ms->exp_entry2_start <= addr) && (addr < ms->exp_entry2_end))
+ return TRUE;
+ return FALSE;
+}
+
#define BACKTRACE_CONTINUE (1)
#define BACKTRACE_COMPLETE_KERNEL (2)
-#define BACKTRACE_COMPLETE_USER (3)
static int
-arm64_print_stackframe_entry(struct bt_info *bt, int level, struct arm64_stackframe *frame, FILE *ofp)
+arm64_print_stackframe_entry(struct bt_info *bt, int level, struct arm64_stackframe *frame, struct arm64_stackframe *caller_frame, FILE *ofp)
{
char *name, *name_plus_offset;
ulong symbol_offset;
struct syment *sp;
struct load_module *lm;
char buf[BUFSIZE];
+ unsigned long frame_top;
+
+ if (caller_frame->fp) {
+#if 0 /* FIXME: Need to debug here */
+ if (arm64_in_exp_entry(caller_frame->pc))
+ frame_top = caller_frame->fp - sizeof(struct arm64_pt_regs);
+ else
+#endif
+ frame_top = caller_frame->fp;
+ } else if (frame->fp && !arm64_in_exp_entry(frame->pc))
+ frame_top = bt->stacktop - USER_EFRAME_OFFSET;
+ else
+ frame_top = bt->stacktop - 0x10;
+
+ if (CRASHDEBUG(1)) {
+ fprintf(ofp, " cur sp:%016lx fp:%016lx pc:%016lx\n",
+ frame->sp, frame->fp, frame->pc);
+ fprintf(ofp, " nxt sp:%016lx fp:%016lx pc:%016lx\n",
+ caller_frame->sp, caller_frame->fp, caller_frame->pc);
+
+ fprintf(ofp, " frame: <%lx-%lx> check(%d, %d)\n",
+ bt->frameptr, frame_top,
+ INSTACK(frame_top, bt), INSTACK(bt->frameptr, bt));
+ }
name = closest_symbol(frame->pc);
name_plus_offset = NULL;
@@ -1507,13 +1558,16 @@ arm64_print_stackframe_entry(struct bt_info *bt, int level, struct arm64_stackfr
value_to_symstr(frame->pc, buf, bt->radix);
}
- if (bt->flags & BT_FULL) {
- arm64_display_full_frame(bt, frame->sp);
- bt->frameptr = frame->sp;
- }
+ if (bt->flags & BT_FULL)
+ arm64_display_full_frame(bt, frame_top);
+ bt->frameptr = frame_top;
fprintf(ofp, "%s#%d [%8lx] %s at %lx", level < 10 ? " " : "", level,
- frame->sp, name_plus_offset ? name_plus_offset : name, frame->pc);
+#if 1 /* FIXME */
+ (frame->fp ? : frame->sp), name_plus_offset ? name_plus_offset : name, frame->pc);
+#else
+ frame->fp, name_plus_offset ? name_plus_offset : name, frame->pc);
+#endif
if (BT_REFERENCE_CHECK(bt))
arm64_do_bt_reference_check(bt, frame->pc, name);
@@ -1571,12 +1625,10 @@ arm64_unwind_frame(struct bt_info *bt, struct arm64_stackframe *frame)
{
unsigned long high, low, fp;
unsigned long stack_mask;
- unsigned long irq_stack_ptr, orig_sp, sp_in;
+ unsigned long irq_stack_ptr, orig_sp;
struct arm64_pt_regs *ptregs;
struct machine_specific *ms;
- sp_in = frame->sp;
-
stack_mask = (unsigned long)(ARM64_STACK_SIZE) - 1;
fp = frame->fp;
@@ -1612,8 +1664,17 @@ arm64_unwind_frame(struct bt_info *bt, struct arm64_stackframe *frame)
if (INSTACK(orig_sp, bt) && (INSTACK(frame->fp, bt) || (frame->fp == 0))) {
ptregs = (struct arm64_pt_regs *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(orig_sp))];
frame->sp = orig_sp;
- frame->pc = ptregs->pc;
- bt->bptr = sp_in;
+
+ /*
+ * if frame->fp == 0, ptregs->pc is a pc
+ * where an exception in user mode was taken.
+ * So don't update frame->pc since we have
+ * a pseudo stack frame for exception entry.
+ */
+ if (frame->fp)
+ frame->pc = ptregs->pc;
+
+ bt->bptr = fp;
if (CRASHDEBUG(1))
error(INFO,
"arm64_unwind_frame: switch stacks: fp: %lx sp: %lx pc: %lx\n",
@@ -1634,10 +1695,11 @@ arm64_unwind_frame(struct bt_info *bt, struct arm64_stackframe *frame)
static void
arm64_back_trace_cmd(struct bt_info *bt)
{
- struct arm64_stackframe stackframe;
+ struct arm64_stackframe stackframe, cur_frame;
int level;
ulong exception_frame;
FILE *ofp;
+ int end_stack = 0;
ofp = BT_REFERENCE_CHECK(bt) ? pc->nullfp : fp;
@@ -1657,7 +1719,7 @@ arm64_back_trace_cmd(struct bt_info *bt)
stackframe.fp = GET_STACK_ULONG(bt->bptr - 8);
stackframe.pc = GET_STACK_ULONG(bt->bptr);
stackframe.sp = bt->bptr + 8;
- bt->frameptr = stackframe.sp;
+ bt->frameptr = stackframe.fp;
} else if (bt->hp && bt->hp->esp) {
if (arm64_on_irq_stack(bt->tc->processor, bt->hp->esp)) {
arm64_set_irq_stack(bt);
@@ -1703,23 +1765,9 @@ arm64_back_trace_cmd(struct bt_info *bt)
while (1) {
bt->instptr = stackframe.pc;
- switch (arm64_print_stackframe_entry(bt, level, &stackframe, ofp))
- {
- case BACKTRACE_COMPLETE_KERNEL:
- return;
- case BACKTRACE_COMPLETE_USER:
- goto complete_user;
- case BACKTRACE_CONTINUE:
- break;
- }
-
- if (exception_frame) {
- arm64_print_exception_frame(bt, exception_frame, KERNEL_MODE, ofp);
- exception_frame = 0;
- }
-
+ cur_frame = stackframe;
if (!arm64_unwind_frame(bt, &stackframe))
- break;
+ end_stack = 1;
if (arm64_in_exception_text(bt->instptr) && INSTACK(stackframe.fp, bt)) {
if (!(bt->flags & BT_IRQSTACK) ||
@@ -1731,9 +1779,30 @@ arm64_back_trace_cmd(struct bt_info *bt)
!arm64_on_irq_stack(bt->tc->processor, stackframe.sp)) {
bt->flags &= ~BT_IRQSTACK;
if (arm64_switch_stack(bt, &stackframe, ofp) == USER_MODE)
- break;
+ end_stack = 1;
+ else
+ arm64_print_exception_frame(bt, stackframe.sp,
+ KERNEL_MODE, ofp);
+
+ /* Next, we will display a process stack. */
+ bt->frameptr = stackframe.sp;
}
+ switch (arm64_print_stackframe_entry(bt, level, &cur_frame, &stackframe, ofp))
+ {
+ case BACKTRACE_COMPLETE_KERNEL:
+ return;
+ case BACKTRACE_CONTINUE:
+ break;
+ }
+
+ if (end_stack)
+ break;
+
+ if (arm64_in_exp_entry(cur_frame.pc) && exception_frame) {
+ arm64_print_exception_frame(bt, exception_frame, KERNEL_MODE, ofp);
+ exception_frame = 0;
+ }
level++;
}
@@ -1912,7 +1981,6 @@ arm64_switch_stack(struct bt_info *bt, struct arm64_stackframe *frame, FILE *ofp
if (frame->fp == 0)
return USER_MODE;
- arm64_print_exception_frame(bt, frame->sp, KERNEL_MODE, ofp);
return KERNEL_MODE;
}
@@ -2004,8 +2072,10 @@ arm64_print_exception_frame(struct bt_info *bt, ulong pt_regs, int mode, FILE *o
ulong LR, SP, offset;
char buf[BUFSIZE];
- if (bt->flags & BT_FULL)
- arm64_display_full_frame(bt, pt_regs);
+ if (mode == KERNEL_MODE)
+ fprintf(ofp, "--- <Exception in kernel> ---\n");
+ else
+ fprintf(ofp, "--- <Exception in user> ---\n");
if (CRASHDEBUG(1))
fprintf(ofp, "pt_regs: %lx\n", pt_regs);
diff --git a/defs.h b/defs.h
index d6f719c..90d8406 100644
--- a/defs.h
+++ b/defs.h
@@ -3060,6 +3060,11 @@ struct machine_specific {
ulong *irq_stacks;
ulong __irqentry_text_start;
ulong __irqentry_text_end;
+ /* for exception vector code */
+ ulong exp_entry1_start;
+ ulong exp_entry1_end;
+ ulong exp_entry2_start;
+ ulong exp_entry2_end;
/* only needed for v4.6 or later kernel */
ulong kimage_voffset;
ulong kimage_text;
--
2.8.1
8 years, 5 months
arm64: odd backtrace?
by AKASHI Takahiro
Dave,
When I ran "bt" against a process running in a user mode, I got
an odd backtrace result:
===8<===
crash> ps
...
> 1324 1223 2 ffff80002018be80 RU 0.0 960 468 dhry
1325 2 1 ffff800021089900 IN 0.0 0 0 [kworker/u16:0]
crash> bt 1324
PID: 1324 TASK: ffff80002018be80 CPU: 2 COMMAND: "dhry"
ffff800022f6ae08: ffff00000812ae44 (crash_save_cpu on IRQ stack)
#0 [ffff800022f6ae10] crash_save_cpu at ffff00000812ae44
#1 [ffff800022f6ae60] handle_IPI at ffff00000808e718
#2 [ffff800022f6b020] gic_handle_irq at ffff0000080815f8
#3 [ffff800022f6b050] el0_irq_naked at ffff000008084c4c
pt_regs: ffff800022f6af60
PC: ffffffffffffffff [unknown or invalid address]
LR: ffff800020107ed0 [unknown or invalid address]
SP: 0000000000000000 PSTATE: 004016a4
X29: ffff000008084c4c X28: ffff800022f6b080 X27: ffff000008e60c54
X26: ffff800020107ed0 X25: 0000000000001fff X24: 0000000000000003
X23: ffff0000080815f8 X22: ffff800022f6b040 X21: 0000000000000000
X20: ffff000008bce000 X19: ffff00000808e758 X18: ffff800022f6b010
X17: ffff00000808a820 X16: ffff800022f6aff0 X15: 0000000000000000
X14: 0000000000000000 X13: 0000000000000000 X12: 0000000000402138
X11: ffff000008675850 X10: ffff800022f6afe0 X9: 0000000000000000
X8: ffff800022f6afc0 X7: 0000000000000000 X6: 0000000000000000
X5: 0000000000000000 X4: 0000000000000001 X3: 0000000000000000
X2: 0000000000493000 X1: 0000000000498000 X0: ffffffffffffffff
ORIG_X0: 0000000020000000 SYSCALLNO: 4021f0
bt: WARNING: arm64_unwind_frame: on IRQ stack: oriq_sp: ffff800020107ed0 fp: 0 (?)
pt_regs: ffff800020107ed0
PC: 00000000004016a4 LR: 00000000004016a4 SP: 0000ffffc10c40a0
X29: 0000ffffc10c40a0 X28: 0000000000000000 X27: 0000000000000000
X26: 0000000000000000 X25: 0000000000402138 X24: 00000000004021f0
X23: 0000000000000000 X22: 0000000000000000 X21: 00000000004001a0
X20: 0000000000000000 X19: 0000000000000000 X18: 0000000000000000
X17: 0000000000000001 X16: 0000000000000000 X15: 0000000000493000
X14: 0000000000498000 X13: ffffffffffffffff X12: 0000000000000005
X11: 000000000000001e X10: 0101010101010101 X9: fffffffff59a9190
X8: 7f7f7f7f7f7f7f7f X7: 1f535226301f2b4c X6: 00000003001d1000
X5: 00101d0003000000 X4: 0000000000000000 X3: 4952545320454d4f
X2: 0000000010c35b40 X1: 0000000000000011 X0: 0000000010c35b40
ORIG_X0: 0000000000498700 SYSCALLNO: ffffffffffffffff PSTATE: 20000000
===>8===
* PC, LR and SP look wrong.
I don't know how those pt_regs values were derived.
* The message, "WARNING: arm64_unwind_frame: on IRQ stack: oriq_sp:
ffff800020107ed0 fp: 0 (?)" should be refined.
Apparently, in this case, the process is running in a user mode,
and so there is no normal kernel stack.
Thanks,
-Takahiro AKASHI
8 years, 5 months
[PATCH v4 0/2] fix kernel memory map handling for kaslr-enabled kernel
by AKASHI Takahiro
changes in v4:
* Fixed VA_BITS calculation for v4.5 or earlier
* Added 4-level address translation with 4KB page size
* Removed "fix a renaming of a member of struct page, _count to _refcount"
* Removed "kmem <vmalloc addr>" issue description from a commit message
(This was not a bug.)
Changes in v3:
* Refined KASLR handling
hopefully the tool works even on a live system if CONFIG_RANDOMIZE_RAM is
not configured
* Fixed a renaming of a member of struct page
* Removed a commit message regarding an issue of backtracing a panic'ed task
because this is not a bug in this tool, but in my kdump patch.
* Reported "kmem <vmalloc addr>" issue in a commit message
changes in v2:
* Fixed build warnings
* Moved ARM64_NEW_VMEMMAP to machdep->flags
* Show additional kaslr-related parameters in arm64_dump_machdep_table()
* Handle a VMCOREINFO, "NUMBER(kimage_voffset)"
AKASHI Takahiro (2):
arm64: fix kernel memory map handling for kaslr-enabled kernel
arm64: add 4-level translation
arm64.c | 338 +++++++++++++++++++++++++++++++++++++++++++++++++++-----------
defs.h | 49 +++++++--
main.c | 7 +-
symbols.c | 12 ++-
4 files changed, 334 insertions(+), 72 deletions(-)
--
2.8.1
8 years, 5 months
[PATCH] pykdump: add maxel parameter to readStructNext
by Masayoshi Mizuma
Hi Alex,
When I searched for struct journal_head entries with readStructNext(), the
number of journal_head entries exceeded _MAXEL (== 10000) and the
search stopped early.
It would be good to add a parameter to change this limit.
Signed-off-by: Masayoshi Mizuma <m.mizuma(a)jp.fujitsu.com>
---
pykdump/wrapcrash.py | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/pykdump/wrapcrash.py b/pykdump/wrapcrash.py
index 74d805e..6c83e67 100755
--- a/pykdump/wrapcrash.py
+++ b/pykdump/wrapcrash.py
@@ -1202,7 +1202,7 @@ def readSUListFromHead(headaddr, listfieldname, mystruct, maxel=_MAXEL,
# an embedded listhead. 'shead' is either a structure or tPtr pointer
# to structure
-def readStructNext(shead, nextname, inchead = True):
+def readStructNext(shead, nextname, maxel=_MAXEL, inchead = True):
if (not isinstance(shead, StructResult)):
# This should be tPtr
if (shead == 0):
@@ -1211,7 +1211,7 @@ def readStructNext(shead, nextname, inchead = True):
stype = shead.PYT_symbol
offset = shead.PYT_sinfo[nextname].offset
out = []
- for p in readList(Addr(shead), offset, inchead=inchead):
+ for p in readList(Addr(shead), offset, maxel, inchead=inchead):
out.append(readSU(stype, p))
return out
--
1.7.1
8 years, 5 months