See the following stack trace:
(gdb) bt
#0 0x00005635ac2b166b in arm64_unwind_frame (frame=0x7ffdaf35cb70,
bt=0x7ffdaf35d430) at arm64.c:2821
#1 arm64_back_trace_cmd (bt=0x7ffdaf35d430) at arm64.c:3306
#2 0x00005635ac27b108 in back_trace (bt=bt@entry=0x7ffdaf35d430) at
kernel.c:3239
#3 0x00005635ac2880ae in cmd_bt () at kernel.c:2863
#4 0x00005635ac1f16dc in exec_command () at main.c:893
#5 0x00005635ac1f192a in main_loop () at main.c:840
#6 0x00005635ac50df81 in captured_main (data=<optimized out>) at main.c:1284
#7 gdb_main (args=<optimized out>) at main.c:1313
#8 0x00005635ac50e000 in gdb_main_entry (argc=<optimized out>,
argv=<optimized out>) at main.c:1338
#9 0x00005635ac1ea2a5 in main (argc=5, argv=0x7ffdaf35dde8) at main.c:721
The issue may be encountered when the thread_union symbol is not found in
vmlinux due to compiler optimization.
This patch tries the following 2 methods to determine the irq_stack_size
when the thread_union symbol is unavailable:
1. Adjust the thread_shift when KASAN is enabled and vmcoreinfo is available.
In arm64/include/asm/memory.h:
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
...
#define IRQ_STACK_SIZE THREAD_SIZE
Since enabling KASAN affects the final value, this patch recalculates
IRQ_STACK_SIZE following the calculation process in the kernel code.
2. Try getting the value from the kernel code disassembly, reading
THREAD_SHIFT directly from the tbnz instruction.
In arch/arm64/kernel/entry.S:
.macro kernel_ventry, el:req, ht:req, regsize:req, label:req
...
add sp, sp, x0
sub x0, sp, x0
tbnz x0, #THREAD_SHIFT, 0f
$ gdb vmlinux
(gdb) disass vectors
Dump of assembler code for function vectors:
...
0xffff800080010804 <+4>: add sp, sp, x0
0xffff800080010808 <+8>: sub x0, sp, x0
0xffff80008001080c <+12>: tbnz w0, #16, 0xffff80008001081c
<vectors+28>
Signed-off-by: yeping.zheng <yeping.zheng(a)nio.com>
Improved-by: Tao Liu <ltao(a)redhat.com>
---
Hi Yeping & Lianbo,
I re-drafted the patch, re-ordering the if block in
arm64_set_irq_stack_size(). I believe what Lianbo means is to let the
normal vmcores, i.e. the ones with vmcoreinfo, go down the default path,
and only the ones without vmcoreinfo go into the tbnz disassembly path. In
this way the patch changes have minimal influence on the normal
vmcores (which are the majority case).
In addition, this patch is not yet tested; please review and
test it.
Thanks,
Tao Liu
---
arm64.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 54 insertions(+), 2 deletions(-)
diff --git a/arm64.c b/arm64.c
index 067c879..28e7ccb 100644
--- a/arm64.c
+++ b/arm64.c
@@ -95,6 +95,7 @@ static int arm64_is_uvaddr(ulong, struct task_context *);
static void arm64_calc_KERNELPACMASK(void);
static void arm64_recalc_KERNELPACMASK(void);
static int arm64_get_vmcoreinfo(unsigned long *vaddr, const char *label, int base);
+static ulong arm64_set_irq_stack_size(void);
struct kernel_range {
unsigned long modules_vaddr, modules_end;
@@ -2340,8 +2341,10 @@ arm64_irq_stack_init(void)
if (MEMBER_EXISTS("thread_union", "stack")) {
if ((sz = MEMBER_SIZE("thread_union", "stack")) > 0)
ms->irq_stack_size = sz;
- } else
- ms->irq_stack_size = ARM64_IRQ_STACK_SIZE;
+ } else {
+ ulong res = arm64_set_irq_stack_size();
+ ms->irq_stack_size = (res > 0) ? res : ARM64_IRQ_STACK_SIZE;
+ }
machdep->flags |= IRQ_STACKS;
@@ -5056,6 +5059,55 @@ static void arm64_recalc_KERNELPACMASK(void){
}
}
+static ulong arm64_set_irq_stack_size(void)
+{
+ int min_thread_shift = 14;
+ ulong thread_shift = 0;
+ char buf1[BUFSIZE];
+ char *pos1, *pos2;
+ int errflag = 0;
+
+ if (kernel_symbol_exists("vmcoreinfo_data") &&
+ kernel_symbol_exists("vmcoreinfo_size")) {
+ if (kernel_symbol_exists("kasan_enable_current")) {
+ min_thread_shift += 1;
+ }
+ thread_shift = (min_thread_shift < machdep->pageshift) ?
+ machdep->pageshift : min_thread_shift;
+ } else {
+ sprintf(buf1, "x/32i vectors");
+ open_tmpfile();
+ if (!gdb_pass_through(buf1, pc->tmpfile, GNU_RETURN_ON_ERROR)) {
+ goto out;
+ }
+ rewind(pc->tmpfile);
+ while (fgets(buf1, BUFSIZE, pc->tmpfile)) {
+ if ((pos1 = strstr(buf1, "tbnz"))) {
+ if ((pos2 = strchr(pos1, '#'))) {
+ pos2 += 1;
+ for (pos1 = pos2;
+ *pos2 != '\0' && *pos2 != ',';
+ pos2++);
+ *pos2 = '\0';
+ thread_shift = stol(pos1,
+ RETURN_ON_ERROR|QUIET, &errflag);
+ if (errflag) {
+ thread_shift = 0;
+ }
+ break;
+ }
+ }
+ }
+out:
+ close_tmpfile();
+ }
+
+ if (thread_shift)
+ return ((1UL) << thread_shift);
+ else
+ return 0;
+}
+
#endif /* ARM64 */
--
2.40.1