[PATCH 0/3] Add Rust support in crash-utility
by Lianbo Jiang
The kernel now supports Rust, and Rust support has been enabled by default
in some distributions, but the crash tool cannot resolve the mangled Rust
symbols on such vmcores.
For example:
crash> bt
PID: 3520 TASK: ffff8f240f670000 CPU: 1 COMMAND: "insmod"
#0 [ffffd08c4f063a20] machine_kexec at ffffffff9575e60e
#1 [ffffd08c4f063a40] __crash_kexec at ffffffff958db711
#2 [ffffd08c4f063b00] panic at ffffffff9560cede
#3 [ffffd08c4f063b80] _RNvCscb18lrEyTSA_10rust_panic10area_in_hp at ffffffffc07fe107 [rust_panic]
#4 [ffffd08c4f063c20] _RNvMCscb18lrEyTSA_10rust_panicNtB2_10HelloPanic8step_two at ffffffffc07fe160 [rust_panic]
#5 [ffffd08c4f063cf0] do_one_initcall at ffffffff956c7aaa
...
This patchset solves that: crash reuses the Rust demangling library in gdb
to demangle the mangled Rust symbols into human-readable names. A minimal
sketch of the demangling idea is shown below.
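For illustration only, a minimal standalone sketch of the demangling step,
assuming libiberty's rust_demangle() (the Rust demangler that gdb bundles);
the actual patches hook the demangler into crash's symbol handling and the
new rustfilt command rather than building a separate tool:

/* Hedged sketch: demangle one Rust v0 symbol via libiberty's rust_demangle().
 * Link against libiberty (-liberty); error handling is kept minimal. */
#include <stdio.h>
#include <stdlib.h>

extern char *rust_demangle(const char *mangled, int options);

int main(int argc, char **argv)
{
	const char *sym = argc > 1 ? argv[1]
		: "_RNvCscb18lrEyTSA_10rust_panic10area_in_hp";
	char *out = rust_demangle(sym, 0);	/* malloc'd string, or NULL on failure */

	printf("%s\n", out ? out : sym);	/* expected: rust_panic::area_in_hp */
	free(out);
	return 0;
}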
With the patchset:
crash> bt
PID: 3520 TASK: ffff8f240f670000 CPU: 1 COMMAND: "insmod"
#0 [ffffd08c4f063a20] machine_kexec at ffffffff9575e60e
#1 [ffffd08c4f063a40] __crash_kexec at ffffffff958db711
#2 [ffffd08c4f063b00] panic at ffffffff9560cede
#3 [ffffd08c4f063b80] rust_panic::area_in_hp at ffffffffc07fe107 [rust_panic]
#4 [ffffd08c4f063c20] <rust_panic::HelloPanic>::step_two at ffffffffc07fe160 [rust_panic]
#5 [ffffd08c4f063cf0] do_one_initcall at ffffffff956c7aaa
...
crash> sym "rust_panic::area_in_hp"
ffffffffc07fe010 (t) rust_panic::area_in_hp [rust_panic] /root/linux-6.16.3/samples/rust/rust_panic.rs: 22
crash> dis "rust_panic::area_in_hp"
0xffffffffc07fe010 <rust_panic::area_in_hp>: push %rbx
0xffffffffc07fe011 <rust_panic::area_in_hp+1>: sub $0x90,%rsp
0xffffffffc07fe018 <rust_panic::area_in_hp+8>: mov %rdi,%rbx
0xffffffffc07fe01b <rust_panic::area_in_hp+11>: movq $0xffffffffc0bd5020,(%rsp)
0xffffffffc07fe023 <rust_panic::area_in_hp+19>: movq $0x1,0x8(%rsp)
0xffffffffc07fe02c <rust_panic::area_in_hp+28>: movq $0x0,0x20(%rsp)
0xffffffffc07fe035 <rust_panic::area_in_hp+37>: movq $0x8,0x10(%rsp)
0xffffffffc07fe03e <rust_panic::area_in_hp+46>: movq $0x0,0x18(%rsp)
0xffffffffc07fe047 <rust_panic::area_in_hp+55>: mov %rsp,%rcx
0xffffffffc07fe04a <rust_panic::area_in_hp+58>: mov $0xc,%edx
0xffffffffc07fe04f <rust_panic::area_in_hp+63>: mov $0xffffffff96afb62c,%rdi
0xffffffffc07fe056 <rust_panic::area_in_hp+70>: mov $0xffffffffc0bd5118,%rsi
0xffffffffc07fe05d <rust_panic::area_in_hp+77>: call 0xffffffff95fcb2e0 <kernel::print::call_printk>
0xffffffffc07fe062 <rust_panic::area_in_hp+82>: movq $0xffffffffc0bd50d0,0x30(%rsp)
...
crash> gdb bt
#0 0xffffffff958db684 in crash_setup_regs (newregs=0xffffd08c4f063a48, oldregs=0x0) at ./arch/x86/include/asm/kexec.h:108
#1 0xffffffff958db711 in __crash_kexec (regs=regs@entry=0x0) at kernel/crash_core.c:122
#2 0xffffffff9560cede in panic (fmt=<optimized out>) at kernel/panic.c:401
#3 0xffffffffc07fe107 in rust_panic::HelloPanic::trigger_panic (self=0x1) at rust_panic.rs:59
#4 rust_panic::HelloPanic::step_three (self=0x1) at rust_panic.rs:53
#5 rust_panic::area_in_hp (rectangle=0xffffd08c4f063c38) at rust_panic.rs:24
#6 0xffffffffc07fe160 in rust_panic::HelloPanic::step_two (self=<optimized out>) at rust_panic.rs:46
#7 0xffffffffc0c5d067 in ?? ()
crash> frame 5
#5 rust_panic::area_in_hp (rectangle=0xffffd08c4f063c38) at rust_panic.rs:24
24 in rust_panic.rs
crash> whatis rectangle
*mut rust_panic::RectangleHP
crash> p *rectangle
$2 = rust_panic::RectangleHP {
width: 30,
height: 50
}
crash> struct RectangleHP
struct rust_panic::RectangleHP {
width: u32,
height: u32,
}
SIZE: 8
crash>
There are still many limitations when debugging complex, nested Rust data
types in the kernel. I do not expect the patches to always work well in the
real world, but they can be improved over time. As you know, some challenges
also come from the compilers, which may behave differently; for example:
crash> gdb bt
#0 rust_helper_BUG () at rust/helpers/bug.c:7
#1 0xffffffff98b5a92a in kernel::panic (info=0xffffcf05880d7800) at rust/kernel/lib.rs:202
#2 0xffffffff9840e310 in core::panicking::panic_fmt (fmt=...) at /home/llvm-20.1.8-rust-1.88.0-x86_64/lib/rustlib/src/rust/library/core/src/panicking.rs:75
#3 0xffffffffc12e71ec in ?? ()
#4 0xffffcf05880d7880 in ?? ()
#5 0xffffffffc14680a8 in ?? ()
crash>
Note: the above kernel was compiled with clang (clang version 20.1.8 (https://github.com/llvm/llvm-project.git 87f0227cb60147a26a1eeb4fb06e3b505e9c7261)); the gdb stack unwind looks incomplete.
BTW: I did not see similar issues on a kernel compiled with gcc (GCC) 14.3.1.
Note: rust_panic is a Rust kernel module used to test the current patchset.
If anyone needs it, I can share it via email.
Lianbo Jiang (3):
Add a rustfilt command to demangle a mangled Rust symbol
Enable demangling a mangled Rust support
Enable resolving mangled Rust symbol in lockless ring buffer
Makefile | 2 +-
defs.h | 2 ++
global_data.c | 1 +
help.c | 10 +++++++
printk.c | 28 +++++++++++++++++-
symbols.c | 78 +++++++++++++++++++++++++++++++++++++++++++++++++++
6 files changed, 119 insertions(+), 2 deletions(-)
--
2.50.1
4 days, 16 hours
[Extension] Request to add new extension to Crash Extension Modules page: sdinfo
by briston_dev
Dear Crash Utility Maintainers,
I would like to submit a new extension module for inclusion in the Crash Extension Modules page. Below are the required details:
1. Extension Name: sdinfo
2. Command(s): sdinfo
3. Comments
- Functionality: Displays Linux kernel's SCSI disk information, including devices, hosts, targets, and associated I/O requests and SCSI commands. Support for NVMe devices is currently under development and will be added in upcoming releases.
- To build the module from the top-level `crash-<version>` directory, enter:
$ cp <path-to>/scsi.c scsi.mk extensions
$ make extensions
- Package Website: https://github.com/briston-dev/crash-diskutils
- Requires crash-utility version >= 7.2.8
- Author: Yao Sang <sangyao(a)kylinos.cn>
As specified in the extension guidelines, please add this module to the Crash Extension Modules page. This extension will be actively maintained with regular updates to ensure compatibility with new kernel versions and additional feature enhancements.
Let me know if additional information is needed. Thank you for your consideration!
Best regards,
Yao Sang
4 days, 18 hours
[PATCH RFC][makedumpfile 00/10] btf/kallsyms based eppic extension for mm page filtering
by Tao Liu
A) This patchset will introduce the following features to makedumpfile:
1) Enable eppic script for memory pages filtering.
2) Enable btf and kallsyms for symbol type and address resolving.
3) Port maple tree data structures and functions, primarily used for
vma iteration.
B) The purpose of the features are:
1) Currently makedumpfile filters mm pages based on page flags, because flags
can help determine a page's usage. But this page-flag-checking method
lacks flexibility in certain cases, e.g. if we want to filter the mm
pages occupied by a GPU during vmcore dumping, because:
a) the GPU may occupy a large amount of memory and contain sensitive data;
b) GPU mm pages have no relation to the kernel crash and are useless for
vmcore analysis.
But there is no GPU-specific mm page flag, and apparently we don't need
to create one just for kdump use. A programmable filtering tool is more
suitable for such cases. In addition, different GPU vendors may allocate
mm pages in different ways, so programmable filtering is better than
hard-coding GPU-specific logic into makedumpfile.
2) makedumpfile already contains a programmable filtering tool, the eppic
script, which allows users to write customized code for data erasing.
However, it has the following drawbacks:
a) it cannot do mm page filtering;
b) it needs access to the debuginfo of both the kernel and modules, which is
not available in the 2nd kernel;
c) poor performance, which makes the vmcore dumping time unacceptable (see
the performance testing below).
makedumpfile needs to resolve the dwarf data from debuginfo to get symbol
types and addresses. Recent kernels provide dwarf alternatives such as
btf/kallsyms which can be used for this purpose, and the btf/kallsyms info
is already packed within the vmcore, so we can use it directly; brief btf
and kallsyms illustrations appear below.
3) Recent kernels use maple tree data structures, e.g. for vma iteration,
so a maple tree port is needed.
With these, this patchset introduces an upgraded eppic, based on btf/kallsyms
symbol resolving and programmable for mm page filtering; a minimal btf
illustration follows.
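For illustration only, a hedged sketch of the btf-based type resolution idea
using libbpf's BTF API against /sys/kernel/btf/vmlinux. The patchset's btf.c
decodes the BTF packed in the vmcore itself and does not use libbpf, so treat
the calls below purely as a demonstration of the concept:

/* Hedged illustration of btf-based type resolution (not the patchset's code).
 * Walks struct mm_struct's members via the kernel's BTF and prints their bit
 * offsets, the same kind of information dwarf would otherwise be needed for. */
#include <stdio.h>
#include <bpf/btf.h>

int main(void)
{
	struct btf *btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
	const struct btf_type *t;
	const struct btf_member *m;
	__s32 id;
	int i;

	if (!btf)
		return 1;

	id = btf__find_by_name_kind(btf, "mm_struct", BTF_KIND_STRUCT);
	if (id < 0)
		return 1;

	t = btf__type_by_id(btf, id);
	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++)
		printf("mm_struct.%s: bit offset %u\n",
		       btf__name_by_offset(btf, m->name_off), m->offset);

	btf__free(btf);
	return 0;
}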
The following shows its usage and performance; please note that the tests
were performed in the 1st kernel:
$ time ./makedumpfile -d 31 -l /var/crash/127.0.0.1-2025-06-10-18\:03\:12/vmcore \
    /tmp/dwarf.out -x /lib/debug/lib/modules/6.11.8-300.fc41.x86_64/vmlinux \
    --eppic eppic_scripts/filter_amdgpu_mm_pages.c
real 14m6.894s
user 4m16.900s
sys 9m44.695s
$ time ./makedumpfile -d 31 -l /var/crash/127.0.0.1-2025-06-10-18\:03\:12/vmcore \
    /tmp/btf.out --eppic eppic_scripts/filter_amdgpu_mm_pages.c
real 0m10.672s
user 0m9.270s
sys 0m1.130s
-rw------- 1 root root 367475074 Jun 10 18:06 btf.out
-rw------- 1 root root 367475074 Jun 10 21:05 dwarf.out
-rw-rw-rw- 1 root root 387181418 Jun 10 18:03 /var/crash/127.0.0.1-2025-06-10-18:03:12/vmcore
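And for the kallsyms side mentioned in B) 2), for illustration only, a hedged
sketch that resolves a symbol address from /proc/kallsyms in the 1st kernel
(it may need root to see non-zero addresses); the patchset's kallsyms.c
instead decodes the kallsyms tables found in the vmcore, but the recovered
name-to-address mapping is the same:

/* Hedged illustration of kallsyms-based address resolution (not the
 * patchset's code): scan /proc/kallsyms for a symbol name. */
#include <stdio.h>
#include <string.h>

static unsigned long lookup_kallsyms(const char *want)
{
	FILE *f = fopen("/proc/kallsyms", "r");
	char name[256], type;
	unsigned long addr, found = 0;

	if (!f)
		return 0;
	while (fscanf(f, "%lx %c %255s%*[^\n]", &addr, &type, name) == 3) {
		if (strcmp(name, want) == 0) {
			found = addr;
			break;
		}
	}
	fclose(f);
	return found;
}

int main(void)
{
	printf("init_task = %lx\n", lookup_kallsyms("init_task"));
	return 0;
}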
C) Discussion:
1) GPU types: currently only tested with amdgpu's mm page filtering; others
are not tested.
2) Code structure: some code is shared by makedumpfile and crash, such as
the maple tree data structures, and I also plan to port the btf/kallsyms
code to crash, so there will be code duplication between crash and
makedumpfile. Since I haven't started the crash porting yet, changes to
the btf/kallsyms code are expected. How should we share the code: create
a common library, or keep the duplication as it is?
3) OS: the code works on rhel-10+/rhel-9.5+ on x86_64/arm64/s390/ppc64.
Others are not tested.
D) Testing:
1) If you don't want to create your vmcore, you can find a vmcore which I
created with amdgpu mm pages unfiltered [1], the amdgpu mm pages are
allocated by program [2]. You can use the vmcore in 1st kernel to filter
the amdgpu mm pages by the previous performance testing cmdline. To
verify the pages are filtered in crash:
Unfiltered:
crash> search -c "!QAZXSW@#EDC"
ffff96b7fa800000: !QAZXSW@#EDCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
ffff96b87c800000: !QAZXSW@#EDCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
crash> rd ffff96b7fa800000
ffff96b7fa800000: 405753585a415121 !QAZXSW@
crash> rd ffff96b87c800000
ffff96b87c800000: 405753585a415121 !QAZXSW@
Filtered:
crash> search -c "!QAZXSW@#EDC"
crash> rd ffff96b7fa800000
rd: page excluded: kernel virtual address: ffff96b7fa800000 type: "64-bit KVADDR"
crash> rd ffff96b87c800000
rd: page excluded: kernel virtual address: ffff96b87c800000 type: "64-bit KVADDR"
2) If no amdgpu vmcore/machine is available, you can use
eppic_scripts/print_all_vma.c against an ordinary vmcore to test only the
btf/kallsyms functions by printing all VMAs.
[1]: https://people.redhat.com/~ltao/core/
[2]: https://gist.github.com/liutgnu/a8cbce1c666452f1530e1410d1f352df
Tao Liu (10):
dwarf_info: Support kernel address randomization
dwarf_info: Fix a infinite recursion bug for search_domain
Add page filtering function
Add btf/kallsyms support for symbol type/address resolving
Export necessary btf/kallsyms functions to eppic extension
Port the maple tree data structures and functions
Supporting main() as the entry of eppic script
Enable page filtering for dwarf eppic
Enable page filtering for btf/kallsyms eppic
Introducing 2 eppic scripts to test the dwarf/btf eppic extension
Makefile | 6 +-
btf.c | 919 +++++++++++++++++++++++++
btf.h | 176 +++++
dwarf_info.c | 15 +-
eppic_maple.c | 431 ++++++++++++
eppic_maple.h | 8 +
eppic_scripts/filter_amdgpu_mm_pages.c | 36 +
eppic_scripts/print_all_vma.c | 29 +
erase_info.c | 123 +++-
erase_info.h | 22 +
extension_btf.c | 218 ++++++
extension_eppic.c | 41 +-
extension_eppic.h | 6 +-
kallsyms.c | 371 ++++++++++
kallsyms.h | 42 ++
makedumpfile.c | 21 +-
makedumpfile.h | 11 +
17 files changed, 2448 insertions(+), 27 deletions(-)
create mode 100644 btf.c
create mode 100644 btf.h
create mode 100644 eppic_maple.c
create mode 100644 eppic_maple.h
create mode 100644 eppic_scripts/filter_amdgpu_mm_pages.c
create mode 100644 eppic_scripts/print_all_vma.c
create mode 100644 extension_btf.c
create mode 100644 kallsyms.c
create mode 100644 kallsyms.h
--
2.47.0
1 week, 3 days
[PATCH] Support running on X86_64 with RISCV target
by Pnina Feder
Signed-off-by: Pnina Feder <pnina.feder(a)mobileye.com>
---
symbols.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/symbols.c b/symbols.c
index e30fafe..5b42ecf 100644
--- a/symbols.c
+++ b/symbols.c
@@ -4539,7 +4539,7 @@ is_shared_object(char *file)
case EM_X86_64:
if (machine_type("X86_64") || machine_type("ARM64") ||
- machine_type("PPC64"))
+ machine_type("PPC64") || machine_type("RISCV64"))
return TRUE;
break;
--
2.43.0
1 week, 4 days
[PATCH] RISCV64: Add 'PAGE DIRECTORY' property to the 'vtop' command
by Austin Kim
Currently, the 'vtop' command does not include the 'PAGE DIRECTORY' line for
RISC-V vmcores. Since the 'PAGE DIRECTORY' is the root page table, we need
this information. Note that Arm64, Arm32 and x86 vmcores already show
'PAGE DIRECTORY' in their 'vtop' output.
(before)
crash> vtop 0xffffffff80d8e1e0
VIRTUAL PHYSICAL
ffffffff80d8e1e0 40f8e1e0
PGD: ffffffff81c4fff0 => 4fffe801
PMD: 000000013fffa000 => 00000000103800eb
PTE: 40e00000 => b35030414c583
PAGE: 002cd40c10531000
PTE PHYSICAL FLAGS
b35030414c583 2cd40c10531000 (PRESENT|READ|DIRTY|SOFT)
(after)
crash> vtop 0xffffffff80d8e1e0
VIRTUAL PHYSICAL
ffffffff80d8e1e0 40f8e1e0
PAGE DIRECTORY: ffffffff81c4f000
PGD: ffffffff81c4fff0 => 4fffe801
PMD: 000000013fffa000 => 00000000103800eb
PTE: 40e00000 => b35030414c583
PAGE: 002cd40c10531000
PTE PHYSICAL FLAGS
b35030414c583 2cd40c10531000 (PRESENT|READ|DIRTY|SOFT)
Signed-off-by: Austin Kim <austindh.kim(a)gmail.com>
---
riscv64.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/riscv64.c b/riscv64.c
index 073ebb9..ef5c41d 100644
--- a/riscv64.c
+++ b/riscv64.c
@@ -634,6 +634,9 @@ riscv64_vtop_3level_4k(ulong *pgd, ulong vaddr, physaddr_t *paddr, int verbose)
ulong pte_val, pte_pfn;
ulong pt_phys;
+ if (verbose)
+ fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);
+
/* PGD */
pgd_ptr = pgd + pgd_index_l3_4k(vaddr);
FILL_PGD(pgd, KVADDR, PAGESIZE());
@@ -1213,6 +1216,9 @@ riscv64_vtop_4level_4k(ulong *pgd, ulong vaddr, physaddr_t *paddr, int verbose)
ulong pte_val, pte_pfn;
ulong pt_phys;
+ if (verbose)
+ fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);
+
/* PGD */
pgd_ptr = pgd + pgd_index_l4_4k(vaddr);
FILL_PGD(pgd, KVADDR, PAGESIZE());
@@ -1289,6 +1295,9 @@ riscv64_vtop_5level_4k(ulong *pgd, ulong vaddr, physaddr_t *paddr, int verbose)
ulong pte_val, pte_pfn;
ulong pt_phys;
+ if (verbose)
+ fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);
+
/* PGD */
pgd_ptr = pgd + pgd_index_l5_4k(vaddr);
FILL_PGD(pgd, KVADDR, PAGESIZE());
--
2.34.1
1 week, 4 days
[PATCH RFC] help: Add 'help -l' to show memory layout
by Rongwei Wang
From: Rongwei Wang <rongwei.wrw(a)gmail.com>
'help -m' can show most of the variables related to the
memory layout, e.g. userspace_top, page_offset,
vmalloc_start_addr, etc., but it is not a visual way to
show the kernel-space memory layout.
This patch provides 'help -l' to show the memory layout
in a table-based way; usage looks like:
crash> help -l
+---------------------------------------------------------+
|xxxxxxxxxxxxxxxxxxx gap (size: 8.0 MB) xxxxxxxxxxxxxxxxxx|
|xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+---------------------------------------------------------+
| FIXMAP |
| (0xffffffffff578000 - 0xffffffffff7ff000 size: 2.5 MB) |
+---------------------------------------------------------+
|xxxxxxxxxxxxxxxxxxx gap (size: 5.5 MB) xxxxxxxxxxxxxxxxxx|
|xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+---------------------------------------------------------+
| Module Area |
| (0xffffffffa0000000 - 0xffffffffff000000 size: 1.5 GB) |
+---------------------------------------------------------+
|xxxxxxxxxxxxxxxxxx gap (size: 451.9 MB) xxxxxxxxxxxxxxxxx|
|xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+---------------------------------------------------------+
| kernel image |
| (0xffffffff81000000 - 0xffffffff83c26000 size: 44.1 MB) |
+---------------------------------------------------------+
|xxxxxxxxxxxxxxxxxx gap (size: 21.0 TB) xxxxxxxxxxxxxxxxxx|
|xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
|xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
|xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+---------------------------------------------------------+
| Vmemmap Area |
| (0xffffea0000000000 - 0xffffeaffffffffff size: 1.0 TB) |
+---------------------------------------------------------+
|xxxxxxxxxxxxxxxxxxx gap (size: 1.0 TB) xxxxxxxxxxxxxxxxxx|
|xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
|xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+---------------------------------------------------------+
| Vmalloc/Vfree Area |
| (0xffffc90000000000 - 0xffffe8ffffffffff size: 32.0 TB) |
+---------------------------------------------------------+
|xxxxxxxxxxxxxxxxxx gap (size: 512.0 GB) xxxxxxxxxxxxxxxxx|
|xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+---------------------------------------------------------+
| Linear Mapping Area |
| (0xffff888000000000 - 0xffffc87fffffffff size: 64.0 TB) |
+---------------------------------------------------------+
| Kernel Space Offset |
| (0xffff800000000000 - 0xffff887fffffffff size: 8.5 TB) |
+---------------------------------------------------------+
|xxxxxxxxxxxxxxx gap (size: 16776960.0 TB) xxxxxxxxxxxxxxx|
|xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
|xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
|xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+---------------------------------------------------------+
| User Space |
| (size: 128.0 TB) |
+---------------------------------------------------------+
Signed-off-by: Rongwei Wang <rongwei.wrw(a)gmail.com>
---
defs.h | 14 +-
help.c | 7 +-
memory.c | 11 ++
x86_64.c | 388 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 418 insertions(+), 2 deletions(-)
diff --git a/defs.h b/defs.h
index bbd6d4b..66b1c8e 100644
--- a/defs.h
+++ b/defs.h
@@ -4097,12 +4097,23 @@ typedef signed int s32;
#define __PHYSICAL_MASK_SHIFT_5LEVEL 52
#define __PHYSICAL_MASK_SHIFT (machdep->machspec->physical_mask_shift)
#define __PHYSICAL_MASK ((1UL << __PHYSICAL_MASK_SHIFT) - 1)
-#define __VIRTUAL_MASK_SHIFT 48
+#define __VIRTUAL_MASK_SHIFT 47
#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK )
+
+#define KASAN_SHADOW_OFFSET 0xdffffc0000000000
+#define KASAN_SHADOW_SCALE_SHIFT 3
+
+#define KASAN_SHADOW_START (KASAN_SHADOW_OFFSET + \
+ ((-1UL << __VIRTUAL_MASK_SHIFT) >> \
+ KASAN_SHADOW_SCALE_SHIFT))
+#define KASAN_SHADOW_END (KASAN_SHADOW_START + \
+ (1ULL << (__VIRTUAL_MASK_SHIFT - \
+ KASAN_SHADOW_SCALE_SHIFT)))
+
#define _PAGE_BIT_NX 63
#define _PAGE_PRESENT 0x001
#define _PAGE_RW 0x002
@@ -5908,6 +5919,7 @@ int phys_to_page(physaddr_t, ulong *);
int generic_get_kvaddr_ranges(struct vaddr_range *);
int l1_cache_size(void);
int dumpfile_memory(int);
+void dump_memory_layout(void);
#define DUMPFILE_MEM_USED (1)
#define DUMPFILE_FREE_MEM (2)
#define DUMPFILE_MEM_DUMP (3)
diff --git a/help.c b/help.c
index 5d61e0d..cd54744 100644
--- a/help.c
+++ b/help.c
@@ -538,7 +538,7 @@ cmd_help(void)
oflag = 0;
while ((c = getopt(argcnt, args,
- "efNDdmM:ngcaBbHhkKsvVoptTzLOr")) != EOF) {
+ "efNDdmM:ngcaBbHhkKsvVoptTzLOrl")) != EOF) {
switch(c)
{
case 'e':
@@ -666,6 +666,7 @@ cmd_help(void)
fprintf(fp, " -v - vm_table\n");
fprintf(fp, " -V - vm_table (verbose)\n");
fprintf(fp, " -z - help options\n");
+ fprintf(fp, " -l - show memory layout\n");
return;
case 'L':
@@ -676,6 +677,10 @@ cmd_help(void)
dump_registers();
return;
+ case 'l':
+ dump_memory_layout();
+ return;
+
default:
argerrs++;
break;
diff --git a/memory.c b/memory.c
index 400d31a..55ed2f1 100644
--- a/memory.c
+++ b/memory.c
@@ -17657,6 +17657,17 @@ dumpfile_memory(int cmd)
return retval;
}
+#ifdef X86_64
+extern void x86_64_dump_memory_layout(void);
+#endif
+
+void dump_memory_layout(void)
+{
+#ifdef X86_64
+ x86_64_dump_memory_layout();
+#endif
+}
+
/*
* Functions for sparse mem support
*/
diff --git a/x86_64.c b/x86_64.c
index d7da536..b4d2821 100644
--- a/x86_64.c
+++ b/x86_64.c
@@ -1151,6 +1151,394 @@ x86_64_dump_machdep_table(ulong arg)
fprintf(fp, "excpetion_functions_orig\n");
}
+#define MAX_LAYOUT 32
+#define MAX_COL_LAYOUT 60
+struct mem_segment {
+ char name[64];
+ char desc[128];
+ unsigned long start;
+ unsigned long end;
+ int width;
+ char fill_char;
+};
+
+struct mem_layout {
+ int count;
+ int capacity;
+ struct mem_segment segs[MAX_LAYOUT];
+};
+
+char* format_bytes(unsigned long bytes, char* buffer, int buffer_size)
+{
+ const char* units[] = {"B", "KB", "MB", "GB", "TB"};
+ int i = 0;
+ double readable_size = (double)bytes;
+
+ /* Handle the edge case of zero bytes */
+ if (bytes == 0) {
+ snprintf(buffer, buffer_size, "0 B");
+ return buffer;
+ }
+
+ /* Handle negative values if necessary, though size is typically non-negative */
+ if (bytes < 0) {
+ snprintf(buffer, buffer_size, "Invalid size");
+ return buffer;
+ }
+
+ while (readable_size >= 1024 && i < (sizeof(units) / sizeof(units[0]) - 1)) {
+ readable_size /= 1024;
+ i++;
+ }
+
+ memset(buffer, '\0', buffer_size);
+ snprintf(buffer, buffer_size, "%.1f %s", readable_size, units[i]);
+
+ return buffer;
+}
+
+int compare_segments(const void *a, const void *b)
+{
+ const struct mem_segment *seg_a = (const struct mem_segment *)a;
+ const struct mem_segment *seg_b = (const struct mem_segment *)b;
+
+ if (seg_a->start > seg_b->start) return -1;
+ if (seg_a->start < seg_b->start) return 1;
+ return 0;
+}
+
+void* make_layout(struct mem_layout *layout, int max_row, int max_col)
+{
+ int col = MAX_COL_LAYOUT;
+ int row = max_row + 1;
+ char *layout_raw;
+ int i,j;
+ unsigned int cursor = 0;
+ int idx = 0;
+
+ if (max_col > col)
+ col = max_col;
+
+ layout_raw = (char *)malloc(row * col * sizeof(char));
+ memset(layout_raw, ' ', row * col * sizeof(char));
+ for (i=0; i<layout->count; i++) {
+ int center_bias = 0;
+ char fill = layout->segs[i].fill_char;
+
+ memset(layout_raw+cursor, '-', col);
+ layout_raw[cursor] = layout_raw[cursor+col-2] = '+';
+ layout_raw[cursor+col-1] = '\n';
+ cursor += col; /* next row */
+
+ memset(layout_raw+cursor, fill, col);
+ layout_raw[cursor] = '|';
+ layout_raw[cursor+col-2] = '|';
+ layout_raw[cursor+col-1] = '\n';
+ center_bias = (col - strlen(layout->segs[i].name)) / 2;
+ memcpy(layout_raw + cursor + center_bias, layout->segs[i].name,
+ strlen(layout->segs[i].name));
+ cursor += col; /* next row */
+
+ if (strlen(layout->segs[i].desc) != 0) {
+ memset(layout_raw+cursor, fill, col);
+ layout_raw[cursor] = '|';
+ layout_raw[cursor+col-2] = '|';
+ layout_raw[cursor+col-1] = '\n';
+
+ center_bias = (col - strlen(layout->segs[i].desc)) / 2;
+ memcpy(layout_raw + cursor + center_bias, layout->segs[i].desc,
+ strlen(layout->segs[i].desc));
+
+ cursor += col; /* next row */
+ } else {
+ /* It's a gap area. */
+ int width = layout->segs[i].width;
+
+ while(width--) {
+ memset(layout_raw+cursor, fill, col);
+ layout_raw[cursor] = '|';
+ layout_raw[cursor+col-2] = '|';
+ layout_raw[cursor+col-1] = '\n';
+ cursor += col; /* next row */
+ }
+ }
+
+ if (i == (layout->count - 1)) {
+ /* last line */
+ memset(layout_raw+cursor, '-', col);
+ layout_raw[cursor] = layout_raw[cursor+col-2] = '+';
+ layout_raw[cursor+col-1] = '\n';
+ layout_raw[cursor+col] = '\0';
+ }
+ }
+
+ return layout_raw;
+}
+
+void print_layout(struct mem_layout *layout)
+{
+ int max_col = 0;
+ int max_row = 0;
+ struct mem_segment *segs = layout->segs;
+ struct mem_segment seg;
+ int i, j;
+ char *layout_raw;
+ char *string;
+
+ if (layout == NULL)
+ return;
+
+ /* calculate the max col which can includes all 'desc' */
+ for (i=0; i<layout->count; i++) {
+ int col = 0;
+ int cursor = 0;
+ int row = 1; /* the minimal row */
+
+ seg = segs[i];
+ col = strlen(seg.name);
+
+ max_col = (max_col >= col) ? max_col : col;
+ /* The gap area has no desc. */
+ if (seg.desc[0] != '\0') {
+ col = strlen(seg.desc);
+ row += 1;
+ max_col = (max_col >= col) ? max_col : col;
+ } else
+ row += segs[i].width;
+
+ max_row += row;
+ }
+ /* add border line */
+ max_row += layout->count + 1;
+ max_col += 3;
+
+ layout_raw = make_layout(layout, max_row, max_col);
+ fprintf(fp, "%s", layout_raw);
+ free(layout_raw);
+}
+
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
+/*
+ * table-based memory layout:
+ *
+ * +---------------------------------------------------------+
+ * |xxxxxxxxxxxxxxxxxxx gap (size: 8.0 MB) xxxxxxxxxxxxxxxxxx|
+ * +---------------------------------------------------------+
+ * | FIXMAP |
+ * | (0xffffffffff578000 - 0xffffffffff7ff000 size: 2.5 MB) |
+ * +---------------------------------------------------------+
+ * |xxxxxxxxxxxxxxxxxxx gap (size: 5.5 MB) xxxxxxxxxxxxxxxxxx|
+ * +---------------------------------------------------------+
+ * | Module Area |
+ * | (0xffffffffa0000000 - 0xffffffffff000000 size: 1.5 GB) |
+ * +---------------------------------------------------------+
+ * |xxxxxxxxxxxxxxxxxx gap (size: 451.9 MB) xxxxxxxxxxxxxxxxx|
+ * +---------------------------------------------------------+
+ * | kernel image |
+ * | (0xffffffff81000000 - 0xffffffff83c26000 size: 44.1 MB) |
+ * +---------------------------------------------------------+
+ * |xxxxxxxxxxxxxxxxxx gap (size: 21.0 TB) xxxxxxxxxxxxxxxxxx|
+ * |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+ * |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+ * +---------------------------------------------------------+
+ * | Vmemmap Area |
+ * | (0xffffea0000000000 - 0xffffeaffffffffff size: 1.0 TB) |
+ * +---------------------------------------------------------+
+ * |xxxxxxxxxxxxxxxxxxx gap (size: 1.0 TB) xxxxxxxxxxxxxxxxxx|
+ * |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+ * +---------------------------------------------------------+
+ * | Vmalloc/Vfree Area |
+ * | (0xffffc90000000000 - 0xffffe8ffffffffff size: 32.0 TB) |
+ * +---------------------------------------------------------+
+ * |xxxxxxxxxxxxxxxxxx gap (size: 512.0 GB) xxxxxxxxxxxxxxxxx|
+ * +---------------------------------------------------------+
+ * | Linear Mapping Area |
+ * | (0xffff888000000000 - 0xffffc87fffffffff size: 64.0 TB) |
+ * +---------------------------------------------------------+
+ * | Kernel Space Offset |
+ * | (0xffff800000000000 - 0xffff887fffffffff size: 8.5 TB) |
+ * +---------------------------------------------------------+
+ * |xxxxxxxxxxxxxxx gap (size: 16776960.0 TB) xxxxxxxxxxxxxxx|
+ * |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+ * |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+ * +---------------------------------------------------------+
+ * | User Space |
+ * | (size: 128.0 TB) |
+ * +---------------------------------------------------------+
+ *
+ * kernel space:
+ * _end, _text
+ * vmemmap_end: ms->vmemmap_end
+ * vmemmap_vaddr: ms->vmemmap_vaddr
+ * vmalloc_end: ms->vmalloc_end
+ * vmalloc_start_addr: ms->vmalloc_start_addr
+ * page_offset_base: ms->page_offset
+ *
+ * user space:
+ * userspace_top: ms->userspace_top
+ *
+ */
+void x86_64_dump_memory_layout(void)
+{
+ struct mem_layout *layout = NULL;
+ ulong text_start, text_end;
+ struct machine_specific *ms = machdep->machspec;
+ int i, next_idx;
+ char size_buf[20];
+ long value = 0;
+
+ layout = calloc(1, sizeof(struct mem_layout));
+ if (layout == NULL) {
+ printf("Failed to allocate the memory layout.\n");
+ return;
+ }
+
+ /* Fill layout->segs in place; the segments are sorted for printing below. */
+ struct mem_segment *sorted_segments = layout->segs;
+ if(!sorted_segments) {
+ perror("Failed to allocate memory for sorting");
+ return;
+ }
+
+ if (!symbol_exists("_text"))
+ return;
+ else
+ text_start = symbol_value("_text");
+
+ if (!symbol_exists("_end"))
+ return;
+ else
+ text_end = symbol_value("_end");
+
+ snprintf(sorted_segments[0].name, 64, "kernel image");
+ snprintf(sorted_segments[0].desc, 64, "(0x%lx - 0x%lx size: %s)",
+ text_start, text_end,
+ format_bytes(text_end - text_start + 1, size_buf, 20));
+ sorted_segments[0].start = text_start;
+ sorted_segments[0].end = text_end;
+ sorted_segments[0].fill_char = ' ';
+
+ snprintf(sorted_segments[1].name, 64, "Vmemmap Area");
+ snprintf(sorted_segments[1].desc, 64, "(0x%lx - 0x%lx size: %s)",
+ (ulong)ms->vmemmap_vaddr, (ulong)ms->vmemmap_end,
+ format_bytes(ms->vmemmap_end - ms->vmemmap_vaddr + 1, size_buf, 20));
+ sorted_segments[1].start = (ulong)ms->vmemmap_vaddr;
+ sorted_segments[1].end = (ulong)ms->vmemmap_end;
+ sorted_segments[1].fill_char = ' ';
+
+ snprintf(sorted_segments[2].name, 64, "Module Area");
+ snprintf(sorted_segments[2].desc, 64, "(0x%lx - 0x%lx size: %s)",
+ (ulong)ms->modules_vaddr,(ulong)ms->modules_end,
+ format_bytes(ms->modules_end - ms->modules_vaddr + 1, size_buf, 20));
+ sorted_segments[2].start = (ulong)ms->modules_vaddr;
+ sorted_segments[2].end = (ulong)ms->modules_end;
+ sorted_segments[2].fill_char = ' ';
+
+ snprintf(sorted_segments[3].name, 64, "Vmalloc/Vfree Area");
+ snprintf(sorted_segments[3].desc, 64, "(0x%lx - 0x%lx size: %s)",
+ (ulong)ms->vmalloc_start_addr, (ulong)ms->vmalloc_end,
+ format_bytes(ms->vmalloc_end - ms->vmalloc_start_addr + 1, size_buf, 20));
+ sorted_segments[3].start = (ulong)ms->vmalloc_start_addr;
+ sorted_segments[3].end = (ulong)ms->vmalloc_end;
+ sorted_segments[3].fill_char = ' ';
+
+ snprintf(sorted_segments[4].name, 64, "Linear Mapping Area");
+ sorted_segments[4].start = (ulong)ms->page_offset;
+ sorted_segments[4].end = (ulong)ms->page_offset + (1UL << machdep->max_physmem_bits) - 1;
+ sorted_segments[4].fill_char = ' ';
+ snprintf(sorted_segments[4].desc, 64, "(0x%lx - 0x%lx size: %s)",
+ sorted_segments[4].start, sorted_segments[4].end,
+ format_bytes(1UL << machdep->max_physmem_bits, size_buf, 20));
+
+ snprintf(sorted_segments[5].name, 64, "User Space");
+ snprintf(sorted_segments[5].desc, 64, "(size: %s)",
+ format_bytes((ulong)ms->userspace_top, size_buf, 20));
+ sorted_segments[5].start = 0UL;
+ sorted_segments[5].end = (ulong)ms->userspace_top - 1;
+ sorted_segments[5].fill_char = ' ';
+
+ snprintf(sorted_segments[6].name, 64, "Kernel Space Offset");
+ sorted_segments[6].start = -1UL - (1UL << __VIRTUAL_MASK_SHIFT) + 1;
+ sorted_segments[6].end = (ulong)ms->page_offset - 1;
+ sorted_segments[6].fill_char = ' ';
+ snprintf(sorted_segments[6].desc, 64, "(0x%lx - 0x%lx size: %s)",
+ sorted_segments[6].start, sorted_segments[6].end,
+ format_bytes(sorted_segments[6].end - sorted_segments[6].start + 1, size_buf, 20));
+
+ layout->count = 7;
+ if (kernel_symbol_exists("kasan_init")) {
+ snprintf(sorted_segments[7].name, 64, "KASAN");
+ sorted_segments[7].start = KASAN_SHADOW_START;
+ sorted_segments[7].end = KASAN_SHADOW_END;
+ sorted_segments[7].fill_char = ' ';
+ snprintf(sorted_segments[7].desc, 64, "(0x%lx - 0x%lx size: %s)",
+ sorted_segments[7].start, sorted_segments[7].end,
+ format_bytes(sorted_segments[7].end - sorted_segments[7].start + 1, size_buf, 20));
+ layout->count++;
+ }
+
+ if (enumerator_value("__end_of_permanent_fixed_addresses", &value)) {
+ unsigned fixaddr_size = 0;
+ int idx = layout->count;
+
+ fixaddr_size = value << PAGE_SHIFT;
+
+ snprintf(sorted_segments[idx].name, 64, "FIXMAP");
+ sorted_segments[idx].end = round_up(VSYSCALL_START + PAGE_SIZE, 1 << PMD_SHIFT) - PAGE_SIZE;
+ sorted_segments[idx].start = sorted_segments[idx].end - fixaddr_size;
+
+ sorted_segments[idx].fill_char = ' ';
+ snprintf(sorted_segments[idx].desc, 64, "(0x%lx - 0x%lx size: %s)",
+ sorted_segments[idx].start, sorted_segments[idx].end,
+ format_bytes(sorted_segments[idx].end - sorted_segments[idx].start + 1, size_buf, 20));
+ layout->count++;
+ }
+
+ /* Sort segments from highest address to lowest. */
+ qsort(sorted_segments, layout->count, sizeof(struct mem_segment), compare_segments);
+
+ next_idx = layout->count;
+ /* Insert gap area */
+ for (i=0; i<layout->count; i++) {
+ unsigned long prev_start;
+ unsigned long end = sorted_segments[i].end;
+
+ if (i == 0)
+ prev_start = -1UL;
+ else
+ prev_start = sorted_segments[i-1].start;
+
+ if (prev_start == (end + 1))
+ continue;
+
+ if ((prev_start - end) >= (8UL * 1024 * 1024 * 1024 * 1024))
+ sorted_segments[next_idx].width = 3;
+ else if ((prev_start - end) >= (1UL * 1024 * 1024 * 1024 * 1024))
+ sorted_segments[next_idx].width = 2;
+ else
+ sorted_segments[next_idx].width = 1;
+
+ sorted_segments[next_idx].start = end + 1;
+ sorted_segments[next_idx].end = (i == 0) ? prev_start : prev_start - 1;
+ sorted_segments[next_idx].fill_char = 'x';
+ snprintf(sorted_segments[next_idx].name, 64, " gap (size: %s) ",
+ format_bytes(sorted_segments[next_idx].end - sorted_segments[next_idx].start + 1,
+ size_buf, 20));
+ sorted_segments[next_idx].desc[0] = '\0';
+
+ next_idx++;
+ }
+
+ layout->count = next_idx;
+ qsort(sorted_segments, layout->count, sizeof(struct mem_segment), compare_segments);
+
+ print_layout(layout);
+ free(layout);
+}
+
/*
* Gather the cpu_pda array info, updating any smp-related items that
* were possibly bypassed or improperly initialized in kernel_init().
--
2.39.3
1 week, 5 days
Re: [PATCH 1/2] vmware_vmss: support segment registers
by lijiang
Hi, Ajay
Thank you for the patch.
On Fri, Aug 29, 2025 at 2:16 PM <devel-request(a)lists.crash-utility.osci.io>
wrote:
> Date: Mon, 11 Aug 2025 05:56:22 +0000
> From: Ajay Kaher <ajay.kaher(a)broadcom.com>
> Subject: [Crash-utility] [PATCH 1/2] vmware_vmss: support segment
> registers
> To: devel(a)lists.crash-utility.osci.io
> Cc: alexey.makhalov(a)broadcom.com,
> vamsi-krishna.brahmajosyula(a)broadcom.com, tapas.kundu(a)broadcom.com
> ,
> ajay.kaher(a)broadcom.com
> Message-ID: <20250811055623.179491-1-ajay.kaher(a)broadcom.com>
>
> adding support for segment registers for vmware vmss dumps.
>
> Signed-off-by: Ajay Kaher <ajay.kaher(a)broadcom.com>
>
> ---
> vmware_guestdump.c | 2 +-
> vmware_vmss.c | 114 +++++++++++++++++++++++++++++++++++----------
> vmware_vmss.h | 92 +++++++++++++++++++++++++-----------
> 3 files changed, 154 insertions(+), 54 deletions(-)
>
>
The code looks good, but I saw a warning:
gcc -c -g -DX86_64 -DLZO -DGDB_16_2 vmware_vmss.c -Wall -O2
-Wstrict-prototypes -Wmissing-prototypes -fstack-protector
-Wformat-security
vmware_vmss.c: In function ‘dump_registers_for_vmss_dump’:
vmware_vmss.c:895:73: warning: format ‘%x’ expects argument of type ‘unsigned int’, but argument 3 has type ‘uint64_t’ {aka ‘long unsigned int’} [-Wformat=]
  895 |     fprintf(fp, "Missing registers for this CPU: 0x%x\n", vmss.vcpu_regs[i]);
      |                                                    ~^     ~~~~~~~~~~~~~~~~~
      |                                                     |     |
      |                                                     unsigned int
      |                                                           uint64_t {aka long unsigned int}
      |                                                    %lx
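As an aside, either a fixed-width format macro or an explicit cast avoids this
class of warning regardless of how the platform defines uint64_t; a minimal
illustration only (the submitted v2 simply switches the format to %lx):

/* Illustrative only: two portable ways to print a uint64_t. */
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t regs = 0x3ffffffffULL;

	printf("present: 0x%" PRIx64 "\n", regs);		/* <inttypes.h> macro */
	printf("present: 0x%llx\n", (unsigned long long)regs);	/* explicit cast */
	return 0;
}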
BTW: I cannot test the patch, so it relies on your testing.
Thanks
Lianbo
diff --git a/vmware_guestdump.c b/vmware_guestdump.c
> index 78f37fb..d515df5 100644
> --- a/vmware_guestdump.c
> +++ b/vmware_guestdump.c
> @@ -320,7 +320,7 @@ vmware_guestdump_init(char *filename, FILE *ofp)
> goto exit;
> }
>
> - vmss.vcpu_regs = malloc(vmss.num_vcpus * sizeof(uint32_t));
> + vmss.vcpu_regs = malloc(vmss.num_vcpus * sizeof(uint64_t));
> vmss.regs64 = calloc(vmss.num_vcpus, sizeof(void *));
> if (!vmss.vcpu_regs || !vmss.regs64) {
> error(INFO, LOGPRX"Failed to allocate memory\n");
> diff --git a/vmware_vmss.c b/vmware_vmss.c
> index 8121ab6..1a71d02 100644
> --- a/vmware_vmss.c
> +++ b/vmware_vmss.c
> @@ -317,7 +317,7 @@ vmware_vmss_init(char *filename, FILE *ofp)
>
> vmss.num_vcpus = u.val32;
> vmss.regs64 = malloc(vmss.num_vcpus * sizeof(void *));
> - vmss.vcpu_regs = malloc(vmss.num_vcpus * sizeof(uint32_t));
> + vmss.vcpu_regs = malloc(vmss.num_vcpus * sizeof(uint64_t));
>
> for (k = 0; k < vmss.num_vcpus; k++) {
> vmss.regs64[k] = malloc(sizeof(vmssregs64));
> @@ -432,15 +432,65 @@ vmware_vmss_init(char *filename, FILE *ofp)
> int cpu = idx[0];
> vmss.regs64[cpu]->rflags |= u.val32;
> vmss.vcpu_regs[cpu] |= REGS_PRESENT_RFLAGS;
> + } else if (strcmp(name, "S.base64") == 0) {
> + int cpu = idx[0];
> + int seg_index = idx[1];
> + switch (seg_index) {
> + case SEG_FS:
> + vmss.regs64[cpu]->fs_base = u.val64;
> + vmss.vcpu_regs[cpu] |= REGS_PRESENT_FS_BASE;
> + break;
> + case SEG_GS:
> + vmss.regs64[cpu]->gs_base = u.val64;
> + vmss.vcpu_regs[cpu] |= REGS_PRESENT_GS_BASE;
> + break;
> + }
> + } else if (strcmp(name, "S") == 0) {
> + int cpu = idx[0];
> + int seg_index = idx[1];
> + switch (seg_index) {
> + case SEG_ES:
> + vmss.regs64[cpu]->es = u.val32;
> + vmss.vcpu_regs[cpu] |= REGS_PRESENT_ES;
> + break;
> + case SEG_CS:
> + vmss.regs64[cpu]->cs = u.val32;
> + vmss.vcpu_regs[cpu] |= REGS_PRESENT_CS;
> + break;
> + case SEG_SS:
> + vmss.regs64[cpu]->ss = u.val32;
> + vmss.vcpu_regs[cpu] |= REGS_PRESENT_SS;
> + break;
> + case SEG_DS:
> + vmss.regs64[cpu]->ds = u.val32;
> + vmss.vcpu_regs[cpu] |= REGS_PRESENT_DS;
> + break;
> + case SEG_FS:
> + vmss.regs64[cpu]->fs = u.val32;
> + vmss.vcpu_regs[cpu] |= REGS_PRESENT_FS;
> + break;
> + case SEG_GS:
> + vmss.regs64[cpu]->gs = u.val32;
> + vmss.vcpu_regs[cpu] |= REGS_PRESENT_GS;
> + break;
> + case SEG_LDTR:
> + vmss.regs64[cpu]->ldtr = u.val32;
> + vmss.vcpu_regs[cpu] |= REGS_PRESENT_LDTR;
> + break;
> + case SEG_TR:
> + vmss.regs64[cpu]->tr = u.val32;
> + vmss.vcpu_regs[cpu] |= REGS_PRESENT_TR;
> + break;
> + default:
> + error(INFO, "Unknown VMSS Segment [%d][%d]\n", cpu, seg_index);
> + }
> }
> }
> -
> DEBUG_PARSE_PRINT((ofp, "\n"));
> }
> }
> }
>
> -
> if (vmss.memsize == 0) {
> char *vmem_filename, *p;
>
> @@ -902,36 +952,50 @@ vmware_vmss_get_cpu_reg(int cpu, int regno, const
> char *name, int size,
> if (cpu >= vmss.num_vcpus)
> return FALSE;
>
> - /* All supported registers are 8 bytes long. */
> - if (size != 8)
> - return FALSE;
> -
> -#define CASE(R,r) \
> +#define CASE_32(R,r) \
> case R##_REGNUM: \
> + if (size != 4) \
> + return FALSE; \
> if (!(vmss.vcpu_regs[cpu] & REGS_PRESENT_##R)) \
> return FALSE; \
> memcpy(value, &vmss.regs64[cpu]->r, size); \
> break
>
> +#define CASE_64(R,r) \
> + case R##_REGNUM: \
> + if (size != 8) \
> + return FALSE; \
> + if (!(vmss.vcpu_regs[cpu] & REGS_PRESENT_##R)) \
> + return FALSE; \
> + memcpy(value, &vmss.regs64[cpu]->r, size); \
> + break
>
> switch (regno) {
> - CASE (RAX, rax);
> - CASE (RBX, rbx);
> - CASE (RCX, rcx);
> - CASE (RDX, rdx);
> - CASE (RSI, rsi);
> - CASE (RDI, rdi);
> - CASE (RBP, rbp);
> - CASE (RSP, rsp);
> - CASE (R8, r8);
> - CASE (R9, r9);
> - CASE (R10, r10);
> - CASE (R11, r11);
> - CASE (R12, r12);
> - CASE (R13, r13);
> - CASE (R14, r14);
> - CASE (R15, r15);
> - CASE (RIP, rip);
> + CASE_64 (RAX, rax);
> + CASE_64 (RBX, rbx);
> + CASE_64 (RCX, rcx);
> + CASE_64 (RDX, rdx);
> + CASE_64 (RSI, rsi);
> + CASE_64 (RDI, rdi);
> + CASE_64 (RBP, rbp);
> + CASE_64 (RSP, rsp);
> + CASE_64 (R8, r8);
> + CASE_64 (R9, r9);
> + CASE_64 (R10, r10);
> + CASE_64 (R11, r11);
> + CASE_64 (R12, r12);
> + CASE_64 (R13, r13);
> + CASE_64 (R14, r14);
> + CASE_64 (R15, r15);
> + CASE_64 (RIP, rip);
> + CASE_32 (ES, es);
> + CASE_32 (CS, cs);
> + CASE_32 (SS, ss);
> + CASE_32 (DS, ds);
> + CASE_32 (FS, fs);
> + CASE_32 (GS, gs);
> + CASE_64 (FS_BASE, fs_base);
> + CASE_64 (GS_BASE, gs_base);
> case EFLAGS_REGNUM:
> if (!(vmss.vcpu_regs[cpu] & REGS_PRESENT_RFLAGS))
> return FALSE;
> diff --git a/vmware_vmss.h b/vmware_vmss.h
> index 01d9446..5bc0370 100644
> --- a/vmware_vmss.h
> +++ b/vmware_vmss.h
> @@ -110,41 +110,77 @@ struct vmssregs64 {
> uint64_t r13;
> uint64_t r14;
> uint64_t r15;
> + uint64_t es;
> + uint64_t cs;
> + uint64_t ss;
> + uint64_t ds;
> + uint64_t fs;
> + uint64_t gs;
> + uint64_t ldtr;
> + uint64_t tr;
> +
> /* manually managed */
> uint64_t idtr;
> uint64_t cr[VMW_CR64_SIZE / 8];
> uint64_t rip;
> uint64_t rflags;
> + uint64_t fs_base;
> + uint64_t gs_base;
> };
> typedef struct vmssregs64 vmssregs64;
>
> -#define REGS_PRESENT_RAX 1<<0
> -#define REGS_PRESENT_RCX 1<<1
> -#define REGS_PRESENT_RDX 1<<2
> -#define REGS_PRESENT_RBX 1<<3
> -#define REGS_PRESENT_RBP 1<<4
> -#define REGS_PRESENT_RSP 1<<5
> -#define REGS_PRESENT_RSI 1<<6
> -#define REGS_PRESENT_RDI 1<<7
> -#define REGS_PRESENT_R8 1<<8
> -#define REGS_PRESENT_R9 1<<9
> -#define REGS_PRESENT_R10 1<<10
> -#define REGS_PRESENT_R11 1<<11
> -#define REGS_PRESENT_R12 1<<12
> -#define REGS_PRESENT_R13 1<<13
> -#define REGS_PRESENT_R14 1<<14
> -#define REGS_PRESENT_R15 1<<15
> -#define REGS_PRESENT_IDTR 1<<16
> -#define REGS_PRESENT_CR0 1<<17
> -#define REGS_PRESENT_CR1 1<<18
> -#define REGS_PRESENT_CR2 1<<19
> -#define REGS_PRESENT_CR3 1<<20
> -#define REGS_PRESENT_CR4 1<<21
> -#define REGS_PRESENT_RIP 1<<22
> -#define REGS_PRESENT_RFLAGS 1<<23
> -#define REGS_PRESENT_GPREGS 65535
> -#define REGS_PRESENT_CRS 4063232
> -#define REGS_PRESENT_ALL 16777215
> +typedef enum SegmentName {
> + SEG_ES,
> + SEG_CS,
> + SEG_SS,
> + SEG_DS,
> + SEG_FS,
> + SEG_GS,
> + SEG_LDTR,
> + SEG_TR,
> + NUM_SEGS
> +} SegmentName;
> +
> +#define REGS_PRESENT_RAX 1L<<0
> +#define REGS_PRESENT_RCX 1L<<1
> +#define REGS_PRESENT_RDX 1L<<2
> +#define REGS_PRESENT_RBX 1L<<3
> +#define REGS_PRESENT_RBP 1L<<4
> +#define REGS_PRESENT_RSP 1L<<5
> +#define REGS_PRESENT_RSI 1L<<6
> +#define REGS_PRESENT_RDI 1L<<7
> +#define REGS_PRESENT_R8 1L<<8
> +#define REGS_PRESENT_R9 1L<<9
> +#define REGS_PRESENT_R10 1L<<10
> +#define REGS_PRESENT_R11 1L<<11
> +#define REGS_PRESENT_R12 1L<<12
> +#define REGS_PRESENT_R13 1L<<13
> +#define REGS_PRESENT_R14 1L<<14
> +#define REGS_PRESENT_R15 1L<<15
> +#define REGS_PRESENT_IDTR 1L<<16
> +#define REGS_PRESENT_CR0 1L<<17
> +#define REGS_PRESENT_CR1 1L<<18
> +#define REGS_PRESENT_CR2 1L<<19
> +#define REGS_PRESENT_CR3 1L<<20
> +#define REGS_PRESENT_CR4 1L<<21
> +#define REGS_PRESENT_RIP 1L<<22
> +#define REGS_PRESENT_RFLAGS 1L<<23
> +
> +#define REGS_PRESENT_ES 1L<<24
> +#define REGS_PRESENT_CS 1L<<25
> +#define REGS_PRESENT_SS 1L<<26
> +#define REGS_PRESENT_DS 1L<<27
> +#define REGS_PRESENT_FS 1L<<28
> +#define REGS_PRESENT_GS 1L<<29
> +#define REGS_PRESENT_LDTR 1L<<30
> +#define REGS_PRESENT_TR 1L<<31
> +#define REGS_PRESENT_FS_BASE 1L<<32
> +#define REGS_PRESENT_GS_BASE 1L<<33
> +
> +#define REGS_PRESENT_GPREGS 0x000000000000FFFF
> +#define REGS_PRESENT_CRS 0x00000000003E0000
> +#define REGS_PRESENT_SEG 0x00000003FF000000
> +#define REGS_PRESENT_ALL 0x00000003FFFFFFFF
>
> #define MAX_REGIONS 3
> struct vmssdata {
> @@ -159,7 +195,7 @@ struct vmssdata {
> uint64_t memsize;
> ulong phys_base;
> int separate_vmem;
> - uint32_t *vcpu_regs;
> + uint64_t *vcpu_regs;
> uint64_t num_vcpus;
> vmssregs64 **regs64;
> };
> --
> 2.40.4
>
2 weeks, 4 days
Re: [PATCH 2/2] vmware_guestdump: support segment registers
by lijiang
On Fri, Aug 29, 2025 at 2:16 PM <devel-request(a)lists.crash-utility.osci.io>
wrote:
> Date: Mon, 11 Aug 2025 05:56:23 +0000
> From: Ajay Kaher <ajay.kaher(a)broadcom.com>
> Subject: [Crash-utility] [PATCH 2/2] vmware_guestdump: support segment
> registers
> To: devel(a)lists.crash-utility.osci.io
> Cc: alexey.makhalov(a)broadcom.com,
> vamsi-krishna.brahmajosyula(a)broadcom.com, tapas.kundu(a)broadcom.com
> ,
> ajay.kaher(a)broadcom.com
> Message-ID: <20250811055623.179491-2-ajay.kaher(a)broadcom.com>
>
> adding support for segment registers for vmware guest dumps.
>
> Signed-off-by: Ajay Kaher <ajay.kaher(a)broadcom.com>
>
> ---
> vmware_guestdump.c | 17 +++++++++++++++--
> 1 file changed, 15 insertions(+), 2 deletions(-)
>
>
When I tried to apply the patch [2], it failed with the following error. Do
you encounter the same issue, Tao?
$ git am 2.patch
Applying: vmware_guestdump: support segment registers
error: patch failed: vmware_guestdump.c:107
error: vmware_guestdump.c: patch does not apply
Patch failed at 0001 vmware_guestdump: support segment registers
hint: Use 'git am --show-current-patch=diff' to see the failed patch
hint: When you have resolved this problem, run "git am --continue".
hint: If you prefer to skip this patch, run "git am --skip" instead.
hint: To restore the original branch and stop patching, run "git am
--abort".
hint: Disable this message with "git config set advice.mergeConflict false"
Thanks
Lianbo
> diff --git a/vmware_guestdump.c b/vmware_guestdump.c
> index d515df5..52a1623 100644
> --- a/vmware_guestdump.c
> +++ b/vmware_guestdump.c
> @@ -107,8 +107,14 @@ struct vcpu_state2 {
> uint64_t eflags;
> uint64_t rsp;
> uint64_t ss;
> + uint64_t fs_base;
> + uint64_t gs_base;
> + uint64_t ds;
> + uint64_t es;
> + uint64_t fs;
> + uint64_t gs;
> } regs64;
> - uint8_t reserved3[65];
> + uint8_t reserved3[17];
> } __attribute__((packed));
>
> /*
> @@ -378,7 +384,14 @@ vmware_guestdump_init(char *filename, FILE *ofp)
> vmss.regs64[i]->cr[4] = vs1.cr4;
> vmss.regs64[i]->rip = vs2.regs64.rip;
> vmss.regs64[i]->rflags = vs2.regs64.eflags;
> -
> + vmss.regs64[i]->es = vs2.regs64.es;
> + vmss.regs64[i]->cs = vs2.regs64.cs;
> + vmss.regs64[i]->ss = vs2.regs64.ss;
> + vmss.regs64[i]->ds = vs2.regs64.ds;
> + vmss.regs64[i]->fs = vs2.regs64.fs;
> + vmss.regs64[i]->gs = vs2.regs64.gs;
> + vmss.regs64[i]->fs_base = vs2.regs64.fs_base;
> + vmss.regs64[i]->gs_base = vs2.regs64.gs_base;
> vmss.vcpu_regs[i] = REGS_PRESENT_ALL;
> }
>
> --
> 2.40.4
>
2 weeks, 4 days
[PATCH v2 1/2] vmware_vmss: support segment registers
by Ajay Kaher
adding support for segment registers for vmware vmss dumps.
diff from v1:
- modified dump_registers_for_vmss_dump() to remove compiler warning.
Signed-off-by: Ajay Kaher <ajay.kaher(a)broadcom.com>
---
vmware_guestdump.c | 2 +-
vmware_vmss.c | 116 +++++++++++++++++++++++++++++++++++----------
vmware_vmss.h | 92 ++++++++++++++++++++++++-----------
3 files changed, 155 insertions(+), 55 deletions(-)
diff --git a/vmware_guestdump.c b/vmware_guestdump.c
index 1a6ef9b..dc10d42 100644
--- a/vmware_guestdump.c
+++ b/vmware_guestdump.c
@@ -360,7 +360,7 @@ vmware_guestdump_init(char *filename, FILE *ofp)
goto exit;
}
- vmss.vcpu_regs = malloc(vmss.num_vcpus * sizeof(uint32_t));
+ vmss.vcpu_regs = malloc(vmss.num_vcpus * sizeof(uint64_t));
vmss.regs64 = calloc(vmss.num_vcpus, sizeof(void *));
if (!vmss.vcpu_regs || !vmss.regs64) {
error(INFO, LOGPRX"Failed to allocate memory\n");
diff --git a/vmware_vmss.c b/vmware_vmss.c
index 8121ab6..3f9ad33 100644
--- a/vmware_vmss.c
+++ b/vmware_vmss.c
@@ -317,7 +317,7 @@ vmware_vmss_init(char *filename, FILE *ofp)
vmss.num_vcpus = u.val32;
vmss.regs64 = malloc(vmss.num_vcpus * sizeof(void *));
- vmss.vcpu_regs = malloc(vmss.num_vcpus * sizeof(uint32_t));
+ vmss.vcpu_regs = malloc(vmss.num_vcpus * sizeof(uint64_t));
for (k = 0; k < vmss.num_vcpus; k++) {
vmss.regs64[k] = malloc(sizeof(vmssregs64));
@@ -432,15 +432,65 @@ vmware_vmss_init(char *filename, FILE *ofp)
int cpu = idx[0];
vmss.regs64[cpu]->rflags |= u.val32;
vmss.vcpu_regs[cpu] |= REGS_PRESENT_RFLAGS;
+ } else if (strcmp(name, "S.base64") == 0) {
+ int cpu = idx[0];
+ int seg_index = idx[1];
+ switch (seg_index) {
+ case SEG_FS:
+ vmss.regs64[cpu]->fs_base = u.val64;
+ vmss.vcpu_regs[cpu] |= REGS_PRESENT_FS_BASE;
+ break;
+ case SEG_GS:
+ vmss.regs64[cpu]->gs_base = u.val64;
+ vmss.vcpu_regs[cpu] |= REGS_PRESENT_GS_BASE;
+ break;
+ }
+ } else if (strcmp(name, "S") == 0) {
+ int cpu = idx[0];
+ int seg_index = idx[1];
+ switch (seg_index) {
+ case SEG_ES:
+ vmss.regs64[cpu]->es = u.val32;
+ vmss.vcpu_regs[cpu] |= REGS_PRESENT_ES;
+ break;
+ case SEG_CS:
+ vmss.regs64[cpu]->cs = u.val32;
+ vmss.vcpu_regs[cpu] |= REGS_PRESENT_CS;
+ break;
+ case SEG_SS:
+ vmss.regs64[cpu]->ss = u.val32;
+ vmss.vcpu_regs[cpu] |= REGS_PRESENT_SS;
+ break;
+ case SEG_DS:
+ vmss.regs64[cpu]->ds = u.val32;
+ vmss.vcpu_regs[cpu] |= REGS_PRESENT_DS;
+ break;
+ case SEG_FS:
+ vmss.regs64[cpu]->fs = u.val32;
+ vmss.vcpu_regs[cpu] |= REGS_PRESENT_FS;
+ break;
+ case SEG_GS:
+ vmss.regs64[cpu]->gs = u.val32;
+ vmss.vcpu_regs[cpu] |= REGS_PRESENT_GS;
+ break;
+ case SEG_LDTR:
+ vmss.regs64[cpu]->ldtr = u.val32;
+ vmss.vcpu_regs[cpu] |= REGS_PRESENT_LDTR;
+ break;
+ case SEG_TR:
+ vmss.regs64[cpu]->tr = u.val32;
+ vmss.vcpu_regs[cpu] |= REGS_PRESENT_TR;
+ break;
+ default:
+ error(INFO, "Unknown VMSS Segment [%d][%d]\n", cpu, seg_index);
+ }
}
}
-
DEBUG_PARSE_PRINT((ofp, "\n"));
}
}
}
-
if (vmss.memsize == 0) {
char *vmem_filename, *p;
@@ -842,7 +892,7 @@ dump_registers_for_vmss_dump(void)
fprintf(fp, "CPU %d:\n", i);
if (vmss.vcpu_regs[i] != REGS_PRESENT_ALL) {
- fprintf(fp, "Missing registers for this CPU: 0x%x\n", vmss.vcpu_regs[i]);
+ fprintf(fp, "Missing registers for this CPU: 0x%lx\n", vmss.vcpu_regs[i]);
continue;
}
@@ -902,36 +952,50 @@ vmware_vmss_get_cpu_reg(int cpu, int regno, const char *name, int size,
if (cpu >= vmss.num_vcpus)
return FALSE;
- /* All supported registers are 8 bytes long. */
- if (size != 8)
- return FALSE;
-
-#define CASE(R,r) \
+#define CASE_32(R,r) \
case R##_REGNUM: \
+ if (size != 4) \
+ return FALSE; \
if (!(vmss.vcpu_regs[cpu] & REGS_PRESENT_##R)) \
return FALSE; \
memcpy(value, &vmss.regs64[cpu]->r, size); \
break
+#define CASE_64(R,r) \
+ case R##_REGNUM: \
+ if (size != 8) \
+ return FALSE; \
+ if (!(vmss.vcpu_regs[cpu] & REGS_PRESENT_##R)) \
+ return FALSE; \
+ memcpy(value, &vmss.regs64[cpu]->r, size); \
+ break
switch (regno) {
- CASE (RAX, rax);
- CASE (RBX, rbx);
- CASE (RCX, rcx);
- CASE (RDX, rdx);
- CASE (RSI, rsi);
- CASE (RDI, rdi);
- CASE (RBP, rbp);
- CASE (RSP, rsp);
- CASE (R8, r8);
- CASE (R9, r9);
- CASE (R10, r10);
- CASE (R11, r11);
- CASE (R12, r12);
- CASE (R13, r13);
- CASE (R14, r14);
- CASE (R15, r15);
- CASE (RIP, rip);
+ CASE_64 (RAX, rax);
+ CASE_64 (RBX, rbx);
+ CASE_64 (RCX, rcx);
+ CASE_64 (RDX, rdx);
+ CASE_64 (RSI, rsi);
+ CASE_64 (RDI, rdi);
+ CASE_64 (RBP, rbp);
+ CASE_64 (RSP, rsp);
+ CASE_64 (R8, r8);
+ CASE_64 (R9, r9);
+ CASE_64 (R10, r10);
+ CASE_64 (R11, r11);
+ CASE_64 (R12, r12);
+ CASE_64 (R13, r13);
+ CASE_64 (R14, r14);
+ CASE_64 (R15, r15);
+ CASE_64 (RIP, rip);
+ CASE_32 (ES, es);
+ CASE_32 (CS, cs);
+ CASE_32 (SS, ss);
+ CASE_32 (DS, ds);
+ CASE_32 (FS, fs);
+ CASE_32 (GS, gs);
+ CASE_64 (FS_BASE, fs_base);
+ CASE_64 (GS_BASE, gs_base);
case EFLAGS_REGNUM:
if (!(vmss.vcpu_regs[cpu] & REGS_PRESENT_RFLAGS))
return FALSE;
diff --git a/vmware_vmss.h b/vmware_vmss.h
index 01d9446..5bc0370 100644
--- a/vmware_vmss.h
+++ b/vmware_vmss.h
@@ -110,41 +110,77 @@ struct vmssregs64 {
uint64_t r13;
uint64_t r14;
uint64_t r15;
+ uint64_t es;
+ uint64_t cs;
+ uint64_t ss;
+ uint64_t ds;
+ uint64_t fs;
+ uint64_t gs;
+ uint64_t ldtr;
+ uint64_t tr;
+
/* manually managed */
uint64_t idtr;
uint64_t cr[VMW_CR64_SIZE / 8];
uint64_t rip;
uint64_t rflags;
+ uint64_t fs_base;
+ uint64_t gs_base;
};
typedef struct vmssregs64 vmssregs64;
-#define REGS_PRESENT_RAX 1<<0
-#define REGS_PRESENT_RCX 1<<1
-#define REGS_PRESENT_RDX 1<<2
-#define REGS_PRESENT_RBX 1<<3
-#define REGS_PRESENT_RBP 1<<4
-#define REGS_PRESENT_RSP 1<<5
-#define REGS_PRESENT_RSI 1<<6
-#define REGS_PRESENT_RDI 1<<7
-#define REGS_PRESENT_R8 1<<8
-#define REGS_PRESENT_R9 1<<9
-#define REGS_PRESENT_R10 1<<10
-#define REGS_PRESENT_R11 1<<11
-#define REGS_PRESENT_R12 1<<12
-#define REGS_PRESENT_R13 1<<13
-#define REGS_PRESENT_R14 1<<14
-#define REGS_PRESENT_R15 1<<15
-#define REGS_PRESENT_IDTR 1<<16
-#define REGS_PRESENT_CR0 1<<17
-#define REGS_PRESENT_CR1 1<<18
-#define REGS_PRESENT_CR2 1<<19
-#define REGS_PRESENT_CR3 1<<20
-#define REGS_PRESENT_CR4 1<<21
-#define REGS_PRESENT_RIP 1<<22
-#define REGS_PRESENT_RFLAGS 1<<23
-#define REGS_PRESENT_GPREGS 65535
-#define REGS_PRESENT_CRS 4063232
-#define REGS_PRESENT_ALL 16777215
+typedef enum SegmentName {
+ SEG_ES,
+ SEG_CS,
+ SEG_SS,
+ SEG_DS,
+ SEG_FS,
+ SEG_GS,
+ SEG_LDTR,
+ SEG_TR,
+ NUM_SEGS
+} SegmentName;
+
+#define REGS_PRESENT_RAX 1L<<0
+#define REGS_PRESENT_RCX 1L<<1
+#define REGS_PRESENT_RDX 1L<<2
+#define REGS_PRESENT_RBX 1L<<3
+#define REGS_PRESENT_RBP 1L<<4
+#define REGS_PRESENT_RSP 1L<<5
+#define REGS_PRESENT_RSI 1L<<6
+#define REGS_PRESENT_RDI 1L<<7
+#define REGS_PRESENT_R8 1L<<8
+#define REGS_PRESENT_R9 1L<<9
+#define REGS_PRESENT_R10 1L<<10
+#define REGS_PRESENT_R11 1L<<11
+#define REGS_PRESENT_R12 1L<<12
+#define REGS_PRESENT_R13 1L<<13
+#define REGS_PRESENT_R14 1L<<14
+#define REGS_PRESENT_R15 1L<<15
+#define REGS_PRESENT_IDTR 1L<<16
+#define REGS_PRESENT_CR0 1L<<17
+#define REGS_PRESENT_CR1 1L<<18
+#define REGS_PRESENT_CR2 1L<<19
+#define REGS_PRESENT_CR3 1L<<20
+#define REGS_PRESENT_CR4 1L<<21
+#define REGS_PRESENT_RIP 1L<<22
+#define REGS_PRESENT_RFLAGS 1L<<23
+
+#define REGS_PRESENT_ES 1L<<24
+#define REGS_PRESENT_CS 1L<<25
+#define REGS_PRESENT_SS 1L<<26
+#define REGS_PRESENT_DS 1L<<27
+#define REGS_PRESENT_FS 1L<<28
+#define REGS_PRESENT_GS 1L<<29
+#define REGS_PRESENT_LDTR 1L<<30
+#define REGS_PRESENT_TR 1L<<31
+#define REGS_PRESENT_FS_BASE 1L<<32
+#define REGS_PRESENT_GS_BASE 1L<<33
+
+#define REGS_PRESENT_GPREGS 0x000000000000FFFF
+#define REGS_PRESENT_CRS 0x00000000003E0000
+#define REGS_PRESENT_SEG 0x00000003FF000000
+#define REGS_PRESENT_ALL 0x00000003FFFFFFFF
#define MAX_REGIONS 3
struct vmssdata {
@@ -159,7 +195,7 @@ struct vmssdata {
uint64_t memsize;
ulong phys_base;
int separate_vmem;
- uint32_t *vcpu_regs;
+ uint64_t *vcpu_regs;
uint64_t num_vcpus;
vmssregs64 **regs64;
};
--
2.40.4
2 weeks, 6 days
[PATCH 1/2] vmware_vmss: support segment registers
by Ajay Kaher
adding support for segment registers for vmware vmss dumps.
Signed-off-by: Ajay Kaher <ajay.kaher(a)broadcom.com>
---
vmware_guestdump.c | 2 +-
vmware_vmss.c | 114 +++++++++++++++++++++++++++++++++++----------
vmware_vmss.h | 92 +++++++++++++++++++++++++-----------
3 files changed, 154 insertions(+), 54 deletions(-)
diff --git a/vmware_guestdump.c b/vmware_guestdump.c
index 78f37fb..d515df5 100644
--- a/vmware_guestdump.c
+++ b/vmware_guestdump.c
@@ -320,7 +320,7 @@ vmware_guestdump_init(char *filename, FILE *ofp)
goto exit;
}
- vmss.vcpu_regs = malloc(vmss.num_vcpus * sizeof(uint32_t));
+ vmss.vcpu_regs = malloc(vmss.num_vcpus * sizeof(uint64_t));
vmss.regs64 = calloc(vmss.num_vcpus, sizeof(void *));
if (!vmss.vcpu_regs || !vmss.regs64) {
error(INFO, LOGPRX"Failed to allocate memory\n");
diff --git a/vmware_vmss.c b/vmware_vmss.c
index 8121ab6..1a71d02 100644
--- a/vmware_vmss.c
+++ b/vmware_vmss.c
@@ -317,7 +317,7 @@ vmware_vmss_init(char *filename, FILE *ofp)
vmss.num_vcpus = u.val32;
vmss.regs64 = malloc(vmss.num_vcpus * sizeof(void *));
- vmss.vcpu_regs = malloc(vmss.num_vcpus * sizeof(uint32_t));
+ vmss.vcpu_regs = malloc(vmss.num_vcpus * sizeof(uint64_t));
for (k = 0; k < vmss.num_vcpus; k++) {
vmss.regs64[k] = malloc(sizeof(vmssregs64));
@@ -432,15 +432,65 @@ vmware_vmss_init(char *filename, FILE *ofp)
int cpu = idx[0];
vmss.regs64[cpu]->rflags |= u.val32;
vmss.vcpu_regs[cpu] |= REGS_PRESENT_RFLAGS;
+ } else if (strcmp(name, "S.base64") == 0) {
+ int cpu = idx[0];
+ int seg_index = idx[1];
+ switch (seg_index) {
+ case SEG_FS:
+ vmss.regs64[cpu]->fs_base = u.val64;
+ vmss.vcpu_regs[cpu] |= REGS_PRESENT_FS_BASE;
+ break;
+ case SEG_GS:
+ vmss.regs64[cpu]->gs_base = u.val64;
+ vmss.vcpu_regs[cpu] |= REGS_PRESENT_GS_BASE;
+ break;
+ }
+ } else if (strcmp(name, "S") == 0) {
+ int cpu = idx[0];
+ int seg_index = idx[1];
+ switch (seg_index) {
+ case SEG_ES:
+ vmss.regs64[cpu]->es = u.val32;
+ vmss.vcpu_regs[cpu] |= REGS_PRESENT_ES;
+ break;
+ case SEG_CS:
+ vmss.regs64[cpu]->cs = u.val32;
+ vmss.vcpu_regs[cpu] |= REGS_PRESENT_CS;
+ break;
+ case SEG_SS:
+ vmss.regs64[cpu]->ss = u.val32;
+ vmss.vcpu_regs[cpu] |= REGS_PRESENT_SS;
+ break;
+ case SEG_DS:
+ vmss.regs64[cpu]->ds = u.val32;
+ vmss.vcpu_regs[cpu] |= REGS_PRESENT_DS;
+ break;
+ case SEG_FS:
+ vmss.regs64[cpu]->fs = u.val32;
+ vmss.vcpu_regs[cpu] |= REGS_PRESENT_FS;
+ break;
+ case SEG_GS:
+ vmss.regs64[cpu]->gs = u.val32;
+ vmss.vcpu_regs[cpu] |= REGS_PRESENT_GS;
+ break;
+ case SEG_LDTR:
+ vmss.regs64[cpu]->ldtr = u.val32;
+ vmss.vcpu_regs[cpu] |= REGS_PRESENT_LDTR;
+ break;
+ case SEG_TR:
+ vmss.regs64[cpu]->tr = u.val32;
+ vmss.vcpu_regs[cpu] |= REGS_PRESENT_TR;
+ break;
+ default:
+ error(INFO, "Unknown VMSS Segment [%d][%d]\n", cpu, seg_index);
+ }
}
}
-
DEBUG_PARSE_PRINT((ofp, "\n"));
}
}
}
-
if (vmss.memsize == 0) {
char *vmem_filename, *p;
@@ -902,36 +952,50 @@ vmware_vmss_get_cpu_reg(int cpu, int regno, const char *name, int size,
if (cpu >= vmss.num_vcpus)
return FALSE;
- /* All supported registers are 8 bytes long. */
- if (size != 8)
- return FALSE;
-
-#define CASE(R,r) \
+#define CASE_32(R,r) \
case R##_REGNUM: \
+ if (size != 4) \
+ return FALSE; \
if (!(vmss.vcpu_regs[cpu] & REGS_PRESENT_##R)) \
return FALSE; \
memcpy(value, &vmss.regs64[cpu]->r, size); \
break
+#define CASE_64(R,r) \
+ case R##_REGNUM: \
+ if (size != 8) \
+ return FALSE; \
+ if (!(vmss.vcpu_regs[cpu] & REGS_PRESENT_##R)) \
+ return FALSE; \
+ memcpy(value, &vmss.regs64[cpu]->r, size); \
+ break
switch (regno) {
- CASE (RAX, rax);
- CASE (RBX, rbx);
- CASE (RCX, rcx);
- CASE (RDX, rdx);
- CASE (RSI, rsi);
- CASE (RDI, rdi);
- CASE (RBP, rbp);
- CASE (RSP, rsp);
- CASE (R8, r8);
- CASE (R9, r9);
- CASE (R10, r10);
- CASE (R11, r11);
- CASE (R12, r12);
- CASE (R13, r13);
- CASE (R14, r14);
- CASE (R15, r15);
- CASE (RIP, rip);
+ CASE_64 (RAX, rax);
+ CASE_64 (RBX, rbx);
+ CASE_64 (RCX, rcx);
+ CASE_64 (RDX, rdx);
+ CASE_64 (RSI, rsi);
+ CASE_64 (RDI, rdi);
+ CASE_64 (RBP, rbp);
+ CASE_64 (RSP, rsp);
+ CASE_64 (R8, r8);
+ CASE_64 (R9, r9);
+ CASE_64 (R10, r10);
+ CASE_64 (R11, r11);
+ CASE_64 (R12, r12);
+ CASE_64 (R13, r13);
+ CASE_64 (R14, r14);
+ CASE_64 (R15, r15);
+ CASE_64 (RIP, rip);
+ CASE_32 (ES, es);
+ CASE_32 (CS, cs);
+ CASE_32 (SS, ss);
+ CASE_32 (DS, ds);
+ CASE_32 (FS, fs);
+ CASE_32 (GS, gs);
+ CASE_64 (FS_BASE, fs_base);
+ CASE_64 (GS_BASE, gs_base);
case EFLAGS_REGNUM:
if (!(vmss.vcpu_regs[cpu] & REGS_PRESENT_RFLAGS))
return FALSE;
diff --git a/vmware_vmss.h b/vmware_vmss.h
index 01d9446..5bc0370 100644
--- a/vmware_vmss.h
+++ b/vmware_vmss.h
@@ -110,41 +110,77 @@ struct vmssregs64 {
uint64_t r13;
uint64_t r14;
uint64_t r15;
+ uint64_t es;
+ uint64_t cs;
+ uint64_t ss;
+ uint64_t ds;
+ uint64_t fs;
+ uint64_t gs;
+ uint64_t ldtr;
+ uint64_t tr;
+
/* manually managed */
uint64_t idtr;
uint64_t cr[VMW_CR64_SIZE / 8];
uint64_t rip;
uint64_t rflags;
+ uint64_t fs_base;
+ uint64_t gs_base;
};
typedef struct vmssregs64 vmssregs64;
-#define REGS_PRESENT_RAX 1<<0
-#define REGS_PRESENT_RCX 1<<1
-#define REGS_PRESENT_RDX 1<<2
-#define REGS_PRESENT_RBX 1<<3
-#define REGS_PRESENT_RBP 1<<4
-#define REGS_PRESENT_RSP 1<<5
-#define REGS_PRESENT_RSI 1<<6
-#define REGS_PRESENT_RDI 1<<7
-#define REGS_PRESENT_R8 1<<8
-#define REGS_PRESENT_R9 1<<9
-#define REGS_PRESENT_R10 1<<10
-#define REGS_PRESENT_R11 1<<11
-#define REGS_PRESENT_R12 1<<12
-#define REGS_PRESENT_R13 1<<13
-#define REGS_PRESENT_R14 1<<14
-#define REGS_PRESENT_R15 1<<15
-#define REGS_PRESENT_IDTR 1<<16
-#define REGS_PRESENT_CR0 1<<17
-#define REGS_PRESENT_CR1 1<<18
-#define REGS_PRESENT_CR2 1<<19
-#define REGS_PRESENT_CR3 1<<20
-#define REGS_PRESENT_CR4 1<<21
-#define REGS_PRESENT_RIP 1<<22
-#define REGS_PRESENT_RFLAGS 1<<23
-#define REGS_PRESENT_GPREGS 65535
-#define REGS_PRESENT_CRS 4063232
-#define REGS_PRESENT_ALL 16777215
+typedef enum SegmentName {
+ SEG_ES,
+ SEG_CS,
+ SEG_SS,
+ SEG_DS,
+ SEG_FS,
+ SEG_GS,
+ SEG_LDTR,
+ SEG_TR,
+ NUM_SEGS
+} SegmentName;
+
+#define REGS_PRESENT_RAX 1L<<0
+#define REGS_PRESENT_RCX 1L<<1
+#define REGS_PRESENT_RDX 1L<<2
+#define REGS_PRESENT_RBX 1L<<3
+#define REGS_PRESENT_RBP 1L<<4
+#define REGS_PRESENT_RSP 1L<<5
+#define REGS_PRESENT_RSI 1L<<6
+#define REGS_PRESENT_RDI 1L<<7
+#define REGS_PRESENT_R8 1L<<8
+#define REGS_PRESENT_R9 1L<<9
+#define REGS_PRESENT_R10 1L<<10
+#define REGS_PRESENT_R11 1L<<11
+#define REGS_PRESENT_R12 1L<<12
+#define REGS_PRESENT_R13 1L<<13
+#define REGS_PRESENT_R14 1L<<14
+#define REGS_PRESENT_R15 1L<<15
+#define REGS_PRESENT_IDTR 1L<<16
+#define REGS_PRESENT_CR0 1L<<17
+#define REGS_PRESENT_CR1 1L<<18
+#define REGS_PRESENT_CR2 1L<<19
+#define REGS_PRESENT_CR3 1L<<20
+#define REGS_PRESENT_CR4 1L<<21
+#define REGS_PRESENT_RIP 1L<<22
+#define REGS_PRESENT_RFLAGS 1L<<23
+
+#define REGS_PRESENT_ES 1L<<24
+#define REGS_PRESENT_CS 1L<<25
+#define REGS_PRESENT_SS 1L<<26
+#define REGS_PRESENT_DS 1L<<27
+#define REGS_PRESENT_FS 1L<<28
+#define REGS_PRESENT_GS 1L<<29
+#define REGS_PRESENT_LDTR 1L<<30
+#define REGS_PRESENT_TR 1L<<31
+#define REGS_PRESENT_FS_BASE 1L<<32
+#define REGS_PRESENT_GS_BASE 1L<<33
+
+#define REGS_PRESENT_GPREGS 0x000000000000FFFF
+#define REGS_PRESENT_CRS 0x00000000003E0000
+#define REGS_PRESENT_SEG 0x00000003FF000000
+#define REGS_PRESENT_ALL 0x00000003FFFFFFFF
#define MAX_REGIONS 3
struct vmssdata {
@@ -159,7 +195,7 @@ struct vmssdata {
uint64_t memsize;
ulong phys_base;
int separate_vmem;
- uint32_t *vcpu_regs;
+ uint64_t *vcpu_regs;
uint64_t num_vcpus;
vmssregs64 **regs64;
};
--
2.40.4
3 weeks