----- Original Message -----
VMSS dump files contain the state of each vCPU at the time the VM was
suspended. This change enables 'crash' to read the relevant registers from
each vCPU state and display them in 'bt', and it adds additional output to
the 'help -D', 'help -r' and 'help -p' commands.

This is also the first step towards implementing KASLR offset calculation
for VMSS dump files.
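
For reference, the per-vCPU state handling boils down to an array of
heap-allocated register structures indexed by vCPU number. Below is a
minimal, self-contained sketch (not part of the patch; the register values
are placeholders) that reuses the vmssregs64 layout introduced in the
vmware_vmss.h hunk further down, to show how a 'help -r'-style listing
falls out of that bookkeeping:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Same layout as the vmssregs64 struct added to vmware_vmss.h. */
struct vmssregs64 {
	uint64_t rax, rcx, rdx, rbx, rbp, rsp, rsi, rdi;
	uint64_t r8, r9, r10, r11, r12, r13, r14, r15;
	uint64_t idtr;
	uint64_t cr[9];		/* VMW_CR64_SIZE / 8 */
	uint64_t rip;
	uint64_t rflags;
};

int main(void)
{
	uint64_t num_vcpus = 2;		/* would come from "cpu:numVCPUs" */
	struct vmssregs64 **regs64;
	uint64_t i;

	/* One zeroed register block per vCPU, as vmware_vmss_init() does. */
	regs64 = malloc(num_vcpus * sizeof(void *));
	for (i = 0; i < num_vcpus; i++) {
		regs64[i] = calloc(1, sizeof(struct vmssregs64));
		regs64[i]->rip = 0xffffffff81000000ULL;	/* placeholder */
		regs64[i]->rsp = 0xffff880000010000ULL;	/* placeholder */
	}

	/* Print one block per vCPU, roughly like dump_registers_for_vmss_dump(). */
	for (i = 0; i < num_vcpus; i++)
		printf("CPU %llu:\n  RIP: %016llx  RSP: %016llx\n",
		       (unsigned long long)i,
		       (unsigned long long)regs64[i]->rip,
		       (unsigned long long)regs64[i]->rsp);

	for (i = 0; i < num_vcpus; i++)
		free(regs64[i]);
	free(regs64);
	return 0;
}
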
Sergio,
Nicely done -- queued for crash-7.2.2:
https://github.com/crash-utility/crash/commit/907196e93dc94df104df21ba51a...
Thanks,
Dave
---
 defs.h        |   5 +
 help.c        |   3 +
 kernel.c      |   2 +
 main.c        |   3 +
 memory.c      |   2 +
 vmware_vmss.c | 391 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 vmware_vmss.h |  31 +++++
 x86_64.c      |  13 +-
 8 files changed, 440 insertions(+), 10 deletions(-)
diff --git a/defs.h b/defs.h
index 7998ebf..44efc8a 100644
--- a/defs.h
+++ b/defs.h
@@ -283,6 +283,7 @@ struct number_option {
#define LKCD_KERNTYPES() (pc->flags & KERNTYPES)
#define KVMDUMP_DUMPFILE() (pc->flags & KVMDUMP)
#define SADUMP_DUMPFILE() (pc->flags & SADUMP)
+#define VMSS_DUMPFILE() (pc->flags & VMWARE_VMSS)
#define NETDUMP_LOCAL (0x1) /* netdump_data flags */
#define NETDUMP_REMOTE (0x2)
@@ -6388,6 +6389,10 @@ int vmware_vmss_init(char *filename, FILE *ofp);
uint vmware_vmss_page_size(void);
int read_vmware_vmss(int, void *, int, ulong, physaddr_t);
int write_vmware_vmss(int, void *, int, ulong, physaddr_t);
+void vmware_vmss_display_regs(int, FILE *);
+void get_vmware_vmss_regs(struct bt_info *, ulong *, ulong *);
+int vmware_vmss_memory_dump(FILE *);
+void dump_registers_for_vmss_dump(void);
/*
* gnu_binutils.c
diff --git a/help.c b/help.c
index 5f6d9be..06b7961 100644
--- a/help.c
+++ b/help.c
@@ -710,6 +710,9 @@ dump_registers(void)
} else if (NETDUMP_DUMPFILE() || KDUMP_DUMPFILE()) {
dump_registers_for_elf_dumpfiles();
return;
+ } else if (VMSS_DUMPFILE()) {
+ dump_registers_for_vmss_dump();
+ return;
}
error(FATAL, "-r option not supported on %s\n",
diff --git a/kernel.c b/kernel.c
index 1bf6251..7642217 100644
--- a/kernel.c
+++ b/kernel.c
@@ -2969,6 +2969,8 @@ back_trace(struct bt_info *bt)
get_xendump_regs(bt, &eip, &esp);
else if (SADUMP_DUMPFILE())
get_sadump_regs(bt, &eip, &esp);
+ else if (VMSS_DUMPFILE())
+ get_vmware_vmss_regs(bt, &eip, &esp);
else if (REMOTE_PAUSED()) {
if (!is_task_active(bt->task) || !get_remote_regs(bt, &eip, &esp))
machdep->get_stack_frame(bt, &eip, &esp);
diff --git a/main.c b/main.c
index 2aae0c6..15834cb 100644
--- a/main.c
+++ b/main.c
@@ -1361,6 +1361,9 @@ dump_program_context(void)
if (pc->flags & DISKDUMP)
sprintf(&buf[strlen(buf)],
"%sDISKDUMP", others++ ? "|" :
"");
+ if (pc->flags & VMWARE_VMSS)
+ sprintf(&buf[strlen(buf)],
+ "%sVMWARE_VMSS", others++ ? "|" :
"");
if (pc->flags & SYSMAP)
sprintf(&buf[strlen(buf)],
"%sSYSMAP", others++ ? "|" : "");
diff --git a/memory.c b/memory.c
index 0669276..9f752c2 100644
--- a/memory.c
+++ b/memory.c
@@ -16909,6 +16909,8 @@ dumpfile_memory(int cmd)
retval = kcore_memory_dump(fp);
else if (pc->flags & SADUMP)
retval = sadump_memory_dump(fp);
+ else if (pc->flags & VMWARE_VMSS)
+ retval = vmware_vmss_memory_dump(fp);
break;
case DUMPFILE_ENVIRONMENT:
diff --git a/vmware_vmss.c b/vmware_vmss.c
index 667676a..3bf0325 100644
--- a/vmware_vmss.c
+++ b/vmware_vmss.c
@@ -25,6 +25,8 @@
#define VMW_PAGE_SIZE (4096)
#define VMW_PAGE_SHIFT (12)
+#define MAX_BLOCK_DUMP (128)
+
static vmssdata vmss = { 0 };
int
@@ -128,7 +130,8 @@ vmware_vmss_init(char *filename, FILE *ofp)
DEBUG_PARSE_PRINT((ofp, LOGPRX"Group: %-20s offset=%#llx size=0x%#llx.\n",
grps[i].name, (ulonglong)grps[i].position, (ulonglong)grps[i].size));
- if (strcmp(grps[i].name, "memory") != 0) {
+ if (strcmp(grps[i].name, "memory") != 0 &&
+ (strcmp(grps[i].name, "cpu") != 0 || !machine_type("X86_64"))) {
continue;
}
@@ -198,12 +201,6 @@ vmware_vmss_init(char *filename, FILE *ofp)
}
blockpos += padsize;
- if (fseek(fp, blockpos + nbytes, SEEK_SET) == -1) {
- error(INFO, LOGPRX"Cannot seek past block at %#llx.\n",
- (ulonglong)(blockpos + nbytes));
- break;
- }
-
if (strcmp(name, "Memory") == 0) {
/* The things that we really care about...*/
vmss.memoffset = blockpos;
@@ -217,11 +214,61 @@ vmware_vmss_init(char *filename, FILE *ofp)
result = FALSE;
goto exit;
}
+
+ if (fseek(fp, blockpos + nbytes, SEEK_SET) == -1) {
+ error(INFO, LOGPRX"Cannot seek past block at %#llx.\n",
+ (ulonglong)(blockpos + nbytes));
+ break;
+ }
+ } else if (strcmp(name, "gpregs") == 0 &&
+ nbytes == VMW_GPREGS_SIZE &&
+ idx[0] < vmss.num_vcpus) {
+ int cpu = idx[0];
+
+ if (fread(vmss.regs64[cpu], VMW_GPREGS_SIZE, 1, fp) != 1) {
+ error(INFO, LOGPRX"Failed to read '%s': [Error %d] %s\n",
+ filename, errno, strerror(errno));
+ break;
+ }
+ } else if (strcmp(name, "CR64") == 0 &&
+ nbytes == VMW_CR64_SIZE &&
+ idx[0] < vmss.num_vcpus) {
+ int cpu = idx[0];
+
+ if (fread(&vmss.regs64[cpu]->cr[0], VMW_CR64_SIZE, 1, fp) != 1) {
+ error(INFO, LOGPRX"Failed to read '%s': [Error %d] %s\n",
+ filename, errno, strerror(errno));
+ break;
+ }
+ } else if (strcmp(name, "IDTR") == 0 &&
+ nbytes == VMW_IDTR_SIZE &&
+ idx[0] < vmss.num_vcpus) {
+ int cpu = idx[0];
+ uint64_t idtr;
+
+ if (fseek(fp, blockpos + 2, SEEK_SET) == -1) {
+ error(INFO, LOGPRX"Cannot seek past block at %#llx.\n",
+ (ulonglong)(blockpos + 2));
+ break;
+ }
+ if (fread(&idtr, sizeof(idtr), 1, fp) != 1) {
+ error(INFO, LOGPRX"Failed to read '%s': [Error %d] %s\n",
+ filename, errno, strerror(errno));
+ break;
+ }
+ vmss.regs64[cpu]->idtr = idtr;
+ } else {
+ if (fseek(fp, blockpos + nbytes, SEEK_SET) == -1) {
+ error(INFO, LOGPRX"Cannot seek past block at %#llx.\n",
+ (ulonglong)(blockpos + nbytes));
+ break;
+ }
}
} else {
union {
uint8_t val[TAG_VALSIZE_MASK];
uint32_t val32;
+ uint64_t val64;
} u;
unsigned k;
unsigned valsize = TAG_VALSIZE(tag);
@@ -253,6 +300,30 @@ vmware_vmss_init(char *filename, FILE *ofp)
if (strcmp(name, "align_mask") == 0) {
vmss.alignmask = u.val32;
}
+ } else if (strcmp(grps[i].name, "cpu") == 0) {
+ if (strcmp(name, "cpu:numVCPUs") == 0) {
+ if (vmss.regs64 != NULL) {
+ error(INFO, LOGPRX"Duplicated cpu:numVCPUs entry.\n");
+ break;
+ }
+
+ vmss.num_vcpus = u.val32;
+ vmss.regs64 = malloc(vmss.num_vcpus * sizeof(void *));
+
+ for (k = 0; k < vmss.num_vcpus; k++) {
+ vmss.regs64[k] = malloc(sizeof(vmssregs64));
+ memset(vmss.regs64[k], 0, sizeof(vmssregs64));
+ }
+ } else if (strcmp(name, "rip") == 0) {
+ int cpu = idx[0];
+ vmss.regs64[cpu]->rip = u.val64;
+ } else if (strcmp(name, "eflags") == 0) {
+ int cpu = idx[0];
+ vmss.regs64[cpu]->rflags |= u.val32;
+ } else if (strcmp(name, "EFLAGS") == 0) {
+ int cpu = idx[0];
+ vmss.regs64[cpu]->rflags |= u.val32;
+ }
}
DEBUG_PARSE_PRINT((ofp, "\n"));
@@ -350,3 +421,309 @@ write_vmware_vmss(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
return SEEK_ERROR;
}
+void
+vmware_vmss_display_regs(int cpu, FILE *ofp)
+{
+ if (cpu >= vmss.num_vcpus)
+ return;
+
+ if (machine_type("X86_64")) {
+ fprintf(ofp,
+ " RIP: %016llx RSP: %016llx RFLAGS: %08llx\n"
+ " RAX: %016llx RBX: %016llx RCX: %016llx\n"
+ " RDX: %016llx RSI: %016llx RDI: %016llx\n"
+ " RBP: %016llx R8: %016llx R9: %016llx\n"
+ " R10: %016llx R11: %016llx R12: %016llx\n"
+ " R13: %016llx R14: %016llx R15: %016llx\n",
+ (ulonglong)vmss.regs64[cpu]->rip,
+ (ulonglong)vmss.regs64[cpu]->rsp,
+ (ulonglong)vmss.regs64[cpu]->rflags,
+ (ulonglong)vmss.regs64[cpu]->rax,
+ (ulonglong)vmss.regs64[cpu]->rbx,
+ (ulonglong)vmss.regs64[cpu]->rcx,
+ (ulonglong)vmss.regs64[cpu]->rdx,
+ (ulonglong)vmss.regs64[cpu]->rsi,
+ (ulonglong)vmss.regs64[cpu]->rdi,
+ (ulonglong)vmss.regs64[cpu]->rbp,
+ (ulonglong)vmss.regs64[cpu]->r8,
+ (ulonglong)vmss.regs64[cpu]->r9,
+ (ulonglong)vmss.regs64[cpu]->r10,
+ (ulonglong)vmss.regs64[cpu]->r11,
+ (ulonglong)vmss.regs64[cpu]->r12,
+ (ulonglong)vmss.regs64[cpu]->r13,
+ (ulonglong)vmss.regs64[cpu]->r14,
+ (ulonglong)vmss.regs64[cpu]->r15
+ );
+ }
+}
+
+void
+get_vmware_vmss_regs(struct bt_info *bt, ulong *ipp, ulong *spp)
+{
+ ulong ip, sp;
+
+ ip = sp = 0;
+
+ if (!is_task_active(bt->task)) {
+ machdep->get_stack_frame(bt, ipp, spp);
+ return;
+ }
+
+ bt->flags |= BT_DUMPFILE_SEARCH;
+ if (machine_type("X86_64"))
+ machdep->get_stack_frame(bt, ipp, spp);
+ else if (machine_type("X86"))
+ get_netdump_regs_x86(bt, ipp, spp);
+ if (bt->flags & BT_DUMPFILE_SEARCH)
+ return;
+
+ if ((vmss.regs64 == NULL) ||
+ (bt->tc->processor >= vmss.num_vcpus))
+ return;
+
+ ip = (ulong)vmss.regs64[bt->tc->processor]->rip;
+ sp = (ulong)vmss.regs64[bt->tc->processor]->rsp;
+ if (is_kernel_text(ip) &&
+ (((sp >= GET_STACKBASE(bt->task)) &&
+ (sp < GET_STACKTOP(bt->task))) ||
+ in_alternate_stack(bt->tc->processor, sp))) {
+ *ipp = ip;
+ *spp = sp;
+ bt->flags |= BT_KERNEL_SPACE;
+ return;
+ }
+
+ if (!is_kernel_text(ip) &&
+ in_user_stack(bt->tc->task, sp))
+ bt->flags |= BT_USER_SPACE;
+}
+
+int
+vmware_vmss_memory_dump(FILE *fp)
+{
+ cptdumpheader hdr;
+ cptgroupdesc *grps = NULL;
+ unsigned grpsize;
+ unsigned i;
+ int result = TRUE;
+
+ if (fseek(vmss.dfp, 0, SEEK_SET) != 0) {
+ fprintf(fp, "Error seeking to position 0.\n");
+ return FALSE;
+ }
+
+ if (fread(&hdr, sizeof(cptdumpheader), 1, vmss.dfp) != 1) {
+ fprintf(fp, "Failed to read vmss file: [Error %d] %s\n",
+ errno, strerror(errno));
+ return FALSE;
+ }
+
+ fprintf(fp, "vmware_vmss:\n");
+ fprintf(fp, " Header: id=%x version=%d numgroups=%d\n",
+ hdr.id, hdr.version, hdr.numgroups);
+
+ vmss.cpt64bit = (hdr.id != CPTDUMP_OLD_MAGIC_NUMBER);
+ fprintf(fp, " Checkpoint is %d-bit\n", vmss.cpt64bit ? 64 : 32);
+
+ grpsize = hdr.numgroups * sizeof (cptgroupdesc);
+ grps = (cptgroupdesc *) malloc(grpsize * sizeof(cptgroupdesc));
+ if (grps == NULL) {
+ fprintf(fp, "Failed to allocate memory! [Error %d] %s\n",
+ errno, strerror(errno));
+ return FALSE;
+ }
+
+ if (fread(grps, sizeof(cptgroupdesc), grpsize, vmss.dfp) != grpsize) {
+ fprintf(fp, "Failed to read vmss file: [Error %d] %s\n",
+ errno, strerror(errno));
+ result = FALSE;
+ goto exit;
+ }
+
+ for (i = 0; i < hdr.numgroups; i++) {
+ if (fseek(vmss.dfp, grps[i].position, SEEK_SET) == -1) {
+ fprintf(fp, "Bad offset of VMSS Group['%s'] in vmss file at
%#llx.\n",
+ grps[i].name, (ulonglong)grps[i].position);
+ continue;
+ }
+ fprintf(fp, "\nGroup: %s offset=%#llx size=0x%#llx\n",
+ grps[i].name, (ulonglong)grps[i].position, (ulonglong)grps[i].size);
+
+ for (;;) {
+ uint16_t tag;
+ char name[TAG_NAMELEN_MASK + 1];
+ unsigned nameLen;
+ unsigned nindx;
+ int idx[3];
+ unsigned j;
+ int nextgroup = FALSE;
+
+ if (fread(&tag, sizeof(tag), 1, vmss.dfp) != 1) {
+ fprintf(fp, "Cannot read tag.\n");
+ break;
+ }
+ if (tag == NULL_TAG)
+ break;
+
+ nameLen = TAG_NAMELEN(tag);
+ if (fread(name, nameLen, 1, vmss.dfp) != 1) {
+ fprintf(fp, "Cannot read tag name.\n");
+ break;
+ }
+ name[nameLen] = 0;
+ fprintf(fp, " Item %20s", name);
+
+ nindx = TAG_NINDX(tag);
+ if (nindx > 3) {
+ fprintf(fp, "Too many indexes %d (> 3).\n", nindx);
+ break;
+ }
+ idx[0] = idx[1] = idx[2] = NO_INDEX;
+ for (j= 0; j < 3; j++) {
+ if (j < nindx) {
+ if (fread(&idx[j], sizeof(idx[0]), 1, vmss.dfp) != 1) {
+ fprintf(fp, "Cannot read index.\n");
+ nextgroup = TRUE;
+ break;
+ }
+ fprintf(fp, "[%d]", idx[j]);
+ } else
+ fprintf(fp, " ");
+ }
+ if (nextgroup)
+ break;
+
+ if (IS_BLOCK_TAG(tag)) {
+ uint64_t nbytes;
+ uint64_t blockpos;
+ uint64_t nbytesinmem;
+ int compressed = IS_BLOCK_COMPRESSED_TAG(tag);
+ uint16_t padsize;
+ unsigned k, l;
+ char byte;
+
+ if (fread(&nbytes, sizeof(nbytes), 1, vmss.dfp) != 1) {
+ fprintf(fp, "Cannot read block size.\n");
+ break;
+ }
+ if (fread(&nbytesinmem, sizeof(nbytesinmem), 1, vmss.dfp) != 1) {
+ fprintf(fp, "Cannot read block memory size.\n");
+ break;
+ }
+ if (fread(&padsize, sizeof(padsize), 1, vmss.dfp) != 1) {
+ fprintf(fp, "Cannot read block padding size.\n");
+ break;
+ }
+ if ((blockpos = ftell(vmss.dfp)) == -1) {
+ fprintf(fp, "Cannot determine location within VMSS file.\n");
+ break;
+ }
+ blockpos += padsize;
+
+ fprintf(fp, " => %sBLOCK: position=%#llx size=%#llx memsize=%#llx\n",
+ compressed ? "COMPRESSED " : "",
+ (ulonglong)blockpos, (ulonglong)nbytes, (ulonglong)nbytesinmem);
+
+ if (nbytes && nbytes <= MAX_BLOCK_DUMP && !compressed) {
+ fprintf(fp, "Hex dump: \n");
+ l = 0;
+ for (k = 0; k < nbytes; k++) {
+ if (fread(&byte, 1, 1, vmss.dfp) != 1) {
+ fprintf(fp, "Cannot read byte.\n");
+ result = FALSE;
+ goto exit;
+ }
+
+ fprintf(fp, " %02hhX", byte);
+
+ if (l++ == 15) {
+ fprintf(fp, "\n");
+ l = 0;
+ }
+ }
+ if (l)
+ fprintf(fp, "\n\n");
+ else
+ fprintf(fp, "\n");
+ } else {
+ if (fseek(vmss.dfp, blockpos + nbytes, SEEK_SET) == -1) {
+ fprintf(fp, "Cannot seek past block at %#llx.\n",
+ (ulonglong)(blockpos + nbytes));
+ result = FALSE;
+ goto exit;
+ }
+ }
+ } else {
+ union {
+ uint8_t val[TAG_VALSIZE_MASK];
+ uint32_t val32;
+ uint64_t val64;
+ } u;
+ unsigned k;
+ unsigned valsize = TAG_VALSIZE(tag);
+ uint64_t blockpos = ftell(vmss.dfp);
+
+ fprintf(fp, " => position=%#llx size=%#x: ",
+ (ulonglong)blockpos, valsize);
+
+ if (fread(u.val, sizeof(u.val[0]), valsize, vmss.dfp) != valsize) {
+ fprintf(fp, "Cannot read item.\n");
+ break;
+ }
+ for (k = 0; k < valsize; k++) {
+ /* Assume Little Endian */
+ fprintf(fp, "%02X", u.val[valsize - k - 1]);
+ }
+
+ fprintf(fp, "\n");
+ }
+ }
+ }
+
+exit:
+ if (grps)
+ free(grps);
+
+ return result;
+}
+
+void
+dump_registers_for_vmss_dump(void)
+{
+ int i;
+ vmssregs64 *regs;
+
+ if (!machine_type("X86_64")) {
+ fprintf(fp, "-r option not supported on this dumpfile type\n");
+ return;
+ }
+
+ for (i = 0; i < vmss.num_vcpus; i++) {
+ regs = vmss.regs64[i];
+
+ if (i)
+ fprintf(fp, "\n");
+
+ fprintf(fp, "CPU %d:\n", i);
+
+ fprintf(fp, " RAX: %016llx RBX: %016llx RCX: %016llx\n",
+ (ulonglong)regs->rax, (ulonglong)regs->rbx, (ulonglong)regs->rcx);
+ fprintf(fp, " RDX: %016llx RSI: %016llx RDI: %016llx\n",
+ (ulonglong)regs->rdx, (ulonglong)regs->rsi, (ulonglong)regs->rdi);
+ fprintf(fp, " RSP: %016llx RBP: %016llx R8: %016llx\n",
+ (ulonglong)regs->rsp, (ulonglong)regs->rbp, (ulonglong)regs->r8);
+ fprintf(fp, " R9: %016llx R10: %016llx R11: %016llx\n",
+ (ulonglong)regs->r9, (ulonglong)regs->r10, (ulonglong)regs->r11);
+ fprintf(fp, " R12: %016llx R13: %016llx R14: %016llx\n",
+ (ulonglong)regs->r12, (ulonglong)regs->r13, (ulonglong)regs->r14);
+ fprintf(fp, " R15: %016llx RIP: %016llx RFLAGS: %08llx\n",
+ (ulonglong)regs->r15, (ulonglong)regs->rip, (ulonglong)regs->rflags);
+ fprintf(fp, " IDT: base: %016llx\n",
+ (ulonglong)regs->idtr);
+ fprintf(fp, " CR0: %016llx CR1: %016llx CR2: %016llx\n",
+ (ulonglong)regs->cr[0], (ulonglong)regs->cr[1], (ulonglong)regs->cr[2]);
+ fprintf(fp, " CR3: %016llx CR4: %016llx\n",
+ (ulonglong)regs->cr[3], (ulonglong)regs->cr[4]);
+ }
+}
diff --git a/vmware_vmss.h b/vmware_vmss.h
index a4b8937..41d14c3 100644
--- a/vmware_vmss.h
+++ b/vmware_vmss.h
@@ -89,6 +89,35 @@ struct memregion {
};
typedef struct memregion memregion;
+#define VMW_GPREGS_SIZE (128)
+#define VMW_CR64_SIZE (72)
+#define VMW_IDTR_SIZE (10)
+struct vmssregs64 {
+ /* read from vmss */
+ uint64_t rax;
+ uint64_t rcx;
+ uint64_t rdx;
+ uint64_t rbx;
+ uint64_t rbp;
+ uint64_t rsp;
+ uint64_t rsi;
+ uint64_t rdi;
+ uint64_t r8;
+ uint64_t r9;
+ uint64_t r10;
+ uint64_t r11;
+ uint64_t r12;
+ uint64_t r13;
+ uint64_t r14;
+ uint64_t r15;
+ /* manually managed */
+ uint64_t idtr;
+ uint64_t cr[VMW_CR64_SIZE / 8];
+ uint64_t rip;
+ uint64_t rflags;
+};
+typedef struct vmssregs64 vmssregs64;
+
#define MAX_REGIONS 3
struct vmssdata {
int32_t cpt64bit;
@@ -99,6 +128,8 @@ struct vmssdata {
memregion regions[MAX_REGIONS];
uint64_t memoffset;
uint64_t memsize;
+ uint64_t num_vcpus;
+ vmssregs64 **regs64;
};
typedef struct vmssdata vmssdata;
diff --git a/x86_64.c b/x86_64.c
index 0d5e150..7b02761 100644
--- a/x86_64.c
+++ b/x86_64.c
@@ -3273,6 +3273,8 @@ x86_64_low_budget_back_trace_cmd(struct bt_info *bt_in)
diskdump_display_regs(bt->tc->processor, ofp);
else if (SADUMP_DUMPFILE())
sadump_display_regs(bt->tc->processor, ofp);
+ else if (VMSS_DUMPFILE())
+ vmware_vmss_display_regs(bt->tc->processor, ofp);
return;
}
@@ -3295,13 +3297,16 @@ x86_64_low_budget_back_trace_cmd(struct bt_info *bt_in)
diskdump_display_regs(bt->tc->processor, ofp);
else if (SADUMP_DUMPFILE())
sadump_display_regs(bt->tc->processor, ofp);
+ else if (VMSS_DUMPFILE())
+ vmware_vmss_display_regs(bt->tc->processor, ofp);
else if (pc->flags2 & QEMU_MEM_DUMP_ELF)
display_regs_from_elf_notes(bt->tc->processor, ofp);
return;
} else if ((bt->flags & BT_KERNEL_SPACE) &&
(KVMDUMP_DUMPFILE() ||
(ELF_NOTES_VALID() && DISKDUMP_DUMPFILE()) ||
- SADUMP_DUMPFILE() || (pc->flags2 & QEMU_MEM_DUMP_ELF))) {
+ SADUMP_DUMPFILE() || (pc->flags2 & QEMU_MEM_DUMP_ELF) ||
+ VMSS_DUMPFILE())) {
fprintf(ofp, " [exception RIP: ");
if ((sp = value_search(bt->instptr, &offset))) {
fprintf(ofp, "%s", sp->name);
@@ -3317,6 +3322,8 @@ x86_64_low_budget_back_trace_cmd(struct bt_info *bt_in)
diskdump_display_regs(bt->tc->processor, ofp);
else if (SADUMP_DUMPFILE())
sadump_display_regs(bt->tc->processor, ofp);
+ else if (VMSS_DUMPFILE())
+ vmware_vmss_display_regs(bt->tc->processor, ofp);
else if (pc->flags2 & QEMU_MEM_DUMP_ELF)
display_regs_from_elf_notes(bt->tc->processor, ofp);
@@ -4941,7 +4948,7 @@ skip_stage:
if (halt_rip && halt_rsp) {
*rip = halt_rip;
*rsp = halt_rsp;
- if (KVMDUMP_DUMPFILE() || SADUMP_DUMPFILE())
+ if (KVMDUMP_DUMPFILE() || SADUMP_DUMPFILE() || VMSS_DUMPFILE())
bt_in->flags &= ~(ulonglong)BT_DUMPFILE_SEARCH;
return;
}
@@ -4986,7 +4993,7 @@ skip_stage:
machdep->get_stack_frame(bt, rip, rsp);
- if (KVMDUMP_DUMPFILE() || SADUMP_DUMPFILE())
+ if (KVMDUMP_DUMPFILE() || SADUMP_DUMPFILE() || VMSS_DUMPFILE())
bt_in->flags &= ~(ulonglong)BT_DUMPFILE_SEARCH;
}
--
2.14.3