[PATCH] Fix an unintended unlink of a file crash didn't create, and a leftover temporary, in some command-line cases
by David Mair
When the crash command line includes two filenames where one is a
compressed kernel and the other is a debuginfo file and they appear in
that order then if the uncompressed temporary version of the kernel is
actually larger than the debuginfo then crash will end with an error but
will also unlink the debuginfo file and will not clean up the (intended
temporary) uncompressed copy of the kernel.
This patch at least fixes the unintended unlink and leaving the
temporary present. It doesn't fix the failure to start, but that's
because the wrong files are assumed to be the debuginfo and kernel. The size
case that led to this discovery is probably rare.
The cause is that evidence of a temporary file to unlink is that there
is a value in pc->namelist and pc->namelist_orig (or pc->namelist_debug
and pc->namelist_debug_orig) but when the file size test in
select_namelist() results in the pc->namelist copy to pc->namelist_debug
the _orig is not copied as well so the implied file to unlink is the one
set in pc->namelist (the debuginfo filename now).
The patch causes a populated namelist_orig value to be swapped with
namelist_debug_orig if the namelist value is copied to namelist_debug.
Signed-off-by: David Mair <dmair(a)suse.com>
---
symbols.c | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/symbols.c b/symbols.c
index 4c6fbf4..e1ed719 100644
--- a/symbols.c
+++ b/symbols.c
@@ -3520,6 +3520,7 @@ int
select_namelist(char *new)
{
struct stat stat1, stat2;
+ char *namecp;
if (pc->server_namelist) {
pc->namelist_debug = new;
@@ -3533,6 +3534,12 @@ select_namelist(char *new)
if (stat1.st_size > stat2.st_size) {
pc->namelist_debug = pc->namelist;
+ if (pc->namelist_orig)
+ {
+ namecp = pc->namelist_debug_orig;
+ pc->namelist_debug_orig = pc->namelist_orig;
+ pc->namelist_orig = namecp;
+ }
pc->namelist = new;
} else if (stat2.st_size > stat1.st_size)
pc->namelist_debug = new;
10 years, 5 months
[PATCH 1/1] xendump: Use off_t not long for 32bit code
by Don Slutz
This enables crash to handle xen dumps that are larger than 4G
in size in 32bit mode.
Signed-off-by: Don Slutz <dslutz(a)verizon.com>
---
This is the same as was sent as an attachment. Just a clean top post.
x86.c | 10 ++++-----
x86_64.c | 10 ++++-----
xendump.c | 74 +++++++++++++++++++++++++++++++++------------------------------
xendump.h | 6 +++---
4 files changed, 52 insertions(+), 48 deletions(-)
diff --git a/x86.c b/x86.c
index 833a11b..608bb88 100644
--- a/x86.c
+++ b/x86.c
@@ -4897,7 +4897,7 @@ x86_xendump_p2m_create(struct xendump_data *xd)
"MEMBER_OFFSET(vcpu_guest_context, ctrlreg): %ld\n",
ctrlreg_offset);
- offset = (off_t)xd->xc_core.header.xch_ctxt_offset +
+ offset = xd->xc_core.header.xch_ctxt_offset +
(off_t)ctrlreg_offset;
if (lseek(xd->xfd, offset, SEEK_SET) == -1)
@@ -4997,7 +4997,7 @@ x86_pvops_xendump_p2m_create(struct xendump_data *xd)
"MEMBER_OFFSET(vcpu_guest_context, ctrlreg): %ld\n",
ctrlreg_offset);
- offset = (off_t)xd->xc_core.header.xch_ctxt_offset +
+ offset = xd->xc_core.header.xch_ctxt_offset +
(off_t)ctrlreg_offset;
if (lseek(xd->xfd, offset, SEEK_SET) == -1)
@@ -5369,7 +5369,7 @@ x86_xendump_panic_task(struct xendump_data *xd)
INVALID_MEMBER(cpu_user_regs_esp))
return NO_TASK;
- offset = (off_t)xd->xc_core.header.xch_ctxt_offset +
+ offset = xd->xc_core.header.xch_ctxt_offset +
(off_t)OFFSET(vcpu_guest_context_user_regs) +
(off_t)OFFSET(cpu_user_regs_esp);
@@ -5419,7 +5419,7 @@ x86_get_xendump_regs(struct xendump_data *xd, struct bt_info *bt, ulong *eip, ul
INVALID_MEMBER(cpu_user_regs_esp))
goto generic;
- offset = (off_t)xd->xc_core.header.xch_ctxt_offset +
+ offset = xd->xc_core.header.xch_ctxt_offset +
(off_t)OFFSET(vcpu_guest_context_user_regs) +
(off_t)OFFSET(cpu_user_regs_esp);
if (lseek(xd->xfd, offset, SEEK_SET) == -1)
@@ -5427,7 +5427,7 @@ x86_get_xendump_regs(struct xendump_data *xd, struct bt_info *bt, ulong *eip, ul
if (read(xd->xfd, &xesp, sizeof(ulong)) != sizeof(ulong))
goto generic;
- offset = (off_t)xd->xc_core.header.xch_ctxt_offset +
+ offset = xd->xc_core.header.xch_ctxt_offset +
(off_t)OFFSET(vcpu_guest_context_user_regs) +
(off_t)OFFSET(cpu_user_regs_eip);
if (lseek(xd->xfd, offset, SEEK_SET) == -1)
diff --git a/x86_64.c b/x86_64.c
index f4a3e8b..a2e4636 100644
--- a/x86_64.c
+++ b/x86_64.c
@@ -6184,7 +6184,7 @@ x86_64_xendump_p2m_create(struct xendump_data *xd)
"MEMBER_OFFSET(vcpu_guest_context, ctrlreg): %ld\n",
ctrlreg_offset);
- offset = (off_t)xd->xc_core.header.xch_ctxt_offset +
+ offset = xd->xc_core.header.xch_ctxt_offset +
(off_t)ctrlreg_offset;
if (lseek(xd->xfd, offset, SEEK_SET) == -1)
@@ -6270,7 +6270,7 @@ x86_64_pvops_xendump_p2m_create(struct xendump_data *xd)
"MEMBER_OFFSET(vcpu_guest_context, ctrlreg): %ld\n",
ctrlreg_offset);
- offset = (off_t)xd->xc_core.header.xch_ctxt_offset +
+ offset = xd->xc_core.header.xch_ctxt_offset +
(off_t)ctrlreg_offset;
if (lseek(xd->xfd, offset, SEEK_SET) == -1)
@@ -6601,7 +6601,7 @@ x86_64_xendump_panic_task(struct xendump_data *xd)
INVALID_MEMBER(cpu_user_regs_esp))
return NO_TASK;
- offset = (off_t)xd->xc_core.header.xch_ctxt_offset +
+ offset = xd->xc_core.header.xch_ctxt_offset +
(off_t)OFFSET(vcpu_guest_context_user_regs) +
(off_t)OFFSET(cpu_user_regs_rsp);
@@ -6653,7 +6653,7 @@ x86_64_get_xendump_regs(struct xendump_data *xd, struct bt_info *bt, ulong *rip,
INVALID_MEMBER(cpu_user_regs_rsp))
goto generic;
- offset = (off_t)xd->xc_core.header.xch_ctxt_offset +
+ offset = xd->xc_core.header.xch_ctxt_offset +
(off_t)OFFSET(vcpu_guest_context_user_regs) +
(off_t)OFFSET(cpu_user_regs_rsp);
if (lseek(xd->xfd, offset, SEEK_SET) == -1)
@@ -6661,7 +6661,7 @@ x86_64_get_xendump_regs(struct xendump_data *xd, struct bt_info *bt, ulong *rip,
if (read(xd->xfd, &xrsp, sizeof(ulong)) != sizeof(ulong))
goto generic;
- offset = (off_t)xd->xc_core.header.xch_ctxt_offset +
+ offset = xd->xc_core.header.xch_ctxt_offset +
(off_t)OFFSET(vcpu_guest_context_user_regs) +
(off_t)OFFSET(cpu_user_regs_rip);
if (lseek(xd->xfd, offset, SEEK_SET) == -1)
diff --git a/xendump.c b/xendump.c
index 6d6b51e..9d78916 100644
--- a/xendump.c
+++ b/xendump.c
@@ -126,9 +126,9 @@ xc_core_verify(char *file, char *buf)
xd->xc_core.header.xch_magic = xcp->xch_magic;
xd->xc_core.header.xch_nr_vcpus = xcp->xch_nr_vcpus;
xd->xc_core.header.xch_nr_pages = xcp->xch_nr_pages;
- xd->xc_core.header.xch_ctxt_offset = (ulong)xcp->xch_ctxt_offset;
- xd->xc_core.header.xch_index_offset = (ulong)xcp->xch_index_offset;
- xd->xc_core.header.xch_pages_offset = (ulong)xcp->xch_pages_offset;
+ xd->xc_core.header.xch_ctxt_offset = (off_t)xcp->xch_ctxt_offset;
+ xd->xc_core.header.xch_index_offset = (off_t)xcp->xch_index_offset;
+ xd->xc_core.header.xch_pages_offset = (off_t)xcp->xch_pages_offset;
xd->flags |= (XENDUMP_LOCAL | XC_CORE_ORIG | XC_CORE_P2M_CREATE);
@@ -187,7 +187,7 @@ xc_core_read(void *bufptr, int cnt, ulong addr, physaddr_t paddr)
PFN_NOT_FOUND)
return READ_ERROR;
- offset = (off_t)xd->xc_core.header.xch_pages_offset +
+ offset = xd->xc_core.header.xch_pages_offset +
((off_t)(page_index) * (off_t)xd->page_size);
if (lseek(xd->xfd, offset, SEEK_SET) == -1)
@@ -852,7 +852,7 @@ read_xendump_hyper(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
if ((page_index = xc_core_mfn_to_page_index(pfn)) == PFN_NOT_FOUND)
return READ_ERROR;
- offset = (off_t)xd->xc_core.header.xch_pages_offset +
+ offset = xd->xc_core.header.xch_pages_offset +
((off_t)(page_index) * (off_t)xd->page_size);
if (lseek(xd->xfd, offset, SEEK_SET) == -1)
@@ -1040,15 +1040,15 @@ xendump_memory_dump(FILE *fp)
fprintf(fp, " xch_nr_pages: %d (0x%x)\n",
xd->xc_core.header.xch_nr_pages,
xd->xc_core.header.xch_nr_pages);
- fprintf(fp, " xch_ctxt_offset: %ld (0x%lx)\n",
- xd->xc_core.header.xch_ctxt_offset,
- xd->xc_core.header.xch_ctxt_offset);
- fprintf(fp, " xch_index_offset: %ld (0x%lx)\n",
- xd->xc_core.header.xch_index_offset,
- xd->xc_core.header.xch_index_offset);
- fprintf(fp, " xch_pages_offset: %ld (0x%lx)\n",
- xd->xc_core.header.xch_pages_offset,
- xd->xc_core.header.xch_pages_offset);
+ fprintf(fp, " xch_ctxt_offset: %llu (0x%llx)\n",
+ (ulonglong)xd->xc_core.header.xch_ctxt_offset,
+ (ulonglong)xd->xc_core.header.xch_ctxt_offset);
+ fprintf(fp, " xch_index_offset: %llu (0x%llx)\n",
+ (ulonglong)xd->xc_core.header.xch_index_offset,
+ (ulonglong)xd->xc_core.header.xch_index_offset);
+ fprintf(fp, " xch_pages_offset: %llu (0x%llx)\n",
+ (ulonglong)xd->xc_core.header.xch_pages_offset,
+ (ulonglong)xd->xc_core.header.xch_pages_offset);
fprintf(fp, " elf_class: %s\n", xd->xc_core.elf_class == ELFCLASS64 ? "ELFCLASS64" :
xd->xc_core.elf_class == ELFCLASS32 ? "ELFCLASS32" : "n/a");
@@ -1285,7 +1285,7 @@ xc_core_mfn_to_page(ulong mfn, char *pgbuf)
if (xd->flags & XC_CORE_ELF)
return xc_core_elf_mfn_to_page(mfn, pgbuf);
- if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_index_offset,
+ if (lseek(xd->xfd, xd->xc_core.header.xch_index_offset,
SEEK_SET) == -1) {
error(INFO, "cannot lseek to page index\n");
return NULL;
@@ -1325,7 +1325,7 @@ xc_core_mfn_to_page(ulong mfn, char *pgbuf)
return NULL;
}
- if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_pages_offset,
+ if (lseek(xd->xfd, xd->xc_core.header.xch_pages_offset,
SEEK_SET) == -1) {
error(INFO, "cannot lseek to xch_pages_offset\n");
return NULL;
@@ -1400,7 +1400,7 @@ xc_core_elf_mfn_to_page(ulong mfn, char *pgbuf)
return NULL;
}
- if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_pages_offset,
+ if (lseek(xd->xfd, xd->xc_core.header.xch_pages_offset,
SEEK_SET) == -1)
error(FATAL, "cannot lseek to xch_pages_offset\n");
@@ -1434,7 +1434,7 @@ xc_core_mfn_to_page_index(ulong mfn)
if (xd->flags & XC_CORE_ELF)
return xc_core_elf_mfn_to_page_index(mfn);
- if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_index_offset,
+ if (lseek(xd->xfd, xd->xc_core.header.xch_index_offset,
SEEK_SET) == -1) {
error(INFO, "cannot lseek to page index\n");
return MFN_NOT_FOUND;
@@ -1527,7 +1527,7 @@ xc_core_mfns(ulong arg, FILE *ofp)
ulonglong tmp64[MAX_BATCH_SIZE];
size_t size;
- if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_index_offset,
+ if (lseek(xd->xfd, xd->xc_core.header.xch_index_offset,
SEEK_SET) == -1) {
error(INFO, "cannot lseek to page index\n");
return FALSE;
@@ -1677,7 +1677,7 @@ xc_core_pfn_to_page_index(ulong pfn)
p2m_idx = xd->xc_core.p2m_frame_index_list[idx];
- if (lseek(xd->xfd, (off_t)xd->xc_core.header.xch_pages_offset,
+ if (lseek(xd->xfd, xd->xc_core.header.xch_pages_offset,
SEEK_SET) == -1) {
error(INFO, "cannot lseek to xch_pages_offset\n");
return PFN_NOT_FOUND;
@@ -1801,7 +1801,7 @@ xc_core_pfn_valid(ulong pfn)
if (pfn >= (ulong)xd->xc_core.header.xch_nr_pages)
return FALSE;
- offset = (off_t)xd->xc_core.header.xch_index_offset;
+ offset = xd->xc_core.header.xch_index_offset;
if (xd->flags & XC_CORE_64BIT_HOST)
offset += (off_t)(pfn * sizeof(ulonglong));
@@ -2542,25 +2542,27 @@ xc_core_dump_Elf32_Shdr(Elf32_Off offset, int store)
return;
if (STREQ(name, ".xen_prstatus"))
- xd->xc_core.header.xch_ctxt_offset =
- (unsigned long)shdr.sh_offset;
+ xd->xc_core.header.xch_ctxt_offset =
+ (off_t)shdr.sh_offset;
if (STREQ(name, ".xen_shared_info"))
xd->xc_core.shared_info_offset = (off_t)shdr.sh_offset;
if (STREQ(name, ".xen_pfn")) {
- xd->xc_core.header.xch_index_offset = shdr.sh_offset;
+ xd->xc_core.header.xch_index_offset =
+ (off_t)shdr.sh_offset;
xd->flags |= (XC_CORE_NO_P2M|XC_CORE_PFN_CREATE);
}
if (STREQ(name, ".xen_p2m")) {
- xd->xc_core.header.xch_index_offset = shdr.sh_offset;
+ xd->xc_core.header.xch_index_offset =
+ (off_t)shdr.sh_offset;
xd->flags |= XC_CORE_P2M_CREATE;
}
if (STREQ(name, ".xen_pages"))
- xd->xc_core.header.xch_pages_offset =
- (unsigned long)shdr.sh_offset;
+ xd->xc_core.header.xch_pages_offset =
+ (off_t)shdr.sh_offset;
if (STREQ(name, ".xen_ia64_mapped_regs"))
xd->xc_core.ia64_mapped_regs_offset =
@@ -2642,25 +2644,27 @@ xc_core_dump_Elf64_Shdr(Elf64_Off offset, int store)
return;
if (STREQ(name, ".xen_prstatus"))
- xd->xc_core.header.xch_ctxt_offset =
- (unsigned long)shdr.sh_offset;
+ xd->xc_core.header.xch_ctxt_offset =
+ (off_t)shdr.sh_offset;
if (STREQ(name, ".xen_shared_info"))
xd->xc_core.shared_info_offset = (off_t)shdr.sh_offset;
if (STREQ(name, ".xen_pfn")) {
- xd->xc_core.header.xch_index_offset = shdr.sh_offset;
+ xd->xc_core.header.xch_index_offset =
+ (off_t)shdr.sh_offset;
xd->flags |= (XC_CORE_NO_P2M|XC_CORE_PFN_CREATE);
}
if (STREQ(name, ".xen_p2m")) {
- xd->xc_core.header.xch_index_offset = shdr.sh_offset;
+ xd->xc_core.header.xch_index_offset =
+ (off_t)shdr.sh_offset;
xd->flags |= XC_CORE_P2M_CREATE;
}
if (STREQ(name, ".xen_pages"))
- xd->xc_core.header.xch_pages_offset =
- (unsigned long)shdr.sh_offset;
+ xd->xc_core.header.xch_pages_offset =
+ (off_t)shdr.sh_offset;
if (STREQ(name, ".xen_ia64_mapped_regs"))
xd->xc_core.ia64_mapped_regs_offset =
@@ -2814,7 +2818,7 @@ xc_core_elf_pfn_init(void)
chunk = xd->xc_core.header.xch_nr_pages/INDEX_PFN_COUNT;
for (i = c = 0; i < INDEX_PFN_COUNT; i++, c += chunk) {
- offset = (off_t)xd->xc_core.header.xch_index_offset +
+ offset = xd->xc_core.header.xch_index_offset +
(off_t)(c * sizeof(uint64_t));
if (lseek(xd->xfd, offset, SEEK_SET) == -1)
@@ -2834,7 +2838,7 @@ xc_core_elf_pfn_init(void)
chunk = xd->xc_core.header.xch_nr_pages/INDEX_PFN_COUNT;
for (i = c = 0; i < INDEX_PFN_COUNT; i++, c += chunk) {
- offset = (off_t)xd->xc_core.header.xch_index_offset +
+ offset = xd->xc_core.header.xch_index_offset +
(off_t)(c * sizeof(struct xen_dumpcore_p2m));
if (lseek(xd->xfd, offset, SEEK_SET) == -1)
diff --git a/xendump.h b/xendump.h
index 9ece4da..17aae37 100644
--- a/xendump.h
+++ b/xendump.h
@@ -42,9 +42,9 @@ struct xen_core_header {
unsigned int xch_magic;
unsigned int xch_nr_vcpus;
unsigned int xch_nr_pages;
- unsigned long xch_ctxt_offset;
- unsigned long xch_index_offset;
- unsigned long xch_pages_offset;
+ off_t xch_ctxt_offset;
+ off_t xch_index_offset;
+ off_t xch_pages_offset;
};
struct pfn_offset_cache {
--
1.8.4
10 years, 5 months
Re: [Crash-utility] [PATCH] crash: ARM: support LPAE
by Dave Anderson
----- Original Message -----
> >
> > Given that the vmcore indentifies itself as a kdump ELF vmcore that contains
> > a VMCOREINFO note, it would seem that the kdump facility "just works" with
> > an ARM PAE kernel as long as the physical memory can be contained within
> > 4GB?
>
> On our platform I have tested physical memory beyond 4GB. And it worked fine.
> Maybe my test is not all-around. I will try to do that on qemu, which I failed
> to do last time.
Yeah, it looks like as long as the beginning of the highest physical memory
PT_LOAD segment *begins* before the 4GB mark, then it would work OK. However,
the 32-bit ELF Elf32_Phdr.p_paddr field is a 32-bit value that cannot contain
a physical address that is 4GB or larger.
> > But if the system contained physical memory beyond 4GB, then it would
> > require a 64-bit ELF header, and therefore your recent changes to kexec-tools,
> > correct? In addition, it would require update to the crash utility's netdump.c
> > is_netdump() function to to accept 64-bit ELF headers for EM_ARM vmcores.
>
>
> Yes, kexec is ready for LPAE now.
>
> Maybe I can try to do that. But perhaps I can't test it because
> of lack of environment.
I believe the 32-bit vs. 64-bit ELF header is configurable, correct?
On RHEL, by default we configure 64-bit ELF headers for 32-bit x86
machines regardless of their memory size. So you should be able to
create a vmcore with a 64-bit ELF header on a system that has less
than 4GB of physical memory.
But as I mentioned above, there will need to be at least one fix for
the crash utility, because it will fail at line 258 of netdump.c.
To accept 64-bit ARM headers, there would need to be an additional
case statement like this:
case EM_ARM:
if (machine_type_mismatch(file, "ARM", NULL,
source_query))
goto bailout;
break;
I'm not sure whether any other fixes would be required?
>
> >
> > Also, w/respect to this commit:
> >
> > commit 56b700fd6f1e49149880fb1b6ffee0dca5be45fb
> > Author: Liu Hua <sdu.liu(a)huawei.com>
> > Date: Fri Apr 18 07:45:36 2014 +0100
> >
> > ARM: 8030/1: ARM : kdump : add arch_crash_save_vmcoreinfo
> >
> > For vmcore generated by LPAE enabled kernel, user space
> > utility such as crash needs additional infomation to
> > parse.
> >
> > So this patch add arch_crash_save_vmcoreinfo as what PAE enabled
> > i386 linux does.
> >
> > Cc: <stable(a)vger.kernel.org>
> > Reviewed-by: Will Deacon <will.deacon(a)arm.com>
> > Signed-off-by: Liu Hua <sdu.liu(a)huawei.com>
> > Signed-off-by: Russell King <rmk+kernel(a)arm.linux.org.uk>
> >
> > diff --git a/arch/arm/kernel/machine_kexec.c
> > b/arch/arm/kernel/machine_kexec.c
> > index f0d180d..8cf0996 100644
> > --- a/arch/arm/kernel/machine_kexec.c
> > +++ b/arch/arm/kernel/machine_kexec.c
> > @@ -184,3 +184,10 @@ void machine_kexec(struct kimage *image)
> >
> > soft_restart(reboot_entry_phys);
> > }
> > +
> > +void arch_crash_save_vmcoreinfo(void)
> > +{
> > +#ifdef CONFIG_ARM_LPAE
> > + VMCOREINFO_CONFIG(ARM_LPAE);
> > +#endif
> > +}
> >
> > I note that the sample vmcore you sent me does not have the ARM_LPAE vmcoreinfo
> > item, and that your patch doesn't require/check it. Was it your intention
> > to use the above as determining factor for setting the "PAE" bit?
>
> The kernel version I used is 3.13. So it did not contain this information.
> At the beginning I used this vmcoreinfo, but I found a better way to identify
> the LPAE enabled kernel. PG_DIR_SIZE of an LPAE enabled kernel is larger than
> that of the normal one (0x5000 : 0x4000). What do you think about it?
That was my only concern regarding the patchset, because it presumes that
the difference will be either 0x4000 or 0x5000. But that's not necessarily
true, at least on older kernels. For example, here are the values seen
in my small sample set of ARM dumpfiles, showing the kernel release along with
the values of the "swapper_pg_dir" and "_text" symbols, and the difference
between the two:
RELEASE: 2.6.35-rc3-00272-gd189df4
swapper_pg_dir: c0004000
_text: c002c000
(28000)
RELEASE: 2.6.38-rc2-00274-g1f0324c-dirty
swapper_pg_dir: c0004000
_text: c0050000
(4c000)
RELEASE: 2.6.36-rc6-next-20101005-00033-g5d269a5-dirty
swapper_pg_dir: c0004000
_text: c01d3000
(1cf000)
RELEASE: 3.1.1
swapper_pg_dir: c0004000
_text: c0008000
(4000)
RELEASE: 3.1.1
swapper_pg_dir: c0004000
_text: c0008000
(4000)
RELEASE: 3.0.8+
swapper_pg_dir: c0004000
_text: c0108000
(104000)
RELEASE: 3.13.5 <-- your LPAE kernel
swapper_pg_dir: 80003000
_text: 80008000
(5000)
RELEASE: 3.0.8+
swapper_pg_dir: c0004000
_text: c0108000
(104000)
RELEASE: 3.1.1
swapper_pg_dir: c0004000
_text: c0008000
(4000)
RELEASE: 3.1.1
swapper_pg_dir: c0004000
_text: c0008000
(4000)
Note that in some earlier kernels, the "_text" symbol is often much
higher. But I presume that it would be highly unlikely that the difference
would ever be 0x5000 in an older kernel -- so until somebody reports a
problem, it seems OK to do it that way.
However, just in case the layout changes in the future, there should be
a fail-safe check for the VMCOREINFO_CONFIG(ARM_LPAE) in arm_init(),
that does something like this:
if ((string = pc->read_vmcoreinfo("CONFIG_ARM_LPAE"))) {
machdep->flags |= PAE;
free(string);
} else
[check for 0x5000 difference]
There's really no need to check for the "y" contents of the string, because
if the entry exists, then CONFIG_ARM_LPAE is configured.
> > In any case, thanks for the vmlinux/vmcore pair, which moves us part of the way
> > towards supporting LPAE -- with support for 64-bit ELF headers to be addressed in
> > the future.
>
> Thanks to your agreement. I will work on this issue continually.
Great -- again, I really appreciate your help.
Thanks,
Dave
10 years, 5 months
Re: [Crash-utility] [Xen-devel] libxl: crash fails to load vmcore when the guest memory greater than 4G
by Don Slutz
I was able to reproduce this. The issue that I found is that the file offset was
being truncated to a long (32bits...). The attached patch fixes my test case.
-Don Slutz
On 06/09/14 21:50, Zhangwei (FF) wrote:
> I'm not very sure, but if my vmlinux file is not correct, crash should fail to load the vmcore, whether the guest memory is greater than 4G or not.
>
> My vmlinux file was downloaded from: ftp://ftp.redhat.com/pub/redhat/linux/enterprise/5Server/en/os/i386/Debug...
>
> and I have send the vmlinux and vmcore files (good and bad one) download link to your email, you can use gunzip command to unzip these files.
>
> In addition, my redhat5.5(32bit) guest os information:
> [root@localhost ~]# uname -a
> Linux localhost.localdomain 2.6.18-194.el5PAE #1 SMP Tue Mar 16 22:00:21 EDT 2010 i686 i686 i386 GNU/Linux [root@localhost ~]#
>
>
> ---------------------------------------------------------------------
> Sender: Daniel Kiper [mailto:dkiper@net-space.pl]
> Send Time: 2014年6月7日 5:22
> To: Zhangwei (FF)
> CC: Daniel Kiper; vincent.hanquez(a)eu.citrix.com; stefano.stabellini(a)eu.citrix.com; xen-devel(a)lists.xen.org; Chenguoping; Xuzhichuang
> Topic: Re: [Xen-devel] libxl: crash fails to load vmcore when the guest memory greater than 4G
>
> On Fri, Jun 06, 2014 at 02:00:40AM +0000, Zhangwei (FF) wrote:
>> Hi, Daniel
>> Thanks for replying.
>> The latest crash tool has the same problem. Following are the results.
>>
>> When the guest memory greater than 4G(such as 8G), crash load vmcore failed.
>>
>> [root@localhost sdb]# crash vmlinux redhat_5.5_32_hvm_8G.core crash 7.0.6 Copyright (C) 2002-2014 Red Hat, Inc.
>> Copyright (C) 2004, 2005, 2006, 2010 IBM Corporation Copyright (C) 1999-2006 Hewlett-Packard Co Copyright (C) 2005, 2006, 2011, 2012 Fujitsu Limited Copyright (C) 2006, 2007 VA Linux Systems Japan K.K.
>> Copyright (C) 2005, 2011 NEC Corporation Copyright (C) 1999, 2002, 2007 Silicon Graphics, Inc.
>> Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
>> This program is free software, covered by the GNU General Public License, and you are welcome to change it and/or distribute copies of it under certain conditions. Enter "help copying" to see the conditions.
>> This program has absolutely no warranty. Enter "help warranty" for details.
>> GNU gdb (GDB) 7.6
>> Copyright (C) 2013 Free Software Foundation, Inc.
>> License GPLv3+: GNU GPL version 3 or later
>> <http://gnu.org/licenses/gpl.html>
>> This is free software: you are free to change and redistribute it.
>> There is NO WARRANTY, to the extent permitted by law. Type "show copying"
>> and "show warranty" for details.
>> This GDB was configured as "i686-pc-linux-gnu"...
>> crash: read error: kernel virtual address: c0787540 type: "possible"
>> WARNING: cannot read cpu_possible_map
>> crash: read error: kernel virtual address: c06fffe0 type: "online"
>> WARNING: cannot read cpu_online_map
>> crash: read error: kernel virtual address: c068a340 type: "system_utsname"
>> crash: vmlinux and redhat_5.5_32_hvm_8G.core do not match!
> ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>
> Are you sure that you use correct vmlinux file for this vmcore file?
> If yes, could you put both vmcore files (good and bad one) and vmlinux file somwhere for download? I will try to look at this issue in week or two.
>
> Daniel
> _______________________________________________
> Xen-devel mailing list
> Xen-devel(a)lists.xen.org
> http://lists.xen.org/xen-devel
10 years, 5 months
[ANNOUNCE] crash 7.0.7 is available
by Dave Anderson
Download from: http://people.redhat.com/anderson
or
https://github.com/crash-utility/crash/releases
The master branch serves as a development branch that will contain all
patches that are queued for the next release:
$ git clone git://github.com/crash-utility/crash.git
Changelog:
- Export the static ELF and compressed kdump vmcoreinfo_read_string()
functions from netdump.c and kdump.c via a new read_vmcoreinfo()
method in the global program_context structure. The function
get_log_from_vmcoreinfo() will access vmcoreinfo data via the
new pointer instead of requiring its callers to pass pointers to
their dumpfile-specific function.
(anderson(a)redhat.com)
- Linux 3.15 and later kernels configured with CONFIG_RANDOMIZE_BASE
can be now be readily identified because of new kernel symbols that
have been added. For those kernels, the new "--kaslr=<offset>"
and/or "--kaslr=auto" options are not necessary for ELF or compressed
kdump vmcores, or for live systems that have /proc/kallsyms showing
the relocated symbol values. A new KASLR initialization function
called kaslr_init() is now called by symtab_init() prior to the
initial symbol-sorting operation. If kaslr_init() determines that
KASLR may be in effect, it will trigger a search for the relevant
vmlinux symbols during the sorting operation, which in turn will
cause the relocation value to be automatically calculated.
(anderson(a)redhat.com)
- Implemented a new "bt -c cpu(s)" option to display the backtrace
of the active task on one or more cpus. The cpus must be specified
in a comma- and/or dash-separated list; for examples ""3", "1,8,9",
"1-23", or "1,8,9-14". Similar to "bt -a", the option is only
applicable with crash dumps.
(atomlin(a)redhat.com)
- Fix for Linux 3.11 and later ARM kernels, in which all non-panicking
cpus offline themselves during a kdump procedure. This causes an
invalid cpu count determination during crash session initialization
from an ARM vmcore. The patch utilizes the cpu count found in the
cpu_active_map if it is greater than the count in the cpu_online_map.
In addition, the maximum NR_CPUS value for the ARM architecture has
been raised from 4 to 32.
(sdu.liu(a)huawei.com)
- Fix for the X86_64 "bt" command on Linux 3.3 and later kernels to
properly display exception frame register contents on NMI stacks.
Kernel commit 3f3c8b8c4b2a34776c3470142a7c8baafcda6eb0 added 12 more
values to the NMI exception stack to handle nested NMIs caused by
page faults or breakpoints that could occur while handling an NMI
exception. The fix has two parts:
1. Determine if this kernel has the nested NMI layout and set a
machine-specific flag (NESTED_NMI) if it does.
2. When backtracing an NMI stack, use the saved values instead of
those found at the top of stack.
Kernel commit 28696f434fef0efa97534b59986ad33b9c4df7f8 changed
the stack layout again, swapping the location of the "saved" and
"copied" registers. This can be detected automatically, because the
"copied" registers contain either a copy of the "saved" registers,
or point to "repeat_nmi". So, if "repeat_nmi" is found as the return
address, assume that this is the old layout, and adjust the stack
pointer again. Without the patch, incorrect register values are
displayed in the exception frame dump in the NMI stack backtrace.
(ptesarik(a)suse.cz)
- Fix for the built-in "g" alias, which apparently has not worked
correctly since crash-5.1.4. Without the patch, if the "g" alias
and the first argument are separated by one space, then the first
first character of that argument would get stripped prior to being
passed to the embedded gdb module.
(anderson(a)redhat.com)
- Removed the BASELEVEL_REVISION string from defs.h, which serves no
purpose since the deprecation of the remote daemon, and typically
has been out of sync with the crash version.
(anderson(a)redhat.com)
- Fix for the "p", "irq", "struct", "union" and "*" commands if a
cpu specification contains an invalid cpu number. Without the
patch, a segmentation violation may be generated.
(anderson(a)redhat.com)
- Implemented a new capability for the "ptov" command that takes a
per-cpu offset and cpu specification argument and translates it
into the kernel virtual addresses for the cpus specified.
(anderson(a)redhat.com)
- Implemented a new "ps -m" option that is a similar, complementary
option to "ps -l", but which translates the task timestamp value from
a decimal or hexadecimal nanoseconds value into a more human-readable
string consisting of the number of days, hours, minutes, seconds and
milliseconds that have elapsed since the task started executing on a
cpu. More accurately described, it is the time difference between
the timestamp copied from the per-cpu runqueue clock when the task
last started executing compared to the most current value of the
per-cpu runqueue clock.
(anderson(a)redhat.com, bud.brown(a)redhat.com)
- In addition, a new "ps -C <cpu-specifier>" option has been added
that can only be used with "ps -l" and "ps -m", which sorts the
global task list into per-cpu blocks; the cpu-specifier uses the
standard comma or dash separated list, expressed as "-C 1,3,5",
"-C 1-3", "-C 1,3,5-7,10", or "-Call" or "-Ca" for all cpus.
(anderson(a)redhat.com)
- Implemented a new "runq -m" option that is a similar, complementary
option to "runq -t", but which displays the amount of time that the
active task on each cpu has been running, expressed in a format
consisting of days, hours, minutes, seconds and milliseconds.
(anderson(a)redhat.com)
- Implemented a new "kmem -h" option that displays the address of
each hugepage hstate array entry, its hugepage size, its free and
total counts, and name string.
(anderson(a)redhat.com)
- Implemented a new "ps -S" option that displays a summary consisting
of the number of tasks in a task state.
(anderson(a)redhat.com)
- Fix for the "arguments-input-file" feature to protect against a
called command modifying an argument string. For example, the
"struct" command modifies "-l struct_name.member" argument strings,
and so without the patch, all iterative calls after the first one
will fail.
(anderson(a)redhat.com)
- Fix failure to build from source when compiling the crash utility
with gcc-4.9. Without the patch, the crash utility build generates
the following error:
In file included from opncls.c:26:0:
opncls.c: In function 'bfd_fopen':
bfd.h:529:65: error: right-hand operand of comma expression has no
effect [-Werror=unused-value]
#define bfd_set_cacheable(abfd,bool) (((abfd)->cacheable = bool), TRUE)
^
opncls.c:263:5: note: in expansion of macro 'bfd_set_cacheable'
bfd_set_cacheable (nbfd, TRUE);
cc1: all warnings being treated as errors
(anderson(a)redhat.com, anatol.pomozov(a)gmail.com)
- Fix for displaying enum values that are greater than 32-bits in
size. Without the patch, the upper 32-bits are clipped off and
displayed as integer-sized value.
(anderson(a)redhat.com)
- If the kernel (live or dumpfile) has the "kpatch" module installed,
the tag "[KPATCH]" will be displayed next to the kernel name in the
initial system banner and by the "sys" command.
(anderson(a)redhat.com)
- Fix for the "DEBUG KERNEL:" display in the initial system banner
and by the "sys" command when using a System.map file with a
Linux 3.0 and later debug kernel. Without the patch, the kernel
version is not displayed in parentheses following the debug kernel
name.
(anderson(a)redhat.com)
- If the gdb-<version>.patch file has changed and a rebuild is being
done from within a previously-existing build tree, "patch -N" the
gdb sources, and start the rebuild from the gdb-<version> directory
instead of the gdb-<version>/gdb directory.
(anderson(a)redhat.com)
- Fix to prevent a possible segmentation violation generated by the
"runq -g" command when run on a very active live system due to an
active task on a cpu exiting while the command is running.
(anderson(a)redhat.com)
- Fix for the "runq -g" command on Linux 3.15 and later kernels, where
the cgroup_name() function now utilizes kernfs_name(). Without the
patch, the command fails with the error message "runq: invalid
structure member offset: cgroup_dentry".
(anderson(a)redhat.com)
- Fix for the "extend" command when running with an x86_64 crash binary
that was built with "make target=ARM64" in order to analyze ARM64
dumpfiles on an x86_64 host. Without the patch, if the extend
command is used with an extension module built in the same manner,
it fails with the message "extend: <module>.so: not an ELF format
object file".
(Jan.Karlsson(a)sonymobile.com)
- Introduce support for 32-bit ARM kernels that are configured with
CONFIG_ARM_LPAE. The patch implements the virtual-to-physical
address translation of 64-bit PTEs used by ARM LPAE kernels.
(sdu.liu(a)huawei.com, weijitao(a)huawei.com)
10 years, 5 months
Cannot load extensions for ARM64
by Karlsson, Jan
Hi Dave
I found a problem that crash built for ARM64 and running on an X86_64 cannot load extensions, as the type for the shared object is not accepted. There is an existing fix for this for ARM running on X86, and it has to be introduced for this case as well.
Function is_shared_object in file symbol.c
case EM_X86_64:
if (machine_type("X86_64"))
return TRUE;
break;
should be changed to
case EM_X86_64:
if (machine_type("X86_64") || machine_type("ARM64"))
return TRUE;
break;
Jan
Jan Karlsson
Senior Software Engineer
System Assurance
Sony Mobile Communications
Tel: +46 703 062 174
jan.karlsson(a)sonymobile.com<mailto:Firstname.Lastname@sonymobile.com>
sonymobile.com<http://sonymobile.com/>
[cid:image001.gif@01CF809F.81560F50]
10 years, 5 months
[PATCH] crash: ARM: support LPAE
by Liu Hua
Hi Dave,
This patch introduces LPAE support for ARM32 platform.
main description:
(1) identify LPAE enabled vmcores:
PG_DIR_SIZE(LPAE:20K,other:16K)
(2) section mapping size changed to 2MiB
(3) virtual to physical address converting:
(a)
#define PTRS_PER_PTE 512
#define PTRS_PER_PMD 512
#define PTRS_PER_PGD 4
(b) Each entry is 8 bytes long, so we need several new
macros to deal with them: FILL_PGD_LPAE ...
(c) arm_translate_pte changed to work like x86_translate_pte
(d) arm_lpae_vtop performs this conversion
Signed-off-by: Wei Jitao <weijitao(a)huawei.com>
Signed-off-by: Liu Hua <sdu.liu(a)huawei.com>
---
arm.c | 134 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------
defs.h | 77 +++++++++++++++++++++++++++++++++++++
2 files changed, 200 insertions(+), 11 deletions(-)
diff --git a/arm.c b/arm.c
index 84dd3ec..001fe6b 100644
--- a/arm.c
+++ b/arm.c
@@ -83,6 +83,7 @@ static struct arm_pt_regs *panic_task_regs;
#define PMD_TYPE_MASK 3
#define PMD_TYPE_SECT 2
#define PMD_TYPE_TABLE 1
+#define PMD_TYPE_SECT_LPAE 1
static inline ulong *
pmd_page_addr(ulong pmd)
@@ -219,12 +220,19 @@ arm_init(int when)
case PRE_GDB:
if ((machdep->pgd = (char *)malloc(PGDIR_SIZE())) == NULL)
error(FATAL, "cannot malloc pgd space.");
+ if ((machdep->pmd = (char *)malloc(PMDSIZE())) == NULL)
+ error(FATAL, "cannot malloc pmd space.");
if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL)
error(FATAL, "cannot malloc ptbl space.");
/*
- * Kernel text starts 16k after PAGE_OFFSET.
+ *LPAE requires an additional page for the PGD;
+ *So PG_DIR_SIZE = 0x5000 for LPAE
*/
+
+ if (symbol_value("_text") - symbol_value("swapper_pg_dir")
+ == 0x5000)
+ machdep->flags |= PAE;
machdep->kvbase = symbol_value("_stext") & ~KVBASE_MASK;
machdep->identity_map_base = machdep->kvbase;
machdep->is_kvaddr = arm_is_kvaddr;
@@ -269,9 +277,13 @@ arm_init(int when)
if (THIS_KERNEL_VERSION >= LINUX(3,3,0) ||
symbol_exists("idmap_pgd"))
machdep->flags |= IDMAP_PGD;
-
- machdep->section_size_bits = _SECTION_SIZE_BITS;
- machdep->max_physmem_bits = _MAX_PHYSMEM_BITS;
+ if (machdep->flags & PAE) {
+ machdep->section_size_bits = _SECTION_SIZE_BITS_LPAE;
+ machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_LPAE;
+ } else {
+ machdep->section_size_bits = _SECTION_SIZE_BITS;
+ machdep->max_physmem_bits = _MAX_PHYSMEM_BITS;
+ }
if (symbol_exists("irq_desc"))
ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc,
@@ -834,24 +846,32 @@ arm_processor_speed(void)
* is passed in, don't print anything.
*/
static int
-arm_translate_pte(ulong pte, void *physaddr, ulonglong pae_pte)
+arm_translate_pte(ulong pte, void *physaddr, ulonglong lpae_pte)
{
char ptebuf[BUFSIZE];
char physbuf[BUFSIZE];
char buf[BUFSIZE];
int page_present;
- ulong paddr;
+ ulonglong paddr;
int len1, len2, others;
+ if (machdep->flags & PAE) {
+ paddr = LPAE_PAGEBASE(lpae_pte);
+ sprintf(ptebuf, "%llx", lpae_pte);
+ pte = (ulong)lpae_pte;
+ } else {
+ paddr = PAGEBASE(pte);
+ sprintf(ptebuf, "%lx", pte);
+ }
page_present = pte_present(pte);
- paddr = PAGEBASE(pte);
-
if (physaddr) {
- *((ulong *)physaddr) = paddr;
+ if (machdep->flags & PAE)
+ *((ulonglong *)physaddr) = paddr;
+ else
+ *((ulong *)physaddr) = (ulong)paddr;
return page_present;
}
- sprintf(ptebuf, "%lx", pte);
len1 = MAX(strlen(ptebuf), strlen("PTE"));
fprintf(fp, "%s ", mkstring(buf, len1, CENTER | LJUST, "PTE"));
@@ -860,7 +880,7 @@ arm_translate_pte(ulong pte, void *physaddr, ulonglong pae_pte)
return page_present;
}
- sprintf(physbuf, "%lx", paddr);
+ sprintf(physbuf, "%llx", paddr);
len2 = MAX(strlen(physbuf), strlen("PHYSICAL"));
fprintf(fp, "%s ", mkstring(buf, len2, CENTER | LJUST, "PHYSICAL"));
@@ -1049,6 +1069,91 @@ arm_vtop(ulong vaddr, ulong *pgd, physaddr_t *paddr, int verbose)
}
/*
+ *Virtual to physical memory translation when "CONFIG_ARM_LPAE=y".
+ *This function will be called by both arm_kvtop() and arm_uvtop().
+ */
+static int
+arm_lpae_vtop(ulong vaddr, ulong *pgd, physaddr_t *paddr, int verbose)
+{
+ char buf[BUFSIZE];
+ physaddr_t page_dir;
+ physaddr_t page_middle;
+ physaddr_t page_table;
+ pgd_t pgd_pmd;
+ pmd_t pmd_pte;
+ pte_t pte;
+
+ if (verbose)
+ fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);
+
+ /*
+ * pgd_offset(pgd, vaddr)
+ */
+ page_dir = LPAE_VTOP((ulong)pgd + LPAE_PGD_OFFSET(vaddr) * 8);
+ FILL_PGD_LPAE(LPAE_VTOP(pgd), PHYSADDR, LPAE_PGDIR_SIZE());
+ pgd_pmd = ULONGLONG(machdep->pgd + LPAE_PGDIR_OFFSET(page_dir));
+
+ if (verbose)
+ fprintf(fp, " PGD: %8llx => %llx\n",
+ (ulonglong)page_dir, pgd_pmd);
+
+ if (!pgd_pmd)
+ return FALSE;
+
+ /*
+ * pmd_offset(pgd, vaddr)
+ */
+ page_middle = LPAE_PAGEBASE(pgd_pmd) + LPAE_PMD_OFFSET(vaddr) * 8;
+ FILL_PMD_LPAE(LPAE_PAGEBASE(pgd_pmd), PHYSADDR, LPAE_PMDIR_SIZE());
+ pmd_pte = ULONGLONG(machdep->pmd + LPAE_PMDIR_OFFSET(page_middle));
+
+ if (!pmd_pte)
+ return FALSE;
+
+ if ((pmd_pte & PMD_TYPE_MASK) == PMD_TYPE_SECT_LPAE) {
+ ulonglong sectionbase = LPAE_PAGEBASE(pmd_pte)
+ & LPAE_SECTION_PAGE_MASK;
+
+ if (verbose) {
+ fprintf(fp, " PAGE: %8llx (2MB)\n\n",
+ (ulonglong)sectionbase);
+ }
+
+ *paddr = sectionbase + (vaddr & ~LPAE_SECTION_PAGE_MASK);
+ return TRUE;
+ }
+ /*
+ * pte_offset_map(pmd, vaddr)
+ */
+ page_table = LPAE_PAGEBASE(pmd_pte) + PTE_OFFSET(vaddr) * 8;
+ FILL_PTBL_LPAE(LPAE_PAGEBASE(pmd_pte), PHYSADDR, LPAE_PTEDIR_SIZE());
+ pte = ULONGLONG(machdep->ptbl + LPAE_PTEDIR_OFFSET(page_table));
+
+ if (verbose) {
+ fprintf(fp, " PTE: %8llx => %llx\n\n",
+ (ulonglong)page_table, pte);
+ }
+
+ if (!pte_present(pte)) {
+ if (pte && verbose) {
+ fprintf(fp, "\n");
+ arm_translate_pte(0, 0, pte);
+ }
+ return FALSE;
+ }
+
+ *paddr = LPAE_PAGEBASE(pte) + PAGEOFFSET(vaddr);
+
+ if (verbose) {
+ fprintf(fp, " PAGE: %s\n\n",
+ mkstring(buf, VADDR_PRLEN, RJUST | LONG_HEX,
+ MKSTR(PAGEBASE(pte))));
+ arm_translate_pte(0, 0, pte);
+ }
+ return TRUE;
+}
+
+/*
* Translates a user virtual address to its physical address. cmd_vtop() sets
* the verbose flag so that the pte translation gets displayed; all other
* callers quietly accept the translation.
@@ -1098,6 +1203,9 @@ arm_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose)
FAULT_ON_ERROR);
}
+ if (machdep->flags & PAE)
+ return arm_lpae_vtop(uvaddr, pgd, paddr, verbose);
+
return arm_vtop(uvaddr, pgd, paddr, verbose);
}
@@ -1123,6 +1231,10 @@ arm_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
return TRUE;
}
+ if (machdep->flags & PAE)
+ return arm_lpae_vtop(kvaddr,
+ (ulong *)vt->kernel_pgd[0], paddr, verbose);
+
return arm_vtop(kvaddr, (ulong *)vt->kernel_pgd[0], paddr, verbose);
}
diff --git a/defs.h b/defs.h
index 0ae4e48..44df6ae 100644
--- a/defs.h
+++ b/defs.h
@@ -2647,6 +2647,80 @@ struct load_module {
#define _SECTION_SIZE_BITS 28
#define _MAX_PHYSMEM_BITS 32
+/*add for LPAE*/
+typedef unsigned long long u64;
+typedef signed int s32;
+typedef u64 pgd_t;
+typedef u64 pmd_t;
+typedef u64 pte_t;
+
+#define PMDSIZE() (PAGESIZE())
+#define LPAE_PGDIR_SHIFT (30)
+#define LPAE_PMDIR_SHIFT (21)
+
+#define LPAE_PGD_OFFSET(vaddr) ((vaddr) >> LPAE_PGDIR_SHIFT)
+#define LPAE_PMD_OFFSET(vaddr) (((vaddr) >> LPAE_PMDIR_SHIFT) & \
+ ((1<<(LPAE_PGDIR_SHIFT-LPAE_PMDIR_SHIFT))-1))
+
+#define _SECTION_SIZE_BITS_LPAE 28
+#define _MAX_PHYSMEM_BITS_LPAE 36
+
+/*
+ * #define PTRS_PER_PTE 512
+ * #define PTRS_PER_PMD 512
+ * #define PTRS_PER_PGD 4
+ *
+ */
+
+#define LPAE_PGDIR_SIZE() 32
+#define LPAE_PGDIR_OFFSET(X) (((ulong)(X)) & (LPAE_PGDIR_SIZE() - 1))
+
+#define LPAE_PMDIR_SIZE() 4096
+#define LPAE_PMDIR_OFFSET(X) (((ulong)(X)) & (LPAE_PMDIR_SIZE() - 1))
+
+#define LPAE_PTEDIR_SIZE() 4096
+#define LPAE_PTEDIR_OFFSET(X) (((ulong)(X)) & (LPAE_PTEDIR_SIZE() - 1))
+
+/*section size for LPAE is 2MiB*/
+#define LPAE_SECTION_PAGE_MASK (~((MEGABYTES(2))-1))
+
+#define _PHYSICAL_MASK_LPAE ((1ULL << _MAX_PHYSMEM_BITS_LPAE) - 1)
+#define PAGE_BASE_MASK ((u64)((s32)machdep->pagemask & _PHYSICAL_MASK_LPAE))
+#define LPAE_PAGEBASE(X) (((ulonglong)(X)) & PAGE_BASE_MASK)
+
+#define LPAE_VTOP(X) \
+ ((unsigned long long)(unsigned long)(X) - \
+ (machdep->kvbase) + (machdep->machspec->phys_base))
+
+#define IS_LAST_PGD_READ_LPAE(pgd) ((pgd) == \
+ machdep->machspec->last_pgd_read_lpae)
+#define IS_LAST_PMD_READ_LPAE(pmd) ((pmd) == \
+ machdep->machspec->last_pmd_read_lpae)
+#define IS_LAST_PTBL_READ_LPAE(ptbl) ((ptbl) == \
+ machdep->machspec->last_ptbl_read_lpae)
+
+#define FILL_PGD_LPAE(PGD, TYPE, SIZE) \
+ if (!IS_LAST_PGD_READ_LPAE(PGD)) { \
+ readmem((ulonglong)(PGD), TYPE, machdep->pgd, \
+ SIZE, "pmd page", FAULT_ON_ERROR); \
+ machdep->machspec->last_pgd_read_lpae \
+ = (ulonglong)(PGD); \
+ }
+#define FILL_PMD_LPAE(PMD, TYPE, SIZE) \
+ if (!IS_LAST_PMD_READ_LPAE(PMD)) { \
+ readmem((ulonglong)(PMD), TYPE, machdep->pmd, \
+ SIZE, "pmd page", FAULT_ON_ERROR); \
+ machdep->machspec->last_pmd_read_lpae \
+ = (ulonglong)(PMD); \
+ }
+
+#define FILL_PTBL_LPAE(PTBL, TYPE, SIZE) \
+ if (!IS_LAST_PTBL_READ_LPAE(PTBL)) { \
+ readmem((ulonglong)(PTBL), TYPE, machdep->ptbl, \
+ SIZE, "page table", FAULT_ON_ERROR); \
+ machdep->machspec->last_ptbl_read_lpae \
+ = (ulonglong)(PTBL); \
+ }
#endif /* ARM */
#ifndef EM_AARCH64
@@ -4979,6 +5053,9 @@ struct machine_specific {
ulong kernel_text_end;
ulong exception_text_start;
ulong exception_text_end;
+ ulonglong last_pgd_read_lpae;
+ ulonglong last_pmd_read_lpae;
+ ulonglong last_ptbl_read_lpae;
struct arm_pt_regs *crash_task_regs;
int unwind_index_prel31;
};
--
1.9.0
10 years, 5 months