Yes it should. Good catch.
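With the mask corrected, the check becomes:

        /* All of the low 12 bits must be clear for a 0x1000-aligned
         * offset; masking with 0x1000 only tests bit 12. */
        if ((relocate & 0xFFF) == 0) {
                kt->relocate = relocate;
                kt->flags |= RELOC_SET;
        }

As written, any relocate value whose bit 12 happens to be zero would be
accepted, aligned or not.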
Shouldn't the mask in this block of code be "relocate & 0xFFF"?

+                        /*
+                         * To avoid mistaking a mismatched kernel version for
+                         * a kaslr offset, we make sure that the offset is
+                         * aligned to 0x1000, as it always will be for
+                         * kaslr.
+                         */
+                        if ((relocate & 0x1000) == 0) {
+                                kt->relocate = relocate;
+                                kt->flags |= RELOC_SET;
+                        }

On Tue, Feb 18, 2014 at 4:56 PM, Andy Honig <ahonig@google.com> wrote:
Automatically detect kernel aslr offset
This patch improves support for kernel aslr by automatically finding the
aslr offset based on the location of the _stext symbol in the vmcoreinfo.
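For example, with made-up addresses (a sketch of the arithmetic only, not
values from a real dump):

        #include <stdio.h>

        int main(void)
        {
                /* Hypothetical values: _stext as linked in vmlinux vs.
                 * the SYMBOL(_stext) recorded in the vmcoreinfo note. */
                unsigned long vmlinux_stext = 0xffffffff81000000UL;
                unsigned long vmcore_stext  = 0xffffffff8c000000UL;
                unsigned long relocate = vmlinux_stext - vmcore_stext;

                /* The running kernel was shifted up by 0xb000000, so
                 * relocate wraps to -0xb000000 as an unsigned long; its
                 * low 12 bits are clear, as they always will be for a
                 * genuine kaslr shift. */
                if ((relocate & 0xFFF) == 0)
                        printf("relocate = %#lx\n", relocate);
                return 0;
        }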
Signed-off-by: Andrew Honig <ahonig@google.com>
---
netdump.c | 19 ++++++++-----------
symbols.c | 49 ++++++++++++++++++++++++++++++++++++++++++++++---
2 files changed, 54 insertions(+), 14 deletions(-)
diff --git a/netdump.c b/netdump.c
index 8e7ec15..b327649 100644
--- a/netdump.c
+++ b/netdump.c
@@ -411,18 +411,15 @@ is_netdump(char *file, ulong source_query)
                         get_log_from_vmcoreinfo(file, vmcoreinfo_read_string);
         }

-        // This is the code where I should read the aslr offset.
+        /*
+         * We may need the _stext_SYMBOL from the vmcore_info to adjust for
+         * kaslr and we may not have gotten it elsewhere.
+         */
         if (source_query == KDUMP_LOCAL) {
-                long aslr_offset = 0;
-                char *aslr_string = vmcoreinfo_read_string("KERNELOFFSET");
-                if (aslr_string) {
-                        aslr_offset = strtoul(aslr_string, NULL, 16);
-                        free (aslr_string);
-                }
-                if (!(kt->flags & RELOC_SET) && aslr_offset > 0) {
-                        kt->flags |= RELOC_SET;
-                        kt->relocate=aslr_offset * -1;
-                }
+                char *tmpstring = vmcoreinfo_read_string("SYMBOL(_stext)");
+                kt->vmcoreinfo._stext_SYMBOL =
+                        htol(tmpstring, RETURN_ON_ERROR, NULL);
+                free(tmpstring);
         }

         return nd->header_size;
diff --git a/symbols.c b/symbols.c
index d5f8199..afe5ed0 100755
--- a/symbols.c
+++ b/symbols.c
@@ -553,6 +553,43 @@ strip_symbol_end(const char *name, char *buf)
}
 /*
+ * Derives the kernel aslr offset by comparing the _stext symbol from
+ * the vmcore_info in the dump file to the _stext symbol in the vmlinux file.
+ */
+static void
+derive_kaslr_offset(bfd *abfd, int dynamic, bfd_byte *start, bfd_byte *end,
+        unsigned int size, asymbol *store)
+{
+        symbol_info syminfo;
+        asymbol *sym;
+        char *name;
+        unsigned long relocate;
+        char buf[BUFSIZE];
+
+        for (; start < end; start += size) {
+                sym = bfd_minisymbol_to_symbol(abfd, dynamic, start, store);
+                if (sym == NULL)
+                        error(FATAL, "bfd_minisymbol_to_symbol() failed\n");
+
+                bfd_get_symbol_info(abfd, sym, &syminfo);
+                name = strip_symbol_end(syminfo.name, buf);
+                if (strcmp("_stext", name) == 0) {
+                        relocate = syminfo.value - kt->vmcoreinfo._stext_SYMBOL;
+                        /*
+                         * To avoid mistaking a mismatched kernel version for
+                         * a kaslr offset, we make sure that the offset is
+                         * aligned to 0x1000, as it always will be for
+                         * kaslr.
+                         */
+                        if ((relocate & 0x1000) == 0) {
+                                kt->relocate = relocate;
+                                kt->flags |= RELOC_SET;
+                        }
+                }
+        }
+}
+
+/*
 * Store the symbols gathered by symtab_init(). The symbols are stored
 * in increasing numerical order.
 */
@@ -588,15 +625,21 @@ store_symbols(bfd *abfd, int dynamic, void *minisyms, long symcount,
         st->symcnt = 0;
         sp = st->symtable;

+        first = 0;
+        from = (bfd_byte *) minisyms;
+        fromend = from + symcount * size;
+
         if (machine_type("X86") || machine_type("X86_64")) {
+                /* If the kernel aslr offset has not been set, try to guess it. */
+                if (kt->relocate == 0)
+                        derive_kaslr_offset(abfd, dynamic, from,
+                                fromend, size, store);
+
                 if (!(kt->flags & RELOC_SET))
                         kt->flags |= RELOC_FORCE;
         } else
                 kt->flags &= ~RELOC_SET;

-        first = 0;
-        from = (bfd_byte *) minisyms;
-        fromend = from + symcount * size;
         for (; from < fromend; from += size)
         {
                 if ((sym = bfd_minisymbol_to_symbol(abfd, dynamic, from, store))
--
1.9.0.rc1.175.g0b1dcb5
--
Kurtis Rader
Caretaker of the exceptional canines Junior and Hank
--
Crash-utility mailing list
Crash-utility@redhat.com
https://www.redhat.com/mailman/listinfo/crash-utility