[PATCH v2] Improve the performance of symbol searching for kernel modules
by Tao Liu
Currently the sequence in which crash searches for a symbol is: 1) the kernel
symname hash table, 2) iterating over all kernel symbols, 3) iterating over all
kernel modules and their symbols. In the worst case, when a non-existent symbol
is searched, all 3 stages are gone through. The time consumed by each stage
is roughly:
stage 1         stage 2         stage 3
0.007000(ms)    0.593000(ms)    2.421000(ms)
Stage 3 takes far too much time compared to stage 1, so let's introduce a
symname hash table for kernel modules to improve the performance of symbol
searching.
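For illustration, here is a minimal, self-contained sketch of the chained
string-hash scheme this patch extends to module symbols. The struct, table
size, hash function and example symbol name below are simplified stand-ins,
not crash's real syment, SYMNAME_HASH or SYMNAME_HASH_INDEX():

#include <stdio.h>
#include <string.h>

#define SYMNAME_HASH 512	/* assumed table size, for this sketch only */

struct syment {
	const char *name;
	struct syment *name_hash_next;
};

static struct syment *sketch_table[SYMNAME_HASH];

/* djb2-style string hash; a stand-in for SYMNAME_HASH_INDEX() */
static unsigned int hash_index(const char *name)
{
	unsigned int h = 5381;

	while (*name)
		h = h * 33 + (unsigned char)*name++;
	return h % SYMNAME_HASH;
}

/* insert at the head of the chain for the name's bucket */
static void hash_install(struct syment *table[], struct syment *spn)
{
	unsigned int idx = hash_index(spn->name);

	spn->name_hash_next = table[idx];
	table[idx] = spn;
}

/* O(1) average lookup instead of walking every module's symbol table */
static struct syment *hash_search(struct syment *table[], const char *name)
{
	struct syment *sp;

	for (sp = table[hash_index(name)]; sp; sp = sp->name_hash_next)
		if (strcmp(sp->name, name) == 0)
			return sp;
	return NULL;
}

int main(void)
{
	struct syment mod_sym = { "nfs_fs_type", NULL };

	hash_install(sketch_table, &mod_sym);
	printf("%s\n", hash_search(sketch_table, "nfs_fs_type") ?
	    "found" : "missing");
	return 0;
}

The real patch differs mainly in bookkeeping: it keeps a duplicate count
(sp->cnt), appends duplicates at the tail of the chain, and adds a remove
path so module symbols can be dropped when a module is unloaded or its
symbol table is replaced.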
Signed-off-by: Tao Liu <ltao(a)redhat.com>
---
v1 -> v2:
- Removed unused variables within the modified functions.
---
defs.h | 1 +
kernel.c | 1 +
symbols.c | 189 +++++++++++++++++++++++++++++++-----------------------
3 files changed, 111 insertions(+), 80 deletions(-)
diff --git a/defs.h b/defs.h
index eb1c71b..58b8546 100644
--- a/defs.h
+++ b/defs.h
@@ -2751,6 +2751,7 @@ struct symbol_table_data {
double val_hash_searches;
double val_hash_iterations;
struct syment *symname_hash[SYMNAME_HASH];
+ struct syment *mod_symname_hash[SYMNAME_HASH];
struct symbol_namespace kernel_namespace;
struct syment *ext_module_symtable;
struct syment *ext_module_symend;
diff --git a/kernel.c b/kernel.c
index 36fdea2..c56cc34 100644
--- a/kernel.c
+++ b/kernel.c
@@ -4663,6 +4663,7 @@ reinit_modules(void)
kt->mods_installed = 0;
clear_text_value_cache();
+ memset(st->mod_symname_hash, 0, sizeof(st->mod_symname_hash));
module_init();
}
diff --git a/symbols.c b/symbols.c
index bf6d94d..9b64734 100644
--- a/symbols.c
+++ b/symbols.c
@@ -64,8 +64,9 @@ static int namespace_ctl(int, struct symbol_namespace *, void *, void *);
static void symval_hash_init(void);
static struct syment *symval_hash_search(ulong);
static void symname_hash_init(void);
-static void symname_hash_install(struct syment *);
-static struct syment *symname_hash_search(char *);
+static void symname_hash_install(struct syment *[], struct syment *);
+static void symname_hash_remove(struct syment *[], struct syment *);
+static struct syment *symname_hash_search(struct syment *[], char *, int (*)(struct syment *, char *));
static void gnu_qsort(bfd *, void *, long, unsigned int, asymbol *, asymbol *);
static int check_gnu_debuglink(bfd *);
static int separate_debug_file_exists(const char *, unsigned long, int *);
@@ -1119,7 +1120,7 @@ symname_hash_init(void)
struct syment *sp;
for (sp = st->symtable; sp < st->symend; sp++)
- symname_hash_install(sp);
+ symname_hash_install(st->symname_hash, sp);
if ((sp = symbol_search("__per_cpu_start")))
st->__per_cpu_start = sp->value;
@@ -1127,21 +1128,48 @@ symname_hash_init(void)
st->__per_cpu_end = sp->value;
}
+static void
+mod_symtable_hash_install_range(struct syment *from, struct syment *to)
+{
+ struct syment *sp;
+
+ for (sp = from; sp <= to; sp++) {
+ if (sp != NULL) {
+ symname_hash_install(st->mod_symname_hash, sp);
+ }
+ }
+}
+
+static void
+mod_symtable_hash_remove_range(struct syment *from, struct syment *to)
+{
+ struct syment *sp;
+
+ for (sp = from; sp <= to; sp++) {
+ if (sp != NULL) {
+ symname_hash_remove(st->mod_symname_hash, sp);
+ }
+ }
+}
+
/*
* Install a single static kernel symbol into the symname_hash.
*/
static void
-symname_hash_install(struct syment *spn)
+symname_hash_install(struct syment *table[], struct syment *spn)
{
struct syment *sp;
int index;
index = SYMNAME_HASH_INDEX(spn->name);
+ index = index > 0 ? index : -index;
+
spn->cnt = 1;
- if ((sp = st->symname_hash[index]) == NULL)
- st->symname_hash[index] = spn;
- else {
+ if ((sp = table[index]) == NULL) {
+ table[index] = spn;
+ spn->name_hash_next = NULL;
+ } else {
while (sp) {
if (STREQ(sp->name, spn->name)) {
sp->cnt++;
@@ -1151,23 +1179,67 @@ symname_hash_install(struct syment *spn)
sp = sp->name_hash_next;
else {
sp->name_hash_next = spn;
+ spn->name_hash_next = NULL;
break;
}
}
}
}
+static void
+symname_hash_remove(struct syment *table[], struct syment *spn)
+{
+ struct syment *sp, **tmp;
+ int index, first_encounter = 1;
+
+ index = SYMNAME_HASH_INDEX(spn->name);
+ index = index > 0 ? index : -index;
+
+ if ((sp = table[index]) == NULL)
+ return;
+
+ for (tmp = &table[index], sp = table[index]; sp; ) {
+ if (STREQ(sp->name, spn->name)) {
+ if (sp != spn) {
+ sp->cnt--;
+ spn->cnt--;
+ } else if (!first_encounter) {
+ sp->cnt--;
+ } else {
+ *tmp = sp->name_hash_next;
+ first_encounter = 0;
+
+ tmp = &(*tmp)->name_hash_next;
+ sp = sp->name_hash_next;
+ spn->name_hash_next = NULL;
+ continue;
+ }
+ }
+ tmp = &sp->name_hash_next;
+ sp = sp->name_hash_next;
+ }
+}
+
/*
* Static kernel symbol value search
*/
static struct syment *
-symname_hash_search(char *name)
+symname_hash_search(struct syment *table[], char *name,
+ int (*skip_condition)(struct syment *, char *))
{
struct syment *sp;
+ int index;
- sp = st->symname_hash[SYMNAME_HASH_INDEX(name)];
+ index = SYMNAME_HASH_INDEX(name);
+ index = index > 0 ? index : -index;
+ sp = table[index];
while (sp) {
+ if (skip_condition && skip_condition(sp, name)) {
+ sp = sp->name_hash_next;
+ continue;
+ }
+
if (STREQ(sp->name, name))
return sp;
sp = sp->name_hash_next;
@@ -1595,6 +1667,7 @@ store_module_symbols_v1(ulong total, int mods_installed)
lm->mod_symend = sp;
}
}
+ mod_symtable_hash_install_range(lm->mod_symtable, lm->mod_symend);
}
st->flags |= MODULE_SYMS;
@@ -2075,6 +2148,8 @@ store_module_symbols_v2(ulong total, int mods_installed)
lm->mod_init_symend = sp;
}
}
+ mod_symtable_hash_install_range(lm->mod_symtable, lm->mod_symend);
+ mod_symtable_hash_install_range(lm->mod_init_symtable, lm->mod_init_symend);
}
st->flags |= MODULE_SYMS;
@@ -4482,6 +4557,16 @@ symbol_query(char *s, char *print_pad, struct syment **spp)
return(cnt);
}
+static int
+skip_symbols(struct syment *sp, char *s)
+{
+ int pseudos, skip = 0;
+ pseudos = (strstr(s, "_MODULE_START_") || strstr(s, "_MODULE_END_") ||
+ strstr(s, "_MODULE_INIT_START_") || strstr(s, "_MODULE_INIT_END_"));
+ if (!pseudos && MODULE_PSEUDO_SYMBOL(sp))
+ skip = 1;
+ return skip;
+}
/*
* Return the syment of a symbol.
@@ -4489,58 +4574,16 @@ symbol_query(char *s, char *print_pad, struct syment **spp)
struct syment *
symbol_search(char *s)
{
- int i;
- struct syment *sp_hashed, *sp, *sp_end;
- struct load_module *lm;
- int pseudos, search_init;
+ struct syment *sp_hashed, *sp;
- sp_hashed = symname_hash_search(s);
+ sp_hashed = symname_hash_search(st->symname_hash, s, NULL);
for (sp = sp_hashed ? sp_hashed : st->symtable; sp < st->symend; sp++) {
if (STREQ(s, sp->name))
return(sp);
}
- pseudos = (strstr(s, "_MODULE_START_") || strstr(s, "_MODULE_END_"));
- search_init = FALSE;
-
- for (i = 0; i < st->mods_installed; i++) {
- lm = &st->load_modules[i];
- if (lm->mod_flags & MOD_INIT)
- search_init = TRUE;
- sp = lm->mod_symtable;
- sp_end = lm->mod_symend;
-
- for ( ; sp <= sp_end; sp++) {
- if (!pseudos && MODULE_PSEUDO_SYMBOL(sp))
- continue;
- if (STREQ(s, sp->name))
- return(sp);
- }
- }
-
- if (!search_init)
- return((struct syment *)NULL);
-
- pseudos = (strstr(s, "_MODULE_INIT_START_") || strstr(s, "_MODULE_INIT_END_"));
-
- for (i = 0; i < st->mods_installed; i++) {
- lm = &st->load_modules[i];
- if (!lm->mod_init_symtable)
- continue;
- sp = lm->mod_init_symtable;
- sp_end = lm->mod_init_symend;
-
- for ( ; sp < sp_end; sp++) {
- if (!pseudos && MODULE_PSEUDO_SYMBOL(sp))
- continue;
-
- if (STREQ(s, sp->name))
- return(sp);
- }
- }
-
- return((struct syment *)NULL);
+ return symname_hash_search(st->mod_symname_hash, s, skip_symbols);
}
/*
@@ -5432,33 +5475,13 @@ value_symbol(ulong value)
int
symbol_exists(char *symbol)
{
- int i;
- struct syment *sp, *sp_end;
- struct load_module *lm;
+ struct syment *sp;
- if ((sp = symname_hash_search(symbol)))
+ if ((sp = symname_hash_search(st->symname_hash, symbol, NULL)))
return TRUE;
- for (i = 0; i < st->mods_installed; i++) {
- lm = &st->load_modules[i];
- sp = lm->mod_symtable;
- sp_end = lm->mod_symend;
-
- for ( ; sp < sp_end; sp++) {
- if (STREQ(symbol, sp->name))
- return(TRUE);
- }
-
- if (lm->mod_init_symtable) {
- sp = lm->mod_init_symtable;
- sp_end = lm->mod_init_symend;
-
- for ( ; sp < sp_end; sp++) {
- if (STREQ(symbol, sp->name))
- return(TRUE);
- }
- }
- }
+ if ((sp = symname_hash_search(st->mod_symname_hash, symbol, NULL)))
+ return TRUE;
return(FALSE);
}
@@ -5515,7 +5538,7 @@ kernel_symbol_exists(char *symbol)
{
struct syment *sp;
- if ((sp = symname_hash_search(symbol)))
+ if ((sp = symname_hash_search(st->symname_hash, symbol, NULL)))
return TRUE;
else
return FALSE;
@@ -5527,7 +5550,7 @@ kernel_symbol_exists(char *symbol)
struct syment *
kernel_symbol_search(char *symbol)
{
- return symname_hash_search(symbol);
+ return symname_hash_search(st->symname_hash, symbol, NULL);
}
/*
@@ -12464,8 +12487,10 @@ store_load_module_symbols(bfd *bfd, int dynamic, void *minisyms,
error(INFO, "%s: last symbol: %s is not _MODULE_END_%s?\n",
lm->mod_name, lm->mod_load_symend->name, lm->mod_name);
+ mod_symtable_hash_remove_range(lm->mod_symtable, lm->mod_symend);
lm->mod_symtable = lm->mod_load_symtable;
lm->mod_symend = lm->mod_load_symend;
+ mod_symtable_hash_install_range(lm->mod_symtable, lm->mod_symend);
lm->mod_flags &= ~MOD_EXT_SYMS;
lm->mod_flags |= MOD_LOAD_SYMS;
@@ -12495,6 +12520,7 @@ delete_load_module(ulong base_addr)
req->name = lm->mod_namelist;
gdb_interface(req);
}
+ mod_symtable_hash_remove_range(lm->mod_symtable, lm->mod_symend);
if (lm->mod_load_symtable) {
free(lm->mod_load_symtable);
namespace_ctl(NAMESPACE_FREE,
@@ -12504,6 +12530,7 @@ delete_load_module(ulong base_addr)
unlink_module(lm);
lm->mod_symtable = lm->mod_ext_symtable;
lm->mod_symend = lm->mod_ext_symend;
+ mod_symtable_hash_install_range(lm->mod_symtable, lm->mod_symend);
lm->mod_flags &= ~(MOD_LOAD_SYMS|MOD_REMOTE|MOD_NOPATCH);
lm->mod_flags |= MOD_EXT_SYMS;
lm->mod_load_symtable = NULL;
@@ -12532,6 +12559,7 @@ delete_load_module(ulong base_addr)
req->name = lm->mod_namelist;
gdb_interface(req);
}
+ mod_symtable_hash_remove_range(lm->mod_symtable, lm->mod_symend);
if (lm->mod_load_symtable) {
free(lm->mod_load_symtable);
namespace_ctl(NAMESPACE_FREE,
@@ -12541,6 +12569,7 @@ delete_load_module(ulong base_addr)
unlink_module(lm);
lm->mod_symtable = lm->mod_ext_symtable;
lm->mod_symend = lm->mod_ext_symend;
+ mod_symtable_hash_install_range(lm->mod_symtable, lm->mod_symend);
lm->mod_flags &= ~(MOD_LOAD_SYMS|MOD_REMOTE|MOD_NOPATCH);
lm->mod_flags |= MOD_EXT_SYMS;
lm->mod_load_symtable = NULL;
--
2.29.2
[PATCH 1/1] fix left bit-shift overflow in __exclude_unnecessary_pages()
by Alexander Egorenkov
Whenever the variable compound_order or private becomes greater than
31, the left bit-shift of 1 overflows and nr_pages becomes zero. If nr_pages
is 0 and pages are being excluded at the end of the PFN loop, the else
branch of the last if statement is entered and pfn is decremented by 1
because nr_pages is 0. Finally, this causes the loop variable pfn to
be assigned the same value as before when the next loop iteration begins,
which results in an infinite loop.
This issue appeared on the s390 64-bit architecture with a dump of 16 GB of RAM.
This is a simple program to demonstrate the primary issue:
#include <stdio.h>

int main(void)
{
	unsigned long long n;
	unsigned long m;

	m = 32;
	n = 1 << m;	/* 1 is a 32-bit int: shifting by 32 overflows */
	fprintf(stderr, "%llx\n", n);
	n = 1UL << m;	/* 1UL is 64 bits wide on LP64: well-defined */
	fprintf(stderr, "%llx\n", n);
	return 0;
}
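On a typical LP64 system the first shift is undefined behavior, so the printed
value varies by compiler and architecture (here it came out as 0, which is
what zeroed nr_pages), while the second shift is well-defined and prints
100000000 as expected.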
Signed-off-by: Alexander Egorenkov <egorenar(a)linux.ibm.com>
---
makedumpfile.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/makedumpfile.c b/makedumpfile.c
index c063267f15bb..863840b13608 100644
--- a/makedumpfile.c
+++ b/makedumpfile.c
@@ -6210,7 +6210,7 @@ __exclude_unnecessary_pages(unsigned long mem_map,
if (OFFSET(page.private) != NOT_FOUND_STRUCTURE)
private = ULONG(pcache + OFFSET(page.private));
- nr_pages = 1 << compound_order;
+ nr_pages = 1UL << compound_order;
pfn_counter = NULL;
/*
@@ -6227,7 +6227,7 @@ __exclude_unnecessary_pages(unsigned long mem_map,
else if ((info->dump_level & DL_EXCLUDE_FREE)
&& info->page_is_buddy
&& info->page_is_buddy(flags, _mapcount, private, _count)) {
- nr_pages = 1 << private;
+ nr_pages = 1UL << private;
pfn_counter = &pfn_free;
}
/*
--
2.31.1
Re: [Crash-utility] arm64: Get CPU registers without crash_notes symbol
by lijiang
Hi, Kazu and James
Thank you for the reminder.
> > Dear Kazu,
> >
> > Sorry for late reply
> > Thanks for your suggestion and I have prepared a V2 patch, please
> > help
> > to review.
>
> OK, I've modified your v2 patch to fix the following compilation
> warning, rewrote the commit log, and attached it.
>
> arm64.c: In function ‘arm64_init’:
> arm64.c:3728:35: warning: ‘note’ may be used uninitialized in this
> function [-Wmaybe-uninitialized]
> offset = roundup(offset + note->n_namesz, 4);
> ^~
> Kazu, thanks for your kind help.
> Lianbo, could you review the attached patch?
>
Yes, sure. I have put it in my review queue.
Thanks.
Lianbo
> Thanks,
> Kazu
arm64: Get CPU registers without crash_notes symbol
by James Hsu (徐慶薰)
Dear Crash maintainers,
We want to improve the crash tool for the case that needs offline CPU register info even when the kdump has no crash_notes.
For one thing, we will prepare a kdump with all CPU register info in the ELF notes.
For another, we will modify the crash tool so that it can obtain offline CPU register info even from a kdump without crash_notes.
We have prepared a patch for the ARCH=arm64 case.
It is verified with our kdump without crash_notes and can get all CPU registers.
Please take your time to review our patch; we look forward to receiving your comments.
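For orientation, a minimal sketch of the ELF note parsing the patch relies
on, assuming only <elf.h>'s Elf64_Nhdr; the pr_reg_offset parameter below is
a placeholder for crash's real OFFSET(elf_prstatus_pr_reg) lookup:

#include <elf.h>
#include <string.h>

/*
 * Sketch only: find the payload (descriptor) of an in-memory ELF note.
 * The note name is padded to a 4-byte boundary, which is what the
 * roundup() in the patch below accounts for.
 */
static const char *note_desc(const Elf64_Nhdr *note)
{
	size_t offset = sizeof(Elf64_Nhdr);

	offset = (offset + note->n_namesz + 3) & ~(size_t)3;
	return (const char *)note + offset;
}

/*
 * Copy the register block out of an NT_PRSTATUS note. In a real
 * implementation, pr_reg_offset comes from the elf_prstatus layout.
 */
static void copy_pt_regs(const Elf64_Nhdr *note, void *regs,
			 size_t pr_reg_offset, size_t regs_size)
{
	const char *prstatus = note_desc(note);	/* start of elf_prstatus */

	memcpy(regs, prstatus + pr_reg_offset, regs_size);
}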
The patch detail follows.
--- crash-7.3.0/arm64.c 2021-04-27 16:01:07.000000000 +0800
+++ crash-7.3.0.mod/arm64.c 2021-06-15 17:13:54.037273227 +0800
@@ -3667,8 +3667,41 @@ arm64_get_crash_notes(void)
ulong *notes_ptrs;
ulong i, found;
- if (!symbol_exists("crash_notes"))
+ if (!symbol_exists("crash_notes")) {
+ if (DISKDUMP_DUMPFILE() || KDUMP_DUMPFILE()) {
+ if (!(ms->panic_task_regs = calloc((size_t)kt->cpus, sizeof(struct arm64_pt_regs))))
+ error(FATAL, "cannot calloc panic_task_regs space\n");
+
+ for (i = found = 0; i < kt->cpus; i++) {
+ if (DISKDUMP_DUMPFILE())
+ note = diskdump_get_prstatus_percpu(i);
+ else if (KDUMP_DUMPFILE())
+ note = netdump_get_prstatus_percpu(i);
+ else {
+ error(WARNING, "cpu %d: cannot find NT_PRSTATUS note\n", i);
+ continue;
+ }
+
+ /*
+ * Find correct location of note data. This contains elf_prstatus
+ * structure which has registers etc. for the crashed task.
+ */
+ offset = sizeof(Elf64_Nhdr);
+ offset = roundup(offset + note->n_namesz, 4);
+ p = (char *)note + offset; /* start of elf_prstatus */
+
+ BCOPY(p + OFFSET(elf_prstatus_pr_reg), &ms->panic_task_regs[i],
+ sizeof(struct arm64_pt_regs));
+
+ found++;
+ }
+ }
+ if (!found) {
+ free(ms->panic_task_regs);
+ ms->panic_task_regs = NULL;
+ }
return;
+ }
crash_notes = symbol_value("crash_notes");
Thanks,
James Hsu
[PATCH] Improve the performance of symbol searching for kernel modules
by Tao Liu
Currently the sequence in which crash searches for a symbol is: 1) the kernel
symname hash table, 2) iterating over all kernel symbols, 3) iterating over all
kernel modules and their symbols. In the worst case, when a non-existent symbol
is searched, all 3 stages are gone through. The time consumed by each stage
is roughly:
stage 1         stage 2         stage 3
0.007000(ms)    0.593000(ms)    2.421000(ms)
Stage 3 takes far too much time compared to stage 1, so let's introduce a
symname hash table for kernel modules to improve the performance of symbol
searching.
Signed-off-by: Tao Liu <ltao(a)redhat.com>
---
defs.h | 1 +
kernel.c | 1 +
symbols.c | 180 ++++++++++++++++++++++++++++++++----------------------
3 files changed, 109 insertions(+), 73 deletions(-)
diff --git a/defs.h b/defs.h
index eb1c71b..58b8546 100644
--- a/defs.h
+++ b/defs.h
@@ -2751,6 +2751,7 @@ struct symbol_table_data {
double val_hash_searches;
double val_hash_iterations;
struct syment *symname_hash[SYMNAME_HASH];
+ struct syment *mod_symname_hash[SYMNAME_HASH];
struct symbol_namespace kernel_namespace;
struct syment *ext_module_symtable;
struct syment *ext_module_symend;
diff --git a/kernel.c b/kernel.c
index 36fdea2..c56cc34 100644
--- a/kernel.c
+++ b/kernel.c
@@ -4663,6 +4663,7 @@ reinit_modules(void)
kt->mods_installed = 0;
clear_text_value_cache();
+ memset(st->mod_symname_hash, 0, sizeof(st->mod_symname_hash));
module_init();
}
diff --git a/symbols.c b/symbols.c
index bf6d94d..11b61f0 100644
--- a/symbols.c
+++ b/symbols.c
@@ -64,8 +64,9 @@ static int namespace_ctl(int, struct symbol_namespace *, void *, void *);
static void symval_hash_init(void);
static struct syment *symval_hash_search(ulong);
static void symname_hash_init(void);
-static void symname_hash_install(struct syment *);
-static struct syment *symname_hash_search(char *);
+static void symname_hash_install(struct syment *[], struct syment *);
+static void symname_hash_remove(struct syment *[], struct syment *);
+static struct syment *symname_hash_search(struct syment *[], char *, int (*)(struct syment *, char *));
static void gnu_qsort(bfd *, void *, long, unsigned int, asymbol *, asymbol *);
static int check_gnu_debuglink(bfd *);
static int separate_debug_file_exists(const char *, unsigned long, int *);
@@ -1119,7 +1120,7 @@ symname_hash_init(void)
struct syment *sp;
for (sp = st->symtable; sp < st->symend; sp++)
- symname_hash_install(sp);
+ symname_hash_install(st->symname_hash, sp);
if ((sp = symbol_search("__per_cpu_start")))
st->__per_cpu_start = sp->value;
@@ -1127,21 +1128,48 @@ symname_hash_init(void)
st->__per_cpu_end = sp->value;
}
+static void
+mod_symtable_hash_install_range(struct syment *from, struct syment *to)
+{
+ struct syment *sp;
+
+ for (sp = from; sp <= to; sp++) {
+ if (sp != NULL) {
+ symname_hash_install(st->mod_symname_hash, sp);
+ }
+ }
+}
+
+static void
+mod_symtable_hash_remove_range(struct syment *from, struct syment *to)
+{
+ struct syment *sp;
+
+ for (sp = from; sp <= to; sp++) {
+ if (sp != NULL) {
+ symname_hash_remove(st->mod_symname_hash, sp);
+ }
+ }
+}
+
/*
* Install a single static kernel symbol into the symname_hash.
*/
static void
-symname_hash_install(struct syment *spn)
+symname_hash_install(struct syment *table[], struct syment *spn)
{
struct syment *sp;
int index;
index = SYMNAME_HASH_INDEX(spn->name);
+ index = index > 0 ? index : -index;
+
spn->cnt = 1;
- if ((sp = st->symname_hash[index]) == NULL)
- st->symname_hash[index] = spn;
- else {
+ if ((sp = table[index]) == NULL) {
+ table[index] = spn;
+ spn->name_hash_next = NULL;
+ } else {
while (sp) {
if (STREQ(sp->name, spn->name)) {
sp->cnt++;
@@ -1151,23 +1179,67 @@ symname_hash_install(struct syment *spn)
sp = sp->name_hash_next;
else {
sp->name_hash_next = spn;
+ spn->name_hash_next = NULL;
break;
}
}
}
}
+static void
+symname_hash_remove(struct syment *table[], struct syment *spn)
+{
+ struct syment *sp, **tmp;
+ int index, first_encounter = 1;
+
+ index = SYMNAME_HASH_INDEX(spn->name);
+ index = index > 0 ? index : -index;
+
+ if ((sp = table[index]) == NULL)
+ return;
+
+ for (tmp = &table[index], sp = table[index]; sp; ) {
+ if (STREQ(sp->name, spn->name)) {
+ if (sp != spn) {
+ sp->cnt--;
+ spn->cnt--;
+ } else if (!first_encounter) {
+ sp->cnt--;
+ } else {
+ *tmp = sp->name_hash_next;
+ first_encounter = 0;
+
+ tmp = &(*tmp)->name_hash_next;
+ sp = sp->name_hash_next;
+ spn->name_hash_next = NULL;
+ continue;
+ }
+ }
+ tmp = &sp->name_hash_next;
+ sp = sp->name_hash_next;
+ }
+}
+
/*
* Static kernel symbol value search
*/
static struct syment *
-symname_hash_search(char *name)
+symname_hash_search(struct syment *table[], char *name,
+ int (*skip_condition)(struct syment *, char *))
{
struct syment *sp;
+ int index;
- sp = st->symname_hash[SYMNAME_HASH_INDEX(name)];
+ index = SYMNAME_HASH_INDEX(name);
+ index = index > 0 ? index : -index;
+ sp = table[index];
while (sp) {
+ if (skip_condition && skip_condition(sp, name)) {
+ sp = sp->name_hash_next;
+ continue;
+ }
+
if (STREQ(sp->name, name))
return sp;
sp = sp->name_hash_next;
@@ -1595,6 +1667,7 @@ store_module_symbols_v1(ulong total, int mods_installed)
lm->mod_symend = sp;
}
}
+ mod_symtable_hash_install_range(lm->mod_symtable, lm->mod_symend);
}
st->flags |= MODULE_SYMS;
@@ -2075,6 +2148,8 @@ store_module_symbols_v2(ulong total, int mods_installed)
lm->mod_init_symend = sp;
}
}
+ mod_symtable_hash_install_range(lm->mod_symtable, lm->mod_symend);
+ mod_symtable_hash_install_range(lm->mod_init_symtable, lm->mod_init_symend);
}
st->flags |= MODULE_SYMS;
@@ -4482,6 +4557,16 @@ symbol_query(char *s, char *print_pad, struct syment **spp)
return(cnt);
}
+static int
+skip_symbols(struct syment *sp, char *s)
+{
+ int pseudos, skip = 0;
+ pseudos = (strstr(s, "_MODULE_START_") || strstr(s, "_MODULE_END_") ||
+ strstr(s, "_MODULE_INIT_START_") || strstr(s, "_MODULE_INIT_END_"));
+ if (!pseudos && MODULE_PSEUDO_SYMBOL(sp))
+ skip = 1;
+ return skip;
+}
/*
* Return the syment of a symbol.
@@ -4494,53 +4579,14 @@ symbol_search(char *s)
struct load_module *lm;
int pseudos, search_init;
- sp_hashed = symname_hash_search(s);
+ sp_hashed = symname_hash_search(st->symname_hash, s, NULL);
for (sp = sp_hashed ? sp_hashed : st->symtable; sp < st->symend; sp++) {
if (STREQ(s, sp->name))
return(sp);
}
- pseudos = (strstr(s, "_MODULE_START_") || strstr(s, "_MODULE_END_"));
- search_init = FALSE;
-
- for (i = 0; i < st->mods_installed; i++) {
- lm = &st->load_modules[i];
- if (lm->mod_flags & MOD_INIT)
- search_init = TRUE;
- sp = lm->mod_symtable;
- sp_end = lm->mod_symend;
-
- for ( ; sp <= sp_end; sp++) {
- if (!pseudos && MODULE_PSEUDO_SYMBOL(sp))
- continue;
- if (STREQ(s, sp->name))
- return(sp);
- }
- }
-
- if (!search_init)
- return((struct syment *)NULL);
-
- pseudos = (strstr(s, "_MODULE_INIT_START_") || strstr(s, "_MODULE_INIT_END_"));
-
- for (i = 0; i < st->mods_installed; i++) {
- lm = &st->load_modules[i];
- if (!lm->mod_init_symtable)
- continue;
- sp = lm->mod_init_symtable;
- sp_end = lm->mod_init_symend;
-
- for ( ; sp < sp_end; sp++) {
- if (!pseudos && MODULE_PSEUDO_SYMBOL(sp))
- continue;
-
- if (STREQ(s, sp->name))
- return(sp);
- }
- }
-
- return((struct syment *)NULL);
+ return symname_hash_search(st->mod_symname_hash, s, skip_symbols);
}
/*
@@ -5436,29 +5482,11 @@ symbol_exists(char *symbol)
struct syment *sp, *sp_end;
struct load_module *lm;
- if ((sp = symname_hash_search(symbol)))
+ if ((sp = symname_hash_search(st->symname_hash, symbol, NULL)))
return TRUE;
- for (i = 0; i < st->mods_installed; i++) {
- lm = &st->load_modules[i];
- sp = lm->mod_symtable;
- sp_end = lm->mod_symend;
-
- for ( ; sp < sp_end; sp++) {
- if (STREQ(symbol, sp->name))
- return(TRUE);
- }
-
- if (lm->mod_init_symtable) {
- sp = lm->mod_init_symtable;
- sp_end = lm->mod_init_symend;
-
- for ( ; sp < sp_end; sp++) {
- if (STREQ(symbol, sp->name))
- return(TRUE);
- }
- }
- }
+ if ((sp = symname_hash_search(st->mod_symname_hash, symbol, NULL)))
+ return TRUE;
return(FALSE);
}
@@ -5515,7 +5543,7 @@ kernel_symbol_exists(char *symbol)
{
struct syment *sp;
- if ((sp = symname_hash_search(symbol)))
+ if ((sp = symname_hash_search(st->symname_hash, symbol, NULL)))
return TRUE;
else
return FALSE;
@@ -5527,7 +5555,7 @@ kernel_symbol_exists(char *symbol)
struct syment *
kernel_symbol_search(char *symbol)
{
- return symname_hash_search(symbol);
+ return symname_hash_search(st->symname_hash, symbol, NULL);
}
/*
@@ -12464,8 +12492,10 @@ store_load_module_symbols(bfd *bfd, int dynamic, void *minisyms,
error(INFO, "%s: last symbol: %s is not _MODULE_END_%s?\n",
lm->mod_name, lm->mod_load_symend->name, lm->mod_name);
+ mod_symtable_hash_remove_range(lm->mod_symtable, lm->mod_symend);
lm->mod_symtable = lm->mod_load_symtable;
lm->mod_symend = lm->mod_load_symend;
+ mod_symtable_hash_install_range(lm->mod_symtable, lm->mod_symend);
lm->mod_flags &= ~MOD_EXT_SYMS;
lm->mod_flags |= MOD_LOAD_SYMS;
@@ -12495,6 +12525,7 @@ delete_load_module(ulong base_addr)
req->name = lm->mod_namelist;
gdb_interface(req);
}
+ mod_symtable_hash_remove_range(lm->mod_symtable, lm->mod_symend);
if (lm->mod_load_symtable) {
free(lm->mod_load_symtable);
namespace_ctl(NAMESPACE_FREE,
@@ -12504,6 +12535,7 @@ delete_load_module(ulong base_addr)
unlink_module(lm);
lm->mod_symtable = lm->mod_ext_symtable;
lm->mod_symend = lm->mod_ext_symend;
+ mod_symtable_hash_install_range(lm->mod_symtable, lm->mod_symend);
lm->mod_flags &= ~(MOD_LOAD_SYMS|MOD_REMOTE|MOD_NOPATCH);
lm->mod_flags |= MOD_EXT_SYMS;
lm->mod_load_symtable = NULL;
@@ -12532,6 +12564,7 @@ delete_load_module(ulong base_addr)
req->name = lm->mod_namelist;
gdb_interface(req);
}
+ mod_symtable_hash_remove_range(lm->mod_symtable, lm->mod_symend);
if (lm->mod_load_symtable) {
free(lm->mod_load_symtable);
namespace_ctl(NAMESPACE_FREE,
@@ -12541,6 +12574,7 @@ delete_load_module(ulong base_addr)
unlink_module(lm);
lm->mod_symtable = lm->mod_ext_symtable;
lm->mod_symend = lm->mod_ext_symend;
+ mod_symtable_hash_install_range(lm->mod_symtable, lm->mod_symend);
lm->mod_flags &= ~(MOD_LOAD_SYMS|MOD_REMOTE|MOD_NOPATCH);
lm->mod_flags |= MOD_EXT_SYMS;
lm->mod_load_symtable = NULL;
--
2.29.2
Re: [Crash-utility] How to use crash utility on a kernel.org kernel built from source
by Santosh
+ list
On Fri, Aug 13, 2021 at 9:09 AM MJ C <conrad.mark2(a)gmail.com> wrote:
>
> Santosh -
> Yes, thank you that worked. I downloaded crash 7.2.4 source, built, and ran.
> Now things work correctly: # crash vmlinux vmcore
>
> Mark C.
>
> On Thu, Aug 12, 2021 at 6:24 PM Santosh <ysan99(a)gmail.com> wrote:
>>
>> On Thu, Aug 12, 2021 at 4:34 PM Amazon Crock Pot Return info
>> <conrad.mark2(a)gmail.com> wrote:
>> >
>> > Re: Subject/Line - How to use crash utility on a kernel.org kernel built from source
>> >
>> >
>> >
>> > I want to use Kdump (easy part) and crash utility (harder part) on a kernel I downloaded, built, ran, and Kdump’ed from kernel.org.
>> >
>> >
>> >
>> > How is this done, please?
>> Build the crash from the latest sources and do "crash vmlinux vmcore".
>>
>> >
>> >
>> >
>> > MJC
>> >
>> >
>> >
>> > Sent from Mail for Windows
>> >
>> >
>> >
>> > --
>> > Crash-utility mailing list
>> > Crash-utility(a)redhat.com
>> > https://listman.redhat.com/mailman/listinfo/crash-utility
[PATCH] x86_64: Fix check for __per_cpu_offset initialisation
by Philipp Rudo
Since at least kernel v2.6.30, __per_cpu_offset gets initialized to
__per_cpu_load. So first check whether __per_cpu_offset was set to a
proper value before reading any per-cpu variable, to prevent potential
bugs.
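For context, a hedged sketch (reusing crash's own readmem(), kt and
symbol_value() exactly as they appear in this patch; it assumes crash's
internal headers and is not a standalone program) of how a per-cpu value is
fetched, and why an offset still holding the link-time default must be
skipped:

/*
 * Sketch only. If __per_cpu_offset[i] was never set up for cpu i, it
 * still equals symbol_value("__per_cpu_load"), and reading through it
 * would return the initial per-cpu template data, not cpu i's copy.
 */
static int read_cpu_number(int i, struct syment *cpu_sp)
{
	int cpunumber = -1;

	if (kt->__per_cpu_offset[i] == symbol_value("__per_cpu_load"))
		return -1;	/* offset not initialized for this cpu */

	if (!readmem(cpu_sp->value + kt->__per_cpu_offset[i], KVADDR,
	    &cpunumber, sizeof(int), "cpu number (per_cpu)",
	    QUIET|RETURN_ON_ERROR))
		return -1;

	return cpunumber;
}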
Signed-off-by: Philipp Rudo <prudo(a)redhat.com>
---
x86_64.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/x86_64.c b/x86_64.c
index 6eb7d67..0bb8705 100644
--- a/x86_64.c
+++ b/x86_64.c
@@ -1327,6 +1327,8 @@ x86_64_per_cpu_init(void)
ms->stkinfo.isize = 16384;
for (i = cpus = 0; i < NR_CPUS; i++) {
+ if (kt->__per_cpu_offset[i] == symbol_value("__per_cpu_load"))
+ break;
if (!readmem(cpu_sp->value + kt->__per_cpu_offset[i],
KVADDR, &cpunumber, sizeof(int),
"cpu number (per_cpu)", QUIET|RETURN_ON_ERROR))
@@ -5602,7 +5604,7 @@ x86_64_get_smp_cpus(void)
return 1;
for (i = cpus = 0; i < NR_CPUS; i++) {
- if (kt->__per_cpu_offset[i] == 0)
+ if (kt->__per_cpu_offset[i] == symbol_value("__per_cpu_load"))
break;
if (!readmem(sp->value + kt->__per_cpu_offset[i],
KVADDR, &cpunumber, sizeof(int),
--
2.31.1
Re: [Crash-utility] [PATCH] x86_64: Fix check for __per_cpu_offset initialisation
by lijiang
>
> Date: Wed, 11 Aug 2021 14:24:30 +0200
> From: Philipp Rudo <prudo(a)redhat.com>
> To: lijiang <lijiang(a)redhat.com>
> Cc: "Discussion list for crash utility usage, maintenance and
> development" <crash-utility(a)redhat.com>
> Subject: Re: [Crash-utility] [PATCH] x86_64: Fix check for
> __per_cpu_offset initialisation
> Message-ID: <20210811142430.5e3e1a86@rhtmp>
> Content-Type: text/plain; charset=US-ASCII
>
> Hi Lianbo,
>
> On Wed, 11 Aug 2021 17:05:26 +0800
> lijiang <lijiang(a)redhat.com> wrote:
>
> > >
> > > Date: Thu, 5 Aug 2021 15:19:37 +0200
> > > From: Philipp Rudo <prudo(a)redhat.com>
> > > To: crash-utility(a)redhat.com
> > > Subject: [Crash-utility] [PATCH] x86_64: Fix check for
> > > __per_cpu_offset initialisation
> > > Message-ID: <20210805131937.5051-1-prudo(a)redhat.com>
> > >
> > > Since at least kernel v2.6.30 the __per_cpu_offset gets initialized to
> > > __per_cpu_load. So first check if the __per_cpu_offset was set to a
> > > proper value before reading any per cpu variable to prevent potential
> > > bugs.
> > >
> > >
> > Hi, Philipp
> >
> > Thank you for the patch. Can you describe in more detail the potential
> > risks, and what conditions might trigger the potential bugs?
>
> the bug is always triggered during initialization of the per-cpu data
> on x86_64. At least for kernels not using struct x8664_pda, which
> AFAIK was also removed with kernel v2.6.30.
>
> The risk for crash is low. Right after the superfluous read there is a
> check if the read cpunumber matches the expected one.
>
> if (cpunumber != cpus)
> break;
>
> So the worst case scenario I see is that crash initializes one
> additional cpu with non-sense data. But given that the bug exists for
> ~12 years and nobody reported such an bug I assume that the check worked
> well so far.
>
>
Thank you for the explanation in detail, Philipp.
> > Did you mean that it's related to the crash live analysis
> issue(1978032)? I
> > tried to reproduce it, but so far I haven't reproduced it with the
> upstream
> > kernel.
>
> Yes, this bug is related to bz1978032. For whatever reason the
> superfluous read triggered the panic.
>
> I could reproduce the bug upstream with CONFIG_IO_URING _disabled_.
> Unfortunately there is a RHEL-only patch [1] that tampers with the
> Kconfig for IO_URING. So when you copy a kernel-ark config to the
> upstream repo and run 'make oldconfig' the IO_URING will silently be
> _enabled_.
>
>
You are right.
> BTW, I tried to reproduce the panic yesterday on kernel-5.14.0-0.rc4
> but failed. Not sure if the bug was fixed in the meantime or I was
> simply "lucky"...
>
>
This issue may have been fixed in kernel-5.14.0-0.rc4; however, this
patch is still meaningful and can prevent potential risks.
Acked-by: Lianbo Jiang <lijiang(a)redhat.com>
Re: [Crash-utility] [PATCH] x86_64: Fix check for __per_cpu_offset initialisation
by lijiang
>
> Date: Thu, 5 Aug 2021 15:19:37 +0200
> From: Philipp Rudo <prudo(a)redhat.com>
> To: crash-utility(a)redhat.com
> Subject: [Crash-utility] [PATCH] x86_64: Fix check for
> __per_cpu_offset initialisation
> Message-ID: <20210805131937.5051-1-prudo(a)redhat.com>
>
> Since at least kernel v2.6.30 the __per_cpu_offset gets initialized to
> __per_cpu_load. So first check if the __per_cpu_offset was set to a
> proper value before reading any per cpu variable to prevent potential
> bugs.
>
>
Hi, Philipp
Thank you for the patch. Can you describe in more detail the potential
risks, and what conditions might trigger the potential bugs?
Did you mean that it's related to the crash live analysis issue(1978032)? I
tried to reproduce it, but so far I haven't reproduced it with the upstream
kernel.
Thanks.
Lianbo
> Signed-off-by: Philipp Rudo <prudo(a)redhat.com>
> ---
> x86_64.c | 4 +++-
> 1 file changed, 3 insertions(+), 1 deletion(-)
>
> diff --git a/x86_64.c b/x86_64.c
> index 6eb7d67..0bb8705 100644
> --- a/x86_64.c
> +++ b/x86_64.c
> @@ -1327,6 +1327,8 @@ x86_64_per_cpu_init(void)
> ms->stkinfo.isize = 16384;
>
> for (i = cpus = 0; i < NR_CPUS; i++) {
> + if (kt->__per_cpu_offset[i] ==
> symbol_value("__per_cpu_load"))
> + break;
> if (!readmem(cpu_sp->value + kt->__per_cpu_offset[i],
> KVADDR, &cpunumber, sizeof(int),
> "cpu number (per_cpu)", QUIET|RETURN_ON_ERROR))
> @@ -5602,7 +5604,7 @@ x86_64_get_smp_cpus(void)
> return 1;
>
> for (i = cpus = 0; i < NR_CPUS; i++) {
> - if (kt->__per_cpu_offset[i] == 0)
> + if (kt->__per_cpu_offset[i] ==
> symbol_value("__per_cpu_load"))
> break;
> if (!readmem(sp->value + kt->__per_cpu_offset[i],
> KVADDR, &cpunumber, sizeof(int),
> --
> 2.31.1
>
>
3 years, 4 months