The majority of the maple tree code is copied from the crash utility. Since
currently it is not needed by makedumpfile, the maple tree is integrated with
the eppic extension only.
The minor changes to the maple tree code are:
1) a cache buffer for maple tree data, because an eppic script cannot
currently allocate a buffer;
2) an interface for eppic scripts.
Signed-off-by: Tao Liu <ltao(a)redhat.com>
---
Makefile | 4 +-
eppic_maple.c | 431 ++++++++++++++++++++++++++++++++++++++++++++++++++
eppic_maple.h | 8 +
3 files changed, 441 insertions(+), 2 deletions(-)
create mode 100644 eppic_maple.c
create mode 100644 eppic_maple.h
diff --git a/Makefile b/Makefile
index fbc9f5b..216749f 100644
--- a/Makefile
+++ b/Makefile
@@ -121,8 +121,8 @@ makedumpfile: $(SRC_BASE) $(OBJ_PART) $(OBJ_ARCH)
-e "s/@VERSION@/$(VERSION)/" \
$(VPATH)makedumpfile.conf.5.in > $(VPATH)makedumpfile.conf.5
-eppic_makedumpfile.so: extension_eppic.c
- $(CC) $(CFLAGS) $(LDFLAGS) -shared -rdynamic -o $@ extension_eppic.c -fPIC -leppic
-ltinfo
+eppic_makedumpfile.so: extension_eppic.c eppic_maple.c
+ $(CC) $(CFLAGS) $(LDFLAGS) -shared -rdynamic -o $@ $^ -fPIC -leppic -ltinfo
clean:
rm -f $(OBJ) $(OBJ_PART) $(OBJ_ARCH) makedumpfile makedumpfile.8 makedumpfile.conf.5
diff --git a/eppic_maple.c b/eppic_maple.c
new file mode 100644
index 0000000..ee8d23d
--- /dev/null
+++ b/eppic_maple.c
@@ -0,0 +1,431 @@
+
+#include <stdbool.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <assert.h>
+#include "makedumpfile.h"
+#include "extension_eppic.h"
+#include "btf.h"
+#include "kallsyms.h"
+#include "erase_info.h"
+
+/* Node layouts understood by this walker; mirrors the kernel's enum. */
+enum maple_type {
+ maple_dense,
+ maple_leaf_64,
+ maple_range_64,
+ maple_arange_64,
+};
+
+/* Commands accepted by do_maple_tree()/maple_tree(). */
+#define MAPLE_TREE_COUNT (1)
+#define MAPLE_TREE_SEARCH (2)
+#define MAPLE_TREE_DUMP (3)
+#define MAPLE_TREE_GATHER (4)
+#define MAPLE_TREE_DUMP_CB (5)
+
+/* Bit layout of an encoded maple node pointer (maple_enode). */
+#define MAPLE_NODE_MASK 255UL
+#define MT_FLAGS_HEIGHT_OFFSET 0x02
+#define MT_FLAGS_HEIGHT_MASK 0x7C
+#define MAPLE_NODE_TYPE_MASK 0x0F
+#define MAPLE_NODE_TYPE_SHIFT 0x03
+/* Scratch size for one struct maple_tree / maple_node read from the dump. */
+#define MAPLE_BUFSIZE 512
+
+/* Per-type slot counts, copied from the kernel's mt_slots[] in maple_init(). */
+static unsigned char *mt_slots = NULL;
+
+/* Largest index coverable by a node of each maple_type. */
+static ulong mt_max[4] = {0};
+
+/* Sizes and member offsets resolved from DWARF/BTF in maple_init(). */
+static long g_maple_tree;
+static long g_maple_node;
+static long g_maple_tree_ma_root;
+static long g_maple_node_ma64;
+static long g_maple_node_mr64;
+static long g_maple_node_slot;
+static long g_maple_arange_64_pivot;
+static long g_maple_arange_64_slot;
+static long g_maple_range_64_pivot;
+static long g_maple_range_64_slot;
+
+/********************maple tree internal**************************/
+
+/* An XArray internal entry has its two low bits set to 0b10. */
+static inline bool xa_is_internal(ulong entry)
+{
+	return (entry & 3UL) == 2UL;
+}
+
+/* A node pointer is an internal entry above the max internal value (4096). */
+static inline bool xa_is_node(ulong entry)
+{
+	if (!xa_is_internal(entry))
+		return false;
+	return entry > 4096;
+}
+
+/* Strip the type/offset bits encoded in the low byte of a maple_enode. */
+static inline ulong mte_to_node(ulong maple_enode_entry)
+{
+	ulong node = maple_enode_entry;
+
+	node &= ~MAPLE_NODE_MASK;
+	return node;
+}
+
+/* Extract the node type from bits 3..6 of the encoded entry. */
+static inline enum maple_type mte_node_type(ulong maple_enode_entry)
+{
+	ulong shifted = maple_enode_entry >> MAPLE_NODE_TYPE_SHIFT;
+
+	return shifted & MAPLE_NODE_TYPE_MASK;
+}
+
+/* Read one slot pointer out of a node buffer and return it as an integer. */
+static inline ulong mt_slot(void **slots, unsigned char offset)
+{
+	void *slot = slots[offset];
+
+	return (ulong)slot;
+}
+
+/* Dense and leaf_64 nodes precede the internal range types in the enum. */
+static inline bool ma_is_leaf(const enum maple_type type)
+{
+	return !(type >= maple_range_64);
+}
+
+/* Accumulator shared by the traversal callbacks: entries seen + output array. */
+struct do_maple_tree_info {
+ ulong count;
+ void *data;
+};
+
+/* Per-entry callback and its opaque argument for one tree walk. */
+struct maple_tree_ops {
+ void (*entry)(ulong node, void *private);
+ void *private;
+};
+
+static void do_mt_range64(ulong, ulong, ulong,
+ struct maple_tree_ops *);
+static void do_mt_arange64(ulong, ulong, ulong,
+ struct maple_tree_ops *);
+static void do_mt_entry(ulong, struct maple_tree_ops *);
+static void do_mt_node(ulong, ulong, ulong,
+ struct maple_tree_ops *);
+
+/* Leaf test on the encoded entry rather than on a decoded type. */
+static inline bool mte_is_leaf(ulong maple_enode_entry)
+{
+	enum maple_type t = mte_node_type(maple_enode_entry);
+
+	return ma_is_leaf(t);
+}
+
+/*
+ * Walk one maple_range_64 (or leaf_64) node.  [min, max] is the index
+ * range the node covers; leaf slots are handed to ops->entry, interior
+ * slots are descended into with their narrowed sub-ranges.
+ */
+static void do_mt_range64(ulong entry, ulong min, ulong max,
+		struct maple_tree_ops *ops)
+{
+	ulong maple_node_m_node = mte_to_node(entry);
+	char node_buf[MAPLE_BUFSIZE];
+	bool leaf = mte_is_leaf(entry);
+	ulong first = min, last;
+	int i;
+	char *mr64_buf;
+
+	/* node_buf must be able to hold a whole struct maple_node. */
+	assert(MAPLE_BUFSIZE >= g_maple_node);
+
+	READMEM(VADDR, maple_node_m_node, node_buf, g_maple_node);
+
+	mr64_buf = node_buf + g_maple_node_mr64;
+
+	for (i = 0; i < mt_slots[maple_range_64]; i++) {
+		last = max;
+
+		/* The final slot has no pivot; it inherits the parent max. */
+		if (i < (mt_slots[maple_range_64] - 1))
+			last = ULONG(mr64_buf + g_maple_range_64_pivot +
+					sizeof(ulong) * i);
+		else if (!VOID_PTR(mr64_buf + g_maple_range_64_slot +
+					sizeof(void *) * i) &&
+				max != mt_max[mte_node_type(entry)])
+			break;
+		/* A zero pivot past slot 0 marks the end of the used slots. */
+		if (last == 0 && i > 0)
+			break;
+		if (leaf)
+			do_mt_entry(mt_slot((void **)(mr64_buf +
+					g_maple_range_64_slot), i),
+					ops);
+		else if (VOID_PTR(mr64_buf + g_maple_range_64_slot +
+					sizeof(void *) * i)) {
+			do_mt_node(mt_slot((void **)(mr64_buf +
+					g_maple_range_64_slot), i),
+					first, last, ops);
+		}
+
+		if (last == max)
+			break;
+		if (last > max) {
+			/*
+			 * Report the node's address in the dump, not the local
+			 * scratch buffer (which the original printed).
+			 */
+			printf("node %lx last (%lu) > max (%lu) at pivot %d!\n",
+				maple_node_m_node, last, max, i);
+			break;
+		}
+		first = last + 1;
+	}
+}
+
+/*
+ * Walk one maple_arange_64 node: like do_mt_range64(), but the offsets
+ * and slot count come from struct maple_arange_64.
+ */
+static void do_mt_arange64(ulong entry, ulong min, ulong max,
+		struct maple_tree_ops *ops)
+{
+	ulong maple_node_m_node = mte_to_node(entry);
+	char node_buf[MAPLE_BUFSIZE];
+	bool leaf = mte_is_leaf(entry);
+	ulong first = min, last;
+	int i;
+	char *ma64_buf;
+
+	/* node_buf must be able to hold a whole struct maple_node. */
+	assert(MAPLE_BUFSIZE >= g_maple_node);
+
+	READMEM(VADDR, maple_node_m_node, node_buf, g_maple_node);
+
+	ma64_buf = node_buf + g_maple_node_ma64;
+
+	for (i = 0; i < mt_slots[maple_arange_64]; i++) {
+		last = max;
+
+		/* The final slot has no pivot; it inherits the parent max. */
+		if (i < (mt_slots[maple_arange_64] - 1))
+			last = ULONG(ma64_buf + g_maple_arange_64_pivot +
+					sizeof(ulong) * i);
+		else if (!VOID_PTR(ma64_buf + g_maple_arange_64_slot +
+					sizeof(void *) * i))
+			break;
+		/* A zero pivot past slot 0 marks the end of the used slots. */
+		if (last == 0 && i > 0)
+			break;
+
+		if (leaf)
+			do_mt_entry(mt_slot((void **)(ma64_buf +
+					g_maple_arange_64_slot), i),
+					ops);
+		else if (VOID_PTR(ma64_buf + g_maple_arange_64_slot +
+					sizeof(void *) * i)) {
+			do_mt_node(mt_slot((void **)(ma64_buf +
+					g_maple_arange_64_slot), i),
+					first, last, ops);
+		}
+
+		if (last == max)
+			break;
+		if (last > max) {
+			/*
+			 * Report the node's address in the dump, not the local
+			 * scratch buffer (which the original printed).
+			 */
+			printf("node %lx last (%lu) > max (%lu) at pivot %d!\n",
+				maple_node_m_node, last, max, i);
+			break;
+		}
+		first = last + 1;
+	}
+}
+
+/* Hand a non-empty leaf entry to the caller-supplied callback, if any. */
+static void do_mt_entry(ulong entry, struct maple_tree_ops *ops)
+{
+	if (!entry || !ops->entry)
+		return;
+	ops->entry(entry, ops->private);
+}
+
+/*
+ * Dispatch on the node type: dense nodes hand every slot straight to the
+ * callback, range/arange nodes recurse through their typed walkers.
+ */
+static void do_mt_node(ulong entry, ulong min, ulong max,
+ struct maple_tree_ops *ops)
+{
+ ulong maple_node = mte_to_node(entry);
+ int i, type = mte_node_type(entry);
+ char node_buf[MAPLE_BUFSIZE];
+
+ READMEM(VADDR, maple_node, node_buf, g_maple_node);
+
+ switch (type) {
+ case maple_dense:
+ /* Dense nodes store one entry per index; slot i covers min + i. */
+ for (i = 0; i < mt_slots[maple_dense]; i++) {
+ if (min + i > max)
+ printf("OUT OF RANGE: ");
+ do_mt_entry(mt_slot((void **)(node_buf +
+ g_maple_node_slot), i), ops);
+ }
+ break;
+ case maple_leaf_64:
+ case maple_range_64:
+ do_mt_range64(entry, min, max, ops);
+ break;
+ case maple_arange_64:
+ do_mt_arange64(entry, min, max, ops);
+ break;
+ default:
+ /* Corrupted dump or a layout this walker does not know. */
+ printf(" UNKNOWN TYPE\n");
+ }
+}
+
+/*
+ * Entry point of a walk: read struct maple_tree at @ptr from the dump and
+ * follow ma_root.  A non-node root is a single inline entry; otherwise
+ * descend from the root node.  Always returns 0.
+ */
+static int do_maple_tree_traverse(ulong ptr, struct maple_tree_ops *ops)
+{
+ char tree_buf[MAPLE_BUFSIZE];
+ ulong entry;
+
+ assert(MAPLE_BUFSIZE >= g_maple_tree);
+
+ READMEM(VADDR, ptr, tree_buf, g_maple_tree);
+ entry = ULONG(tree_buf + g_maple_tree_ma_root);
+
+ if (!xa_is_node(entry))
+ do_mt_entry(entry, ops);
+ else if (entry) {
+ /*
+ * NOTE(review): mte_node_type() masks with 0x0F but mt_max[] has
+ * only 4 entries — a corrupted root could index out of bounds.
+ * Confirm whether a bound check is wanted here.
+ */
+ do_mt_node(entry, 0, mt_max[mte_node_type(entry)], ops);
+ }
+
+ return 0;
+}
+
+/* Walk callback: count entries; the entry value itself is irrelevant. */
+static void do_maple_tree_count(ulong node, void *private)
+{
+	struct do_maple_tree_info *info = private;
+
+	(void)node;	/* silence -Wunused-parameter; only the count matters */
+	info->count++;
+}
+
+/* Walk callback: append the entry to the caller's array, advancing the cursor. */
+static void do_maple_tree_gather(ulong node, void *private)
+{
+	struct do_maple_tree_info *info = private;
+	ulong *out = info->data;
+
+	out[info->count++] = node;
+}
+
+/*
+ * Walk the maple tree at @root.  MAPLE_TREE_COUNT only counts entries;
+ * MAPLE_TREE_GATHER stores them into @buf, which the caller must have
+ * sized via a prior COUNT pass.  Returns the number of entries visited,
+ * or 0 on an unsupported flag.
+ */
+static ulong do_maple_tree(ulong root, int flag, ulong *buf)
+{
+	struct do_maple_tree_info info;
+	struct maple_tree_ops ops;
+
+	info.count = 0;
+	info.data = buf;
+	ops.private = &info;
+
+	if (flag == MAPLE_TREE_COUNT) {
+		ops.entry = do_maple_tree_count;
+	} else if (flag == MAPLE_TREE_GATHER) {
+		ops.entry = do_maple_tree_gather;
+	} else {
+		fprintf(stderr, "do_maple_tree: invalid flag: %x\n", flag);
+		return 0;
+	}
+
+	do_maple_tree_traverse(root, &ops);
+	return info.count;
+}
+
+/*
+ * Resolve the size of struct Y into X via BTF or DWARF.  NOTE: on failure
+ * these macros `return FALSE;` from the ENCLOSING function — only usable
+ * inside an int-returning function with `is_btf` in scope.
+ */
+#define MAPLE_SIZE_INIT(X, Y) \
+do { \
+ if (is_btf) { \
+ if ((X = cb->get_type_size_by_name(Y, BTF_KIND_STRUCT, NULL)) \
+ == 0) \
+ return FALSE; \
+ } else { \
+ if ((X = cb->get_structure_size(Y, DWARF_INFO_GET_STRUCT_SIZE)) \
+ == FAILED_DWARFINFO) \
+ return FALSE; \
+ } \
+} while (0)
+
+/* Resolve the byte offset of member Z in struct Y into X; same caveats. */
+#define MAPLE_OFFSET_INIT(X, Y, Z) \
+do { \
+ if (is_btf) { \
+ struct member_info mi; \
+ memset(&mi, 0, sizeof(mi)); \
+ if (cb->get_struct_member_by_name(Y, Z, &mi) == 0) \
+ return FALSE; \
+ X = mi.bit_pos / 8; \
+ } else { \
+ if ((X = cb->get_member_offset(Y, Z, DWARF_INFO_GET_MEMBER_OFFSET)) \
+ == FAILED_DWARFINFO) \
+ return FALSE; \
+ } \
+} while (0)
+
+/*******************maple tree api***************************/
+
+/*
+ * Resolve all maple-tree sizes/offsets (from BTF when is_btf, else DWARF)
+ * and load the kernel's mt_slots[] per-type slot counts.  Returns TRUE on
+ * success, FALSE on any lookup or allocation failure.
+ *
+ * NOTE(review): calling this twice leaks the previous mt_slots buffer.
+ * NOTE(review): assumes READMEM handles/reports read failure itself —
+ * TODO confirm the macro's failure semantics.
+ */
+int maple_init(bool is_btf)
+{
+ int array_len = 16;
+
+ MAPLE_SIZE_INIT(g_maple_tree, "maple_tree");
+ MAPLE_SIZE_INIT(g_maple_node, "maple_node");
+ MAPLE_OFFSET_INIT(g_maple_tree_ma_root, "maple_tree", "ma_root");
+ MAPLE_OFFSET_INIT(g_maple_node_ma64, "maple_node", "ma64");
+ MAPLE_OFFSET_INIT(g_maple_node_mr64, "maple_node", "mr64");
+ MAPLE_OFFSET_INIT(g_maple_node_slot, "maple_node", "slot");
+ MAPLE_OFFSET_INIT(g_maple_arange_64_pivot, "maple_arange_64",
"pivot");
+ MAPLE_OFFSET_INIT(g_maple_arange_64_slot, "maple_arange_64",
"slot");
+ MAPLE_OFFSET_INIT(g_maple_range_64_pivot, "maple_range_64",
"pivot");
+ MAPLE_OFFSET_INIT(g_maple_range_64_slot, "maple_range_64", "slot");
+ mt_slots = calloc(array_len, sizeof(char));
+ if (!mt_slots) {
+ fprintf(stderr, "%s: Not enough memory!\n", __func__);
+ return FALSE;
+ }
+ if (is_btf) {
+ READMEM(VADDR, cb->get_kallsyms_value_by_name("mt_slots"),
+ mt_slots, array_len * sizeof(char));
+ } else {
+ READMEM(VADDR, cb->get_symbol_addr_all("mt_slots"),
+ mt_slots, array_len * sizeof(char));
+ }
+
+ /* Dense nodes cover one index per slot; the 64-bit types span all of ulong. */
+ mt_max[maple_dense] = mt_slots[maple_dense];
+ mt_max[maple_leaf_64] = ULONG_MAX;
+ mt_max[maple_range_64] = ULONG_MAX;
+ mt_max[maple_arange_64] = ULONG_MAX;
+ return TRUE;
+}
+
+/* Number of trees whose flattened contents are kept cached at once. */
+#define MAPLE_CACHE 16
+
+/*
+ * One cached tree: its root address, an LFU hit counter used for slot
+ * eviction, and the gathered entries (malloc'd array of elems_count).
+ */
+static struct maple_cache {
+ uint64_t tree;
+ uint64_t hits;
+ int elems_count;
+ uint64_t *elems;
+} maple_cache[MAPLE_CACHE] = {0};
+
+/*
+ * Common backend for maple_count()/maple_elem().  The tree is walked once,
+ * its entries flattened into a per-tree cache slot (least-frequently-hit
+ * slot is evicted), and subsequent calls are served from the cache.
+ *
+ * Fixes over the original: the cache slot's `tree` field is now recorded
+ * (previously never set, so the cache could never hit), a GATHER cache
+ * miss now populates the cache instead of reading another tree's stale
+ * array, the index bound check uses >= (index == elems_count was one past
+ * the end) and rejects negative indices, malloc failure is handled, and
+ * ep_index may be NULL (maple_count passes NULL).
+ */
+static VALUE_S *maple_tree(VALUE_S *ep_tree, int cmd, VALUE_S *ep_index)
+{
+	uint64_t tree = eppic_getval(ep_tree);
+	int index = ep_index ? eppic_getval(ep_index) : 0;
+	int found = -1, target = -1;
+	int min_hits = 0;
+	ulong count;
+
+	for (int i = 0; i < MAPLE_CACHE; i++) {
+		min_hits = maple_cache[i].hits < maple_cache[min_hits].hits ?
+				i : min_hits;
+		if (tree == maple_cache[i].tree && maple_cache[i].elems)
+			found = i;
+	}
+
+	if (found >= 0) {
+		maple_cache[found].hits++;
+		target = found;
+	} else {
+		/* Miss: evict the least-frequently-hit slot and (re)load it. */
+		target = min_hits;
+		free(maple_cache[target].elems);
+		memset(&maple_cache[target], 0, sizeof(struct maple_cache));
+
+		count = do_maple_tree(tree, MAPLE_TREE_COUNT, NULL);
+		/* Allocate at least one element so an empty tree still caches. */
+		maple_cache[target].elems =
+			malloc((count ? count : 1) * sizeof(uint64_t));
+		if (!maple_cache[target].elems) {
+			fprintf(stderr, "%s: Not enough memory!\n", __func__);
+			return eppic_makebtype(0);
+		}
+		do_maple_tree(tree, MAPLE_TREE_GATHER, maple_cache[target].elems);
+		maple_cache[target].tree = tree;
+		maple_cache[target].elems_count = count;
+		maple_cache[target].hits = 1;
+	}
+
+	switch (cmd)
+	{
+	case MAPLE_TREE_COUNT:
+		return eppic_makebtype(maple_cache[target].elems_count);
+
+	case MAPLE_TREE_GATHER:
+		if (index < 0 || index >= maple_cache[target].elems_count) {
+			printf("Invalid maple index %d(>= %d) for tree %lx\n",
+				index, maple_cache[target].elems_count, tree);
+			return eppic_makebtype(0);
+		}
+		return eppic_makebtype(maple_cache[target].elems[index]);
+
+	default:
+		return eppic_makebtype(0);
+	}
+}
+
+/* eppic API: number of entries in the maple tree rooted at @ep_tree. */
+VALUE_S *
+maple_count(VALUE_S *ep_tree)
+{
+	VALUE_S *ret = maple_tree(ep_tree, MAPLE_TREE_COUNT, NULL);
+
+	return ret;
+}
+
+/* eppic API: the @ep_index'th cached entry of the tree at @ep_tree. */
+VALUE_S *
+maple_elem(VALUE_S *ep_tree, VALUE_S *ep_index)
+{
+	VALUE_S *ret = maple_tree(ep_tree, MAPLE_TREE_GATHER, ep_index);
+
+	return ret;
+}
diff --git a/eppic_maple.h b/eppic_maple.h
new file mode 100644
index 0000000..61bb32f
--- /dev/null
+++ b/eppic_maple.h
@@ -0,0 +1,8 @@
+/* NOTE(review): leading-underscore+uppercase guard names are reserved to
+ * the implementation; consider EPPIC_MAPLE_H. */
+#ifndef _EPPIC_MAPLE_H
+#define _EPPIC_MAPLE_H
+#include "makedumpfile.h"
+/* Resolve maple-tree sizes/offsets from BTF (true) or DWARF; TRUE on success. */
+int maple_init(bool);
+/* eppic wrappers: entry count / n'th entry of a maple tree. */
+VALUE_S *maple_count(VALUE_S *);
+VALUE_S *maple_elem(VALUE_S *, VALUE_S *);
+#endif /*_EPPIC_MAPLE_H*/
+
--
2.47.0