From 0bb543470c02e5f741bab2bbbe87443791ff5f24 Mon Sep 17 00:00:00 2001
From: zhangyanfei
Date: Tue, 23 Oct 2012 14:49:53 +0800
Subject: [PATCH 2/2] runq: display tasks in throttled cfs_rqs/rt_rqs

Signed-off-by: zhangyanfei
---
 defs.h    |    6 +
 symbols.c |   12 ++
 task.c    |  350 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++----
 3 files changed, 349 insertions(+), 19 deletions(-)

diff --git a/defs.h b/defs.h
index 4ee550c..17fc82f 100755
--- a/defs.h
+++ b/defs.h
@@ -1801,6 +1801,12 @@ struct offset_table { /* stash of commonly-used offsets */
 	long rt_rq_tg;
 	long cgroup_subsys_state_cgroup;
 	long cgroup_dentry;
+	long task_group_parent;
+	long task_group_children;
+	long task_group_siblings;
+	long cfs_rq_throttled;
+	long rt_rq_rt_throttled;
+	long rt_rq_highest_prio_curr;
 };
 
 struct size_table { /* stash of commonly-used sizes */
diff --git a/symbols.c b/symbols.c
index fc2b5c7..716bbf6 100755
--- a/symbols.c
+++ b/symbols.c
@@ -8838,6 +8838,18 @@ dump_offset_table(char *spec, ulong makestruct)
 		OFFSET(cgroup_subsys_state_cgroup));
 	fprintf(fp, " cgroup_dentry: %ld\n",
 		OFFSET(cgroup_dentry));
+	fprintf(fp, " task_group_parent: %ld\n",
+		OFFSET(task_group_parent));
+	fprintf(fp, " task_group_children: %ld\n",
+		OFFSET(task_group_children));
+	fprintf(fp, " task_group_siblings: %ld\n",
+		OFFSET(task_group_siblings));
+	fprintf(fp, " cfs_rq_throttled: %ld\n",
+		OFFSET(cfs_rq_throttled));
+	fprintf(fp, " rt_rq_rt_throttled: %ld\n",
+		OFFSET(rt_rq_rt_throttled));
+	fprintf(fp, " rt_rq_highest_prio_curr: %ld\n",
+		OFFSET(rt_rq_highest_prio_curr));
 
 	fprintf(fp, "\n size_table:\n");
 	fprintf(fp, " page: %ld\n", SIZE(page));
diff --git a/task.c b/task.c
index e0aff17..24c6d95 100755
--- a/task.c
+++ b/task.c
@@ -63,11 +63,17 @@ static struct rb_node *rb_next(struct rb_node *);
 static struct rb_node *rb_parent(struct rb_node *, struct rb_node *);
 static struct rb_node *rb_right(struct rb_node *, struct rb_node *);
 static struct rb_node *rb_left(struct rb_node *, struct rb_node *);
+static void sort_throttled_rq_array(void *, int);
+static void dump_task_group_name(ulong);
 static void dump_task_runq_entry(struct task_context *);
-static int dump_tasks_in_cfs_rq(int, ulong);
+static int dump_tasks_in_cfs_rq(int, ulong, int, int);
+static void fill_throttled_cfs_rq_array(int, ulong, char *, int);
+static void fill_throttled_rt_rq_array(int, ulong, char *, int);
+static ulong get_rt_rq_curr_offset(void);
+static void task_group_offset_init(void);
 static void dump_on_rq_tasks(void);
 static void dump_CFS_runqueues(void);
-static void dump_RT_prio_array(int, ulong, char *);
+static void dump_RT_prio_array(int, ulong, char *, int, int);
 static void task_struct_member(struct task_context *,unsigned int, struct reference *);
 static void signal_reference(struct task_context *, ulong, struct reference *);
 static void do_sig_thread_group(ulong);
@@ -7421,6 +7427,44 @@ rb_next(struct rb_node *node)
 	return parent;
 }
 
+#define MAX_THROTTLED_RQ 100
+struct throttled_rq {
+	ulong rq;
+	int depth;
+	int prio;
+};
+static struct throttled_rq throttled_rt_rq_array[MAX_THROTTLED_RQ];
+static struct throttled_rq throttled_cfs_rq_array[MAX_THROTTLED_RQ];
+static int rt_last = 0;
+static int cfs_last = 0;
+
+#define COPY_THROTTLED(t1, t2) \
+do { \
+	t1.rq = t2.rq; \
+	t1.depth = t2.depth; \
+	t1.prio = t2.prio; \
+} while (0);
+
+static void
+sort_throttled_rq_array(void *a, int len)
+{
+	int i, j;
+	struct throttled_rq tmp;
+	struct throttled_rq *array = (struct throttled_rq *)a;
+
+	for (i = 0; i < len - 1; i++) {
+		for (j = 0; j < len - i - 1; j++) {
+			if (array[j].depth > array[j+1].depth ||
+			    (array[j].depth == array[j+1].depth &&
+			     array[j].prio > array[j+1].prio)) {
+				COPY_THROTTLED(tmp, array[j+1]);
+				COPY_THROTTLED(array[j+1], array[j]);
+				COPY_THROTTLED(array[j], tmp);
+			}
+		}
+	}
+}
+
 static void
 dump_task_group_name(ulong group)
 {
@@ -7459,14 +7503,14 @@ dump_task_runq_entry(struct task_context *tc)
 }
 
 static int
-dump_tasks_in_cfs_rq(int depth, ulong cfs_rq)
+dump_tasks_in_cfs_rq(int depth, ulong cfs_rq, int cpu, int throttled)
 {
 	struct task_context *tc;
 	struct rb_root *root;
 	struct rb_node *node;
 	ulong my_q, leftmost, curr, curr_my_q;
-	int total;
-	ulong tmp;
+	int total, c, i, delta;
+	ulong p1, p2, t1, t2, th_cfs_rq, tmp, *tg_array;
 
 	total = 0;
 
@@ -7477,6 +7521,8 @@ dump_tasks_in_cfs_rq(int depth, ulong cfs_rq)
 		INDENT(-1 + 6 * depth);
 		fprintf(fp, "GROUP CFS RB_ROOT: %lx", cfs_rq);
 		dump_task_group_name(tmp);
+		if (throttled)
+			fprintf(fp, "(THROTTLED)");
 		fprintf(fp, "\n");
 	}
 
@@ -7489,7 +7535,7 @@ dump_tasks_in_cfs_rq(int depth, ulong cfs_rq)
 			FAULT_ON_ERROR);
 		if (curr_my_q)
 			total += dump_tasks_in_cfs_rq(depth + 1,
-				curr_my_q);
+				curr_my_q, cpu, throttled);
 		}
 	}
 
@@ -7503,7 +7549,8 @@ dump_tasks_in_cfs_rq(int depth, ulong cfs_rq)
 			+ OFFSET(sched_entity_my_q), KVADDR, &my_q,
 			sizeof(ulong), "my_q", FAULT_ON_ERROR);
 		if (my_q) {
-			total += dump_tasks_in_cfs_rq(depth + 1, my_q);
+			total += dump_tasks_in_cfs_rq(depth + 1,
+				my_q, cpu, throttled);
 			continue;
 		}
 	}
@@ -7523,26 +7570,181 @@ dump_tasks_in_cfs_rq(int depth, ulong cfs_rq)
 		total++;
 	}
 
+	for (c = 0; c < cfs_last; c++) {
+		delta = throttled_cfs_rq_array[c].depth - depth;
+		if (delta >= 1) {
+			readmem(cfs_rq + OFFSET(cfs_rq_tg), KVADDR,
+				&t1, sizeof(ulong), "cfs_rq tg",
+				FAULT_ON_ERROR);
+			th_cfs_rq = throttled_cfs_rq_array[c].rq;
+			readmem(th_cfs_rq + OFFSET(cfs_rq_tg), KVADDR,
+				&t2, sizeof(ulong), "cfs_rq tg",
+				FAULT_ON_ERROR);
+			tg_array = (ulong *)GETBUF(delta * sizeof(ulong));
+			for (i = 0; i < delta; i++) {
+				readmem(t2 + OFFSET(task_group_parent), KVADDR,
+					&p2, sizeof(ulong), "task_group parent",
+					FAULT_ON_ERROR);
+				tg_array[i] = t2 = p2;
+			}
+			if (t1 == p2) {
+				for (i = delta - 1; i > 0; i--) {
+					INDENT(-1 + 6 * (depth + delta - i));
+					readmem(tg_array[i - 1] + OFFSET(task_group_cfs_rq),
+						KVADDR, &tmp, sizeof(ulong),
+						"task_group cfs_rq", FAULT_ON_ERROR);
+					readmem(tmp + sizeof(ulong) * cpu, KVADDR,
+						&th_cfs_rq, sizeof(ulong),
+						"task_group cfs_rq", FAULT_ON_ERROR);
+					fprintf(fp, "GROUP CFS RB_ROOT: %lx",
+						th_cfs_rq);
+					dump_task_group_name(tg_array[i-1]);
+					fprintf(fp, "(DEQUEUED)\n");
+				}
+				throttled_cfs_rq_array[c].depth = -1;
+				total += dump_tasks_in_cfs_rq(depth + delta,
+					throttled_cfs_rq_array[c].rq, cpu, 1);
+			}
+			FREEBUF(tg_array);
+		}
+	}
+
+	if (!total) {
+		INDENT(5 + 6 * depth);
+		fprintf(fp, "[no tasks queued]\n");
+	}
 	return total;
 }
 
 static void
+fill_throttled_cfs_rq_array(int depth, ulong group, char *group_buf, int cpu)
+{
+	ulong cfs_rq, tmp;
+	int throttled;
+	ulong kvaddr, uvaddr, offset;
+	ulong list_head[2], next;
+
+	tmp = ULONG(group_buf + OFFSET(task_group_cfs_rq));
+	readmem(tmp + sizeof(ulong) * cpu, KVADDR, &cfs_rq,
+		sizeof(ulong), "task_group cfs_rq", FAULT_ON_ERROR);
+	readmem(cfs_rq + OFFSET(cfs_rq_throttled), KVADDR, &throttled,
+		sizeof(int), "cfs_rq throttled", FAULT_ON_ERROR);
+
+	if (throttled) {
+		throttled_cfs_rq_array[cfs_last].rq = cfs_rq;
+		throttled_cfs_rq_array[cfs_last++].depth = depth;
+	}
+
+	offset = OFFSET(task_group_children);
+	kvaddr = group + offset;
+	uvaddr = (ulong)(group_buf + offset);
+	BCOPY((char *)uvaddr, (char *)&list_head[0], sizeof(ulong)*2);
+
+	if ((list_head[0] == kvaddr) && (list_head[1] == kvaddr))
+		return;
+
+	next = list_head[0];
+	while (next != kvaddr) {
+		group = next - OFFSET(task_group_siblings);
+		readmem(group, KVADDR, group_buf, SIZE(task_group),
+			"task_group", FAULT_ON_ERROR);
+		next = ULONG(group_buf + OFFSET(task_group_siblings) +
+			OFFSET(list_head_next));
+		fill_throttled_cfs_rq_array(depth + 1, group, group_buf, cpu);
+	}
+}
+
+static void
+fill_throttled_rt_rq_array(int depth, ulong group, char *group_buf, int cpu)
+{
+	ulong rt_rq, tmp;
+	int throttled;
+	ulong kvaddr, uvaddr, offset;
+	ulong list_head[2], next;
+	char *rt_rq_buf;
+
+	tmp = ULONG(group_buf + OFFSET(task_group_rt_rq));
+	readmem(tmp + sizeof(ulong) * cpu, KVADDR, &rt_rq,
+		sizeof(ulong), "task_group rt_rq", FAULT_ON_ERROR);
+	rt_rq_buf = GETBUF(SIZE(rt_rq));
+	readmem(rt_rq, KVADDR, rt_rq_buf, SIZE(rt_rq), "rt_rq", FAULT_ON_ERROR);
+	throttled = UINT(rt_rq_buf + OFFSET(rt_rq_rt_throttled));
+
+	if (throttled) {
+		throttled_rt_rq_array[rt_last].rq = rt_rq;
+		throttled_rt_rq_array[rt_last].prio =
+			INT(rt_rq_buf + OFFSET(rt_rq_highest_prio_curr));
+		throttled_rt_rq_array[rt_last++].depth = depth;
+	}
+	FREEBUF(rt_rq_buf);
+
+	offset = OFFSET(task_group_children);
+	kvaddr = group + offset;
+	uvaddr = (ulong)(group_buf + offset);
+	BCOPY((char *)uvaddr, (char *)&list_head[0], sizeof(ulong)*2);
+
+	if ((list_head[0] == kvaddr) && (list_head[1] == kvaddr))
+		return;
+
+	next = list_head[0];
+	while (next != kvaddr) {
+		group = next - OFFSET(task_group_siblings);
+		readmem(group, KVADDR, group_buf, SIZE(task_group),
+			"task_group", FAULT_ON_ERROR);
+		next = ULONG(group_buf + OFFSET(task_group_siblings) +
+			OFFSET(list_head_next));
+		fill_throttled_rt_rq_array(depth + 1, group, group_buf, cpu);
+	}
+}
+
+static ulong
+get_rt_rq_curr_offset(void)
+{
+	int success;
+	char buf[BUFSIZE];
+	char *tokens[100];
+	ulong offset;
+
+	offset = (ulong)-1;
+	sprintf(buf, "print &((struct rt_rq *)0x0)->highest_prio.curr");
+	open_tmpfile();
+	success = gdb_pass_through(buf, NULL, GNU_RETURN_ON_ERROR);
+	rewind(pc->tmpfile);
+	if (success && fgets(buf, BUFSIZE, pc->tmpfile)) {
+		parse_line(buf, tokens);
+		offset = htol(tokens[3], FAULT_ON_ERROR, NULL);
+	}
+
+	close_tmpfile();
+
+	if (!success)
+		error(FATAL, "gdb request failed: %s\n", buf);
+	return offset;
+}
+
+static void
 task_group_offset_init(void)
 {
 	if (MEMBER_EXISTS("task_group", "rt_bandwidth")) {
 		MEMBER_OFFSET_INIT(task_group_rt_bandwidth, "task_group", "rt_bandwidth");
+		MEMBER_OFFSET_INIT(task_group_parent, "task_group", "parent");
+		MEMBER_OFFSET_INIT(task_group_children, "task_group", "children");
+		MEMBER_OFFSET_INIT(task_group_siblings, "task_group", "siblings");
 		MEMBER_OFFSET_INIT(task_group_rt_rq, "task_group", "rt_rq");
+		MEMBER_OFFSET_INIT(rt_rq_rt_throttled, "rt_rq", "rt_throttled");
 		MEMBER_OFFSET_INIT(rt_rq_tg, "rt_rq", "tg");
 		MEMBER_OFFSET_INIT(task_group_css, "task_group", "css");
 		MEMBER_OFFSET_INIT(cgroup_subsys_state_cgroup, "cgroup_subsys_state", "cgroup");
 		MEMBER_OFFSET_INIT(cgroup_dentry, "cgroup", "dentry");
+		ASSIGN_OFFSET(rt_rq_highest_prio_curr) = get_rt_rq_curr_offset();
 	}
 
 	if (MEMBER_EXISTS("task_group", "cfs_bandwidth")) {
 		MEMBER_OFFSET_INIT(task_group_cfs_bandwidth, "task_group", "cfs_bandwidth");
 		MEMBER_OFFSET_INIT(task_group_cfs_rq, "task_group", "cfs_rq");
+		MEMBER_OFFSET_INIT(cfs_rq_throttled, "cfs_rq", "throttled");
 		MEMBER_OFFSET_INIT(cfs_rq_tg, "cfs_rq", "tg");
 	}
 }
@@ -7604,13 +7806,15 @@ dump_on_rq_tasks(void)
 static void
 dump_CFS_runqueues(void)
 {
-	int tot, cpu;
+	int cpu, i;
 	ulong runq, cfs_rq;
 	char *runqbuf, *cfs_rq_buf;
 	ulong tasks_timeline ATTRIBUTE_UNUSED;
 	struct task_context *tc;
 	struct rb_root *root;
 	struct syment *rq_sp, *init_sp;
+	ulong root_task_group;
+	char *group_buf, *group_buf_rt;
 
 	if (!VALID_STRUCT(cfs_rq)) {
 		STRUCT_SIZE_INIT(cfs_rq, "cfs_rq");
@@ -7657,6 +7861,15 @@ dump_CFS_runqueues(void)
 	else
 		cfs_rq_buf = NULL;
 
+	if (VALID_STRUCT(task_group)) {
+		if (symbol_exists("init_task_group"))
+			root_task_group = symbol_value("init_task_group");
+		else if (symbol_exists("root_task_group"))
+			root_task_group = symbol_value("root_task_group");
+		else
+			error(FATAL, "cannot determine root task_group\n");
+	}
+
 	get_active_set();
 
 	for (cpu = 0; cpu < kt->cpus; cpu++) {
@@ -7697,18 +7910,53 @@ dump_CFS_runqueues(void)
 			OFFSET(cfs_rq_tasks_timeline));
 		}
 
+		if (VALID_MEMBER(task_group_rt_bandwidth)) {
+			group_buf_rt = GETBUF(SIZE(task_group));
+			readmem(root_task_group, KVADDR, group_buf_rt, SIZE(task_group),
+				"task_group", FAULT_ON_ERROR);
+			fill_throttled_rt_rq_array(0, root_task_group,
+				group_buf_rt, cpu);
+			sort_throttled_rq_array(throttled_rt_rq_array, rt_last);
+			if (CRASHDEBUG(1)) {
+				fprintf(fp, "throttled_rt_rq_array:\n");
+				for (i = 0; i < rt_last; i++) {
+					fprintf(fp, " [%2d] = {depth=%d, prio=%d, rt_rq=%lx}\n",
+						i, throttled_rt_rq_array[i].depth,
+						throttled_rt_rq_array[i].prio,
+						throttled_rt_rq_array[i].rq);
+				}
+			}
+			FREEBUF(group_buf_rt);
+		}
+
 		dump_RT_prio_array(0, runq + OFFSET(rq_rt) + OFFSET(rt_rq_active),
-			&runqbuf[OFFSET(rq_rt) + OFFSET(rt_rq_active)]);
+			&runqbuf[OFFSET(rq_rt) + OFFSET(rt_rq_active)], cpu, 0);
 
 		fprintf(fp, " CFS RB_ROOT: %lx\n", (ulong)root);
 
+		if (VALID_MEMBER(task_group_cfs_bandwidth)) {
+			group_buf = GETBUF(SIZE(task_group));
+			readmem(root_task_group, KVADDR, group_buf, SIZE(task_group),
+				"task_group", FAULT_ON_ERROR);
+			fill_throttled_cfs_rq_array(0, root_task_group,
+				group_buf, cpu);
+			sort_throttled_rq_array(throttled_cfs_rq_array, cfs_last);
+			if (CRASHDEBUG(1)) {
+				fprintf(fp, "throttled_cfs_rq_array:\n");
+				for (i = 0; i < cfs_last; i++) {
+					fprintf(fp, " [%2d] = {depth=%d, cfs_rq=%lx}\n",
+						i, throttled_cfs_rq_array[i].depth,
+						throttled_cfs_rq_array[i].rq);
+				}
+			}
+			FREEBUF(group_buf);
+		}
+
 		hq_open();
-		tot = dump_tasks_in_cfs_rq(0, cfs_rq);
+		dump_tasks_in_cfs_rq(0, cfs_rq, cpu, 0);
 		hq_close();
-		if (!tot) {
-			INDENT(5);
-			fprintf(fp, "[no tasks queued]\n");
-		}
+
+		rt_last = cfs_last = 0;
 	}
 
 	FREEBUF(runqbuf);
@@ -7717,9 +7965,10 @@ dump_CFS_runqueues(void)
 }
 
 static void
-dump_RT_prio_array(int depth, ulong k_prio_array, char *u_prio_array)
+dump_RT_prio_array(int depth, ulong k_prio_array, char *u_prio_array,
+	int cpu, int throttled)
 {
-	int i, c, tot, cnt, qheads;
+	int i, c, j, tot, cnt, qheads, delta, prio;
 	ulong offset, kvaddr, uvaddr;
 	ulong list_head[2];
 	struct list_data list_data, *ld;
@@ -7727,7 +7976,7 @@ dump_RT_prio_array(int depth, ulong k_prio_array, char *u_prio_array)
 	ulong *tlist;
 	ulong my_q, task_addr;
 	char *rt_rq_buf;
-	ulong tmp;
+	ulong p1, p2, t1, t2, rt_rq, tmp, *tg_array;
 
 	if (!depth)
 		fprintf(fp, " RT PRIO_ARRAY: %lx\n", k_prio_array);
@@ -7748,7 +7997,7 @@ dump_RT_prio_array(int depth, ulong k_prio_array, char *u_prio_array)
 				i, kvaddr, list_head[0], list_head[1]);
 
 		if ((list_head[0] == kvaddr) && (list_head[1] == kvaddr))
-			continue; 
+			continue;
 
 		BZERO(ld, sizeof(struct list_data));
 		ld->start = list_head[0];
@@ -7787,7 +8036,8 @@ dump_RT_prio_array(int depth, ulong k_prio_array, char *u_prio_array)
 				tot++;
 				dump_RT_prio_array(depth + 1,
 					my_q + OFFSET(rt_rq_active),
-					&rt_rq_buf[OFFSET(rt_rq_active)]);
+					&rt_rq_buf[OFFSET(rt_rq_active)],
+					cpu, throttled);
 				continue;
 			} else
 				task_addr -= OFFSET(task_struct_rt);
@@ -7804,6 +8054,68 @@ dump_RT_prio_array(int depth, ulong k_prio_array, char *u_prio_array)
 		FREEBUF(tlist);
 	}
+	for (c = 0; c < rt_last; c++) {
+		delta = throttled_rt_rq_array[c].depth - depth;
+		if (delta >= 1) {
+			readmem(k_prio_array - OFFSET(rt_rq_active) +
+				OFFSET(rt_rq_tg), KVADDR,
+				&t1, sizeof(ulong), "rt_rq tg",
+				FAULT_ON_ERROR);
+			rt_rq = throttled_rt_rq_array[c].rq;
+			readmem(rt_rq + OFFSET(rt_rq_tg), KVADDR,
+				&t2, sizeof(ulong), "rt_rq tg",
+				FAULT_ON_ERROR);
+
+			tg_array = (ulong *)GETBUF(delta * sizeof(ulong));
+			tmp = t2;
+			for (j = 0; j < delta; j++) {
+				readmem(tmp + OFFSET(task_group_parent), KVADDR,
+					&p2, sizeof(ulong), "task_group parent",
+					FAULT_ON_ERROR);
+				tg_array[j] = tmp = p2;
+			}
+
+			if (t1 == p2) {
+				for (j = delta - 1; j > 0; j--) {
+					INDENT(-1 + 6 * (depth + delta - j));
+					readmem(tg_array[j - 1] + OFFSET(task_group_rt_rq),
+						KVADDR, &tmp, sizeof(ulong),
+						"task_group rt_rq", FAULT_ON_ERROR);
+					readmem(tmp + sizeof(ulong) * cpu, KVADDR,
+						&rt_rq, sizeof(ulong),
+						"task_group rt_rq", FAULT_ON_ERROR);
+					readmem(rt_rq + OFFSET(rt_rq_highest_prio_curr),
+						KVADDR, &prio, sizeof(ulong),
+						"rt_rq highest_prio curr", FAULT_ON_ERROR);
+					fprintf(fp, "[%3d] ", prio);
+					fprintf(fp, "GROUP RT PRIO_ARRAY: %lx",
+						rt_rq + OFFSET(rt_rq_active));
+					dump_task_group_name(tg_array[j - 1]);
+					fprintf(fp, "(DEQUEUED)\n");
+				}
+				throttled_rt_rq_array[c].depth = -1;
+				prio = throttled_rt_rq_array[c].prio;
+				rt_rq = throttled_rt_rq_array[c].rq;
+				rt_rq_buf = GETBUF(SIZE(rt_rq));
+				readmem(rt_rq, KVADDR, rt_rq_buf, SIZE(rt_rq),
+					"rt_rq", FAULT_ON_ERROR);
+				INDENT(-1 + 6 * (depth + delta));
+				fprintf(fp, "[%3d] ", prio);
+				fprintf(fp, "GROUP RT PRIO_ARRAY: %lx",
+					rt_rq + OFFSET(rt_rq_active));
+				dump_task_group_name(t2);
+				fprintf(fp, "(THROTTLED)\n");
+				tot++;
+				dump_RT_prio_array(depth + delta,
+					rt_rq + OFFSET(rt_rq_active),
+					&rt_rq_buf[OFFSET(rt_rq_active)],
+					cpu, 1);
+				FREEBUF(rt_rq_buf);
+			}
+			FREEBUF(tg_array);
+		}
+	}
+
 	if (!tot) {
 		INDENT(5 + 9 * depth);
 		fprintf(fp, "[no tasks queued]\n");
 	}
-- 
1.7.1
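
A quick way for reviewers to sanity-check the ordering established by sort_throttled_rq_array() above is the stand-alone sketch below (not part of the patch, and the sample addresses and values are made up): entries sort by task_group depth first and by rt priority value second, so parent groups come out before their children and numerically lower (i.e. higher) priorities come out first.

#include <stdio.h>

struct throttled_rq {
	unsigned long rq;	/* rt_rq/cfs_rq address */
	int depth;		/* depth in the task_group hierarchy */
	int prio;		/* highest_prio.curr of the throttled rt_rq */
};

/* same bubble sort and comparison as the patch, minus the crash helpers */
static void
sort_throttled_rq_array(struct throttled_rq *array, int len)
{
	int i, j;
	struct throttled_rq tmp;

	for (i = 0; i < len - 1; i++) {
		for (j = 0; j < len - i - 1; j++) {
			if (array[j].depth > array[j+1].depth ||
			    (array[j].depth == array[j+1].depth &&
			     array[j].prio > array[j+1].prio)) {
				tmp = array[j+1];
				array[j+1] = array[j];
				array[j] = tmp;
			}
		}
	}
}

int
main(void)
{
	/* made-up throttled rt_rqs: {address, depth, prio} */
	struct throttled_rq a[] = {
		{ 0xffff88000001UL, 2, 90 },
		{ 0xffff88000002UL, 1, 99 },
		{ 0xffff88000003UL, 2, 10 },
		{ 0xffff88000004UL, 1,  5 },
	};
	int i, len = sizeof(a) / sizeof(a[0]);

	sort_throttled_rq_array(a, len);
	/* prints the depth-1 entries first, lowest prio value first */
	for (i = 0; i < len; i++)
		printf("depth=%d prio=%3d rq=%lx\n", a[i].depth, a[i].prio, a[i].rq);
	return 0;
}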