Since Linux v3.4 (specifically, commit 438ced1720b584000, "ring-buffer:
Add per_cpu ring buffer control files"), the trace buffer size is now
per-cpu. The patch below updates the trace extension to handle this.
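For context, a rough sketch of the kernel-side layout after that commit
is below; member types and ordering are approximate and unrelated fields
are omitted, so treat it as an illustration rather than the exact kernel
definition:

struct list_head;               /* opaque here; defined by the kernel */

/* Each per-cpu buffer now carries its own page count. */
struct ring_buffer_per_cpu {
        int                        cpu;
        unsigned int               nr_pages;   /* new: per-cpu size */
        struct list_head          *pages;
        /* head_page, tail_page, commit_page, reader_page, ... */
};

/* The global page count is gone from struct ring_buffer. */
struct ring_buffer {
        unsigned                   flags;
        int                        cpus;
        struct ring_buffer_per_cpu **buffers;
        /* ... */
};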
Rabin
The patch looks good to me -- deferring to the trace.c maintainer
Lai Jiangshan (cc'd directly) for his ACK.
Thanks,
Dave
diff --git a/extensions/trace.c b/extensions/trace.c
index 831cc77..224c662 100644
--- a/extensions/trace.c
+++ b/extensions/trace.c
@@ -24,6 +24,7 @@ static int nr_cpu_ids;
* lockless ring_buffer and old non-lockless ring_buffer are both supported.
*/
static int lockless_ring_buffer;
+static int per_cpu_buffer_sizes;
#define koffset(struct, member) struct##_##member##_offset
@@ -37,6 +38,7 @@ static int koffset(ring_buffer, buffers);
static int koffset(ring_buffer_per_cpu, cpu);
static int koffset(ring_buffer_per_cpu, pages);
+static int koffset(ring_buffer_per_cpu, nr_pages);
static int koffset(ring_buffer_per_cpu, head_page);
static int koffset(ring_buffer_per_cpu, tail_page);
static int koffset(ring_buffer_per_cpu, commit_page);
@@ -71,6 +73,7 @@ struct ring_buffer_per_cpu {
ulong real_head_page;
int head_page_index;
+ unsigned int nr_pages;
ulong *pages;
ulong *linear_pages;
@@ -144,7 +147,14 @@ static int init_offsets(void)
init_offset(trace_array, buffer);
init_offset(tracer, name);
- init_offset(ring_buffer, pages);
+ if (MEMBER_EXISTS("ring_buffer_per_cpu", "nr_pages")) {
+ per_cpu_buffer_sizes = 1;
+ if (verbose)
+ fprintf(fp, "per cpu buffer sizes\n");
+ }
+
+ if (!per_cpu_buffer_sizes)
+ init_offset(ring_buffer, pages);
init_offset(ring_buffer, flags);
init_offset(ring_buffer, cpus);
init_offset(ring_buffer, buffers);
@@ -155,6 +165,8 @@ static int init_offsets(void)
fprintf(fp, "lockless\n");
}
+ if (per_cpu_buffer_sizes)
+ init_offset(ring_buffer_per_cpu, nr_pages);
init_offset(ring_buffer_per_cpu, cpu);
init_offset(ring_buffer_per_cpu, pages);
init_offset(ring_buffer_per_cpu, head_page);
@@ -362,6 +374,10 @@ static int ftrace_init_buffers(struct ring_buffer_per_cpu *buffers,
buffer_read_value(reader_page);
buffer_read_value(overrun);
buffer_read_value(entries);
+ if (per_cpu_buffer_sizes) {
+ buffer_read_value(nr_pages);
+ pages = buffers[i].nr_pages;
+ }
#undef buffer_read_value
if (ftrace_init_pages(buffers + i, pages) < 0)
--
1.7.9.5
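As a side note, a minimal sketch of how the per-cpu page count could be
detected and read with crash's stock helpers (MEMBER_EXISTS(),
MEMBER_OFFSET() and readmem() from defs.h) follows. The function and
variable names here are made up for illustration; trace.c uses its own
init_offset()/read_value() wrapper macros instead, as shown in the patch.

#include "defs.h"       /* crash extension API */

/* Returns 0 and fills *nr_pages on kernels >= v3.4, -1 otherwise.
 * 'cpu_buffer' is a hypothetical kernel virtual address of one
 * struct ring_buffer_per_cpu. */
static int read_nr_pages(ulong cpu_buffer, unsigned int *nr_pages)
{
        long off;

        if (!MEMBER_EXISTS("ring_buffer_per_cpu", "nr_pages"))
                return -1;      /* pre-v3.4: sizes are not per-cpu */

        off = MEMBER_OFFSET("ring_buffer_per_cpu", "nr_pages");
        if (off < 0)
                return -1;

        if (!readmem(cpu_buffer + off, KVADDR, nr_pages,
                     sizeof(*nr_pages), "ring_buffer_per_cpu nr_pages",
                     RETURN_ON_ERROR))
                return -1;

        return 0;
}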