-----Original Message-----
It's not clear from the existing debugging prints how broken an
incomplete dump is. The aggregate number of valid pages helps to figure
out the approximate size of the dump. The size of a complete dump is
roughly:

  EXPECTED_CORE_SIZE = a few pages (kdump headers + bitmaps + descriptors) +
                       (total_valid_pages * block_size) * compression_ratio

An incomplete core would be significantly smaller than:

  total_valid_pages * block_size
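
For illustration only (not part of the patch), a minimal sketch in C of
the estimate above; expected_core_size(), header_pages and
compression_ratio are hypothetical names for this example, not
crash/diskdump identifiers:

#include <stdio.h>

/*
 * Hypothetical helper, only to illustrate the estimate above.
 * header_pages stands for the kdump headers, bitmaps and descriptors;
 * compression_ratio is the expected compressed/uncompressed ratio.
 */
static unsigned long
expected_core_size(unsigned long total_valid_pages, unsigned long block_size,
		   unsigned long header_pages, double compression_ratio)
{
	unsigned long metadata = header_pages * block_size;
	double data = (double)total_valid_pages * block_size * compression_ratio;

	return metadata + (unsigned long)data;
}

int main(void)
{
	unsigned long total_valid_pages = 1000000UL;	/* e.g. taken from the new counter */
	unsigned long block_size = 4096UL;

	printf("expected core size: ~%lu bytes\n",
	       expected_core_size(total_valid_pages, block_size, 8UL, 0.4));
	/* A dump file much smaller than total_valid_pages * block_size
	   is likely incomplete. */
	printf("uncompressed data size: %lu bytes\n",
	       total_valid_pages * block_size);
	return 0;
}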
Signed-off-by: Roman Bolshakov <r.bolshakov(a)yadro.com>
---
diskdump.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/diskdump.c b/diskdump.c
index c05f1ec..1428141 100644
--- a/diskdump.c
+++ b/diskdump.c
@@ -74,6 +74,7 @@ struct diskdump_data {
 	ulong	evictions;	/* total evictions done */
 	ulong	cached_reads;
 	ulong	*valid_pages;
+	ulong	total_valid_pages; /* expected number of dumpable pages */
 	ulong	accesses;
 	ulong	snapshot_task;
 };
@@ -877,11 +878,14 @@ restart:
 	}
 
 	dd->valid_pages = calloc(sizeof(ulong), max_sect_len + 1);
+	dd->total_valid_pages = 0;
 	for (i = 1; i < max_sect_len + 1; i++) {
 		dd->valid_pages[i] = dd->valid_pages[i - 1];
 		for (j = 0; j < BITMAP_SECT_LEN; j++, pfn++)
-			if (page_is_dumpable(pfn))
+			if (page_is_dumpable(pfn)) {
 				dd->valid_pages[i]++;
+				dd->total_valid_pages++;
+			}
 	}
 
 	return TRUE;
@@ -2090,6 +2094,7 @@ __diskdump_memory_dump(FILE *fp)
 	else
 		fprintf(fp, "\n");
 	fprintf(fp, " valid_pages: %lx\n", (ulong)dd->valid_pages);
+	fprintf(fp, " total_valid_pages: %lx\n", dd->total_valid_pages);
I'd like to use %ld for this, so I will change it.
Otherwise, the 2/2 patch looks good to me.
Acked-by: Kazuhito Hagio <k-hagio-ab(a)nec.com>
Thanks,
Kazu