-----Original Message-----
 crash warns that an ELF dump is incomplete like this:
 
   WARNING: vmcore: may be truncated or incomplete
            PT_LOAD p_offset: 218960740
                    p_filesz: 4194304
              bytes required: 223155044
               dumpfile size: 219960740
 
 The warning saves a lot of time when figuring out what went wrong, but
 crash lacks explicit incomplete-dump detection for compressed kdumps.
 
 The change adds the warning for compressed kdumps:
 
   WARNING: vmcore: may be truncated or incomplete
                 data_offset: 262144
                  block_size: 65536
           total_valid_pages: 2438
              first_empty_pd: 100
              bytes required: 157467661
               dumpfile size: 265982
 
 "bytes required" is computed as:
 
 	data_offset (kdump header + bitmaps)
             +
         total_valid_pages * page descriptor size (page descriptors)
             +
         zero page (that immediately follows page descriptors)
             +
         compressed size of every present page in the list of valid pages
             +
        block_size for every incomplete page (i.e. the page descriptor has
        a zero offset)
hmm, this will be good for debugging, but the "bytes required" will not
match with the size of the complete dump at all if it's compressed, and
it will also be confusing.
So, I would prefer the number of valid page descriptors rather than
the "bytes required" and "dumpfile size".  What do you think?
The other patches look good to me.
Thanks,
Kazu
 
 The warning is also printed for incomplete split dumps.
 
 Signed-off-by: Roman Bolshakov <r.bolshakov(a)yadro.com>
 ---
  diskdump.c | 71 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
  1 file changed, 69 insertions(+), 2 deletions(-)
 
 diff --git a/diskdump.c b/diskdump.c
 index de3eeb2..5151db1 100644
 --- a/diskdump.c
 +++ b/diskdump.c
 @@ -23,6 +23,8 @@
   * GNU General Public License for more details.
   */
 
 +#define _LARGEFILE64_SOURCE 1  /* stat64() */
 +
  #include "defs.h"
  #include "diskdump.h"
  #include "xen_dom0.h"
 @@ -548,6 +550,12 @@ read_dump_header(char *file)
  	ulong pfn;
  	int i, j, max_sect_len;
  	int is_split = 0;
 +	struct stat64 stat;
 +	page_desc_t pd;
 +	ulong page_idx;
 +	int first_empty_pd = 0;
 +	int zero_page_counted = 0;
 +	size_t expected_size = 0;
 
  	if (block_size < 0)
  		return FALSE;
 @@ -898,13 +906,72 @@ restart:
  		pfn = start;
  	}
 
 +	expected_size = dd->data_offset;
  	dd->valid_pages = calloc(sizeof(ulong), max_sect_len + 1);
  	dd->max_sect_len = max_sect_len;
  	for (i = 1; i < max_sect_len + 1; i++) {
  		dd->valid_pages[i] = dd->valid_pages[i - 1];
  		for (j = 0; j < BITMAP_SECT_LEN; j++, pfn++)
 -			if (page_is_dumpable(pfn))
 -				dd->valid_pages[i]++;
 +			if (page_is_dumpable(pfn)) {
 +				page_idx = dd->valid_pages[i]++;
 +
 +				offset = dd->data_offset +
 +					 page_idx * sizeof(pd);
 +
 +				if (read_pd(dd->dfd, offset, &pd)) {
 +					/*
 +					 * Truncated page descriptor at most
 +					 * references full page.
 +					 */
 +					expected_size += block_size;
 +					goto next;
 +				}
 +
 +				if (pd.offset == 0) {
 +					if (!first_empty_pd)
 +						first_empty_pd = page_idx;
 +					/*
 +					 * Incomplete pages at most use the
 +					 * whole page.
 +					 */
 +					expected_size += block_size;
 +				} else if (!pd.flags) {
 +					/*
 +					 * Zero page has no compression flags.
 +					 */
 +					if (!zero_page_counted) {
 +						expected_size += block_size;
 +						zero_page_counted = 1;
 +					}
 +				} else if (pd.flags) {
 +					/* Regular compressed page */
 +					expected_size += pd.size;
 +				}
 +
 +next:
 +				expected_size += sizeof(pd);
 +			}
 +	}
 +
 +	if (stat64(dd->filename, &stat) < 0) {
 +		error(INFO, "%s: cannot stat %s\n",
 +		      DISKDUMP_VALID() ? "diskdump" : "compressed kdump",
 +		      dd->filename);
 +		goto err;
 +	}
 +
 +	if (expected_size > stat.st_size || first_empty_pd) {
 +		error(WARNING,
 +		      "%s: may be truncated or incomplete\n"
 +		      "              data_offset: %lld\n"
 +		      "               block_size: %lld\n"
 +		      "        total_valid_pages: %lld\n"
 +		      "           first_empty_pd: %lld\n"
 +		      "           bytes required: %lld\n"
 +		      "            dumpfile size: %lld\n\n",
 +		      dd->filename, dd->data_offset,
 +		      dd->block_size, dd->valid_pages[dd->max_sect_len],
 +		      first_empty_pd, expected_size, stat.st_size);
  	}
 
          return TRUE;
 --
 2.32.0