On Mon, Sep 18, 2023 at 7:34 PM lijiang <lijiang@redhat.com> wrote:
Hi, Aditya
Thank you for the patch.

On Mon, Sep 11, 2023 at 8:00 PM <crash-utility-request@redhat.com> wrote:
From: Aditya Gupta <adityag@linux.ibm.com>
To: crash-utility@redhat.com
Cc: Hari Bathini <hbathini@linux.ibm.com>, Mahesh J Salgaonkar
        <mahesh@linux.ibm.com>, Sourabh Jain <sourabhjain@linux.ibm.com>,
        "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
Subject: [Crash-utility] [PATCH] ppc64: do page traversal if
        vmemmap_list not populated
Message-ID: <20230911093032.419388-1-adityag@linux.ibm.com>
Content-Type: text/plain; charset="US-ASCII"; x-default=true

Currently the 'crash' tool fails on a vmcore collected on an upstream kernel on
PowerPC64 with the error:

    crash: invalid kernel virtual address: 0  type: "first list entry

Presently the address translation for vmemmap addresses is done using
the vmemmap_list. But with the below commit in Linux, vmemmap_list can
be empty, in case of Radix MMU on PowerPC64

    368a0590d954: (powerpc/book3s64/vmemmap: switch radix to use a
    different vmemmap handling function)

In case vmemmap_list is empty, its head is NULL, which crash tries
to access and fails due to dereferencing NULL.

Instead of depending on 'vmemmap_list' for address translation for
vmemmap addresses, do a kernel pagetable walk to get the physical
address associated with the given virtual address.

Reviewed-by: Hari Bathini <hbathini@linux.ibm.com>
Signed-off-by: Aditya Gupta <adityag@linux.ibm.com>

---

Testing
=======

Git tree with patch applied:
https://github.com/adi-g15-ibm/crash/tree/bugzilla-203296-list-v1

This can be tested with '/proc/vmcore' as the vmcore, since makedumpfile
also fails in absence of 'vmemmap_list' in upstream linux

The fix for makedumpfile will also be sent upstream.

---
---
 ppc64.c | 47 +++++++++++++++++++++++++++++++++--------------
 1 file changed, 33 insertions(+), 14 deletions(-)

diff --git a/ppc64.c b/ppc64.c
index fc34006f4863..1e84c5f56773 100644
--- a/ppc64.c
+++ b/ppc64.c
@@ -1280,8 +1280,30 @@ void ppc64_vmemmap_init(void)
        long backing_size, virt_addr_offset, phys_offset, list_offset;
        ulong *vmemmap_list;
        char *vmemmap_buf;
-       struct machine_specific *ms;
-       
+       struct machine_specific *ms = machdep->machspec;
+
+       ld = &list_data;
+       BZERO(ld, sizeof(struct list_data));
+
+       /*
+        * vmemmap_list is missing or set to 0 in the kernel would imply
+        * vmemmap region is mapped in the kernel pagetable. So, read vmemmap_list
+        * anyway and use the translation method accordingly.
+        */
+       readmem(symbol_value("vmemmap_list"), KVADDR, &ld->start, sizeof(void *),
+               "vmemmap_list", RETURN_ON_ERROR);
+       if (!ld->start) {
+               /*
+                * vmemmap_list is set to 0 or missing. Do kernel pagetable walk
+                * for vmemmap address translation.
+                */
+               ms->vmemmap_list = NULL;
+               ms->vmemmap_cnt = 0;
+
+               machdep->flags |= VMEMMAP_AWARE;
+               return;
+       }
+

I would suggest putting the 'readmem(symbol_value("vmemmap_list"),...)' after the 'kernel_symbol_exists("vmemmap_list")', and also check the returned value of readmem().

And other changes are fine to me.

Thanks
Lianbo

        if (!(kernel_symbol_exists("vmemmap_list")) ||
            !(kernel_symbol_exists("mmu_psize_defs")) ||
            !(kernel_symbol_exists("mmu_vmemmap_psize")) ||
@@ -1293,8 +1315,6 @@ void ppc64_vmemmap_init(void)
            !MEMBER_EXISTS("vmemmap_backing", "list"))
                return;

-       ms = machdep->machspec;
-
        backing_size = STRUCT_SIZE("vmemmap_backing");
        virt_addr_offset = MEMBER_OFFSET("vmemmap_backing", "virt_addr");
        phys_offset = MEMBER_OFFSET("vmemmap_backing", "phys");
@@ -1313,14 +1333,8 @@ void ppc64_vmemmap_init(void)

        ms->vmemmap_psize = 1 << shift;

-        ld =  &list_data;
-        BZERO(ld, sizeof(struct list_data));
-       if (!readmem(symbol_value("vmemmap_list"),
-           KVADDR, &ld->start, sizeof(void *), "vmemmap_list",
-           RETURN_ON_ERROR))
-               return;
-        ld->end = symbol_value("vmemmap_list");
-        ld->list_head_offset = list_offset;
+       ld->end = symbol_value("vmemmap_list");
+       ld->list_head_offset = list_offset;

         hq_open();
        cnt = do_list(ld);
@@ -1366,7 +1380,7 @@ ppc64_vmemmap_to_phys(ulong kvaddr, physaddr_t *paddr, int verbose)
 {
        int i;
        ulong offset;
-       struct machine_specific *ms;
+       struct machine_specific *ms = machdep->machspec;

        if (!(machdep->flags & VMEMMAP_AWARE)) {
                /*
@@ -1386,7 +1400,12 @@ ppc64_vmemmap_to_phys(ulong kvaddr, physaddr_t *paddr, int verbose)
                return FALSE;
        }

-       ms = machdep->machspec;
+       /**
+        * When vmemmap_list is not populated, kernel does the mapping in init_mm
+        * page table, so do a pagetable walk in kernel page table
+        */
+       if (!ms->vmemmap_list)
+               return ppc64_vtop_level4(kvaddr, (ulong *)vt->kernel_pgd[0], paddr, verbose);

        for (i = 0; i < ms->vmemmap_cnt; i++) {
                if ((kvaddr >= ms->vmemmap_list[i].virt) &&
--
2.41.0