On 2023/02/22 15:32, Tao Liu wrote:
 Kernels with the maple tree enabled do not have mmap as a member of
 mm_struct [1],
 so the OFFSET(mm_struct_mmap) case needs to be handled differently for
 maple-tree kernels.
 
 Before:
 crash> search -u a
 
 search: invalid structure member offset: mm_struct_mmap
          FILE: memory.c  LINE: 14255  FUNCTION: address_space_start()
 
Thank you for the patch.
My test did not include "search -u", so I added it...
 
 [crash] error trace: 549500 => 548fff => 5f1c91 => 5f1c13
 
    5f1c13: OFFSET_verify.part.36+51
    5f1c91: OFFSET_verify+49
    548fff: address_space_start+106
    549500: cmd_search+855
 
 search: invalid structure member offset: mm_struct_mmap
          FILE: memory.c  LINE: 14255  FUNCTION: address_space_start()
 
 After:
 crash> search -u a
 7ffea63e6440: a
 
 [1]:
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit...
 
 Signed-off-by: Tao Liu <ltao(a)redhat.com>
 ---
   memory.c | 87 ++++++++++++++++++++++++++++++++++++++++++--------------
   1 file changed, 65 insertions(+), 22 deletions(-)
 
 diff --git a/memory.c b/memory.c
 index d9cd616..63ea9f4 100644
 --- a/memory.c
 +++ b/memory.c
 @@ -14245,14 +14245,28 @@ vaddr_type(ulong vaddr, struct task_context *tc)
   static int
   address_space_start(struct task_context *tc, ulong *addr)
   {
 -        ulong vma;
 +	ulong mm_mt, entry_num, i, vma = 0;
           char *vma_buf;
 +	struct list_pair *entry_list;
   
           if (!tc->mm_struct)
                   return FALSE;
   
 -        fill_mm_struct(tc->mm_struct);
 -        vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap));
 +	if (INVALID_MEMBER(mm_struct_mmap) && VALID_MEMBER(mm_struct_mm_mt)) {
 +		mm_mt = tc->mm_struct + OFFSET(mm_struct_mm_mt);
 +		entry_num = do_maple_tree(mm_mt, MAPLE_TREE_COUNT, NULL);
 +		entry_list = (struct list_pair *)GETBUF(entry_num * sizeof(struct list_pair));
 +		do_maple_tree(mm_mt, MAPLE_TREE_GATHER, entry_list);
 +		for (i = 0; i < entry_num; i++) {
 +			if (!!(vma = (ulong)entry_list[i].value))
 +				break;
 +		}
 +		FREEBUF(entry_list);
 +	} else {
 +		fill_mm_struct(tc->mm_struct);
 +		vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap));
 +	}
 +
           if (!vma)
                   return FALSE;
   	vma_buf = fill_vma_cache(vma);
 @@ -15491,6 +15505,30 @@ search_physical(struct searchinfo *si)
   	FREEBUF(pagebuf);
   }
   
 +static bool
 +check_vma(ulong vma, ulong vaddr, ulong *vm_next, ulong *nextvaddr)
 +{
 +	char *vma_buf;
 +	ulong vm_start, vm_end;
 +
 +	vma_buf = fill_vma_cache(vma);
 +
 +	vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start));
 +	vm_end = ULONG(vma_buf + OFFSET(vm_area_struct_vm_end));
 +	if (vm_next)
 +		*vm_next = ULONG(vma_buf + OFFSET(vm_area_struct_vm_next));
 +
 +	if (vaddr <= vm_start) {
 +		*nextvaddr = vm_start;
 +		return TRUE;
 +	}
 +
 +	if ((vaddr > vm_start) && (vaddr < vm_end)) {
 +		*nextvaddr = vaddr;
 +		return TRUE;
 +	}
 +	return FALSE;
 +}
   
   /*
    *  Return the next mapped user virtual address page that comes after
 @@ -15503,34 +15541,39 @@ next_upage(struct task_context *tc, ulong vaddr, ulong
*nextvaddr)
   	char *vma_buf;
           ulong vm_start, vm_end;
   	ulong vm_next;
 
cc -c -g -DX86_64 -DLZO -DSNAPPY -DZSTD -DGDB_10_2  memory.c -Wall -O2 -Wstrict-prototypes
-Wmissing-prototypes -fstack-protector -Wformat-security
memory.c: In function ‘next_upage’:
memory.c:15542:25: warning: unused variable ‘vm_end’ [-Wunused-variable]
          ulong vm_start, vm_end;
                          ^~~~~~
memory.c:15542:15: warning: unused variable ‘vm_start’ [-Wunused-variable]
          ulong vm_start, vm_end;
                ^~~~~~~~
memory.c:15541:8: warning: unused variable ‘vma_buf’ [-Wunused-variable]
   char *vma_buf;
         ^~~~~~~
but we can remove these when applying.
 +	ulong mm_mt, entry_num, i;
 +	struct list_pair *entry_list;
   
           if (!tc->mm_struct)
                   return FALSE;
   
 -        fill_mm_struct(tc->mm_struct);
 -	vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap));
 +	fill_mm_struct(tc->mm_struct);
 +	vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE();  /* first possible page */
   	total_vm = ULONG(tt->mm_struct + OFFSET(mm_struct_total_vm));
 -
 -	if (!vma || (total_vm == 0))
 +	if (!total_vm)
   		return FALSE;
   
 -	vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE();  /* first possible page */
 -
 -        for ( ; vma; vma = vm_next) {
 -                vma_buf = fill_vma_cache(vma);
 -
 -                vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start));
 -                vm_end = ULONG(vma_buf + OFFSET(vm_area_struct_vm_end));
 -                vm_next = ULONG(vma_buf + OFFSET(vm_area_struct_vm_next));
 -
 -		if (vaddr <= vm_start) {
 -			*nextvaddr = vm_start;
 -			return TRUE;
 +	if (INVALID_MEMBER(mm_struct_mmap) && VALID_MEMBER(mm_struct_mm_mt)) {
 +		mm_mt = tc->mm_struct + OFFSET(mm_struct_mm_mt);
 +		entry_num = do_maple_tree(mm_mt, MAPLE_TREE_COUNT, NULL);
 +		entry_list = (struct list_pair *)GETBUF(entry_num * sizeof(struct list_pair));
 +		do_maple_tree(mm_mt, MAPLE_TREE_GATHER, entry_list);
 +		for (i = 0; i < entry_num; i++) {
 +			if (!!(vma = (ulong)entry_list[i].value) &&
 +			    check_vma(vma, vaddr, NULL, nextvaddr)) {
 +				FREEBUF(entry_list);
 +				return TRUE;
 +			}
   		}
 +		FREEBUF(entry_list);
 +	} else {
 +		vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap));
   
 -		if ((vaddr > vm_start) && (vaddr < vm_end)) {
 -			*nextvaddr = vaddr;
 -			return TRUE;
 +		if (!vma)
 +			return FALSE;
 +		for ( ; vma; vma = vm_next) {
 +			if (check_vma(vma, vaddr, &vm_next, nextvaddr))
 +				return TRUE;
   		}
   	}
   
 
The above looks a bit inefficient, given the structure of the search
function and the maple tree operations involved, but it is a reasonable
alternative approach.
Acked-by: Kazuhito Hagio <k-hagio-ab(a)nec.com>
Thanks,
Kazu