On 15.03.23 06:54, lijiang wrote:
 On Mon, Mar 13, 2023 at 9:07 PM <crash-utility-request@redhat.com> wrote:
 
     Date: Mon, 13 Mar 2023 14:01:12 +0100
     From: Juergen Gross <jgross@suse.com>
     To: crash-utility@redhat.com
     Subject: [Crash-utility] [PATCH 3/3] xen: adjust to new scheduler
              structures
     Message-ID: <20230313130112.15353-4-jgross@suse.com>
 
     The scheduler data in the Xen hypervisor has been reworked
     significantly. Adapt to the new structures and to the fields that
     have been removed.
 
 I would suggest adding the related hypervisor commit here.
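 
 For context, the rework replaced the per-CPU struct schedule_data with
 struct sched_resource. A reduced sketch of the two layouts, limited to
 the members this patch reads (the real Xen definitions carry more
 fields, so treat this as illustrative only):
 
     /* old Xen (sketch): see xen/include/xen/sched-if.h */
     struct schedule_data {
         spinlock_t   *schedule_lock;  /* lock covering this CPU's scheduling */
         struct vcpu  *curr;           /* vcpu currently running on this CPU */
         void         *sched_priv;     /* scheduler-private per-CPU data */
         struct timer  s_timer;        /* scheduling timer */
     };
 
     /* new Xen (sketch): see xen/common/sched/private.h */
     struct sched_resource {
         spinlock_t        *schedule_lock;
         struct sched_unit *curr;      /* core scheduling: sched_unit replaces vcpu */
         void              *sched_priv;
         struct timer       s_timer;
     };
 
 In particular, "tick" is gone and "curr" now points to a struct
 sched_unit rather than a struct vcpu, which is why the offsets have to
 be probed at runtime as done below.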
 
     Note that this is only the bare minimum needed to keep crash from
     erroring out when opening a vmcore in Xen mode with a recent Xen
     version.
 
     Signed-off-by: Juergen Gross <jgross@suse.com>
     ---
       xen_hyper.c      | 67 +++++++++++++++++++++++++++++++++---------------
       xen_hyper_defs.h |  4 ++-
       2 files changed, 49 insertions(+), 22 deletions(-)
 
     diff --git a/xen_hyper.c b/xen_hyper.c
     index 72720e2..4c884dd 100644
     --- a/xen_hyper.c
     +++ b/xen_hyper.c
     @@ -417,13 +417,21 @@ void
      xen_hyper_misc_init(void)
      {
             XEN_HYPER_STRUCT_SIZE_INIT(schedule_data, "schedule_data");
     -       XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_schedule_lock, "schedule_data", "schedule_lock");
     -       XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_curr, "schedule_data", "curr");
     -       if (MEMBER_EXISTS("schedule_data", "idle"))
     -               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_idle, "schedule_data", "idle");
     -       XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_sched_priv, "schedule_data", "sched_priv");
     -       XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_s_timer, "schedule_data", "s_timer");
     -       XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_tick, "schedule_data", "tick");
     +       XEN_HYPER_STRUCT_SIZE_INIT(sched_resource, "sched_resource");
     +       if (XEN_HYPER_VALID_SIZE(schedule_data)) {
     +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_schedule_lock, "schedule_data", "schedule_lock");
     +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_curr, "schedule_data", "curr");
     +               if (MEMBER_EXISTS("schedule_data", "idle"))
     +                       XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_idle, "schedule_data", "idle");
     +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_sched_priv, "schedule_data", "sched_priv");
     +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_s_timer, "schedule_data", "s_timer");
     +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_tick, "schedule_data", "tick");
     +       } else if (XEN_HYPER_VALID_SIZE(sched_resource)) {
     +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_schedule_lock, "sched_resource", "schedule_lock");
     +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_curr, "sched_resource", "curr");
     +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_sched_priv, "sched_resource", "sched_priv");
     +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_s_timer, "sched_resource", "s_timer");
     +       }
 
             XEN_HYPER_STRUCT_SIZE_INIT(scheduler, "scheduler");
             XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_name, "scheduler", "name");
     @@ -467,6 +475,7 @@ xen_hyper_schedule_init(void)
              long *schedulers_buf;
              int nr_schedulers;
              struct xen_hyper_sched_context *schc;
     +       long buf_size;
              char *buf;
              char opt_name_buf[XEN_HYPER_OPT_SCHED_SIZE];
              int i, cpuid, flag;
     @@ -561,28 +570,43 @@ xen_hyper_schedule_init(void)
              }
              BZERO(xhscht->sched_context_array,
                      sizeof(struct xen_hyper_sched_context) * XEN_HYPER_MAX_CPUS());
     -       buf = GETBUF(XEN_HYPER_SIZE(schedule_data));
     -       if (symbol_exists("per_cpu__schedule_data")) {
     +       if (symbol_exists("per_cpu__sched_res")) {
     +               addr = symbol_value("per_cpu__sched_res");
     +               buf_size = XEN_HYPER_SIZE(sched_resource);
     +               flag = 0;
     +       } else if (symbol_exists("per_cpu__schedule_data")) {
                      addr = symbol_value("per_cpu__schedule_data");
     -               flag = TRUE;
     +               buf_size = XEN_HYPER_SIZE(schedule_data);
     +               flag = 1;
              } else {
                      addr = symbol_value("schedule_data");
     -               flag = FALSE;
     +               buf_size = XEN_HYPER_SIZE(schedule_data);
     +               flag = 2;
              }
     +       buf = GETBUF(buf_size);
              for_cpu_indexes(i, cpuid)
              {
                      schc = &xhscht->sched_context_array[cpuid];
                      if (flag) {
     -                       schc->schedule_data =
     -                               xen_hyper_per_cpu(addr, i);
     +                       if (flag == 1) {
     +                               schc->schedule_data =
     +                                       xen_hyper_per_cpu(addr, i);
     +                       } else {
     +                               schc->schedule_data = addr +
     +                                       XEN_HYPER_SIZE(schedule_data) * i;
     +                       }
     +                       if (!readmem(schc->schedule_data,
     +                               KVADDR, buf, XEN_HYPER_SIZE(schedule_data),
     +                               "schedule_data", RETURN_ON_ERROR)) {
     +                               error(FATAL, "cannot read schedule_data.\n");
     +                       }
 
 
 As we mentioned in patch 2/3, readmem(..., FAULT_ON_ERROR) looks better
 for this case; FAULT_ON_ERROR lets readmem() raise the fatal error
 itself, so the explicit error(FATAL, ...) call becomes unnecessary.
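 
 For illustration, an untested sketch of what that would collapse to:
 
     /* FAULT_ON_ERROR makes readmem() abort with a fatal error on
      * failure, so no return-value check or error() call is needed. */
     readmem(schc->schedule_data, KVADDR, buf,
             XEN_HYPER_SIZE(schedule_data),
             "schedule_data", FAULT_ON_ERROR);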
 
Okay.
Juergen