On Mon, Mar 13, 2023 at 9:07 PM <crash-utility-request(a)redhat.com> wrote:
 Date: Mon, 13 Mar 2023 14:01:12 +0100
 From: Juergen Gross <jgross(a)suse.com>
 To: crash-utility(a)redhat.com
 Subject: [Crash-utility] [PATCH 3/3] xen: adjust to new scheduler
         structures
 Message-ID: <20230313130112.15353-4-jgross(a)suse.com>
 Scheduler data in the Xen hypervisor has been modified significantly.
 Adapt to the new structures and to the fields that have been removed.
 
I would suggest adding the related hypervisor commit here.
 Note that this is only the bare minimum to not let crash error out when
 opening a vmcore in Xen mode with a recent Xen version.
 Signed-off-by: Juergen Gross <jgross(a)suse.com>
 ---
  xen_hyper.c      | 67 +++++++++++++++++++++++++++++++++---------------
  xen_hyper_defs.h |  4 ++-
  2 files changed, 49 insertions(+), 22 deletions(-)
 diff --git a/xen_hyper.c b/xen_hyper.c
 index 72720e2..4c884dd 100644
 --- a/xen_hyper.c
 +++ b/xen_hyper.c
 @@ -417,13 +417,21 @@ void
  xen_hyper_misc_init(void)
  {
         XEN_HYPER_STRUCT_SIZE_INIT(schedule_data, "schedule_data");
 -       XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_schedule_lock, "schedule_data", "schedule_lock");
 -       XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_curr, "schedule_data", "curr");
 -       if (MEMBER_EXISTS("schedule_data", "idle"))
 -               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_idle, "schedule_data", "idle");
 -       XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_sched_priv, "schedule_data", "sched_priv");
 -       XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_s_timer, "schedule_data", "s_timer");
 -       XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_tick, "schedule_data", "tick");
 +       XEN_HYPER_STRUCT_SIZE_INIT(sched_resource, "sched_resource");
 +       if (XEN_HYPER_VALID_SIZE(schedule_data)) {
 +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_schedule_lock, "schedule_data", "schedule_lock");
 +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_curr, "schedule_data", "curr");
 +               if (MEMBER_EXISTS("schedule_data", "idle"))
 +                       XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_idle, "schedule_data", "idle");
 +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_sched_priv, "schedule_data", "sched_priv");
 +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_s_timer, "schedule_data", "s_timer");
 +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_tick, "schedule_data", "tick");
 +       } else if (XEN_HYPER_VALID_SIZE(sched_resource)) {
 +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_schedule_lock, "sched_resource", "schedule_lock");
 +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_curr, "sched_resource", "curr");
 +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_sched_priv, "sched_resource", "sched_priv");
 +               XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_s_timer, "sched_resource", "s_timer");
 +       }
         XEN_HYPER_STRUCT_SIZE_INIT(scheduler, "scheduler");
         XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_name, "scheduler", "name");
 @@ -467,6 +475,7 @@ xen_hyper_schedule_init(void)
         long *schedulers_buf;
         int nr_schedulers;
         struct xen_hyper_sched_context *schc;
 +       long buf_size;
         char *buf;
         char opt_name_buf[XEN_HYPER_OPT_SCHED_SIZE];
         int i, cpuid, flag;
 @@ -561,28 +570,43 @@ xen_hyper_schedule_init(void)
         }
         BZERO(xhscht->sched_context_array,
                 sizeof(struct xen_hyper_sched_context) * XEN_HYPER_MAX_CPUS());
 -       buf = GETBUF(XEN_HYPER_SIZE(schedule_data));
 -       if (symbol_exists("per_cpu__schedule_data")) {
 +       if (symbol_exists("per_cpu__sched_res")) {
 +               addr = symbol_value("per_cpu__sched_res");
 +               buf_size = XEN_HYPER_SIZE(sched_resource);
 +               flag = 0;
 +       } else if (symbol_exists("per_cpu__schedule_data")) {
                 addr = symbol_value("per_cpu__schedule_data");
 -               flag = TRUE;
 +               buf_size = XEN_HYPER_SIZE(schedule_data);
 +               flag = 1;
         } else {
                 addr = symbol_value("schedule_data");
 -               flag = FALSE;
 +               buf_size = XEN_HYPER_SIZE(schedule_data);
 +               flag = 2;
         }
 +       buf = GETBUF(buf_size);
         for_cpu_indexes(i, cpuid)
         {
                 schc = &xhscht->sched_context_array[cpuid];
                 if (flag) {
 -                       schc->schedule_data =
 -                               xen_hyper_per_cpu(addr, i);
 +                       if (flag == 1) {
 +                               schc->schedule_data =
 +                                       xen_hyper_per_cpu(addr, i);
 +                       } else {
 +                               schc->schedule_data = addr +
 +                                       XEN_HYPER_SIZE(schedule_data) * i;
 +                       }
 +                       if (!readmem(schc->schedule_data,
 +                               KVADDR, buf, XEN_HYPER_SIZE(schedule_data),
 +                               "schedule_data", RETURN_ON_ERROR)) {
 +                               error(FATAL, "cannot read schedule_data.\n");
 +                       }
 
As we mentioned in patch 2/3, readmem(..., FAULT_ON_ERROR) looks better
for this case: it fails fatally on its own, so the explicit
error(FATAL, ...) call becomes unnecessary.
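For example (untested, just a sketch of the idea), the first call could
collapse to:

        readmem(schc->schedule_data, KVADDR, buf,
                XEN_HYPER_SIZE(schedule_data),
                "schedule_data", FAULT_ON_ERROR);
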
                } else {
 -                       schc->schedule_data = addr +
 -                               XEN_HYPER_SIZE(schedule_data) * i;
 -               }
 -               if (!readmem(schc->schedule_data,
 -                       KVADDR, buf, XEN_HYPER_SIZE(schedule_data),
 -               "schedule_data", RETURN_ON_ERROR)) {
 -                       error(FATAL, "cannot read schedule_data.\n");
 +                       schc->sched_resource = xen_hyper_per_cpu(addr, i);
 +                       if (!readmem(schc->sched_resource,
 +                               KVADDR, buf, XEN_HYPER_SIZE(sched_resource),
 +                               "sched_resource", RETURN_ON_ERROR)) {
 +                               error(FATAL, "cannot read sched_resource.\n");
 +                       }
 
Ditto.
Thanks.
Lianbo
                }
                 schc->cpu_id = cpuid;
         schc->curr = ULONG(buf + XEN_HYPER_OFFSET(schedule_data_curr));
 @@ -1599,7 +1623,8 @@ xen_hyper_store_vcpu_context(struct xen_hyper_vcpu_context *vcc,
         vcc->next_in_list = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_next_in_list));
         if (XEN_HYPER_VALID_MEMBER(vcpu_sleep_tick))
                 vcc->sleep_tick = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_sleep_tick));
 -       vcc->sched_priv = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_sched_priv));
 +       if (XEN_HYPER_VALID_MEMBER(vcpu_sched_priv))
 +               vcc->sched_priv = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_sched_priv));
         vcc->state = INT(vcp + XEN_HYPER_OFFSET(vcpu_runstate) +
                 XEN_HYPER_OFFSET(vcpu_runstate_info_state));
         vcc->state_entry_time = ULONGLONG(vcp +
 diff --git a/xen_hyper_defs.h b/xen_hyper_defs.h
 index acf910a..908b470 100644
 --- a/xen_hyper_defs.h
 +++ b/xen_hyper_defs.h
 @@ -547,6 +547,7 @@ struct xen_hyper_pcpu_table {
  struct xen_hyper_sched_context {
         uint cpu_id;
         ulong schedule_data;
 +       ulong sched_resource;
         ulong curr;
         ulong idle;
         ulong sched_priv;
 @@ -595,6 +596,7 @@ struct xen_hyper_size_table {
  #endif
         long note_buf_t;                        /* elf note v1 */
         long schedule_data;
 +       long sched_resource;
         long scheduler;
         long shared_info;
         long timer;
 @@ -692,7 +694,7 @@ struct xen_hyper_offset_table {
         /* mm_struct */
         long mm_struct_pgd;
  #endif
 -       /* schedule_data */
 +       /* schedule_data or sched_resource */
         long schedule_data_schedule_lock;
         long schedule_data_curr;
         long schedule_data_idle;
 --
 2.35.3