+ */
+static int
+find_kernel_start(ulong *va, ulong *pa)
+{
+ int i, pgd_idx, pud_idx, pmd_idx, pte_idx;
+ uint64_t pgd_pte, pud_pte, pmd_pte, pte;
+
+ pgd_idx = pgd_index(__START_KERNEL_map);
+ pud_idx = pud_index(__START_KERNEL_map);
+ pmd_idx = pmd_index(__START_KERNEL_map);
+ pte_idx = pte_index(__START_KERNEL_map);
+
+ for (; pgd_idx < PTRS_PER_PGD; pgd_idx++) {
+ pgd_pte = ULONG(machdep->pgd + pgd_idx * sizeof(uint64_t));
machdep->pgd is not guaranteed to be aligned to PAGE_SIZE.
This could refer to the pgd for userland that resides in the next page.
I guess it's necessary to get the 1st pgd entry in the page that machdep->pgd belongs to.
Like this?
pgd_pte = ULONG((machdep->pgd & PHYSICAL_PAGE_MASK) + pgd_idx * sizeof(uint64_t));
As I understand it, machdep->pgd is a buffer: a cached copy of some pgd table from the dump.
machdep->pgd does not have to be aligned in memory.
We just need to read at the offset "pgd_idx * sizeof(uint64_t)" to get our pgd_pte.
I think "& PHYSICAL_PAGE_MASK" is not needed here.
Let me know if I'm wrong.
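
To make the distinction concrete, here is a minimal sketch (not crash source; the
names are illustrative) of why the mask does not apply to the buffer pointer itself:

	#include <stdint.h>
	#include <string.h>

	/* machdep->pgd is a page-sized buffer whose entries were already
	 * copied out of the dump by FILL_PGD(); reading entry N is a plain
	 * offset into that buffer, so no physical-address masking applies
	 * to the pointer. */
	static uint64_t
	read_pgd_entry(const char *pgd_buf, int pgd_idx)
	{
		uint64_t entry;

		/* equivalent of ULONG(machdep->pgd + pgd_idx * sizeof(uint64_t)) */
		memcpy(&entry, pgd_buf + pgd_idx * sizeof(uint64_t), sizeof(entry));
		return entry;
	}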
But I'm going to introduce a pgd prefetch inside find_kernel_start() so that it does not
depend on a prefetch from the caller. The caller must then provide the physical address
of the top-level pgd, as in the sketch and the updated hunk below.
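
For illustration, the caller side of the new contract might look like this (a minimal
sketch, not from the patch; vmcore_cr3 is a hypothetical stand-in for wherever the
dump's top-level table address comes from, and TRUE-on-success is assumed):

	/* Hypothetical caller: hand the physical address of the top-level
	 * page table (e.g. the CR3 value captured in the dump) straight to
	 * find_kernel_start(); the function now does its own FILL_PGD()
	 * prefetch, so the caller neither caches nor masks anything. */
	ulong va, pa;

	if (find_kernel_start(vmcore_cr3, &va, &pa) == TRUE)
		fprintf(fp, "kernel start: va: %lx pa: %lx\n", va, pa);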
@@ -350,7 +350,7 @@ quit:
* does not support 5-level paging.
*/
static int
-find_kernel_start(ulong *va, ulong *pa)
+find_kernel_start(uint64_t pgd, ulong *va, ulong *pa)
{
int i, pgd_idx, pud_idx, pmd_idx, pte_idx;
uint64_t pgd_pte, pud_pte, pmd_pte, pte;
@@ -361,6 +358,7 @@ find_kernel_start(ulong *va, ulong *pa)
pmd_idx = pmd_index(__START_KERNEL_map);
pte_idx = pte_index(__START_KERNEL_map);
+ FILL_PGD(pgd & PHYSICAL_PAGE_MASK, PHYSADDR, PAGESIZE());
for (; pgd_idx < PTRS_PER_PGD; pgd_idx++) {
pgd_pte = ULONG(machdep->pgd + pgd_idx * sizeof(uint64_t));
if (pgd_pte & _PAGE_PRESENT)