With enough contiguous physical memory we can have a 1GB block. This has
been seen only in the linear mapping region in kernel space. I have only
verified this for VM_L3_4K.
reference: alloc_init_pud(), use_1G_block() [arch/arm64/mm/mmu.c]
This problem can, for instance, be seen with the vtop command; example:
crash> vtop ffffffd988008000
VIRTUAL PHYSICAL
ffffffd988008000 148008000
PAGE DIRECTORY: ffffff9188c6c000
PGD: ffffff9188c6cb30 => 68000140000711
PMD: ffffffd980000200 => aa0003e2b9402fe1
PAGE: 3e2b9400000 (2MB)
PTE PHYSICAL FLAGS
aa0003e2b9402fe1 3e2b9402000 (VALID|USER|RDONLY|SHARED|AF|NG)
---
arm64.c | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/arm64.c b/arm64.c
index 37aed07..4787fa6 100644
--- a/arm64.c
+++ b/arm64.c
@@ -1260,11 +1260,14 @@ arm64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t
*paddr, int verbos
#define PTE_TO_PHYS(pteval) (machdep->max_physmem_bits == 52 ? \
(((pteval & PTE_ADDR_LOW) | ((pteval & PTE_ADDR_HIGH) << 36))) :
(pteval & PTE_ADDR_LOW))
+#define PUD_TYPE_MASK 3
+#define PUD_TYPE_SECT 1
#define PMD_TYPE_MASK 3
#define PMD_TYPE_SECT 1
#define PMD_TYPE_TABLE 2
#define SECTION_PAGE_MASK_2MB ((long)(~((MEGABYTES(2))-1)))
#define SECTION_PAGE_MASK_512MB ((long)(~((MEGABYTES(512))-1)))
+#define SECTION_PAGE_MASK_1GB ((long)(~((GIGABYTES(1))-1)))
static int
arm64_vtop_2level_64k(ulong pgd, ulong vaddr, physaddr_t *paddr, int verbose)
@@ -1420,6 +1423,15 @@ arm64_vtop_3level_4k(ulong pgd, ulong vaddr, physaddr_t *paddr, int
verbose)
if (!pgd_val)
goto no_page;
+ if ((pgd_val & PUD_TYPE_MASK) == PUD_TYPE_SECT) {
+ ulong sectionbase = (pgd_val & SECTION_PAGE_MASK_1GB) &
PHYS_MASK;
+ if (verbose) {
+ fprintf(fp, " PAGE: %lx (1GB)\n\n", sectionbase);
+ arm64_translate_pte(pgd_val, 0, 0);
+ }
+ *paddr = sectionbase + (vaddr & ~SECTION_PAGE_MASK_1GB);
+ return TRUE;
+ }
/*
* #define __PAGETABLE_PUD_FOLDED
*/
--
2.17.1