ARM: 6674/1: LPAE: use long long format when printing physical addresses and ptes
For the kernel to support 2-level and 3-level page tables, physical addresses (and also page table entries) need to be 32 or 64 bits wide depending upon the configuration. This patch uses the %08llx conversion specifier for physical addresses and page table entries, ensuring that they are cast to (long long) so that common code can be used regardless of the datatype widths.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
This commit is contained in:
Parent: 410f14837a
Commit: 29a38193c1
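To illustrate why the cast matters (a minimal standalone sketch, not kernel code: the CONFIG_PHYS_64BIT switch, the phys_addr_t typedef and the sample value are assumptions made up for this example), printf-style conversion specifiers must match the width of the argument actually placed in the variadic argument list, so a value that can be either 32 or 64 bits wide is widened explicitly before it meets a single %08llx:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's phys_addr_t: 32 bits with 2-level page
 * tables, 64 bits when 3-level (LPAE) tables are enabled. The
 * CONFIG_PHYS_64BIT macro is invented for this sketch. */
#ifdef CONFIG_PHYS_64BIT
typedef uint64_t phys_addr_t;
#else
typedef uint32_t phys_addr_t;
#endif

static void report_memory(phys_addr_t start)
{
	/*
	 * Casting to (long long) lets one %08llx specifier serve both
	 * configurations; passing a 32-bit value straight to %llx would
	 * make printf read 8 bytes where only 4 were passed.
	 */
	printf("ignoring memory at 0x%08llx\n", (long long)start);
}

int main(void)
{
	report_memory((phys_addr_t)0x80000000u);
	return 0;
}

The patch below applies exactly this cast-and-%08llx pattern to every printk that prints a physical address or a page table entry.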
@@ -448,7 +448,7 @@ static int __init arm_add_memory(unsigned long start, unsigned long size)
 	if (meminfo.nr_banks >= NR_BANKS) {
 		printk(KERN_CRIT "NR_BANKS too low, "
-			"ignoring memory at %#lx\n", start);
+			"ignoring memory at 0x%08llx\n", (long long)start);
 		return -EINVAL;
 	}
@@ -710,17 +710,17 @@ EXPORT_SYMBOL(__readwrite_bug);
 void __pte_error(const char *file, int line, pte_t pte)
 {
-	printk("%s:%d: bad pte %08lx.\n", file, line, pte_val(pte));
+	printk("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte));
 }
 
 void __pmd_error(const char *file, int line, pmd_t pmd)
 {
-	printk("%s:%d: bad pmd %08lx.\n", file, line, pmd_val(pmd));
+	printk("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
 }
 
 void __pgd_error(const char *file, int line, pgd_t pgd)
 {
-	printk("%s:%d: bad pgd %08lx.\n", file, line, pgd_val(pgd));
+	printk("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
 }
 
 asmlinkage void __div0(void)
@@ -76,7 +76,8 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 	printk(KERN_ALERT "pgd = %p\n", mm->pgd);
 	pgd = pgd_offset(mm, addr);
-	printk(KERN_ALERT "[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));
+	printk(KERN_ALERT "[%08lx] *pgd=%08llx",
+			addr, (long long)pgd_val(*pgd));
 
 	do {
 		pmd_t *pmd;
@@ -92,7 +93,7 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 		pmd = pmd_offset(pgd, addr);
 		if (PTRS_PER_PMD != 1)
-			printk(", *pmd=%08lx", pmd_val(*pmd));
+			printk(", *pmd=%08llx", (long long)pmd_val(*pmd));
 
 		if (pmd_none(*pmd))
 			break;
@@ -107,8 +108,9 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 			break;
 
 		pte = pte_offset_map(pmd, addr);
-		printk(", *pte=%08lx", pte_val(*pte));
-		printk(", *ppte=%08lx", pte_val(pte[PTE_HWTABLE_PTRS]));
+		printk(", *pte=%08llx", (long long)pte_val(*pte));
+		printk(", *ppte=%08llx",
+				(long long)pte_val(pte[PTE_HWTABLE_PTRS]));
 		pte_unmap(pte);
 	} while(0);
@@ -597,7 +597,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
 	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
 		printk(KERN_ERR "MM: CPU does not support supersection "
 		       "mapping for 0x%08llx at 0x%08lx\n",
-		       __pfn_to_phys((u64)md->pfn), addr);
+		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 		return;
 	}
@@ -610,14 +610,14 @@ static void __init create_36bit_mapping(struct map_desc *md,
 	if (type->domain) {
 		printk(KERN_ERR "MM: invalid domain in supersection "
 		       "mapping for 0x%08llx at 0x%08lx\n",
-		       __pfn_to_phys((u64)md->pfn), addr);
+		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 		return;
 	}
 
 	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
-		printk(KERN_ERR "MM: cannot create mapping for "
-		       "0x%08llx at 0x%08lx invalid alignment\n",
-		       __pfn_to_phys((u64)md->pfn), addr);
+		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
+		       " at 0x%08lx invalid alignment\n",
+		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 		return;
 	}
@@ -656,17 +656,17 @@ static void __init create_mapping(struct map_desc *md)
 	pgd_t *pgd;
 
 	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
-		printk(KERN_WARNING "BUG: not creating mapping for "
-		       "0x%08llx at 0x%08lx in user region\n",
-		       __pfn_to_phys((u64)md->pfn), md->virtual);
+		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
+		       " at 0x%08lx in user region\n",
+		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 		return;
 	}
 
 	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
 	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
-		printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
-		       "overlaps vmalloc space\n",
-		       __pfn_to_phys((u64)md->pfn), md->virtual);
+		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
+		       " at 0x%08lx overlaps vmalloc space\n",
+		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 	}
 
 	type = &mem_types[md->type];
@@ -684,9 +684,9 @@ static void __init create_mapping(struct map_desc *md)
 	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 
 	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
-		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
+		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
 		       "be mapped using pages, ignoring.\n",
-		       __pfn_to_phys(md->pfn), addr);
+		       (long long)__pfn_to_phys(md->pfn), addr);
 		return;
 	}