sparc64: Document the shift counts used to validate linear kernel addresses.

This way we can see exactly what they are derived from, and in particular
how they would change if we were to use a different PAGE_OFFSET value.

Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Bob Picco <bob.picco@oracle.com>
This commit is contained in:
David S. Miller 2013-09-18 15:39:06 -07:00
Parent e0a45e3580
Commit bb7b435388
3 changed files with 24 additions and 6 deletions

View file

@@ -121,6 +121,22 @@ typedef pte_t *pgtable_t;
 #define PAGE_OFFSET_BY_BITS(X)	(-(_AC(1,UL) << (X)))
 #define PAGE_OFFSET		PAGE_OFFSET_BY_BITS(MAX_SUPPORTED_PA_BITS)
 
+/* The "virtual" portion of PAGE_OFFSET, used to clip off the non-physical
+ * bits of a linear kernel address.
+ */
+#define PAGE_OFFSET_VA_BITS	(64 - MAX_SUPPORTED_PA_BITS)
+
+/* The actual number of physical memory address bits we support, this is
+ * used to size various tables used to manage kernel TLB misses.
+ */
+#define MAX_PHYS_ADDRESS_BITS	41
+
+/* These two shift counts are used when indexing sparc64_valid_addr_bitmap
+ * and kpte_linear_bitmap.
+ */
+#define ILOG2_4MB		22
+#define ILOG2_256MB		28
+
 #ifndef __ASSEMBLY__
 #define __pa(x)			((unsigned long)(x) - PAGE_OFFSET)

View file

@@ -153,12 +153,12 @@ kvmap_dtlb_tsb4m_miss:
 	/* Clear the PAGE_OFFSET top virtual bits, shift
 	 * down to get PFN, and make sure PFN is in range.
 	 */
-	sllx		%g4, 21, %g5
+	sllx		%g4, PAGE_OFFSET_VA_BITS, %g5
 
 	/* Check to see if we know about valid memory at the 4MB
 	 * chunk this physical address will reside within.
 	 */
-	srlx		%g5, 21 + 41, %g2
+	srlx		%g5, PAGE_OFFSET_VA_BITS + MAX_PHYS_ADDRESS_BITS, %g2
 	brnz,pn		%g2, kvmap_dtlb_longpath
 	 nop
@@ -176,7 +176,7 @@ valid_addr_bitmap_patch:
 	or		%g7, %lo(sparc64_valid_addr_bitmap), %g7
 	.previous
 
-	srlx		%g5, 21 + 22, %g2
+	srlx		%g5, PAGE_OFFSET_VA_BITS + ILOG2_4MB, %g2
 	srlx		%g2, 6, %g5
 	and		%g2, 63, %g2
 	sllx		%g5, 3, %g5
@@ -189,9 +189,9 @@ valid_addr_bitmap_patch:
 2:	sethi		%hi(kpte_linear_bitmap), %g2
 
 	/* Get the 256MB physical address index. */
-	sllx		%g4, 21, %g5
+	sllx		%g4, PAGE_OFFSET_VA_BITS, %g5
 	or		%g2, %lo(kpte_linear_bitmap), %g2
-	srlx		%g5, 21 + 28, %g5
+	srlx		%g5, PAGE_OFFSET_VA_BITS + ILOG2_256MB, %g5
 	and		%g5, (32 - 1), %g7
 
 	/* Divide by 32 to get the offset into the bitmask. */

View file

@@ -1,11 +1,13 @@
 #ifndef _SPARC64_MM_INIT_H
 #define _SPARC64_MM_INIT_H
 
+#include <asm/page.h>
+
 /* Most of the symbols in this file are defined in init.c and
  * marked non-static so that assembler code can get at them.
  */
 
-#define MAX_PHYS_ADDRESS	(1UL << 41UL)
+#define MAX_PHYS_ADDRESS	(1UL << MAX_PHYS_ADDRESS_BITS)
 #define KPTE_BITMAP_CHUNK_SZ	(256UL * 1024UL * 1024UL)
 #define KPTE_BITMAP_BYTES	\
 	((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 4)