x86/mm: Fix __split_large_page() to handle large PAT bit

__split_large_page() is called from __change_page_attr() to change
the mapping attribute by splitting a given large page into smaller
pages.  This function uses pte_pfn() and pte_pgprot() for PUD/PMD,
which do not handle the large PAT bit properly.

Fix __split_large_page() by using the corresponding pud/pmd pfn/
pgprot interfaces.

Also remove '#ifdef CONFIG_X86_64', which is not necessary.

Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Konrad Wilk <konrad.wilk@oracle.com>
Cc: Robert Elliot <elliott@hpe.com>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1442514264-12475-11-git-send-email-toshi.kani@hpe.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
Toshi Kani 2015-09-17 12:24:23 -06:00 committed by Thomas Gleixner
Parent 3a19109efb
Commit d551aaa2f7
1 changed file with 19 additions and 12 deletions

View file

@@ -605,7 +605,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
struct page *base) struct page *base)
{ {
pte_t *pbase = (pte_t *)page_address(base); pte_t *pbase = (pte_t *)page_address(base);
unsigned long pfn, pfninc = 1; unsigned long ref_pfn, pfn, pfninc = 1;
unsigned int i, level; unsigned int i, level;
pte_t *tmp; pte_t *tmp;
pgprot_t ref_prot; pgprot_t ref_prot;
@@ -622,26 +622,33 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
} }
paravirt_alloc_pte(&init_mm, page_to_pfn(base)); paravirt_alloc_pte(&init_mm, page_to_pfn(base));
ref_prot = pte_pgprot(pte_clrhuge(*kpte));
/* promote PAT bit to correct position */ switch (level) {
if (level == PG_LEVEL_2M) case PG_LEVEL_2M:
ref_prot = pmd_pgprot(*(pmd_t *)kpte);
/* clear PSE and promote PAT bit to correct position */
ref_prot = pgprot_large_2_4k(ref_prot); ref_prot = pgprot_large_2_4k(ref_prot);
ref_pfn = pmd_pfn(*(pmd_t *)kpte);
break;
#ifdef CONFIG_X86_64 case PG_LEVEL_1G:
if (level == PG_LEVEL_1G) { ref_prot = pud_pgprot(*(pud_t *)kpte);
ref_pfn = pud_pfn(*(pud_t *)kpte);
pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT; pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
/* /*
* Set the PSE flags only if the PRESENT flag is set * Clear the PSE flags if the PRESENT flag is not set
* otherwise pmd_present/pmd_huge will return true * otherwise pmd_present/pmd_huge will return true
* even on a non present pmd. * even on a non present pmd.
*/ */
if (pgprot_val(ref_prot) & _PAGE_PRESENT) if (!(pgprot_val(ref_prot) & _PAGE_PRESENT))
pgprot_val(ref_prot) |= _PAGE_PSE;
else
pgprot_val(ref_prot) &= ~_PAGE_PSE; pgprot_val(ref_prot) &= ~_PAGE_PSE;
break;
default:
spin_unlock(&pgd_lock);
return 1;
} }
#endif
/* /*
* Set the GLOBAL flags only if the PRESENT flag is set * Set the GLOBAL flags only if the PRESENT flag is set
@@ -657,7 +664,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
/* /*
* Get the target pfn from the original entry: * Get the target pfn from the original entry:
*/ */
pfn = pte_pfn(*kpte); pfn = ref_pfn;
for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc) for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot))); set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot)));