powerpc: Fixes for CONFIG_PTE_64BIT for SMP support
There are some minor issues with support for 64-bit PTEs on a 32-bit processor when dealing with SMP.

* We need to order the stores in set_pte_at to make sure the flag word is set second.
* Change pte_clear to use pte_update so only the flag word is cleared.
* Added a WARN_ON to set_pte_at to ensure the pte isn't present for the 64-bit pte/SMP case (to ensure our assumption of this fact).

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Acked-by: Becky Bruce <becky.bruce@freescale.com>
Parent: 0ba3418b8b
Commit: 9bf2b5cdc5
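The heart of the fix is the ordering requirement: with CONFIG_PTE_64BIT on a 32-bit processor a PTE is written as two 32-bit stores, so the word carrying the flag bits (including _PAGE_PRESENT) has to reach memory second; otherwise another CPU walking the page tables could observe a present PTE whose other half has not been written yet. The stand-alone sketch below only models that idea and is not the kernel code: sketch_pte_t, sketch_set_pte() and sketch_wmb() are hypothetical names, and sketch_wmb() is just a compiler barrier, whereas the patch uses the powerpc eieio instruction between the two stw stores.

/*
 * Illustrative sketch only -- not the kernel implementation.  Models why
 * the flag word of a 64-bit PTE must be stored second on 32-bit SMP.
 * sketch_pte_t, sketch_set_pte() and sketch_wmb() are hypothetical names.
 */
#include <stdint.h>

#define SKETCH_PAGE_PRESENT	0x1u	/* stand-in for _PAGE_PRESENT */

typedef struct {
	uint32_t addr_word;	/* upper physical-address bits, no flags */
	uint32_t flag_word;	/* flag bits, including "present" */
} sketch_pte_t;

/* Stand-in for a store barrier; the real code uses eieio on powerpc. */
static inline void sketch_wmb(void)
{
	__asm__ __volatile__("" ::: "memory");	/* compiler barrier only */
}

/*
 * Write the two halves so that a concurrent reader which observes the
 * "present" flag also observes the address half: address word first,
 * barrier, flag word second.
 */
static void sketch_set_pte(volatile sketch_pte_t *ptep,
			   uint32_t addr_word, uint32_t flag_word)
{
	ptep->addr_word = addr_word;	/* non-flag half first */
	sketch_wmb();			/* order the two stores */
	ptep->flag_word = flag_word | SKETCH_PAGE_PRESENT;	/* flags last */
}

int main(void)
{
	sketch_pte_t pte = { 0, 0 };

	/* hypothetical values, purely for demonstration */
	sketch_set_pte(&pte, 0x00000012u, 0x0f0u);
	return (pte.flag_word & SKETCH_PAGE_PRESENT) ? 0 : 1;
}

The hunks that follow make the same points in the real code: __set_pte_at() issues the two stw stores separated by eieio, pte_clear() is switched to pte_update(), and set_pte_at() gains the WARN_ON for the 64-bit PTE/SMP case.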
@@ -84,7 +84,7 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(*(kmap_pte-idx)));
 #endif
-	set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
+	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
 	flush_tlb_page(NULL, vaddr);
 
 	return (void*) vaddr;
@@ -517,7 +517,8 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
 
 #define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
 #define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
-#define pte_clear(mm,addr,ptep)	do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)
+#define pte_clear(mm, addr, ptep) \
+	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
 
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
@@ -612,9 +613,6 @@ static inline unsigned long pte_update(pte_t *p,
 	return old;
 }
 #else /* CONFIG_PTE_64BIT */
-/* TODO: Change that to only modify the low word and move set_pte_at()
- * out of line
- */
 static inline unsigned long long pte_update(pte_t *p,
 					    unsigned long clr,
 					    unsigned long set)
@@ -652,16 +650,33 @@ static inline unsigned long long pte_update(pte_t *p,
  * On machines which use an MMU hash table we avoid changing the
  * _PAGE_HASHPTE bit.
  */
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte)
 {
 #if _PAGE_HASHPTE != 0
 	pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE);
+#elif defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
+	__asm__ __volatile__("\
+		stw%U0%X0 %2,%0\n\
+		eieio\n\
+		stw%U0%X0 %L2,%1"
+	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+	: "r" (pte) : "memory");
 #else
 	*ptep = pte;
 #endif
 }
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t pte)
+{
+#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
+	WARN_ON(pte_present(*ptep));
+#endif
+	__set_pte_at(mm, addr, ptep, pte);
+}
 
 /*
  * 2.6 calls this without flushing the TLB entry; this is wrong
  * for our hash-based implementation, we fix that up here.