powerpc fixes for 4.7 #4

- mm/radix: Update to tlb functions ric argument from Aneesh Kumar K.V
- mm/radix: Flush page walk cache when freeing page table from Aneesh Kumar K.V
- mm/hash: Use the correct PPP mask when updating HPTE from Aneesh Kumar K.V
- mm/hash: Don't add memory coherence if cache inhibited is set from Aneesh Kumar K.V
- mm/radix: Update Radix tree size as per ISA 3.0 from Aneesh Kumar K.V
- eeh: Fix invalid cached PE primary bus from Gavin Shan
- Fix faults caused by radix patching of SLB miss handler from Michael Ellerman
- bpf/jit: Disable classic BPF JIT on ppc64le from Naveen N. Rao

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJXbmITAAoJEFHr6jzI4aWANXwQAIUzjKcLpyQWEKwOKfMqBT5T
EfsWDqJA/J3mYKNcZyiB7qv1NVPPkU9DSBK0OaAKwYdg5YWKDBl6R3mW+j4di0bP
SkFACCyE2WbLTCiz5fzd8l974RUh5jKQpIrObp4/8xp40d0vsyAzz4J7d4HVRsrr
BnoTS/KmytsaDQls5kYArxhW6U+Shag586Au1hNt3SS/be8lCNEXLfa3ltCr7WLJ
k+xM0KM5kpO9/OK40A64TH7xUZKQIgPMUR5Ct43IJhMeHNnQctLmGQQjRWTrajv1
K/TfrYwCl66xzKaH5G3MKJgqJAJm1LTwGs+2aOn91x5hPrbmW+bLqr1Mm0ukjROz
oaANO5fgEQjl0JRGCNAhLHvaoqJX6v5/7GbmFRoaigX4UKJ63nK1ABiwAgKDGnyj
OchwwJywU5UIX/+9Qpig3CxQNhEV33Nnp8t+dsg8CPd9o/G0mIe0QP1eGdhD09mM
X9eMfN08hLj5ERKvlpW0rrq1b/wizOGmUXbmt02HZi7iLNsyQMwShiOvwOaAvH6/
SzEFBJdp11jNoe4GJDt5rH4HlnTnTAYwcLFMTDCCPdJXy7voI/J+MaAmG89S30dQ
ph0+4v/8K2N0VDZ7kkgi0GL1gp9ULkgtimrN5Z0R8U7qEapEW6ybvv+0Ewln742f
SCRNVMZgzcwe3CcCKzdn
=fxml
-----END PGP SIGNATURE-----

Merge tag 'powerpc-4.7-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "mm/radix (Aneesh Kumar K.V):
   - Update to tlb functions ric argument
   - Flush page walk cache when freeing page table
   - Update Radix tree size as per ISA 3.0

  mm/hash (Aneesh Kumar K.V):
   - Use the correct PPP mask when updating HPTE
   - Don't add memory coherence if cache inhibited is set

  eeh (Gavin Shan):
   - Fix invalid cached PE primary bus

  bpf/jit (Naveen N. Rao):
   - Disable classic BPF JIT on ppc64le

  .. and fix faults caused by radix patching of SLB miss handler
  (Michael Ellerman)"

* tag 'powerpc-4.7-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/bpf/jit: Disable classic BPF JIT on ppc64le
  powerpc: Fix faults caused by radix patching of SLB miss handler
  powerpc/eeh: Fix invalid cached PE primary bus
  powerpc/mm/radix: Update Radix tree size as per ISA 3.0
  powerpc/mm/hash: Don't add memory coherence if cache inhibited is set
  powerpc/mm/hash: Use the correct PPP mask when updating HPTE
  powerpc/mm/radix: Flush page walk cache when freeing page table
  powerpc/mm/radix: Update to tlb functions ric argument
Commit 2f6e97477b
@@ -128,7 +128,7 @@ config PPC
 	select IRQ_FORCED_THREADING
 	select HAVE_RCU_TABLE_FREE if SMP
 	select HAVE_SYSCALL_TRACEPOINTS
-	select HAVE_CBPF_JIT
+	select HAVE_CBPF_JIT if CPU_BIG_ENDIAN
 	select HAVE_ARCH_JUMP_LABEL
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_HAS_GCOV_PROFILE_ALL

@@ -102,7 +102,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 				  unsigned long address)
 {
-	tlb_flush_pgtable(tlb, address);
 	pgtable_page_dtor(table);
 	pgtable_free_tlb(tlb, page_address(table), 0);
 }

@@ -88,6 +88,7 @@
 #define HPTE_R_RPN_SHIFT	12
 #define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
 #define HPTE_R_PP		ASM_CONST(0x0000000000000003)
+#define HPTE_R_PPP		ASM_CONST(0x8000000000000003)
 #define HPTE_R_N		ASM_CONST(0x0000000000000004)
 #define HPTE_R_G		ASM_CONST(0x0000000000000008)
 #define HPTE_R_M		ASM_CONST(0x0000000000000010)

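A note on the HPTE_R_PPP definition just added: the third protection bit (commonly called pp0) sits at the opposite end of the doubleword from pp1:pp2, so the old low-bits-only mask could not cover all three; that is what the native_hpte_updatepp()/updateboltedpp() hunks later in this diff fix. A minimal userspace check of the mask arithmetic (a sketch, not part of the patch; values copied from the hunk above):

#include <assert.h>

#define HPTE_R_PP	0x0000000000000003UL	/* pp1:pp2 only */
#define HPTE_R_PPP	0x8000000000000003UL	/* pp0 plus pp1:pp2 */

int main(void)
{
	/* Clearing protections with ~HPTE_R_PP leaves the top bit (pp0)
	 * untouched; ~HPTE_R_PPP clears it as well. */
	assert((HPTE_R_PPP & ~HPTE_R_PP) == 0x8000000000000000UL);
	return 0;
}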
@@ -109,6 +109,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 				  unsigned long address)
 {
+	/*
+	 * By now all the pud entries should be none entries. So go
+	 * ahead and flush the page walk cache
+	 */
+	flush_tlb_pgtable(tlb, address);
 	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
 }
 
@@ -125,6 +130,11 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 				  unsigned long address)
 {
+	/*
+	 * By now all the pud entries should be none entries. So go
+	 * ahead and flush the page walk cache
+	 */
+	flush_tlb_pgtable(tlb, address);
 	return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
 }
 
@@ -196,7 +206,11 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 				  unsigned long address)
 {
-	tlb_flush_pgtable(tlb, address);
+	/*
+	 * By now all the pud entries should be none entries. So go
+	 * ahead and flush the page walk cache
+	 */
+	flush_tlb_pgtable(tlb, address);
 	pgtable_free_tlb(tlb, table, 0);
 }
 
@@ -228,5 +228,20 @@ extern void radix__vmemmap_remove_mapping(unsigned long start,
 
 extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 				 pgprot_t flags, unsigned int psz);
+
+static inline unsigned long radix__get_tree_size(void)
+{
+	unsigned long rts_field;
+	/*
+	 * we support 52 bits, hence 52-31 = 21, 0b10101
+	 * RTS encoding details
+	 * bits 0 - 3 of rts -> bits 6 - 8 unsigned long
+	 * bits 4 - 5 of rts -> bits 62 - 63 of unsigned long
+	 */
+	rts_field = (0x5UL << 5); /* 6 - 8 bits */
+	rts_field |= (0x2UL << 61);
+
+	return rts_field;
+}
 #endif /* __ASSEMBLY__ */
 #endif

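Since radix__get_tree_size() is the centrepiece of the "Update Radix tree size as per ISA 3.0" fix, here is a standalone rendering of the same arithmetic, handy for checking the constant it produces (a userspace sketch, not kernel code):

#include <stdio.h>

int main(void)
{
	/* 52-bit radix tree: RTS = 52 - 31 = 21 = 0b10101, split exactly
	 * as in radix__get_tree_size() above: 0b101 goes in via << 5,
	 * the remaining 0b10 via << 61. */
	unsigned long rts_field = (0x5UL << 5) | (0x2UL << 61);

	printf("rts_field = 0x%016lx\n", rts_field); /* 0x40000000000000a0 */
	return 0;
}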
@@ -18,16 +18,19 @@ extern void radix__local_flush_tlb_mm(struct mm_struct *mm);
 extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 					 unsigned long ap, int nid);
+extern void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 extern void radix__tlb_flush(struct mmu_gather *tlb);
 #ifdef CONFIG_SMP
 extern void radix__flush_tlb_mm(struct mm_struct *mm);
 extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 				   unsigned long ap, int nid);
+extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 #else
 #define radix__flush_tlb_mm(mm)		radix__local_flush_tlb_mm(mm)
 #define radix__flush_tlb_page(vma,addr)	radix__local_flush_tlb_page(vma,addr)
 #define radix___flush_tlb_page(mm,addr,p,i)	radix___local_flush_tlb_page(mm,addr,p,i)
+#define radix__flush_tlb_pwc(tlb, addr)	radix__local_flush_tlb_pwc(tlb, addr)
 #endif
 
 #endif

@@ -72,5 +72,19 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 #define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
 #define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
 #endif /* CONFIG_SMP */
+/*
+ * flush the page walk cache for the address
+ */
+static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
+{
+	/*
+	 * Flush the page table walk cache on freeing a page table. We already
+	 * have marked the upper/higher level page table entry none by now.
+	 * So it is safe to flush PWC here.
+	 */
+	if (!radix_enabled())
+		return;
 
+	radix__flush_tlb_pwc(tlb, address);
+}
 #endif /*  _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */

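The flush_tlb_pgtable() helper added above is what the __pud/__pmd/__pte_free_tlb hunks earlier call into. A userspace mock of the dispatch follows; radix_enabled() and the simplified signatures here are stand-ins for illustration, not the kernel API:

#include <stdbool.h>
#include <stdio.h>

static bool radix_enabled(void) { return true; }	/* stand-in */

static void radix__flush_tlb_pwc(unsigned long addr)
{
	printf("flush page walk cache for table at %#lx\n", addr);
}

static void flush_tlb_pgtable(unsigned long addr)
{
	/* hash has no page walk cache, so only radix needs the flush */
	if (!radix_enabled())
		return;

	radix__flush_tlb_pwc(addr);
}

int main(void)
{
	flush_tlb_pgtable(0xc000000000000000UL);
	return 0;
}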
@@ -4,11 +4,6 @@
 #include <linux/mm.h>
 
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
-				     unsigned long address)
-{
-
-}
 
 #ifdef CONFIG_PPC64
 #include <asm/book3s/64/pgalloc.h>

@@ -642,7 +642,6 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
 	if (pe->type & EEH_PE_VF) {
 		eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
 	} else {
-		eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
 		pci_lock_rescan_remove();
 		pci_hp_remove_devices(bus);
 		pci_unlock_rescan_remove();
@@ -692,10 +691,12 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
 		 */
 		edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
 		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
-		if (pe->type & EEH_PE_VF)
+		if (pe->type & EEH_PE_VF) {
 			eeh_add_virt_device(edev, NULL);
-		else
+		} else {
+			eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
 			pci_hp_add_devices(bus);
+		}
 	} else if (frozen_bus && rmv_data->removed) {
 		pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
 		ssleep(5);

@@ -1399,11 +1399,12 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_RADIX)
 	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
 
 	mtlr	r10
-BEGIN_MMU_FTR_SECTION
-	b	2f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
 	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
+BEGIN_MMU_FTR_SECTION
 	beq-	2f
+FTR_SECTION_ELSE
+	b	2f
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
 
 	.machine	push
 	.machine	"power4"

@@ -316,8 +316,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 		DBG_LOW(" -> hit\n");
 		/* Update the HPTE */
 		hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
-					~(HPTE_R_PP | HPTE_R_N)) |
-					(newpp & (HPTE_R_PP | HPTE_R_N |
+					~(HPTE_R_PPP | HPTE_R_N)) |
+					(newpp & (HPTE_R_PPP | HPTE_R_N |
 						  HPTE_R_C)));
 	}
 	native_unlock_hpte(hptep);
@@ -385,8 +385,8 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 
 	/* Update the HPTE */
 	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
-			~(HPTE_R_PP | HPTE_R_N)) |
-			(newpp & (HPTE_R_PP | HPTE_R_N)));
+			~(HPTE_R_PPP | HPTE_R_N)) |
+			(newpp & (HPTE_R_PPP | HPTE_R_N)));
 	/*
 	 * Ensure it is out of the tlb too. Bolted entries base and
 	 * actual page size will be same.

@@ -201,9 +201,8 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
 	/*
 	 * We can't allow hardware to update hpte bits. Hence always
 	 * set 'R' bit and set 'C' if it is a write fault
-	 * Memory coherence is always enabled
 	 */
-	rflags |=  HPTE_R_R | HPTE_R_M;
+	rflags |=  HPTE_R_R;
 
 	if (pteflags & _PAGE_DIRTY)
 		rflags |= HPTE_R_C;
@@ -213,10 +212,15 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
 
 	if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT)
 		rflags |= HPTE_R_I;
-	if ((pteflags & _PAGE_CACHE_CTL ) == _PAGE_NON_IDEMPOTENT)
+	else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
 		rflags |= (HPTE_R_I | HPTE_R_G);
-	if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
-		rflags |= (HPTE_R_I | HPTE_R_W);
+	else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
+		rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
+	else
+		/*
+		 * Add memory coherence if cache inhibited is not set
+		 */
+		rflags |= HPTE_R_M;
 
 	return rflags;
 }

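To see what the reworked if/else-if chain produces per cache-control mode, here is a userspace mirror of it. HPTE_R_G and HPTE_R_M match the mmu-hash header hunk earlier in this diff; the HPTE_R_I and HPTE_R_W values are assumptions for illustration, as this diff does not show them:

#include <assert.h>

enum {
	HPTE_R_G = 0x08,	/* from the mmu-hash hunk above */
	HPTE_R_M = 0x10,	/* from the mmu-hash hunk above */
	HPTE_R_I = 0x20,	/* assumed value */
	HPTE_R_W = 0x40,	/* assumed value */
};

enum cache_mode { NORMAL, TOLERANT, NON_IDEMPOTENT, SAO };

/* Mirrors the fixed chain in htab_convert_pte_flags(). */
static unsigned long wimg_bits(enum cache_mode mode)
{
	unsigned long rflags = 0;

	if (mode == TOLERANT)
		rflags |= HPTE_R_I;
	else if (mode == NON_IDEMPOTENT)
		rflags |= (HPTE_R_I | HPTE_R_G);
	else if (mode == SAO)
		rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
	else
		rflags |= HPTE_R_M;	/* coherence only if not inhibited */

	return rflags;
}

int main(void)
{
	/* The point of the fix: plain cache-inhibited mappings no longer
	 * get HPTE_R_M; SAO remains an architected W|I|M combination. */
	assert(!(wimg_bits(TOLERANT) & HPTE_R_M));
	assert(!(wimg_bits(NON_IDEMPOTENT) & HPTE_R_M));
	assert(wimg_bits(NORMAL) & HPTE_R_M);
	return 0;
}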
@@ -65,7 +65,7 @@ static int radix__init_new_context(struct mm_struct *mm, int index)
 	/*
 	 * set the process table entry,
 	 */
-	rts_field = 3ull << PPC_BITLSHIFT(2);
+	rts_field = radix__get_tree_size();
 	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);
 	return 0;
 }

@@ -160,9 +160,8 @@ redo:
 	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
 	/*
 	 * Fill in the process table.
-	 * we support 52 bits, hence 52-28 = 24, 11000
 	 */
-	rts_field = 3ull << PPC_BITLSHIFT(2);
+	rts_field = radix__get_tree_size();
 	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
 	/*
 	 * Fill in the partition table. We are suppose to use effective address
@@ -176,10 +175,8 @@ redo:
 static void __init radix_init_partition_table(void)
 {
 	unsigned long rts_field;
-	/*
-	 * we support 52 bits, hence 52-28 = 24, 11000
-	 */
-	rts_field = 3ull << PPC_BITLSHIFT(2);
+
+	rts_field = radix__get_tree_size();
 
 	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
 	partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT);

@@ -18,16 +18,20 @@
 
 static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
-static inline void __tlbiel_pid(unsigned long pid, int set)
+#define RIC_FLUSH_TLB 0
+#define RIC_FLUSH_PWC 1
+#define RIC_FLUSH_ALL 2
+
+static inline void __tlbiel_pid(unsigned long pid, int set,
+				unsigned long ric)
 {
-	unsigned long rb,rs,ric,prs,r;
+	unsigned long rb,rs,prs,r;
 
 	rb = PPC_BIT(53); /* IS = 1 */
 	rb |= set << PPC_BITLSHIFT(51);
 	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
 	prs = 1; /* process scoped */
 	r = 1;   /* raidx format */
-	ric = 2;  /* invalidate all the caches */
 
 	asm volatile("ptesync": : :"memory");
 	asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"

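The three RIC_FLUSH_* values introduced above become the ric field passed to tlbiel/tlbie throughout the rest of this file. My reading of the encoding (an assumption, not spelled out in this diff, though consistent with how the later hunks use the values) is 0 = TLB entries, 1 = page walk cache, 2 = both:

#include <stdio.h>

#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2

int main(void)
{
	const char *target[] = {
		[RIC_FLUSH_TLB] = "TLB entries only",
		[RIC_FLUSH_PWC] = "page walk cache only",
		[RIC_FLUSH_ALL] = "TLB entries and page walk cache",
	};

	for (int ric = RIC_FLUSH_TLB; ric <= RIC_FLUSH_ALL; ric++)
		printf("ric=%d -> %s\n", ric, target[ric]);
	return 0;
}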
@@ -39,25 +43,24 @@ static inline void __tlbiel_pid(unsigned long pid, int set)
 /*
  * We use 128 set in radix mode and 256 set in hpt mode.
  */
-static inline void _tlbiel_pid(unsigned long pid)
+static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 {
 	int set;
 
 	for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
-		__tlbiel_pid(pid, set);
+		__tlbiel_pid(pid, set, ric);
 	}
 	return;
 }
 
-static inline void _tlbie_pid(unsigned long pid)
+static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
 {
-	unsigned long rb,rs,ric,prs,r;
+	unsigned long rb,rs,prs,r;
 
 	rb = PPC_BIT(53); /* IS = 1 */
 	rs = pid << PPC_BITLSHIFT(31);
 	prs = 1; /* process scoped */
 	r = 1;   /* raidx format */
-	ric = 2;  /* invalidate all the caches */
 
 	asm volatile("ptesync": : :"memory");
 	asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"

@@ -67,16 +70,15 @@ static inline void _tlbie_pid(unsigned long pid)
 }
 
 static inline void _tlbiel_va(unsigned long va, unsigned long pid,
-			      unsigned long ap)
+			      unsigned long ap, unsigned long ric)
 {
-	unsigned long rb,rs,ric,prs,r;
+	unsigned long rb,rs,prs,r;
 
 	rb = va & ~(PPC_BITMASK(52, 63));
 	rb |= ap << PPC_BITLSHIFT(58);
 	rs = pid << PPC_BITLSHIFT(31);
 	prs = 1; /* process scoped */
 	r = 1;   /* raidx format */
-	ric = 0;  /* no cluster flush yet */
 
 	asm volatile("ptesync": : :"memory");
 	asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"

@@ -86,16 +88,15 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
 }
 
 static inline void _tlbie_va(unsigned long va, unsigned long pid,
-			     unsigned long ap)
+			     unsigned long ap, unsigned long ric)
 {
-	unsigned long rb,rs,ric,prs,r;
+	unsigned long rb,rs,prs,r;
 
 	rb = va & ~(PPC_BITMASK(52, 63));
 	rb |= ap << PPC_BITLSHIFT(58);
 	rs = pid << PPC_BITLSHIFT(31);
 	prs = 1; /* process scoped */
 	r = 1;   /* raidx format */
-	ric = 0;  /* no cluster flush yet */
 
 	asm volatile("ptesync": : :"memory");
 	asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"

@@ -122,11 +123,26 @@ void radix__local_flush_tlb_mm(struct mm_struct *mm)
 	preempt_disable();
 	pid = mm->context.id;
 	if (pid != MMU_NO_CONTEXT)
-		_tlbiel_pid(pid);
+		_tlbiel_pid(pid, RIC_FLUSH_ALL);
 	preempt_enable();
 }
 EXPORT_SYMBOL(radix__local_flush_tlb_mm);
 
+void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
+{
+	unsigned long pid;
+	struct mm_struct *mm = tlb->mm;
+
+	preempt_disable();
+
+	pid = mm->context.id;
+	if (pid != MMU_NO_CONTEXT)
+		_tlbiel_pid(pid, RIC_FLUSH_PWC);
+
+	preempt_enable();
+}
+EXPORT_SYMBOL(radix__local_flush_tlb_pwc);
+
 void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 				  unsigned long ap, int nid)
 {
@@ -135,7 +151,7 @@ void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 	preempt_disable();
 	pid = mm ? mm->context.id : 0;
 	if (pid != MMU_NO_CONTEXT)
-		_tlbiel_va(vmaddr, pid, ap);
+		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
 	preempt_enable();
 }
 
@@ -172,16 +188,42 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
 
 		if (lock_tlbie)
 			raw_spin_lock(&native_tlbie_lock);
-		_tlbie_pid(pid);
+		_tlbie_pid(pid, RIC_FLUSH_ALL);
 		if (lock_tlbie)
 			raw_spin_unlock(&native_tlbie_lock);
 	} else
-		_tlbiel_pid(pid);
+		_tlbiel_pid(pid, RIC_FLUSH_ALL);
 no_context:
 	preempt_enable();
 }
 EXPORT_SYMBOL(radix__flush_tlb_mm);
 
+void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
+{
+	unsigned long pid;
+	struct mm_struct *mm = tlb->mm;
+
+	preempt_disable();
+
+	pid = mm->context.id;
+	if (unlikely(pid == MMU_NO_CONTEXT))
+		goto no_context;
+
+	if (!mm_is_core_local(mm)) {
+		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+
+		if (lock_tlbie)
+			raw_spin_lock(&native_tlbie_lock);
+		_tlbie_pid(pid, RIC_FLUSH_PWC);
+		if (lock_tlbie)
+			raw_spin_unlock(&native_tlbie_lock);
+	} else
+		_tlbiel_pid(pid, RIC_FLUSH_PWC);
+no_context:
+	preempt_enable();
+}
+EXPORT_SYMBOL(radix__flush_tlb_pwc);
+
 void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 			    unsigned long ap, int nid)
 {
@@ -196,11 +238,11 @@ void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 
 		if (lock_tlbie)
 			raw_spin_lock(&native_tlbie_lock);
-		_tlbie_va(vmaddr, pid, ap);
+		_tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
 		if (lock_tlbie)
 			raw_spin_unlock(&native_tlbie_lock);
 	} else
-		_tlbiel_va(vmaddr, pid, ap);
+		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
 bail:
 	preempt_enable();
 }

@@ -224,7 +266,7 @@ void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
 
 		if (lock_tlbie)
 			raw_spin_lock(&native_tlbie_lock);
-		_tlbie_pid(0);
+		_tlbie_pid(0, RIC_FLUSH_ALL);
 		if (lock_tlbie)
 			raw_spin_unlock(&native_tlbie_lock);
 	}