Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

Pull sparc updates from David Miller:
 "Some 32-bit kgdb cleanups from Sam Ravnborg, and a hugepage TLB flush
  overhead fix on 64-bit from Nitin Gupta"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc64: Reduce TLB flushes during hugepte changes
  aeroflex/greth: fix warning about unused variable
  openprom: fix warning
  sparc32: drop superfluous cast in calls to __nocache_pa()
  sparc32: fix build with STRICT_MM_TYPECHECKS
  sparc32: use proper prototype for trapbase
  sparc32: drop local prototype in kgdb_32
  sparc32: drop hardcoding trap_level in kgdb_trap

commit 21f9debf74
@@ -43,10 +43,10 @@
 	nop;
 
 #ifdef CONFIG_KGDB
 #define KGDB_TRAP(num) \
+	mov num, %l7; \
 	b kgdb_trap_low; \
 	rd %psr,%l0; \
-	nop; \
 	nop;
 #else
 #define KGDB_TRAP(num) \

@@ -28,10 +28,10 @@ enum regnames {
 #define NUMREGBYTES		((GDB_CSR + 1) * 4)
 #else
 #define NUMREGBYTES		((GDB_Y + 1) * 8)
 #endif
 
-#ifdef CONFIG_SPARC64
-asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs);
-#endif
+struct pt_regs;
+asmlinkage void kgdb_trap(unsigned long trap_level,
+			  struct pt_regs *regs);
 
 void arch_kgdb_breakpoint(void);

@@ -69,7 +69,6 @@ typedef struct { unsigned long iopgprot; } iopgprot_t;
 
 #define __pte(x)	((pte_t) { (x) } )
 #define __iopte(x)	((iopte_t) { (x) } )
-/* #define __pmd(x)	((pmd_t) { (x) } ) */ /* XXX procedure with loop */
 #define __pgd(x)	((pgd_t) { (x) } )
 #define __ctxd(x)	((ctxd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )

@@ -97,7 +96,6 @@ typedef unsigned long iopgprot_t;
 
 #define __pte(x)	(x)
 #define __iopte(x)	(x)
-/* #define __pmd(x)	(x) */ /* XXX later */
 #define __pgd(x)	(x)
 #define __ctxd(x)	(x)
 #define __pgprot(x)	(x)

@@ -29,9 +29,9 @@ static inline void free_pgd_fast(pgd_t *pgd)
 
 static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 {
-	unsigned long pa = __nocache_pa((unsigned long)pmdp);
+	unsigned long pa = __nocache_pa(pmdp);
 
-	set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (pa >> 4)));
+	set_pte((pte_t *)pgdp, __pte((SRMMU_ET_PTD | (pa >> 4))));
 }
 
 #define pgd_populate(MM, PGD, PMD)	pgd_set(PGD, PMD)

@@ -298,7 +298,7 @@ static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
 #define pgprot_noncached pgprot_noncached
 static inline pgprot_t pgprot_noncached(pgprot_t prot)
 {
-	prot &= ~__pgprot(SRMMU_CACHE);
+	pgprot_val(prot) &= ~pgprot_val(__pgprot(SRMMU_CACHE));
 	return prot;
 }
 

@@ -375,7 +375,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
 #define pgprot_noncached pgprot_noncached
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline pte_t pte_mkhuge(pte_t pte)
+static inline unsigned long __pte_huge_mask(void)
 {
 	unsigned long mask;
 

@@ -390,8 +390,19 @@ static inline pte_t pte_mkhuge(pte_t pte)
 	: "=r" (mask)
 	: "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));
 
-	return __pte(pte_val(pte) | mask);
+	return mask;
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+	return __pte(pte_val(pte) | __pte_huge_mask());
+}
+
+static inline bool is_hugetlb_pte(pte_t pte)
+{
+	return !!(pte_val(pte) & __pte_huge_mask());
+}
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline pmd_t pmd_mkhuge(pmd_t pmd)
 {

@@ -403,6 +414,11 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 	return __pmd(pte_val(pte));
 }
 #endif
+#else
+static inline bool is_hugetlb_pte(pte_t pte)
+{
+	return false;
+}
 #endif
 
 static inline pte_t pte_mkdirty(pte_t pte)

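Note on the hunk above: is_hugetlb_pte() gains a return-false stub in the non-hugepage configuration, so generic callers such as tlb_batch_add() compile without an #ifdef of their own. Below is a minimal freestanding sketch of that config-stub pattern; the demo_* names and the "size bit" are invented for illustration, not taken from the kernel.

#include <stdbool.h>
#include <stdio.h>

/* Flip this to model CONFIG_HUGETLB_PAGE being enabled or disabled. */
#define DEMO_HUGETLB 1

#if DEMO_HUGETLB
static inline bool demo_is_huge(unsigned long pteval)
{
	return pteval & 0x1UL;		/* pretend bit 0 is the size bit */
}
#else
static inline bool demo_is_huge(unsigned long pteval)
{
	(void)pteval;
	return false;			/* stub: callers need no #ifdef */
}
#endif

int main(void)
{
	/* The caller is identical in both configurations. */
	printf("huge? %d\n", demo_is_huge(0x3UL));
	return 0;
}
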
@@ -856,6 +872,19 @@ static inline unsigned long pud_pfn(pud_t pud)
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 		   pte_t *ptep, pte_t orig, int fullmm);
 
+static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+				pte_t *ptep, pte_t orig, int fullmm)
+{
+	/* It is more efficient to let flush_tlb_kernel_range()
+	 * handle init_mm tlb flushes.
+	 *
+	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
+	 * and SUN4V pte layout, so this inline test is fine.
+	 */
+	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
+		tlb_batch_add(mm, vaddr, ptep, orig, fullmm);
+}
+
 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 					    unsigned long addr,

@@ -872,15 +901,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	pte_t orig = *ptep;
 
 	*ptep = pte;
-
-	/* It is more efficient to let flush_tlb_kernel_range()
-	 * handle init_mm tlb flushes.
-	 *
-	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
-	 * and SUN4V pte layout, so this inline test is fine.
-	 */
-	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
-		tlb_batch_add(mm, addr, ptep, orig, fullmm);
+	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm);
 }
 
 #define set_pte_at(mm,addr,ptep,pte)	\

@@ -8,6 +8,7 @@
 #define TLB_BATCH_NR	192
 
 struct tlb_batch {
+	bool huge;
 	struct mm_struct *mm;
 	unsigned long tlb_nr;
 	unsigned long active;

@@ -16,7 +17,7 @@ struct tlb_batch {
 
 void flush_tsb_kernel_range(unsigned long start, unsigned long end);
 void flush_tsb_user(struct tlb_batch *tb);
-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge);
 
 /* TLB flush operations. */
 

@@ -1225,20 +1225,18 @@ breakpoint_trap:
 	RESTORE_ALL
 
 #ifdef CONFIG_KGDB
-	.align	4
-	.globl	kgdb_trap_low
-	.type	kgdb_trap_low,#function
-kgdb_trap_low:
+ENTRY(kgdb_trap_low)
 	rd	%wim,%l3
 	SAVE_ALL
 	wr	%l0, PSR_ET, %psr
 	WRITE_PAUSE
 
+	mov	%l7, %o0		! trap_level
 	call	kgdb_trap
-	 add	%sp, STACKFRAME_SZ, %o0
+	 add	%sp, STACKFRAME_SZ, %o1	! struct pt_regs *regs
 
 	RESTORE_ALL
-	.size	kgdb_trap_low,.-kgdb_trap_low
+ENDPROC(kgdb_trap_low)
 #endif
 
 	.align	4

@@ -127,6 +127,7 @@ extern unsigned int t_nmi[];
 extern unsigned int linux_trap_ipi15_sun4d[];
 extern unsigned int linux_trap_ipi15_sun4m[];
 
+extern struct tt_entry trapbase;
 extern struct tt_entry trapbase_cpu1;
 extern struct tt_entry trapbase_cpu2;
 extern struct tt_entry trapbase_cpu3;

@@ -12,7 +12,8 @@
 #include <asm/irq.h>
 #include <asm/cacheflush.h>
 
-extern unsigned long trapbase;
+#include "kernel.h"
+#include "entry.h"
 
 void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 {

@@ -133,21 +134,19 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
 	return -1;
 }
 
-extern void do_hw_interrupt(struct pt_regs *regs, unsigned long type);
-
-asmlinkage void kgdb_trap(struct pt_regs *regs)
+asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs)
 {
 	unsigned long flags;
 
 	if (user_mode(regs)) {
-		do_hw_interrupt(regs, 0xfd);
+		do_hw_interrupt(regs, trap_level);
 		return;
 	}
 
 	flushw_all();
 
 	local_irq_save(flags);
-	kgdb_handle_exception(0x172, SIGTRAP, 0, regs);
+	kgdb_handle_exception(trap_level, SIGTRAP, 0, regs);
 	local_irq_restore(flags);
 }
 

@@ -68,8 +68,6 @@ struct screen_info screen_info = {
  * prints out pretty messages and returns.
  */
 
-extern unsigned long trapbase;
-
 /* Pretty sick eh? */
 static void prom_sync_me(void)
 {

@@ -300,7 +298,7 @@ void __init setup_arch(char **cmdline_p)
 	int i;
 	unsigned long highest_paddr;
 
-	sparc_ttable = (struct tt_entry *) &trapbase;
+	sparc_ttable = &trapbase;
 
 	/* Initialize PROM console and command line. */
 	*cmdline_p = prom_getbootargs();

@@ -176,17 +176,31 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
 	int i;
+	pte_t orig[2];
+	unsigned long nptes;
 
 	if (!pte_present(*ptep) && pte_present(entry))
 		mm->context.huge_pte_count++;
 
 	addr &= HPAGE_MASK;
-	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-		set_pte_at(mm, addr, ptep, entry);
+
+	nptes = 1 << HUGETLB_PAGE_ORDER;
+	orig[0] = *ptep;
+	orig[1] = *(ptep + nptes / 2);
+	for (i = 0; i < nptes; i++) {
+		*ptep = entry;
 		ptep++;
 		addr += PAGE_SIZE;
 		pte_val(entry) += PAGE_SIZE;
 	}
+
+	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
+	addr -= REAL_HPAGE_SIZE;
+	ptep -= nptes / 2;
+	maybe_tlb_batch_add(mm, addr, ptep, orig[1], 0);
+	addr -= REAL_HPAGE_SIZE;
+	ptep -= nptes / 2;
+	maybe_tlb_batch_add(mm, addr, ptep, orig[0], 0);
 }
 
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,

@@ -194,19 +208,28 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 {
 	pte_t entry;
 	int i;
+	unsigned long nptes;
 
 	entry = *ptep;
 	if (pte_present(entry))
 		mm->context.huge_pte_count--;
 
 	addr &= HPAGE_MASK;
-
-	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-		pte_clear(mm, addr, ptep);
+	nptes = 1 << HUGETLB_PAGE_ORDER;
+	for (i = 0; i < nptes; i++) {
+		*ptep = __pte(0UL);
 		addr += PAGE_SIZE;
 		ptep++;
 	}
 
+	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
+	addr -= REAL_HPAGE_SIZE;
+	ptep -= nptes / 2;
+	maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
+	addr -= REAL_HPAGE_SIZE;
+	ptep -= nptes / 2;
+	maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
+
 	return entry;
 }
 

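The payoff of the two hugetlb hunks above is flush volume: the old loops went through set_pte_at()/pte_clear() and so could queue one TLB flush per base-page PTE, while the new code writes the PTEs directly and queues exactly two flushes, one per REAL_HPAGE_SIZE half of the huge page. A quick sanity check of the arithmetic, assuming the usual sparc64 geometry (8K base pages, 8MB huge pages backed by two 4MB real TLB entries; these constants are assumptions here, not read out of the diff):

#include <stdio.h>

int main(void)
{
	const unsigned long page_size       = 8UL << 10;	/* assumed 8K  */
	const unsigned long hpage_size      = 8UL << 20;	/* assumed 8MB */
	const unsigned long real_hpage_size = 4UL << 20;	/* assumed 4MB */
	const unsigned long nptes = hpage_size / page_size;

	printf("PTEs per huge page: %lu\n", nptes);		/* 1024 */
	printf("flushes before:     %lu (one per PTE)\n", nptes);
	printf("flushes after:      %lu\n",
	       hpage_size / real_hpage_size);			/* 2 */
	return 0;
}
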
@@ -324,18 +324,6 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
 	tsb_insert(tsb, tag, tte);
 }
 
-#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline bool is_hugetlb_pte(pte_t pte)
-{
-	if ((tlb_type == hypervisor &&
-	     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
-	    (tlb_type != hypervisor &&
-	     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
-		return true;
-	return false;
-}
-#endif
-
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 	struct mm_struct *mm;

@@ -133,7 +133,7 @@ nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
 	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
 	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
 		set_bit(scan, iounit->bmap);
-		sbus_writel(iopte, &iounit->page_table[scan]);
+		sbus_writel(iopte_val(iopte), &iounit->page_table[scan]);
 	}
 	IOD(("%08lx\n", vaddr));
 	return vaddr;

@@ -228,7 +228,7 @@ static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned lon
 			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
 
 			iopte = iounit->page_table + i;
-			sbus_writel(MKIOPTE(__pa(page)), iopte);
+			sbus_writel(iopte_val(MKIOPTE(__pa(page))), iopte);
 		}
 		addr += PAGE_SIZE;
 		va += PAGE_SIZE;

@@ -107,17 +107,22 @@ static inline int srmmu_pmd_none(pmd_t pmd)
 
 /* XXX should we hyper_flush_whole_icache here - Anton */
 static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
-{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
+{
+	pte_t pte;
+
+	pte = __pte((SRMMU_ET_PTD | (__nocache_pa(pgdp) >> 4)));
+	set_pte((pte_t *)ctxp, pte);
+}
 
 void pmd_set(pmd_t *pmdp, pte_t *ptep)
 {
 	unsigned long ptp;	/* Physical address, shifted right by 4 */
 	int i;
 
-	ptp = __nocache_pa((unsigned long) ptep) >> 4;
+	ptp = __nocache_pa(ptep) >> 4;
 	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
-		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
-		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
+		set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
+		ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
 	}
 }
 

@@ -128,8 +133,8 @@ void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
 
 	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
 	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
-		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
-		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
+		set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
+		ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
 	}
 }
 

@@ -911,7 +916,7 @@ void __init srmmu_paging_init(void)
 
 	/* ctx table has to be physically aligned to its size */
 	srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
-	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);
+	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa(srmmu_context_table);
 
 	for (i = 0; i < num_contexts; i++)
 		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

@@ -67,7 +67,7 @@ void arch_leave_lazy_mmu_mode(void)
 }
 
 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
-			      bool exec)
+			      bool exec, bool huge)
 {
 	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 	unsigned long nr;

@@ -84,13 +84,21 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
 	}
 
 	if (!tb->active) {
-		flush_tsb_user_page(mm, vaddr);
+		flush_tsb_user_page(mm, vaddr, huge);
 		global_flush_tlb_page(mm, vaddr);
 		goto out;
 	}
 
-	if (nr == 0)
+	if (nr == 0) {
 		tb->mm = mm;
+		tb->huge = huge;
+	}
+
+	if (tb->huge != huge) {
+		flush_tlb_pending();
+		tb->huge = huge;
+		nr = 0;
+	}
 
 	tb->vaddrs[nr] = vaddr;
 	tb->tlb_nr = ++nr;

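With the huge flag in struct tlb_batch, a batch only ever holds same-sized translations; if the page size flips mid-batch, the pending entries are flushed out first and the batch restarts. A self-contained model of that policy follows (plain C, with printf standing in for flush_tlb_pending() and the real flush primitives; all demo_* names are invented):

#include <stdbool.h>
#include <stdio.h>

#define DEMO_BATCH_NR 4			/* tiny, so the demo flushes often */

struct demo_batch {
	bool huge;
	unsigned long nr;
	unsigned long vaddrs[DEMO_BATCH_NR];
};

static void demo_flush(struct demo_batch *b)
{
	if (b->nr)
		printf("flush %lu %s entries\n", b->nr,
		       b->huge ? "huge" : "base");
	b->nr = 0;
}

static void demo_add(struct demo_batch *b, unsigned long vaddr, bool huge)
{
	if (b->nr && b->huge != huge)	/* size change: drain first */
		demo_flush(b);
	if (b->nr == 0)
		b->huge = huge;
	b->vaddrs[b->nr++] = vaddr;
	if (b->nr >= DEMO_BATCH_NR)	/* batch full: drain */
		demo_flush(b);
}

int main(void)
{
	struct demo_batch b = { 0 };

	demo_add(&b, 0x2000, false);
	demo_add(&b, 0x4000, false);
	demo_add(&b, 0x800000, true);	/* flag flips: 2 base entries flushed */
	demo_add(&b, 0xc00000, true);
	demo_flush(&b);			/* drain: 2 huge entries flushed */
	return 0;
}
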
@@ -104,6 +112,8 @@ out:
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 		   pte_t *ptep, pte_t orig, int fullmm)
 {
+	bool huge = is_hugetlb_pte(orig);
+
 	if (tlb_type != hypervisor &&
 	    pte_dirty(orig)) {
 		unsigned long paddr, pfn = pte_pfn(orig);

@@ -129,7 +139,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 
 no_cache_flush:
 	if (!fullmm)
-		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
+		tlb_batch_add_one(mm, vaddr, pte_exec(orig), huge);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE

@@ -145,7 +155,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
 		if (pte_val(*pte) & _PAGE_VALID) {
 			bool exec = pte_exec(*pte);
 
-			tlb_batch_add_one(mm, vaddr, exec);
+			tlb_batch_add_one(mm, vaddr, exec, false);
 		}
 		pte++;
 		vaddr += PAGE_SIZE;

@@ -185,8 +195,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			pte_t orig_pte = __pte(pmd_val(orig));
 			bool exec = pte_exec(orig_pte);
 
-			tlb_batch_add_one(mm, addr, exec);
-			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
+			tlb_batch_add_one(mm, addr, exec, true);
+			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
+					  true);
 		} else {
 			tlb_batch_pmd_scan(mm, addr, orig);
 		}

@@ -76,14 +76,15 @@ void flush_tsb_user(struct tlb_batch *tb)
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
-	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
-	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
-		base = __pa(base);
-	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
-
+	if (!tb->huge) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
+	}
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+	if (tb->huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)

@@ -94,20 +95,21 @@ void flush_tsb_user(struct tlb_batch *tb)
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge)
 {
 	unsigned long nentries, base, flags;
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
-	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
-	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
-		base = __pa(base);
-	__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
-
+	if (!huge) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
+	}
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+	if (huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)

@@ -1323,7 +1323,7 @@ static inline int phy_aneg_done(struct phy_device *phydev)
 
 static int greth_mdio_init(struct greth_private *greth)
 {
-	int ret, phy;
+	int ret;
 	unsigned long timeout;
 
 	greth->mdio = mdiobus_alloc();

@@ -383,20 +383,12 @@ static struct device_node *get_node(phandle n, DATA *data)
 }
 
 /* Copy in a whole string from userspace into kernelspace. */
-static int copyin_string(char __user *user, size_t len, char **ptr)
+static char * copyin_string(char __user *user, size_t len)
 {
-	char *tmp;
-
 	if ((ssize_t)len < 0 || (ssize_t)(len + 1) < 0)
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 
-	tmp = memdup_user_nul(user, len);
-	if (IS_ERR(tmp))
-		return PTR_ERR(tmp);
-
-	*ptr = tmp;
-
-	return 0;
+	return memdup_user_nul(user, len);
 }
 
 /*

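The copyin_string() rewrite above is the standard conversion to the kernel's error-pointer convention from <linux/err.h>: return the buffer itself and encode failures as ERR_PTR(-errno), which callers test with IS_ERR() and unpack with PTR_ERR(). A userspace sketch of the convention follows; the err_ptr/is_err/ptr_err helpers reimplement the idea purely for illustration, and strndup() stands in for memdup_user_nul():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

#define DEMO_EINVAL 22
#define DEMO_ENOMEM 12

/* Error codes live in the top 4095 addresses, which no valid
 * allocation can occupy -- the same trick as the kernel's ERR_PTR(). */
static inline void *err_ptr(long error)   { return (void *)error; }
static inline long ptr_err(const void *p) { return (long)p; }
static inline int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}

/* Shaped like the new copyin_string(): buffer on success, error
 * pointer on failure, no output parameter. */
static char *demo_copyin_string(const char *user, size_t len)
{
	char *tmp;

	if ((ssize_t)len < 0)
		return err_ptr(-DEMO_EINVAL);

	tmp = strndup(user, len);	/* stands in for memdup_user_nul() */
	return tmp ? tmp : err_ptr(-DEMO_ENOMEM);
}

int main(void)
{
	char *s = demo_copyin_string("openprom", 8);

	if (is_err(s))
		return (int)-ptr_err(s);
	printf("%s\n", s);
	free(s);
	return 0;
}
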
@@ -415,9 +407,9 @@ static int opiocget(void __user *argp, DATA *data)
 
 	dp = get_node(op.op_nodeid, data);
 
-	err = copyin_string(op.op_name, op.op_namelen, &str);
-	if (err)
-		return err;
+	str = copyin_string(op.op_name, op.op_namelen);
+	if (IS_ERR(str))
+		return PTR_ERR(str);
 
 	pval = of_get_property(dp, str, &len);
 	err = 0;

@@ -440,7 +432,7 @@ static int opiocnextprop(void __user *argp, DATA *data)
 	struct device_node *dp;
 	struct property *prop;
 	char *str;
-	int err, len;
+	int len;
 
 	if (copy_from_user(&op, argp, sizeof(op)))
 		return -EFAULT;

@@ -449,9 +441,9 @@ static int opiocnextprop(void __user *argp, DATA *data)
 	if (!dp)
 		return -EINVAL;
 
-	err = copyin_string(op.op_name, op.op_namelen, &str);
-	if (err)
-		return err;
+	str = copyin_string(op.op_name, op.op_namelen);
+	if (IS_ERR(str))
+		return PTR_ERR(str);
 
 	if (str[0] == '\0') {
 		prop = dp->properties;

@@ -494,14 +486,14 @@ static int opiocset(void __user *argp, DATA *data)
 	if (!dp)
 		return -EINVAL;
 
-	err = copyin_string(op.op_name, op.op_namelen, &str);
-	if (err)
-		return err;
+	str = copyin_string(op.op_name, op.op_namelen);
+	if (IS_ERR(str))
+		return PTR_ERR(str);
 
-	err = copyin_string(op.op_buf, op.op_buflen, &tmp);
-	if (err) {
+	tmp = copyin_string(op.op_buf, op.op_buflen);
+	if (IS_ERR(tmp)) {
 		kfree(str);
-		return err;
+		return PTR_ERR(tmp);
 	}
 
 	err = of_set_property(dp, str, tmp, op.op_buflen);