cpumask: Use mm_cpumask() wrapper instead of cpu_vm_mask
Makes the code future-proof against the impending change to mm->cpu_vm_mask. It's also a chance to use the new cpumask_* operations, which take a pointer (the older ones are deprecated, but there's no hurry for arch code).

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Parent: f5ac590e79
Commit: 56aa4129e8
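For context: mm_cpumask() is the future-safe accessor for the mask of CPUs an mm has been used on. At the time of this series it was just a thin wrapper over the very field being replaced here, roughly (a sketch, not part of this patch):

	/* Future-safe accessor for struct mm_struct's cpu_vm_mask (sketch) */
	#define mm_cpumask(mm) (&(mm)->cpu_vm_mask)

Once cpu_vm_mask changes representation, only this accessor needs updating; every caller converted below keeps working unmodified.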
@@ -31,7 +31,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
 	/* Mark this context has been used on the new CPU */
-	cpu_set(smp_processor_id(), next->cpu_vm_mask);
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
 
 	/* 32-bit keeps track of the current PGDIR in the thread struct */
 #ifdef CONFIG_PPC32
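The set operation is a mechanical substitution, but the signature shows why it matters: cpu_set() is a macro that needs the cpumask_t lvalue itself, while the new helper takes a pointer, which is what lets the mask later live outside struct mm_struct. Its contemporary definition was roughly (a sketch):

	/* sets bit 'cpu' in *dstp; pointer-based, unlike the cpu_set() macro */
	static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
	{
		set_bit(cpumask_check(cpu), cpumask_bits(dstp));
	}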
@@ -859,7 +859,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	unsigned long vsid;
 	struct mm_struct *mm;
 	pte_t *ptep;
-	cpumask_t tmp;
+	const struct cpumask *tmp;
 	int rc, user_region = 0, local = 0;
 	int psize, ssize;
 
@@ -907,8 +907,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		return 1;
 
 	/* Check CPU locality */
-	tmp = cpumask_of_cpu(smp_processor_id());
-	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
+	tmp = cpumask_of(smp_processor_id());
+	if (user_region && cpumask_equal(mm_cpumask(mm), tmp))
 		local = 1;
 
 #ifdef CONFIG_HUGETLB_PAGE
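The type change declared in the previous hunk pays off here: cpumask_of_cpu() produces a whole cpumask_t by value, which the caller must store on the stack (NR_CPUS bits, i.e. 512 bytes with NR_CPUS=4096), while cpumask_of() returns a const pointer to a prebuilt single-bit mask, costing only a pointer. A sketch of the two shapes:

	cpumask_t tmp = cpumask_of_cpu(cpu);         /* old: copies the whole mask */
	const struct cpumask *tmp = cpumask_of(cpu); /* new: no copy at all */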
@@ -1024,7 +1024,6 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	unsigned long vsid;
 	void *pgdir;
 	pte_t *ptep;
-	cpumask_t mask;
 	unsigned long flags;
 	int local = 0;
 	int ssize;
@@ -1067,8 +1066,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	local_irq_save(flags);
 
 	/* Is that local to this CPU ? */
-	mask = cpumask_of_cpu(smp_processor_id());
-	if (cpus_equal(mm->cpu_vm_mask, mask))
+	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
 		local = 1;
 
 	/* Hash it in */
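Folding the comparison into a single expression removes the on-stack `cpumask_t mask` dropped in the previous hunk. cpumask_equal() itself is just a bitmap comparison over the configured number of CPU bits; its definition at the time was roughly:

	static inline bool cpumask_equal(const struct cpumask *src1p,
					 const struct cpumask *src2p)
	{
		return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
				    nr_cpumask_bits);
	}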
@@ -97,7 +97,7 @@ static unsigned int steal_context_smp(unsigned int id)
 		mm->context.id = MMU_NO_CONTEXT;
 
 		/* Mark it stale on all CPUs that used this mm */
-		for_each_cpu_mask_nr(cpu, mm->cpu_vm_mask)
+		for_each_cpu(cpu, mm_cpumask(mm))
 			__set_bit(id, stale_map[cpu]);
 		return id;
 	}
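for_each_cpu() is the pointer-taking replacement for for_each_cpu_mask_nr(); both stop at nr_cpu_ids rather than scanning all NR_CPUS bits. Its definition at the time was roughly (a sketch):

	#define for_each_cpu(cpu, mask)				\
		for ((cpu) = -1;				\
			(cpu) = cpumask_next((cpu), (mask)),	\
			(cpu) < nr_cpu_ids;)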
@@ -82,11 +82,10 @@ static void pte_free_submit(struct pte_freelist_batch *batch)
 void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
 {
 	/* This is safe since tlb_gather_mmu has disabled preemption */
-	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
 	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
 
 	if (atomic_read(&tlb->mm->mm_users) < 2 ||
-	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
+	    cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))){
 		pgtable_free(pgf);
 		return;
 	}
@@ -139,12 +139,12 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
  */
 void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 {
-	cpumask_t tmp;
+	const struct cpumask *tmp;
 	int i, local = 0;
 
 	i = batch->index;
-	tmp = cpumask_of_cpu(smp_processor_id());
-	if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
+	tmp = cpumask_of(smp_processor_id());
+	if (cpumask_equal(mm_cpumask(batch->mm), tmp))
 		local = 1;
 	if (i == 1)
 		flush_hash_page(batch->vaddr[0], batch->pte[0],
@@ -132,11 +132,11 @@ void flush_tlb_mm(struct mm_struct *mm)
 	pid = mm->context.id;
 	if (unlikely(pid == MMU_NO_CONTEXT))
 		goto no_context;
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
-	if (!cpus_empty(cpu_mask)) {
+	if (!cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
 		struct tlb_flush_param p = { .pid = pid };
-		smp_call_function_mask(cpu_mask, do_flush_tlb_mm_ipi, &p, 1);
+		/* Ignores smp_processor_id() even if set. */
+		smp_call_function_many(mm_cpumask(mm),
+				       do_flush_tlb_mm_ipi, &p, 1);
 	}
 	_tlbil_pid(pid);
 no_context:
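This hunk (and the flush_tlb_page ones below) also changes the IPI idiom, as the new in-line comment notes. The old code copied the mask, cleared the current CPU, and tested for emptiness, which costs a NR_CPUS-bit stack copy. Since smp_call_function_many() ignores the calling CPU on its own, a single cpumask_equal() against cpumask_of(self) suffices to detect the purely local case. Side by side (a sketch; fn and arg stand in for the real IPI handler and parameter):

	/* old idiom: copy the mask just to drop ourselves from it */
	cpumask_t tmp = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), tmp);
	if (!cpus_empty(tmp))
		smp_call_function_mask(tmp, fn, arg, 1);

	/* new idiom: no copy; the callee skips smp_processor_id() itself */
	if (!cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		smp_call_function_many(mm_cpumask(mm), fn, arg, 1);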
@@ -146,16 +146,15 @@ EXPORT_SYMBOL(flush_tlb_mm);
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
-	cpumask_t cpu_mask;
+	struct cpumask *cpu_mask;
 	unsigned int pid;
 
 	preempt_disable();
 	pid = vma ? vma->vm_mm->context.id : 0;
 	if (unlikely(pid == MMU_NO_CONTEXT))
 		goto bail;
-	cpu_mask = vma->vm_mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
-	if (!cpus_empty(cpu_mask)) {
+	cpu_mask = mm_cpumask(vma->vm_mm);
+	if (!cpumask_equal(cpu_mask, cpumask_of(smp_processor_id()))) {
 		/* If broadcast tlbivax is supported, use it */
 		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
 			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
@@ -167,7 +166,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 			goto bail;
 		} else {
 			struct tlb_flush_param p = { .pid = pid, .addr = vmaddr };
-			smp_call_function_mask(cpu_mask,
+			/* Ignores smp_processor_id() even if set in cpu_mask */
+			smp_call_function_many(cpu_mask,
 					       do_flush_tlb_page_ipi, &p, 1);
 		}
 	}
@@ -114,7 +114,7 @@ static inline void mm_needs_global_tlbie(struct mm_struct *mm)
 	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;
 
 	/* Global TLBIE broadcast required with SPEs. */
-	__cpus_setall(&mm->cpu_vm_mask, nr);
+	bitmap_fill(cpumask_bits(mm_cpumask(mm)), nr);
 }
 
 void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
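There is no pointer-based equivalent of __cpus_setall() that takes an arbitrary bit count, so the SPE case drops down one layer: cpumask_bits() exposes the mask's underlying unsigned long array, and bitmap_fill() sets its first nr bits, matching the old behaviour exactly. Roughly:

	/* cpumask_bits() is just the ->bits array of the mask (sketch) */
	#define cpumask_bits(maskp) ((maskp)->bits)

	/* sets bits 0..nr-1, as __cpus_setall(&mm->cpu_vm_mask, nr) did */
	bitmap_fill(cpumask_bits(mm_cpumask(mm)), nr);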