powerpc/64s: Allocate slb_shadow structures individually
slb_shadow structures are avoided for radix environment.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Parent: 499dcd4137
Commit: 384e806784
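As an illustration of the shape of this change (not part of the commit itself), the sketch below is a minimal userspace C model of the allocation strategy in the diff that follows: the old allocate_slb_shadows() carved one flat array covering every possible CPU out of memblock, while the new new_slb_shadow() gives each CPU its own zeroed, immediately initialised structure. Here calloc()/free() stand in for memblock_alloc_base()/memblock_free(), NR_CPUS is an arbitrary stand-in for nr_cpu_ids, and the endianness conversions are omitted; only the identifiers that also appear in the diff come from the kernel tree.

/*
 * Illustrative userspace model only -- not kernel code. It mirrors the
 * shape of the change in the diff below.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS		4	/* arbitrary stand-in for nr_cpu_ids */
#define SLB_NUM_BOLTED	3

struct slb_shadow {
	unsigned int persistent;
	unsigned int buffer_length;
	/* save_area and cpu_to_be32() conversions omitted in this model */
};

/* Old scheme: one contiguous allocation sized for every possible CPU. */
static struct slb_shadow *allocate_slb_shadows(int nr_cpus)
{
	return calloc(nr_cpus, sizeof(struct slb_shadow));
}

/* New scheme: one zeroed allocation per CPU, initialised immediately. */
static struct slb_shadow *new_slb_shadow(void)
{
	struct slb_shadow *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;
	s->persistent = SLB_NUM_BOLTED;
	s->buffer_length = sizeof(*s);
	return s;
}

int main(void)
{
	struct slb_shadow *flat = allocate_slb_shadows(NR_CPUS);
	struct slb_shadow *per_cpu[NR_CPUS];

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		per_cpu[cpu] = new_slb_shadow();

	printf("flat array: %zu bytes up front\n",
	       NR_CPUS * sizeof(struct slb_shadow));
	printf("individual: %zu bytes per CPU, allocated as needed\n",
	       sizeof(struct slb_shadow));

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		free(per_cpu[cpu]);
	free(flat);
	return 0;
}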
@@ -72,41 +72,28 @@ static void __init free_lppaca(struct lppaca *lp)
 #ifdef CONFIG_PPC_BOOK3S_64
 
 /*
- * 3 persistent SLBs are registered here. The buffer will be zero
+ * 3 persistent SLBs are allocated here. The buffer will be zero
  * initially, hence will all be invaild until we actually write them.
  *
  * If you make the number of persistent SLB entries dynamic, please also
  * update PR KVM to flush and restore them accordingly.
  */
-static struct slb_shadow * __initdata slb_shadow;
-
-static void __init allocate_slb_shadows(int nr_cpus, int limit)
-{
-	int size = PAGE_ALIGN(sizeof(struct slb_shadow) * nr_cpus);
-
-	if (early_radix_enabled())
-		return;
-
-	slb_shadow = __va(memblock_alloc_base(size, PAGE_SIZE, limit));
-	memset(slb_shadow, 0, size);
-}
-
-static struct slb_shadow * __init init_slb_shadow(int cpu)
+static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
 {
 	struct slb_shadow *s;
 
-	if (early_radix_enabled())
-		return NULL;
-
-	s = &slb_shadow[cpu];
-
-	/*
-	 * When we come through here to initialise boot_paca, the slb_shadow
-	 * buffers are not allocated yet. That's OK, we'll get one later in
-	 * boot, but make sure we don't corrupt memory at 0.
-	 */
-	if (!slb_shadow)
-		return NULL;
+	if (cpu != boot_cpuid) {
+		/*
+		 * Boot CPU comes here before early_radix_enabled
+		 * is parsed (e.g., for disable_radix). So allocate
+		 * always and this will be fixed up in free_unused_pacas.
+		 */
+		if (early_radix_enabled())
+			return NULL;
+	}
+
+	s = __va(memblock_alloc_base(sizeof(*s), L1_CACHE_BYTES, limit));
+	memset(s, 0, sizeof(*s));
 
 	s->persistent = cpu_to_be32(SLB_NUM_BOLTED);
 	s->buffer_length = cpu_to_be32(sizeof(*s));
@@ -114,10 +101,6 @@ static struct slb_shadow * __init init_slb_shadow(int cpu)
 	return s;
 }
 
-#else /* !CONFIG_PPC_BOOK3S_64 */
-
-static void __init allocate_slb_shadows(int nr_cpus, int limit) { }
-
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 /* The Paca is an array with one entry per processor. Each contains an
@@ -151,7 +134,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
 	new_paca->__current = &init_task;
 	new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
 #ifdef CONFIG_PPC_BOOK3S_64
-	new_paca->slb_shadow_ptr = init_slb_shadow(cpu);
+	new_paca->slb_shadow_ptr = NULL;
 #endif
 
 #ifdef CONFIG_PPC_BOOK3E
@@ -222,13 +205,16 @@ void __init allocate_pacas(void)
 	printk(KERN_DEBUG "Allocated %lu bytes for %u pacas\n",
 			size, nr_cpu_ids);
 
-	allocate_slb_shadows(nr_cpu_ids, limit);
-
 	/* Can't use for_each_*_cpu, as they aren't functional yet */
 	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
-		initialise_paca(paca_ptrs[cpu], cpu);
+		struct paca_struct *paca = paca_ptrs[cpu];
+
+		initialise_paca(paca, cpu);
 #ifdef CONFIG_PPC_PSERIES
-		paca_ptrs[cpu]->lppaca_ptr = new_lppaca(cpu, limit);
+		paca->lppaca_ptr = new_lppaca(cpu, limit);
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+		paca->slb_shadow_ptr = new_slb_shadow(cpu, limit);
 #endif
 	}
 }
@@ -263,6 +249,15 @@ void __init free_unused_pacas(void)
 
 	paca_nr_cpu_ids = nr_cpu_ids;
 	paca_ptrs_size = new_ptrs_size;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+	if (early_radix_enabled()) {
+		/* Ugly fixup, see new_slb_shadow() */
+		memblock_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr),
+				sizeof(struct slb_shadow));
+		paca_ptrs[boot_cpuid]->slb_shadow_ptr = NULL;
+	}
+#endif
 }
 
 void copy_mm_to_paca(struct mm_struct *mm)
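The most subtle part of the diff is the boot-CPU ordering handled by new_slb_shadow() and free_unused_pacas(): the boot CPU's paca is initialised before the kernel knows whether it will run with the radix MMU (e.g. before disable_radix is parsed), so its slb_shadow is always allocated and then released later once radix turns out to be enabled. The sketch below is an illustrative userspace model of that ordering only; radix_enabled, alloc_shadow() and fixup_boot_cpu() are invented stand-ins for the kernel logic, and calloc()/free() replace memblock_alloc_base()/memblock_free().

/*
 * Illustrative userspace model of the boot-CPU ordering problem handled
 * in new_slb_shadow()/free_unused_pacas() above -- not kernel code.
 * radix_enabled is only known after the boot CPU's structure has already
 * been allocated, so that allocation is done unconditionally and undone
 * later once the MMU mode is decided.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS		4
#define BOOT_CPU	0

struct slb_shadow { unsigned int persistent; };

static struct slb_shadow *shadow_ptrs[NR_CPUS];
static bool radix_enabled;	/* decided only after the boot CPU is set up */

static void alloc_shadow(int cpu)
{
	/*
	 * Secondary CPUs are set up after the MMU mode is known, so they
	 * can skip the allocation outright when radix is in use. The boot
	 * CPU cannot, so it always allocates and relies on the fixup below.
	 */
	if (cpu != BOOT_CPU && radix_enabled)
		return;
	shadow_ptrs[cpu] = calloc(1, sizeof(struct slb_shadow));
}

/* Models the "ugly fixup" in free_unused_pacas(). */
static void fixup_boot_cpu(void)
{
	if (radix_enabled) {
		free(shadow_ptrs[BOOT_CPU]);
		shadow_ptrs[BOOT_CPU] = NULL;
	}
}

int main(void)
{
	alloc_shadow(BOOT_CPU);		/* happens before the decision below */
	radix_enabled = true;		/* e.g. no disable_radix on the command line */

	for (int cpu = 1; cpu < NR_CPUS; cpu++)
		alloc_shadow(cpu);	/* skipped entirely under radix */

	fixup_boot_cpu();		/* boot CPU's early allocation is released */

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d: %s\n", cpu,
		       shadow_ptrs[cpu] ? "has slb_shadow" : "no slb_shadow");

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		free(shadow_ptrs[cpu]);
	return 0;
}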