powerpc/mce: Remove per cpu variables from MCE handlers
Access to per-cpu variables requires translation to be enabled on pseries machines running in hash MMU mode. Since part of the MCE handler runs in real mode, and part of the MCE handling code is shared between the pseries and powernv platforms, it becomes difficult to manage these variables differently on different architectures. So keep these variables in the paca instead of having them as per-cpu variables, to avoid complications. Signed-off-by: Ganesh Goudar <ganeshgr@linux.ibm.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> Link: https://lore.kernel.org/r/20210128104143.70668-2-ganeshgr@linux.ibm.com
This commit is contained in:
Parent
17c5cf0fb9
Commit
923b3cf00b
|
@ -206,6 +206,17 @@ struct mce_error_info {
|
|||
|
||||
/* Maximum number of nested/queued MCE events tracked per CPU. */
#define MAX_MC_EVT 10

/*
 * Per-CPU machine check state. This lives behind a pointer in the paca
 * (rather than in per-cpu data) so it can be reached while translation
 * is disabled — part of the MCE handler runs in real mode, where per-cpu
 * variable access is not safe on hash-MMU pseries.
 */
struct mce_info {
	/* Depth of currently nested, in-flight MCE events. */
	int mce_nest_count;
	struct machine_check_event mce_event[MAX_MC_EVT];
	/* Queue for delayed MCE events. */
	int mce_queue_count;
	struct machine_check_event mce_event_queue[MAX_MC_EVT];
	/* Queue for delayed MCE UE events. */
	int mce_ue_count;
	struct machine_check_event mce_ue_event_queue[MAX_MC_EVT];
};
|
||||
|
||||
/* Release flags for get_mce_event() */
|
||||
#define MCE_EVENT_RELEASE true
|
||||
#define MCE_EVENT_DONTRELEASE false
|
||||
|
@ -234,4 +245,11 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs);
|
|||
long __machine_check_early_realmode_p9(struct pt_regs *regs);
|
||||
long __machine_check_early_realmode_p10(struct pt_regs *regs);
|
||||
#endif /* CONFIG_PPC_BOOK3S_64 */
|
||||
|
||||
#ifdef CONFIG_PPC_BOOK3S_64
/* Allocate per-CPU MCE state (struct mce_info) and attach it to each paca. */
void mce_init(void);
#else
/* No paca-based MCE state is needed outside Book3S-64. */
static inline void mce_init(void) { };
#endif /* CONFIG_PPC_BOOK3S_64 */
|
||||
|
||||
#endif /* __ASM_PPC64_MCE_H__ */
|
||||
|
|
|
@ -29,6 +29,7 @@
|
|||
#include <asm/hmi.h>
|
||||
#include <asm/cpuidle.h>
|
||||
#include <asm/atomic.h>
|
||||
#include <asm/mce.h>
|
||||
|
||||
#include <asm-generic/mmiowb_types.h>
|
||||
|
||||
|
@ -273,6 +274,9 @@ struct paca_struct {
|
|||
#ifdef CONFIG_MMIOWB
|
||||
struct mmiowb_state mmiowb_state;
|
||||
#endif
|
||||
#ifdef CONFIG_PPC_BOOK3S_64
|
||||
struct mce_info *mce_info;
|
||||
#endif /* CONFIG_PPC_BOOK3S_64 */
|
||||
} ____cacheline_aligned;
|
||||
|
||||
extern void copy_mm_to_paca(struct mm_struct *mm);
|
||||
|
|
|
@ -17,23 +17,14 @@
|
|||
#include <linux/irq_work.h>
|
||||
#include <linux/extable.h>
|
||||
#include <linux/ftrace.h>
|
||||
#include <linux/memblock.h>
|
||||
|
||||
#include <asm/machdep.h>
|
||||
#include <asm/mce.h>
|
||||
#include <asm/nmi.h>
|
||||
#include <asm/asm-prototypes.h>
|
||||
|
||||
static DEFINE_PER_CPU(int, mce_nest_count);
|
||||
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event);
|
||||
|
||||
/* Queue for delayed MCE events. */
|
||||
static DEFINE_PER_CPU(int, mce_queue_count);
|
||||
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue);
|
||||
|
||||
/* Queue for delayed MCE UE events. */
|
||||
static DEFINE_PER_CPU(int, mce_ue_count);
|
||||
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
|
||||
mce_ue_event_queue);
|
||||
#include "setup.h"
|
||||
|
||||
static void machine_check_process_queued_event(struct irq_work *work);
|
||||
static void machine_check_ue_irq_work(struct irq_work *work);
|
||||
|
@ -104,9 +95,10 @@ void save_mce_event(struct pt_regs *regs, long handled,
|
|||
struct mce_error_info *mce_err,
|
||||
uint64_t nip, uint64_t addr, uint64_t phys_addr)
|
||||
{
|
||||
int index = __this_cpu_inc_return(mce_nest_count) - 1;
|
||||
struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
|
||||
int index = local_paca->mce_info->mce_nest_count++;
|
||||
struct machine_check_event *mce;
|
||||
|
||||
mce = &local_paca->mce_info->mce_event[index];
|
||||
/*
|
||||
* Return if we don't have enough space to log mce event.
|
||||
* mce_nest_count may go beyond MAX_MC_EVT but that's ok,
|
||||
|
@ -192,7 +184,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
|
|||
*/
|
||||
int get_mce_event(struct machine_check_event *mce, bool release)
|
||||
{
|
||||
int index = __this_cpu_read(mce_nest_count) - 1;
|
||||
int index = local_paca->mce_info->mce_nest_count - 1;
|
||||
struct machine_check_event *mc_evt;
|
||||
int ret = 0;
|
||||
|
||||
|
@ -202,7 +194,7 @@ int get_mce_event(struct machine_check_event *mce, bool release)
|
|||
|
||||
/* Check if we have MCE info to process. */
|
||||
if (index < MAX_MC_EVT) {
|
||||
mc_evt = this_cpu_ptr(&mce_event[index]);
|
||||
mc_evt = &local_paca->mce_info->mce_event[index];
|
||||
/* Copy the event structure and release the original */
|
||||
if (mce)
|
||||
*mce = *mc_evt;
|
||||
|
@ -212,7 +204,7 @@ int get_mce_event(struct machine_check_event *mce, bool release)
|
|||
}
|
||||
/* Decrement the count to free the slot. */
|
||||
if (release)
|
||||
__this_cpu_dec(mce_nest_count);
|
||||
local_paca->mce_info->mce_nest_count--;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -234,13 +226,14 @@ static void machine_check_ue_event(struct machine_check_event *evt)
|
|||
{
|
||||
int index;
|
||||
|
||||
index = __this_cpu_inc_return(mce_ue_count) - 1;
|
||||
index = local_paca->mce_info->mce_ue_count++;
|
||||
/* If queue is full, just return for now. */
|
||||
if (index >= MAX_MC_EVT) {
|
||||
__this_cpu_dec(mce_ue_count);
|
||||
local_paca->mce_info->mce_ue_count--;
|
||||
return;
|
||||
}
|
||||
memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));
|
||||
memcpy(&local_paca->mce_info->mce_ue_event_queue[index],
|
||||
evt, sizeof(*evt));
|
||||
|
||||
/* Queue work to process this event later. */
|
||||
irq_work_queue(&mce_ue_event_irq_work);
|
||||
|
@ -257,13 +250,14 @@ void machine_check_queue_event(void)
|
|||
if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
|
||||
return;
|
||||
|
||||
index = __this_cpu_inc_return(mce_queue_count) - 1;
|
||||
index = local_paca->mce_info->mce_queue_count++;
|
||||
/* If queue is full, just return for now. */
|
||||
if (index >= MAX_MC_EVT) {
|
||||
__this_cpu_dec(mce_queue_count);
|
||||
local_paca->mce_info->mce_queue_count--;
|
||||
return;
|
||||
}
|
||||
memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));
|
||||
memcpy(&local_paca->mce_info->mce_event_queue[index],
|
||||
&evt, sizeof(evt));
|
||||
|
||||
/* Queue irq work to process this event later. */
|
||||
irq_work_queue(&mce_event_process_work);
|
||||
|
@ -290,9 +284,9 @@ static void machine_process_ue_event(struct work_struct *work)
|
|||
int index;
|
||||
struct machine_check_event *evt;
|
||||
|
||||
while (__this_cpu_read(mce_ue_count) > 0) {
|
||||
index = __this_cpu_read(mce_ue_count) - 1;
|
||||
evt = this_cpu_ptr(&mce_ue_event_queue[index]);
|
||||
while (local_paca->mce_info->mce_ue_count > 0) {
|
||||
index = local_paca->mce_info->mce_ue_count - 1;
|
||||
evt = &local_paca->mce_info->mce_ue_event_queue[index];
|
||||
blocking_notifier_call_chain(&mce_notifier_list, 0, evt);
|
||||
#ifdef CONFIG_MEMORY_FAILURE
|
||||
/*
|
||||
|
@ -305,7 +299,7 @@ static void machine_process_ue_event(struct work_struct *work)
|
|||
*/
|
||||
if (evt->error_type == MCE_ERROR_TYPE_UE) {
|
||||
if (evt->u.ue_error.ignore_event) {
|
||||
__this_cpu_dec(mce_ue_count);
|
||||
local_paca->mce_info->mce_ue_count--;
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -321,7 +315,7 @@ static void machine_process_ue_event(struct work_struct *work)
|
|||
"was generated\n");
|
||||
}
|
||||
#endif
|
||||
__this_cpu_dec(mce_ue_count);
|
||||
local_paca->mce_info->mce_ue_count--;
|
||||
}
|
||||
}
|
||||
/*
|
||||
|
@ -339,17 +333,17 @@ static void machine_check_process_queued_event(struct irq_work *work)
|
|||
* For now just print it to console.
|
||||
* TODO: log this error event to FSP or nvram.
|
||||
*/
|
||||
while (__this_cpu_read(mce_queue_count) > 0) {
|
||||
index = __this_cpu_read(mce_queue_count) - 1;
|
||||
evt = this_cpu_ptr(&mce_event_queue[index]);
|
||||
while (local_paca->mce_info->mce_queue_count > 0) {
|
||||
index = local_paca->mce_info->mce_queue_count - 1;
|
||||
evt = &local_paca->mce_info->mce_event_queue[index];
|
||||
|
||||
if (evt->error_type == MCE_ERROR_TYPE_UE &&
|
||||
evt->u.ue_error.ignore_event) {
|
||||
__this_cpu_dec(mce_queue_count);
|
||||
local_paca->mce_info->mce_queue_count--;
|
||||
continue;
|
||||
}
|
||||
machine_check_print_event_info(evt, false, false);
|
||||
__this_cpu_dec(mce_queue_count);
|
||||
local_paca->mce_info->mce_queue_count--;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -742,3 +736,24 @@ long hmi_exception_realmode(struct pt_regs *regs)
|
|||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
 * Allocate the per-CPU MCE state and hang it off each CPU's paca.
 * The memblock allocation is capped at min(ppc64_bolted_size(),
 * ppc64_rma_size) so the memory stays in a region that is reachable
 * early / with translation off (the MCE handler partly runs in real
 * mode). Panics on allocation failure — without this state, machine
 * check handling cannot work at all.
 */
void __init mce_init(void)
{
	u64 limit = min(ppc64_bolted_size(), ppc64_rma_size);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mce_info *mi;

		mi = memblock_alloc_try_nid(sizeof(*mi), __alignof__(*mi),
					    MEMBLOCK_LOW_LIMIT, limit,
					    cpu_to_node(cpu));
		if (!mi)
			panic("Failed to allocate memory for MCE event data\n");
		paca_ptrs[cpu]->mce_info = mi;
	}
}
|
||||
|
|
|
@ -64,6 +64,7 @@
|
|||
#include <asm/mmu_context.h>
|
||||
#include <asm/cpu_has_feature.h>
|
||||
#include <asm/kasan.h>
|
||||
#include <asm/mce.h>
|
||||
|
||||
#include "setup.h"
|
||||
|
||||
|
@ -938,6 +939,7 @@ void __init setup_arch(char **cmdline_p)
|
|||
exc_lvl_early_init();
|
||||
emergency_stack_init();
|
||||
|
||||
mce_init();
|
||||
smp_release_cpus();
|
||||
|
||||
initmem_init();
|
||||
|
|
Loading…
Link in new issue