Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: x86: fix 27-rc crash on vsmp due to paravirt during module load x86, oprofile: BUG scheduling while atomic AMD IOMMU: protect completion wait loop with iommu lock AMD IOMMU: set iommu sync flag after command queuing
This commit is contained in:
Commit
9c38c7e573
|
@ -101,10 +101,10 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
|
||||||
*/
|
*/
|
||||||
static int iommu_completion_wait(struct amd_iommu *iommu)
|
static int iommu_completion_wait(struct amd_iommu *iommu)
|
||||||
{
|
{
|
||||||
int ret, ready = 0;
|
int ret = 0, ready = 0;
|
||||||
unsigned status = 0;
|
unsigned status = 0;
|
||||||
struct iommu_cmd cmd;
|
struct iommu_cmd cmd;
|
||||||
unsigned long i = 0;
|
unsigned long flags, i = 0;
|
||||||
|
|
||||||
memset(&cmd, 0, sizeof(cmd));
|
memset(&cmd, 0, sizeof(cmd));
|
||||||
cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
|
cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
|
||||||
|
@ -112,10 +112,12 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
|
||||||
|
|
||||||
iommu->need_sync = 0;
|
iommu->need_sync = 0;
|
||||||
|
|
||||||
ret = iommu_queue_command(iommu, &cmd);
|
spin_lock_irqsave(&iommu->lock, flags);
|
||||||
|
|
||||||
|
ret = __iommu_queue_command(iommu, &cmd);
|
||||||
|
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
goto out;
|
||||||
|
|
||||||
while (!ready && (i < EXIT_LOOP_COUNT)) {
|
while (!ready && (i < EXIT_LOOP_COUNT)) {
|
||||||
++i;
|
++i;
|
||||||
|
@ -130,6 +132,8 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
|
||||||
|
|
||||||
if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
|
if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
|
||||||
printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");
|
printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");
|
||||||
|
out:
|
||||||
|
spin_unlock_irqrestore(&iommu->lock, flags);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -140,6 +144,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
|
||||||
static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
|
static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
|
||||||
{
|
{
|
||||||
struct iommu_cmd cmd;
|
struct iommu_cmd cmd;
|
||||||
|
int ret;
|
||||||
|
|
||||||
BUG_ON(iommu == NULL);
|
BUG_ON(iommu == NULL);
|
||||||
|
|
||||||
|
@ -147,9 +152,11 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
|
||||||
CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
|
CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
|
||||||
cmd.data[0] = devid;
|
cmd.data[0] = devid;
|
||||||
|
|
||||||
|
ret = iommu_queue_command(iommu, &cmd);
|
||||||
|
|
||||||
iommu->need_sync = 1;
|
iommu->need_sync = 1;
|
||||||
|
|
||||||
return iommu_queue_command(iommu, &cmd);
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -159,6 +166,7 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
|
||||||
u64 address, u16 domid, int pde, int s)
|
u64 address, u16 domid, int pde, int s)
|
||||||
{
|
{
|
||||||
struct iommu_cmd cmd;
|
struct iommu_cmd cmd;
|
||||||
|
int ret;
|
||||||
|
|
||||||
memset(&cmd, 0, sizeof(cmd));
|
memset(&cmd, 0, sizeof(cmd));
|
||||||
address &= PAGE_MASK;
|
address &= PAGE_MASK;
|
||||||
|
@ -171,9 +179,11 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
|
||||||
if (pde) /* PDE bit - we wan't flush everything not only the PTEs */
|
if (pde) /* PDE bit - we wan't flush everything not only the PTEs */
|
||||||
cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
|
cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
|
||||||
|
|
||||||
|
ret = iommu_queue_command(iommu, &cmd);
|
||||||
|
|
||||||
iommu->need_sync = 1;
|
iommu->need_sync = 1;
|
||||||
|
|
||||||
return iommu_queue_command(iommu, &cmd);
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
@ -61,7 +61,7 @@ static void vsmp_irq_enable(void)
|
||||||
native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
|
native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
|
||||||
}
|
}
|
||||||
|
|
||||||
static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf,
|
static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
|
||||||
unsigned long addr, unsigned len)
|
unsigned long addr, unsigned len)
|
||||||
{
|
{
|
||||||
switch (type) {
|
switch (type) {
|
||||||
|
|
|
@ -295,10 +295,12 @@ static void nmi_cpu_shutdown(void *dummy)
|
||||||
|
|
||||||
static void nmi_shutdown(void)
|
static void nmi_shutdown(void)
|
||||||
{
|
{
|
||||||
struct op_msrs *msrs = &get_cpu_var(cpu_msrs);
|
struct op_msrs *msrs;
|
||||||
|
|
||||||
nmi_enabled = 0;
|
nmi_enabled = 0;
|
||||||
on_each_cpu(nmi_cpu_shutdown, NULL, 1);
|
on_each_cpu(nmi_cpu_shutdown, NULL, 1);
|
||||||
unregister_die_notifier(&profile_exceptions_nb);
|
unregister_die_notifier(&profile_exceptions_nb);
|
||||||
|
msrs = &get_cpu_var(cpu_msrs);
|
||||||
model->shutdown(msrs);
|
model->shutdown(msrs);
|
||||||
free_msrs();
|
free_msrs();
|
||||||
put_cpu_var(cpu_msrs);
|
put_cpu_var(cpu_msrs);
|
||||||
|
|
Loading…
Reference in a new issue