* A performance fix for recent large AMD systems that avoids an ancient
cpu idle hardware workaround. * A new Intel model number. Folks like these upstream as soon as possible so that each developer doing feature development doesn't need to carry their own #define. * SGX fixes for a userspace crash and a rare kernel warning -----BEGIN PGP SIGNATURE----- iQIzBAABCgAdFiEEV76QKkVc4xCGURexaDWVMHDJkrAFAmMx6tAACgkQaDWVMHDJ krClIQ//fSv5oE6XpRCGx9FuiTz6m1s6zebSyY1m1wyQ8j7InoBbgJnKc1GfBNvT +RCudOkHI5mqLsB7S5FcitFESH/TxrUQ3LlIXaMTySvf3OqaBe6oOFpBBoDD6Nal gzCoPfZ6dOLl7D6YjiYkSL3rWP3wMhsIm2I8dVwDvxD7iw9oRuTzON+DEFR/+b2L RTPTSGbGEHLlEXVc5S3+KYAGDTVVxo5XifLauFVWCa3bWCi6Wq78aJQnyVmvoCu9 iHs3hb7TOzSL4hS3nFHBL8wd1QXNfg2e7/gxl+AVhiTAyoQL5atpa6NnL5MHehGE +HVJtrskFs9GjakGJmCHlh5tJy7NeiHcggdrL+EtqUif4qOehhKytIPw99Vmq8Po B7nxMMueZQJZfsnkLttYxMTBbPv4oYAzn3uCzdODDjbUQrPkJv//pcW7cWhwGtda GIspz1jBF+CFMygke7/xNfhEiwxIcu8nZ7HywUhWbcoGv+N3IpAgeMHlYkAIqgXA Qhluo5o09LaTFmIS6j1Ba+tEXzTPdQdQBpBQDC3u4A5U8KOSsXA9b1OA1pPowF1k ur4PbJe5eq2LvXofmISorCAH9qw2lpJk3n+rWojU6Rml+SI4flrGWuiRPeqhJP2B RuiVSjx9tS9ohKIo/tZOo7varj7Ct+W2ZO/M40hp3cB94sFGp5s= =ULl1 -----END PGP SIGNATURE----- Merge tag 'x86_urgent_for_v6.0-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip Pull x86 fixes from Dave Hansen: - A performance fix for recent large AMD systems that avoids an ancient cpu idle hardware workaround - A new Intel model number. Folks like these upstream as soon as possible so that each developer doing feature development doesn't need to carry their own #define - SGX fixes for a userspace crash and a rare kernel warning * tag 'x86_urgent_for_v6.0-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: ACPI: processor idle: Practically limit "Dummy wait" workaround to old Intel systems x86/sgx: Handle VA page allocation failure for EAUG on PF. x86/sgx: Do not fail on incomplete sanitization on premature stop of ksgxd x86/cpu: Add CPU model numbers for Meteor Lake
This commit is contained in:
Commit
a1375562c0
|
@ -115,6 +115,9 @@
|
|||
#define INTEL_FAM6_RAPTORLAKE_P 0xBA
|
||||
#define INTEL_FAM6_RAPTORLAKE_S 0xBF
|
||||
|
||||
#define INTEL_FAM6_METEORLAKE 0xAC
|
||||
#define INTEL_FAM6_METEORLAKE_L 0xAA
|
||||
|
||||
/* "Small Core" Processors (Atom) */
|
||||
|
||||
#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
|
||||
|
|
|
@ -344,8 +344,11 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
|
|||
}
|
||||
|
||||
va_page = sgx_encl_grow(encl, false);
|
||||
if (IS_ERR(va_page))
|
||||
if (IS_ERR(va_page)) {
|
||||
if (PTR_ERR(va_page) == -EBUSY)
|
||||
vmret = VM_FAULT_NOPAGE;
|
||||
goto err_out_epc;
|
||||
}
|
||||
|
||||
if (va_page)
|
||||
list_add(&va_page->list, &encl->va_pages);
|
||||
|
|
|
@ -49,9 +49,13 @@ static LIST_HEAD(sgx_dirty_page_list);
|
|||
* Reset post-kexec EPC pages to the uninitialized state. The pages are removed
|
||||
* from the input list, and made available for the page allocator. SECS pages
|
||||
* prepending their children in the input list are left intact.
|
||||
*
|
||||
* Return 0 when sanitization was successful or kthread was stopped, and the
|
||||
* number of unsanitized pages otherwise.
|
||||
*/
|
||||
static void __sgx_sanitize_pages(struct list_head *dirty_page_list)
|
||||
static unsigned long __sgx_sanitize_pages(struct list_head *dirty_page_list)
|
||||
{
|
||||
unsigned long left_dirty = 0;
|
||||
struct sgx_epc_page *page;
|
||||
LIST_HEAD(dirty);
|
||||
int ret;
|
||||
|
@ -59,7 +63,7 @@ static void __sgx_sanitize_pages(struct list_head *dirty_page_list)
|
|||
/* dirty_page_list is thread-local, no need for a lock: */
|
||||
while (!list_empty(dirty_page_list)) {
|
||||
if (kthread_should_stop())
|
||||
return;
|
||||
return 0;
|
||||
|
||||
page = list_first_entry(dirty_page_list, struct sgx_epc_page, list);
|
||||
|
||||
|
@ -92,12 +96,14 @@ static void __sgx_sanitize_pages(struct list_head *dirty_page_list)
|
|||
} else {
|
||||
/* The page is not yet clean - move to the dirty list. */
|
||||
list_move_tail(&page->list, &dirty);
|
||||
left_dirty++;
|
||||
}
|
||||
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
list_splice(&dirty, dirty_page_list);
|
||||
return left_dirty;
|
||||
}
|
||||
|
||||
static bool sgx_reclaimer_age(struct sgx_epc_page *epc_page)
|
||||
|
@ -395,10 +401,7 @@ static int ksgxd(void *p)
|
|||
* required for SECS pages, whose child pages blocked EREMOVE.
|
||||
*/
|
||||
__sgx_sanitize_pages(&sgx_dirty_page_list);
|
||||
__sgx_sanitize_pages(&sgx_dirty_page_list);
|
||||
|
||||
/* sanity check: */
|
||||
WARN_ON(!list_empty(&sgx_dirty_page_list));
|
||||
WARN_ON(__sgx_sanitize_pages(&sgx_dirty_page_list));
|
||||
|
||||
while (!kthread_should_stop()) {
|
||||
if (try_to_freeze())
|
||||
|
|
|
@ -531,10 +531,27 @@ static void wait_for_freeze(void)
|
|||
/* No delay is needed if we are in guest */
|
||||
if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
|
||||
return;
|
||||
/*
|
||||
* Modern (>=Nehalem) Intel systems use ACPI via intel_idle,
|
||||
* not this code. Assume that any Intel systems using this
|
||||
* are ancient and may need the dummy wait. This also assumes
|
||||
* that the motivating chipset issue was Intel-only.
|
||||
*/
|
||||
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
|
||||
return;
|
||||
#endif
|
||||
/* Dummy wait op - must do something useless after P_LVL2 read
|
||||
because chipsets cannot guarantee that STPCLK# signal
|
||||
gets asserted in time to freeze execution properly. */
|
||||
/*
|
||||
* Dummy wait op - must do something useless after P_LVL2 read
|
||||
* because chipsets cannot guarantee that STPCLK# signal gets
|
||||
* asserted in time to freeze execution properly
|
||||
*
|
||||
* This workaround has been in place since the original ACPI
|
||||
* implementation was merged, circa 2002.
|
||||
*
|
||||
* If a profile is pointing to this instruction, please first
|
||||
* consider moving your system to a more modern idle
|
||||
* mechanism.
|
||||
*/
|
||||
inl(acpi_gbl_FADT.xpm_timer_block.address);
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in new issue