i386: Allow KVM on i386 nonpae

Currently, CONFIG_X86_CMPXCHG64 both enables boot-time checking of
the cmpxchg8b feature and enables compilation of the set_64bit() family.
Since the option depends on PAE, and since KVM depends on set_64bit(),
this effectively disables KVM on i386 nopae.

Simplify by removing the config option altogether: the boot check is made
dependent on CONFIG_X86_PAE directly, and the set_64bit() family is exposed
without constraints.  It is up to users to check for the feature flag (KVM
does not, as virtualization extensions imply its existence).
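
For illustration only (not part of this patch): a caller of set_64bit() or
cmpxchg64() on i386 nopae would test the cmpxchg8b bit in boot_cpu_data
along these lines; the helper name is invented for this sketch.

#include <asm/processor.h>	/* boot_cpu_data */
#include <asm/cpufeature.h>	/* cpu_has(), X86_FEATURE_CX8 */

/* Hypothetical helper: true if this CPU implements cmpxchg8b. */
static inline int have_cmpxchg8b(void)
{
	return cpu_has(&boot_cpu_data, X86_FEATURE_CX8);
}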

Signed-off-by: Avi Kivity <avi@qumranet.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Avi Kivity 2007-07-19 14:30:14 +03:00, committed by Linus Torvalds
Parent 3e1f900bff
Commit 2d9ce177e6
6 changed files with 6 additions and 18 deletions

View file

@@ -297,11 +297,6 @@ config X86_POPAD_OK
 	depends on !M386
 	default y
 
-config X86_CMPXCHG64
-	bool
-	depends on X86_PAE
-	default y
-
 config X86_ALIGNMENT_16
 	bool
 	depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1

View file

@@ -166,7 +166,6 @@ CONFIG_X86_WP_WORKS_OK=y
 CONFIG_X86_INVLPG=y
 CONFIG_X86_BSWAP=y
 CONFIG_X86_POPAD_OK=y
-CONFIG_X86_CMPXCHG64=y
 CONFIG_X86_GOOD_APIC=y
 CONFIG_X86_INTEL_USERCOPY=y
 CONFIG_X86_USE_PPRO_CHECKSUM=y

View file

@@ -52,7 +52,6 @@ CONFIG_X86_WP_WORKS_OK=y
 CONFIG_X86_INVLPG=y
 CONFIG_X86_BSWAP=y
 CONFIG_X86_POPAD_OK=y
-CONFIG_X86_CMPXCHG64=y
 CONFIG_X86_GOOD_APIC=y
 CONFIG_X86_USE_PPRO_CHECKSUM=y
 CONFIG_X86_TSC=y

View file

@@ -11,7 +11,6 @@ if VIRTUALIZATION
 config KVM
 	tristate "Kernel-based Virtual Machine (KVM) support"
 	depends on X86 && EXPERIMENTAL
-	depends on X86_CMPXCHG64 || 64BIT
 	---help---
 	  Support hosting fully virtualized guest machines using hardware
 	  virtualization extensions.  You will need a fairly recent

View file

@@ -3,14 +3,16 @@
 
 #include <linux/bitops.h> /* for LOCK_PREFIX */
 
+/*
+ * Note: if you use set64_bit(), __cmpxchg64(), or their variants, you
+ * you need to test for the feature in boot_cpu_data.
+ */
+
 #define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
 
 struct __xchg_dummy { unsigned long a[100]; };
 #define __xg(x) ((struct __xchg_dummy *)(x))
 
-#ifdef CONFIG_X86_CMPXCHG64
-
 /*
  * The semantics of XCHGCMP8B are a bit strange, this is why
  * there is a loop and the loading of %%eax and %%edx has to
@@ -65,8 +67,6 @@ static inline void __set_64bit_var (unsigned long long *ptr,
 		__set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 		__set_64bit(ptr, ll_low(value), ll_high(value)) )
 
-#endif
-
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
  * Note 2: xchg has side effect, so that attribute volatile is necessary,
@@ -252,8 +252,6 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
 })
 #endif
 
-#ifdef CONFIG_X86_CMPXCHG64
-
 static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
 				      unsigned long long new)
 {
@@ -289,5 +287,3 @@ static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
 	((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\
 					(unsigned long long)(n)))
 #endif
-
-#endif

View file

@@ -29,7 +29,7 @@
 # define NEED_CMOV	0
 #endif
 
-#ifdef CONFIG_X86_CMPXCHG64
+#ifdef CONFIG_X86_PAE
 # define NEED_CX8	(1<<(X86_FEATURE_CX8 & 31))
 #else
 # define NEED_CX8	0
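
For background, and not shown in this diff: NEED_CX8 feeds the kernel's
required-features mask, which early boot code compares against the CPUID
feature bits so a missing mandatory feature produces a clear error rather
than a crash later.  A simplified, conceptual sketch of such a check (not
the kernel's actual boot code; the function name is invented here):

/*
 * Conceptual sketch of a required-features check; the mask argument
 * stands in for a REQUIRED_MASK-style constant built from NEED_* bits.
 */
static int required_features_present(const struct cpuinfo_x86 *c,
				     unsigned long required_mask0)
{
	/* x86_capability[0] carries the CPUID leaf 1 EDX feature bits. */
	return (c->x86_capability[0] & required_mask0) == required_mask0;
}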