[PATCH] x86_64: prefix inline functions with __always_inline in vsyscall

In the vsyscall function do_vgettimeofday(), some helper functions are
declared inline.  Plain inline is only a hint: gcc is free to emit the
function out of line.  Vsyscall code runs from a user-mapped page, so an
out-of-line copy would leave behind a call into normal kernel text that
user space cannot reach.  Replace inline with the __always_inline prefix
to force inlining.

Current gcc versions do inline these functions, but that behaviour is
not guaranteed.
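
As an illustration (not part of the patch; the function names are
hypothetical), __always_inline expands to
inline __attribute__((always_inline)), which makes gcc inline the
function regardless of its usual heuristics:

	/* hypothetical helper: plain inline, gcc may still emit a real call */
	static inline __u32 hinted_read(const volatile void __iomem *addr)
	{
		return *(__force volatile __u32 *)addr;
	}

	/* forced: gcc must expand this at every call site */
	static __always_inline __u32 forced_read(const volatile void __iomem *addr)
	{
		return *(__force volatile __u32 *)addr;
	}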

Signed-off-by: bibo mao <bibo.mao@intel.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
mao, bibo authored 2006-04-11 12:54:54 +02:00, committed by Linus Torvalds
Parent: 44b940c299
Commit: cde227afe6
2 changed files with 3 additions and 3 deletions


@@ -177,7 +177,7 @@ static inline __u16 __readw(const volatile void __iomem *addr)
 {
 	return *(__force volatile __u16 *)addr;
 }
-static inline __u32 __readl(const volatile void __iomem *addr)
+static __always_inline __u32 __readl(const volatile void __iomem *addr)
 {
 	return *(__force volatile __u32 *)addr;
 }


@@ -73,7 +73,7 @@ static inline int write_tryseqlock(seqlock_t *sl)
 }
 
 /* Start of read calculation -- fetch last complete writer token */
-static inline unsigned read_seqbegin(const seqlock_t *sl)
+static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
 {
 	unsigned ret = sl->sequence;
 	smp_rmb();
@@ -88,7 +88,7 @@ static inline unsigned read_seqbegin(const seqlock_t *sl)
  *
  * Using xor saves one conditional branch.
  */
-static inline int read_seqretry(const seqlock_t *sl, unsigned iv)
+static __always_inline int read_seqretry(const seqlock_t *sl, unsigned iv)
 {
 	smp_rmb();
 	return (iv & 1) | (sl->sequence ^ iv);
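
For context, these readers sit on the vsyscall fast path; a minimal
sketch of the canonical seqlock read loop (the lock and data names here
are hypothetical):

	static seqlock_t my_lock = SEQLOCK_UNLOCKED;
	static struct timeval my_time;

	static struct timeval read_time(void)
	{
		struct timeval tv;
		unsigned seq;

		do {
			seq = read_seqbegin(&my_lock);	/* fetch writer token */
			tv = my_time;			/* copy protected data */
		} while (read_seqretry(&my_lock, seq));	/* retry if a write ran */

		return tv;
	}

read_seqretry() is nonzero when the token was odd (a write was in
progress at read_seqbegin()) or when the sequence has since changed; the
xor folds both checks into a single branch.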