/*
 * Memory barrier definitions for x86 User-Mode Linux (UML).
 *
 * Provides the architecture-level mb()/rmb()/wmb() primitives and the
 * DMA-ordering barriers dma_rmb()/dma_wmb(); everything not defined here
 * is picked up from <asm-generic/barrier.h> at the bottom.
 */
#ifndef _ASM_UM_BARRIER_H_
#define _ASM_UM_BARRIER_H_

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeatures.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32

/*
 * 32-bit: the fence instructions are SSE/SSE2 features, so patch them in
 * at runtime via alternative(); the "lock; addl" to the top of the stack
 * is the fallback full barrier on CPUs without them.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)

#else /* CONFIG_X86_32 */

/* 64-bit: SSE2 is architectural, so the fence instructions are always safe. */
#define mb() asm volatile("mfence" : : : "memory")
#define rmb() asm volatile("lfence" : : : "memory")
#define wmb() asm volatile("sfence" : : : "memory")

#endif /* CONFIG_X86_32 */

/*
 * On Pentium-Pro-era parts (CONFIG_X86_PPRO_FENCE) DMA reads need a real
 * hardware barrier; everywhere else the x86 memory model makes a compiler
 * barrier sufficient for DMA ordering.
 */
#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb() rmb()
#else /* CONFIG_X86_PPRO_FENCE */
#define dma_rmb() barrier()
#endif /* CONFIG_X86_PPRO_FENCE */

#define dma_wmb() barrier()

#include <asm-generic/barrier.h>

#endif