#ifndef __LINUX_CACHE_H
#define __LINUX_CACHE_H

#include <uapi/linux/kernel.h>
#include <asm/cache.h>

#ifndef L1_CACHE_ALIGN
#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
#endif
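
/*
 * Illustration (assuming an architecture where L1_CACHE_BYTES is 64):
 * L1_CACHE_ALIGN() simply rounds a size up to the next L1 cacheline
 * boundary, so a hypothetical length would round as
 *
 *	L1_CACHE_ALIGN(100)  ->  128
 *	L1_CACHE_ALIGN(128)  ->  128	(already aligned, unchanged)
 */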

#ifndef SMP_CACHE_BYTES
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif

/*
 * __read_mostly is used to keep rarely changing variables out of frequently
 * updated cachelines. If an architecture doesn't support it, ignore the
 * hint.
 */
#ifndef __read_mostly
#define __read_mostly
#endif
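
/*
 * Usage sketch (illustrative only; the identifier below is hypothetical):
 * a pointer that is written once during boot but dereferenced on every
 * fast-path lookup is a typical candidate, e.g.
 *
 *	static struct hlist_head *example_hash_table __read_mostly;
 *
 * Grouping such variables keeps occasional writers from dirtying the
 * cachelines that hot readers depend on.
 */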

/*
 * __ro_after_init is used to mark things that are read-only after init (i.e.
 * after mark_rodata_ro() has been called). These are effectively read-only,
 * but may get written to during init, so can't live in .rodata (via "const").
 */
#ifndef __ro_after_init
#define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
#endif
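
/*
 * Usage sketch (illustrative only; the identifier below is hypothetical):
 * a flag decided once from the command line during early init and never
 * changed afterwards could be declared as
 *
 *	static bool example_feature_enabled __ro_after_init;
 *
 * so that it becomes write-protected once mark_rodata_ro() has run.
 */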

#ifndef ____cacheline_aligned
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#endif

#ifndef ____cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define ____cacheline_aligned_in_smp ____cacheline_aligned
#else
#define ____cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif
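
/*
 * Usage sketch (illustrative only; the struct and field names are
 * hypothetical): the double-underscore variants annotate a member or type
 * directly, e.g. to keep a hot lock away from read-mostly fields:
 *
 *	struct example_queue {
 *		unsigned long	flags;
 *		spinlock_t	lock ____cacheline_aligned_in_smp;
 *	};
 *
 * On !CONFIG_SMP builds the annotation expands to nothing, so no space is
 * wasted on padding.
 */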

#ifndef __cacheline_aligned
#define __cacheline_aligned \
	__attribute__((__aligned__(SMP_CACHE_BYTES), \
		       __section__(".data..cacheline_aligned")))
#endif /* __cacheline_aligned */

#ifndef __cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define __cacheline_aligned_in_smp __cacheline_aligned
#else
#define __cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif
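
/*
 * Usage sketch (illustrative only; the identifier is hypothetical): the
 * single-underscore variants are intended for data definitions, since
 * __cacheline_aligned also places the object in the
 * .data..cacheline_aligned section:
 *
 *	static struct example_stats example_stats __cacheline_aligned_in_smp;
 *
 * Packing aligned objects into their own section avoids scattering padding
 * holes through .data.
 */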

/*
 * The maximum alignment needed for some critical structures.
 * These could be inter-node cacheline sizes/L3 cacheline
 * size, etc.  Define this in asm/cache.h for your arch.
 */
#ifndef INTERNODE_CACHE_SHIFT
#define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT
#endif

#if !defined(____cacheline_internodealigned_in_smp)
#if defined(CONFIG_SMP)
#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))
#else
#define ____cacheline_internodealigned_in_smp
#endif
#endif
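
/*
 * Usage sketch (illustrative only; the struct name is hypothetical):
 * per-node bookkeeping that is touched from multiple nodes may want the
 * larger inter-node alignment:
 *
 *	struct example_node_state {
 *		atomic_long_t	nr_events;
 *	} ____cacheline_internodealigned_in_smp;
 *
 * Architectures needing more than L1 alignment here override
 * INTERNODE_CACHE_SHIFT in their asm/cache.h.
 */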

#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
#define cache_line_size() L1_CACHE_BYTES
#endif
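
/*
 * Usage sketch (illustrative only; "payload_len" is a hypothetical local
 * variable): callers that need the runtime cacheline size, e.g. to round a
 * buffer length up, can use cache_line_size() and let architectures without
 * a dynamic value fall back to L1_CACHE_BYTES:
 *
 *	size_t len = ALIGN(payload_len, cache_line_size());
 */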

#endif /* __LINUX_CACHE_H */