locking/atomic: scripts: restructure fallback ifdeffery
Currently the various ordering variants of an atomic operation are defined in groups of full/acquire/release/relaxed ordering variants with some shared ifdeffery and several potential definitions of each ordering variant in different branches of the shared ifdeffery.

As an ordering variant can have several potential definitions down different branches of the shared ifdeffery, it can be painful for a human to find a relevant definition, and we don't have a good location to place anything common to all definitions of an ordering variant (e.g. kerneldoc).

Historically the grouping of full/acquire/release/relaxed ordering variants was necessary as we filled in the missing atomics in the same namespace as the architecture used. It would be easy to accidentally define one ordering fallback in terms of another ordering fallback with redundant barriers, and avoiding that would otherwise require a lot of baroque ifdeffery.

With recent changes we no longer need to fill in the missing atomics in the arch_atomic*_<op>() namespace, and only need to fill in the raw_atomic*_<op>() namespace. Due to this, there's no risk of a namespace collision, and we can define each raw_atomic*_<op> ordering variant with its own ifdeffery checking for the arch_atomic*_<op> ordering variants.

Restructure the fallbacks in this way, with each ordering variant having its own ifdeffery of the form:

| #if defined(arch_atomic_fetch_andnot_acquire)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
| #elif defined(arch_atomic_fetch_andnot_relaxed)
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
| 	__atomic_acquire_fence();
| 	return ret;
| }
| #elif defined(arch_atomic_fetch_andnot)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
| #else
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	return raw_atomic_fetch_and_acquire(~i, v);
| }
| #endif

Note that where there's no relevant arch_atomic*_<op>() ordering variant, we'll define the operation in terms of a distinct raw_atomic*_<otherop>(), as this itself might have been filled in with a fallback.

As we now generate the raw_atomic*_<op>() implementations directly, we no longer need the trivial wrappers, so they are removed.

This makes the ifdeffery easier to follow, and will allow for further improvements in subsequent patches.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-21-mark.rutland@arm.com
Parent: 1815da1718
Commit: 9257959a6e
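For comparison with the acquire example quoted above, the release variant comes out analogously under the new scheme (a sketch assembled from the release and andnot fallback templates in the diff below, not copied verbatim from the generated header):

| #if defined(arch_atomic_fetch_andnot_release)
| #define raw_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
| #elif defined(arch_atomic_fetch_andnot_relaxed)
| static __always_inline int
| raw_atomic_fetch_andnot_release(int i, atomic_t *v)
| {
| 	__atomic_release_fence();
| 	return arch_atomic_fetch_andnot_relaxed(i, v);
| }
| #elif defined(arch_atomic_fetch_andnot)
| #define raw_atomic_fetch_andnot_release arch_atomic_fetch_andnot
| #else
| static __always_inline int
| raw_atomic_fetch_andnot_release(int i, atomic_t *v)
| {
| 	return raw_atomic_fetch_and_release(~i, v);
| }
| #endif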
include/linux/atomic.h
@@ -78,7 +78,6 @@
 })
 
 #include <linux/atomic/atomic-arch-fallback.h>
-#include <linux/atomic/atomic-raw.h>
 #include <linux/atomic/atomic-long.h>
 #include <linux/atomic/atomic-instrumented.h>
 
(Diffs for the two large generated headers, include/linux/atomic/atomic-arch-fallback.h and the removed include/linux/atomic/atomic-raw.h, are not shown because of their size.)
scripts/atomic/fallbacks/acquire
@@ -1,6 +1,6 @@
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_${pfx}${name}${sfx}_acquire(${params})
+raw_${atomic}_${pfx}${name}${sfx}_acquire(${params})
 {
 	${ret} ret = arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
 	__atomic_acquire_fence();
scripts/atomic/fallbacks/add_negative
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_add_negative${order}(${int} i, ${atomic}_t *v)
+raw_${atomic}_add_negative${order}(${int} i, ${atomic}_t *v)
 {
-	return arch_${atomic}_add_return${order}(i, v) < 0;
+	return raw_${atomic}_add_return${order}(i, v) < 0;
 }
 EOF
scripts/atomic/fallbacks/add_unless
@@ -1,7 +1,7 @@
 cat << EOF
 static __always_inline bool
-arch_${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+raw_${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
-	return arch_${atomic}_fetch_add_unless(v, a, u) != u;
+	return raw_${atomic}_fetch_add_unless(v, a, u) != u;
 }
 EOF
scripts/atomic/fallbacks/andnot
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
+raw_${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
 {
-	${retstmt}arch_${atomic}_${pfx}and${sfx}${order}(~i, v);
+	${retstmt}raw_${atomic}_${pfx}and${sfx}${order}(~i, v);
 }
 EOF
scripts/atomic/fallbacks/cmpxchg
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline ${int}
-arch_${atomic}_cmpxchg${order}(${atomic}_t *v, ${int} old, ${int} new)
+raw_${atomic}_cmpxchg${order}(${atomic}_t *v, ${int} old, ${int} new)
 {
-	return arch_cmpxchg${order}(&v->counter, old, new);
+	return raw_cmpxchg${order}(&v->counter, old, new);
 }
 EOF
scripts/atomic/fallbacks/dec
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
+raw_${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
 {
-	${retstmt}arch_${atomic}_${pfx}sub${sfx}${order}(1, v);
+	${retstmt}raw_${atomic}_${pfx}sub${sfx}${order}(1, v);
 }
 EOF
scripts/atomic/fallbacks/dec_and_test
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_dec_and_test(${atomic}_t *v)
+raw_${atomic}_dec_and_test(${atomic}_t *v)
 {
-	return arch_${atomic}_dec_return(v) == 0;
+	return raw_${atomic}_dec_return(v) == 0;
 }
 EOF
scripts/atomic/fallbacks/dec_if_positive
@@ -1,14 +1,14 @@
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_dec_if_positive(${atomic}_t *v)
+raw_${atomic}_dec_if_positive(${atomic}_t *v)
 {
-	${int} dec, c = arch_${atomic}_read(v);
+	${int} dec, c = raw_${atomic}_read(v);
 
 	do {
 		dec = c - 1;
 		if (unlikely(dec < 0))
 			break;
-	} while (!arch_${atomic}_try_cmpxchg(v, &c, dec));
+	} while (!raw_${atomic}_try_cmpxchg(v, &c, dec));
 
 	return dec;
 }
scripts/atomic/fallbacks/dec_unless_positive
@@ -1,13 +1,13 @@
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_dec_unless_positive(${atomic}_t *v)
+raw_${atomic}_dec_unless_positive(${atomic}_t *v)
 {
-	${int} c = arch_${atomic}_read(v);
+	${int} c = raw_${atomic}_read(v);
 
 	do {
 		if (unlikely(c > 0))
 			return false;
-	} while (!arch_${atomic}_try_cmpxchg(v, &c, c - 1));
+	} while (!raw_${atomic}_try_cmpxchg(v, &c, c - 1));
 
 	return true;
 }
scripts/atomic/fallbacks/fence
@@ -1,6 +1,6 @@
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_${pfx}${name}${sfx}(${params})
+raw_${atomic}_${pfx}${name}${sfx}(${params})
 {
 	${ret} ret;
 	__atomic_pre_full_fence();
scripts/atomic/fallbacks/fetch_add_unless
@@ -1,13 +1,13 @@
 cat << EOF
 static __always_inline ${int}
-arch_${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+raw_${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
-	${int} c = arch_${atomic}_read(v);
+	${int} c = raw_${atomic}_read(v);
 
 	do {
 		if (unlikely(c == u))
 			break;
-	} while (!arch_${atomic}_try_cmpxchg(v, &c, c + a));
+	} while (!raw_${atomic}_try_cmpxchg(v, &c, c + a));
 
 	return c;
 }
scripts/atomic/fallbacks/inc
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
+raw_${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
 {
-	${retstmt}arch_${atomic}_${pfx}add${sfx}${order}(1, v);
+	${retstmt}raw_${atomic}_${pfx}add${sfx}${order}(1, v);
 }
 EOF
scripts/atomic/fallbacks/inc_and_test
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_inc_and_test(${atomic}_t *v)
+raw_${atomic}_inc_and_test(${atomic}_t *v)
 {
-	return arch_${atomic}_inc_return(v) == 0;
+	return raw_${atomic}_inc_return(v) == 0;
 }
 EOF
scripts/atomic/fallbacks/inc_not_zero
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_inc_not_zero(${atomic}_t *v)
+raw_${atomic}_inc_not_zero(${atomic}_t *v)
 {
-	return arch_${atomic}_add_unless(v, 1, 0);
+	return raw_${atomic}_add_unless(v, 1, 0);
 }
 EOF
scripts/atomic/fallbacks/inc_unless_negative
@@ -1,13 +1,13 @@
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_inc_unless_negative(${atomic}_t *v)
+raw_${atomic}_inc_unless_negative(${atomic}_t *v)
 {
-	${int} c = arch_${atomic}_read(v);
+	${int} c = raw_${atomic}_read(v);
 
 	do {
 		if (unlikely(c < 0))
 			return false;
-	} while (!arch_${atomic}_try_cmpxchg(v, &c, c + 1));
+	} while (!raw_${atomic}_try_cmpxchg(v, &c, c + 1));
 
 	return true;
 }
scripts/atomic/fallbacks/read_acquire
@@ -1,13 +1,13 @@
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_read_acquire(const ${atomic}_t *v)
+raw_${atomic}_read_acquire(const ${atomic}_t *v)
 {
 	${int} ret;
 
 	if (__native_word(${atomic}_t)) {
 		ret = smp_load_acquire(&(v)->counter);
 	} else {
-		ret = arch_${atomic}_read(v);
+		ret = raw_${atomic}_read(v);
 		__atomic_acquire_fence();
 	}
 
scripts/atomic/fallbacks/release
@@ -1,6 +1,6 @@
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_${pfx}${name}${sfx}_release(${params})
+raw_${atomic}_${pfx}${name}${sfx}_release(${params})
 {
 	__atomic_release_fence();
 	${retstmt}arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
scripts/atomic/fallbacks/set_release
@@ -1,12 +1,12 @@
 cat <<EOF
 static __always_inline void
-arch_${atomic}_set_release(${atomic}_t *v, ${int} i)
+raw_${atomic}_set_release(${atomic}_t *v, ${int} i)
 {
 	if (__native_word(${atomic}_t)) {
 		smp_store_release(&(v)->counter, i);
 	} else {
 		__atomic_release_fence();
-		arch_${atomic}_set(v, i);
+		raw_${atomic}_set(v, i);
 	}
 }
 EOF
scripts/atomic/fallbacks/sub_and_test
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
+raw_${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
 {
-	return arch_${atomic}_sub_return(i, v) == 0;
+	return raw_${atomic}_sub_return(i, v) == 0;
 }
 EOF
scripts/atomic/fallbacks/try_cmpxchg
@@ -1,9 +1,9 @@
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
+raw_${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
 {
 	${int} r, o = *old;
-	r = arch_${atomic}_cmpxchg${order}(v, o, new);
+	r = raw_${atomic}_cmpxchg${order}(v, o, new);
 	if (unlikely(r != o))
 		*old = r;
 	return likely(r == o);
scripts/atomic/fallbacks/xchg
@@ -1,7 +1,7 @@
 cat <<EOF
 static __always_inline ${int}
-arch_${atomic}_xchg${order}(${atomic}_t *v, ${int} new)
+raw_${atomic}_xchg${order}(${atomic}_t *v, ${int} new)
 {
-	return arch_xchg${order}(&v->counter, new);
+	return raw_xchg${order}(&v->counter, new);
 }
 EOF
scripts/atomic/gen-atomic-fallback.sh
@@ -17,19 +17,12 @@ gen_template_fallback()
 	local atomic="$1"; shift
 	local int="$1"; shift
 
-	local atomicname="arch_${atomic}_${pfx}${name}${sfx}${order}"
-
 	local ret="$(gen_ret_type "${meta}" "${int}")"
 	local retstmt="$(gen_ret_stmt "${meta}")"
 	local params="$(gen_params "${int}" "${atomic}" "$@")"
 	local args="$(gen_args "$@")"
 
-	if [ ! -z "${template}" ]; then
-		printf "#ifndef ${atomicname}\n"
-		. ${template}
-		printf "#define ${atomicname} ${atomicname}\n"
-		printf "#endif\n\n"
-	fi
+	. ${template}
 }
 
 #gen_order_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
@@ -59,17 +52,59 @@ gen_proto_fallback()
 	gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
 }
 
-#gen_basic_fallbacks(basename)
-gen_basic_fallbacks()
+#gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, args...)
+gen_proto_order_variant()
 {
-	local basename="$1"; shift
-	cat << EOF
-#define ${basename}_acquire ${basename}
-#define ${basename}_release ${basename}
-#define ${basename}_relaxed ${basename}
-EOF
+	local meta="$1"; shift
+	local pfx="$1"; shift
+	local name="$1"; shift
+	local sfx="$1"; shift
+	local order="$1"; shift
+	local atomic="$1"
+
+	local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
+	local basename="${atomic}_${pfx}${name}${sfx}"
+
+	local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
+
+	# Where there is no possible fallback, this order variant is mandatory
+	# and must be provided by arch code. Add a comment to the header to
+	# make this obvious.
+	#
+	# Ideally we'd error on a missing definition, but arch code might
+	# define this order variant as a C function without a preprocessor
+	# symbol.
+	if [ -z ${template} ] && [ -z "${order}" ] && ! meta_has_relaxed "${meta}"; then
+		printf "#define raw_${atomicname} arch_${atomicname}\n\n"
+		return
+	fi
+
+	printf "#if defined(arch_${atomicname})\n"
+	printf "#define raw_${atomicname} arch_${atomicname}\n"
+
+	# Allow FULL/ACQUIRE/RELEASE ops to be defined in terms of RELAXED ops
+	if [ "${order}" != "_relaxed" ] && meta_has_relaxed "${meta}"; then
+		printf "#elif defined(arch_${basename}_relaxed)\n"
+		gen_order_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
+	fi
+
+	# Allow ACQUIRE/RELEASE/RELAXED ops to be defined in terms of FULL ops
+	if [ ! -z "${order}" ]; then
+		printf "#elif defined(arch_${basename})\n"
+		printf "#define raw_${atomicname} arch_${basename}\n"
+	fi
+
+	printf "#else\n"
+	if [ ! -z "${template}" ]; then
+		gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
+	else
+		printf "#error \"Unable to define raw_${atomicname}\"\n"
+	fi
+
+	printf "#endif\n\n"
 }
 
 
 #gen_proto_order_variants(meta, pfx, name, sfx, atomic, int, args...)
 gen_proto_order_variants()
 {
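To illustrate the mandatory-op early return in gen_proto_order_variant() above: an op with no fallback template and no ordering variants (the void atomic_add is one such op, assuming the usual atomics.tbl entries) gets no #if/#elif/#else chain at all, only the single alias that arch code is required to back:

| #define raw_atomic_add arch_atomic_add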
@@ -79,49 +114,30 @@ gen_proto_order_variants()
 	local sfx="$1"; shift
 	local atomic="$1"
 
-	local basename="arch_${atomic}_${pfx}${name}${sfx}"
-
-	local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "")"
-
-	# If we don't have relaxed atomics, then we don't bother with ordering fallbacks
-	# read_acquire and set_release need to be templated, though
-	if ! meta_has_relaxed "${meta}"; then
-		gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
-
-		if meta_has_acquire "${meta}"; then
-			gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
-		fi
-
-		if meta_has_release "${meta}"; then
-			gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
-		fi
-
-		return
-	fi
-
-	printf "#ifndef ${basename}_relaxed\n"
-
-	if [ ! -z "${template}" ]; then
-		printf "#ifdef ${basename}\n"
-	fi
-
-	gen_basic_fallbacks "${basename}"
-
-	if [ ! -z "${template}" ]; then
-		printf "#endif /* ${basename} */\n\n"
-
-		gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
-		gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
-		gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
-		gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@"
-	fi
-
-	printf "#else /* ${basename}_relaxed */\n\n"
-
-	gen_order_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
-	gen_order_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
-	gen_order_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
-
-	printf "#endif /* ${basename}_relaxed */\n\n"
+	gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
+
+	if meta_has_acquire "${meta}"; then
+		gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
+	fi
+
+	if meta_has_release "${meta}"; then
+		gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
+	fi
+
+	if meta_has_relaxed "${meta}"; then
+		gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@"
+	fi
+}
+
+#gen_basic_fallbacks(basename)
+gen_basic_fallbacks()
+{
+	local basename="$1"; shift
+	cat << EOF
+#define raw_${basename}_acquire arch_${basename}
+#define raw_${basename}_release arch_${basename}
+#define raw_${basename}_relaxed arch_${basename}
+EOF
 }
 
 gen_order_fallbacks()
@@ -130,36 +146,65 @@ gen_order_fallbacks()
 
 	cat <<EOF
 
-#ifndef ${xchg}_acquire
-#define ${xchg}_acquire(...) \\
-	__atomic_op_acquire(${xchg}, __VA_ARGS__)
+#define raw_${xchg}_relaxed arch_${xchg}_relaxed
+
+#ifdef arch_${xchg}_acquire
+#define raw_${xchg}_acquire arch_${xchg}_acquire
+#else
+#define raw_${xchg}_acquire(...) \\
+	__atomic_op_acquire(arch_${xchg}, __VA_ARGS__)
 #endif
 
-#ifndef ${xchg}_release
-#define ${xchg}_release(...) \\
-	__atomic_op_release(${xchg}, __VA_ARGS__)
+#ifdef arch_${xchg}_release
+#define raw_${xchg}_release arch_${xchg}_release
+#else
+#define raw_${xchg}_release(...) \\
+	__atomic_op_release(arch_${xchg}, __VA_ARGS__)
 #endif
 
-#ifndef ${xchg}
-#define ${xchg}(...) \\
-	__atomic_op_fence(${xchg}, __VA_ARGS__)
+#ifdef arch_${xchg}
+#define raw_${xchg} arch_${xchg}
+#else
+#define raw_${xchg}(...) \\
+	__atomic_op_fence(arch_${xchg}, __VA_ARGS__)
 #endif
 
 EOF
 }
 
+gen_xchg_order_fallback()
+{
+	local xchg="$1"; shift
+	local order="$1"; shift
+	local forder="${order:-_fence}"
+
+	printf "#if defined(arch_${xchg}${order})\n"
+	printf "#define raw_${xchg}${order} arch_${xchg}${order}\n"
+
+	if [ "${order}" != "_relaxed" ]; then
+		printf "#elif defined(arch_${xchg}_relaxed)\n"
+		printf "#define raw_${xchg}${order}(...) \\\\\n"
+		printf "	__atomic_op${forder}(arch_${xchg}, __VA_ARGS__)\n"
+	fi
+
+	if [ ! -z "${order}" ]; then
+		printf "#elif defined(arch_${xchg})\n"
+		printf "#define raw_${xchg}${order} arch_${xchg}\n"
+	fi
+
+	printf "#else\n"
+	printf "extern void raw_${xchg}${order}_not_implemented(void);\n"
+	printf "#define raw_${xchg}${order}(...) raw_${xchg}${order}_not_implemented()\n"
+	printf "#endif\n\n"
+}
+
 gen_xchg_fallbacks()
 {
 	local xchg="$1"; shift
-	printf "#ifndef ${xchg}_relaxed\n"
 
-	gen_basic_fallbacks ${xchg}
-
-	printf "#else /* ${xchg}_relaxed */\n"
-
-	gen_order_fallbacks ${xchg}
-
-	printf "#endif /* ${xchg}_relaxed */\n\n"
+	for order in "" "_acquire" "_release" "_relaxed"; do
+		gen_xchg_order_fallback "${xchg}" "${order}"
+	done
 }
 
 gen_try_cmpxchg_fallback()
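As a concrete example of the printf chain in gen_xchg_order_fallback() above, xchg with order "_acquire" should come out as (a sketch, not copied from the generated header):

| #if defined(arch_xchg_acquire)
| #define raw_xchg_acquire arch_xchg_acquire
| #elif defined(arch_xchg_relaxed)
| #define raw_xchg_acquire(...) \
| 	__atomic_op_acquire(arch_xchg, __VA_ARGS__)
| #elif defined(arch_xchg)
| #define raw_xchg_acquire arch_xchg
| #else
| extern void raw_xchg_acquire_not_implemented(void);
| #define raw_xchg_acquire(...) raw_xchg_acquire_not_implemented()
| #endif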
@@ -168,40 +213,61 @@ gen_try_cmpxchg_fallback()
 	local order="$1"; shift;
 
 	cat <<EOF
-#ifndef arch_try_${cmpxchg}${order}
-#define arch_try_${cmpxchg}${order}(_ptr, _oldp, _new) \\
+#define raw_try_${cmpxchg}${order}(_ptr, _oldp, _new) \\
 ({ \\
 	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \\
-	___r = arch_${cmpxchg}${order}((_ptr), ___o, (_new)); \\
+	___r = raw_${cmpxchg}${order}((_ptr), ___o, (_new)); \\
 	if (unlikely(___r != ___o)) \\
 		*___op = ___r; \\
 	likely(___r == ___o); \\
 })
-#endif /* arch_try_${cmpxchg}${order} */
 
 EOF
 }
 
+gen_try_cmpxchg_order_fallback()
+{
+	local cmpxchg="$1"; shift
+	local order="$1"; shift
+	local forder="${order:-_fence}"
+
+	printf "#if defined(arch_try_${cmpxchg}${order})\n"
+	printf "#define raw_try_${cmpxchg}${order} arch_try_${cmpxchg}${order}\n"
+
+	if [ "${order}" != "_relaxed" ]; then
+		printf "#elif defined(arch_try_${cmpxchg}_relaxed)\n"
+		printf "#define raw_try_${cmpxchg}${order}(...) \\\\\n"
+		printf "	__atomic_op${forder}(arch_try_${cmpxchg}, __VA_ARGS__)\n"
+	fi
+
+	if [ ! -z "${order}" ]; then
+		printf "#elif defined(arch_try_${cmpxchg})\n"
+		printf "#define raw_try_${cmpxchg}${order} arch_try_${cmpxchg}\n"
+	fi
+
+	printf "#else\n"
+	gen_try_cmpxchg_fallback "${cmpxchg}" "${order}"
+	printf "#endif\n\n"
+}
+
 gen_try_cmpxchg_fallbacks()
 {
 	local cmpxchg="$1"; shift;
 
-	printf "#ifndef arch_try_${cmpxchg}_relaxed\n"
-	printf "#ifdef arch_try_${cmpxchg}\n"
-
-	gen_basic_fallbacks "arch_try_${cmpxchg}"
-
-	printf "#endif /* arch_try_${cmpxchg} */\n\n"
-
 	for order in "" "_acquire" "_release" "_relaxed"; do
-		gen_try_cmpxchg_fallback "${cmpxchg}" "${order}"
+		gen_try_cmpxchg_order_fallback "${cmpxchg}" "${order}"
 	done
+}
 
-	printf "#else /* arch_try_${cmpxchg}_relaxed */\n"
+gen_cmpxchg_local_fallbacks()
+{
+	local cmpxchg="$1"; shift
 
-	gen_order_fallbacks "arch_try_${cmpxchg}"
-
-	printf "#endif /* arch_try_${cmpxchg}_relaxed */\n\n"
+	printf "#define raw_${cmpxchg} arch_${cmpxchg}\n\n"
+	printf "#ifdef arch_try_${cmpxchg}\n"
+	printf "#define raw_try_${cmpxchg} arch_try_${cmpxchg}\n"
+	printf "#else\n"
+	gen_try_cmpxchg_fallback "${cmpxchg}" ""
+	printf "#endif\n\n"
 }
 
 cat << EOF
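Likewise, gen_cmpxchg_local_fallbacks() above, applied to cmpxchg_local, should emit roughly the following (a sketch; the #else body is the try_cmpxchg template expanded with an empty order):

| #define raw_cmpxchg_local arch_cmpxchg_local
|
| #ifdef arch_try_cmpxchg_local
| #define raw_try_cmpxchg_local arch_try_cmpxchg_local
| #else
| #define raw_try_cmpxchg_local(_ptr, _oldp, _new) \
| ({ \
| 	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
| 	___r = raw_cmpxchg_local((_ptr), ___o, (_new)); \
| 	if (unlikely(___r != ___o)) \
| 		*___op = ___r; \
| 	likely(___r == ___o); \
| })
| #endif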
@@ -217,7 +283,7 @@ cat << EOF
 
 EOF
 
-for xchg in "arch_xchg" "arch_cmpxchg" "arch_cmpxchg64" "arch_cmpxchg128"; do
+for xchg in "xchg" "cmpxchg" "cmpxchg64" "cmpxchg128"; do
 	gen_xchg_fallbacks "${xchg}"
 done
 
@@ -225,8 +291,12 @@ for cmpxchg in "cmpxchg" "cmpxchg64" "cmpxchg128"; do
 	gen_try_cmpxchg_fallbacks "${cmpxchg}"
 done
 
-for cmpxchg in "cmpxchg_local" "cmpxchg64_local"; do
-	gen_try_cmpxchg_fallback "${cmpxchg}" ""
+for cmpxchg in "cmpxchg_local" "cmpxchg64_local" "cmpxchg128_local"; do
+	gen_cmpxchg_local_fallbacks "${cmpxchg}" ""
 done
 
+for cmpxchg in "sync_cmpxchg"; do
+	printf "#define raw_${cmpxchg} arch_${cmpxchg}\n\n"
+done
+
 grep '^[a-z]' "$1" | while read name meta args; do
scripts/atomic/gen-atomic-raw.sh (deleted)
@@ -1,80 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-ATOMICDIR=$(dirname $0)
-
-. ${ATOMICDIR}/atomic-tbl.sh
-
-#gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...)
-gen_proto_order_variant()
-{
-	local meta="$1"; shift
-	local pfx="$1"; shift
-	local name="$1"; shift
-	local sfx="$1"; shift
-	local order="$1"; shift
-	local atomic="$1"; shift
-	local int="$1"; shift
-
-	local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
-
-	local ret="$(gen_ret_type "${meta}" "${int}")"
-	local params="$(gen_params "${int}" "${atomic}" "$@")"
-	local args="$(gen_args "$@")"
-	local retstmt="$(gen_ret_stmt "${meta}")"
-
-	cat <<EOF
-static __always_inline ${ret}
-raw_${atomicname}(${params})
-{
-	${retstmt}arch_${atomicname}(${args});
-}
-
-EOF
-}
-
-gen_xchg()
-{
-	local xchg="$1"; shift
-	local order="$1"; shift
-
-	cat <<EOF
-#define raw_${xchg}${order}(...) \\
-	arch_${xchg}${order}(__VA_ARGS__)
-EOF
-}
-
-cat << EOF
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by $0
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-#ifndef _LINUX_ATOMIC_RAW_H
-#define _LINUX_ATOMIC_RAW_H
-
-EOF
-
-grep '^[a-z]' "$1" | while read name meta args; do
-	gen_proto "${meta}" "${name}" "atomic" "int" ${args}
-done
-
-grep '^[a-z]' "$1" | while read name meta args; do
-	gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
-done
-
-for xchg in "xchg" "cmpxchg" "cmpxchg64" "cmpxchg128" "try_cmpxchg" "try_cmpxchg64" "try_cmpxchg128"; do
-	for order in "" "_acquire" "_release" "_relaxed"; do
-		gen_xchg "${xchg}" "${order}"
-		printf "\n"
-	done
-done
-
-for xchg in "cmpxchg_local" "cmpxchg64_local" "cmpxchg128_local" "sync_cmpxchg" "try_cmpxchg_local" "try_cmpxchg64_local" "try_cmpxchg128_local"; do
-	gen_xchg "${xchg}" ""
-	printf "\n"
-done
-
-cat <<EOF
-#endif /* _LINUX_ATOMIC_RAW_H */
-EOF
scripts/atomic/gen-atomics.sh
@@ -11,7 +11,6 @@ cat <<EOF
 gen-atomic-instrumented.sh	linux/atomic/atomic-instrumented.h
 gen-atomic-long.sh		linux/atomic/atomic-long.h
 gen-atomic-fallback.sh		linux/atomic/atomic-arch-fallback.h
-gen-atomic-raw.sh		linux/atomic/atomic-raw.h
 EOF
 while read script header args; do
 	/bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} ${args} > ${LINUXDIR}/include/${header}