Mirror of https://github.com/microsoft/clang-1.git
Reimplement the __sync_* builtins to be variadic and to follow the same semantic rules that gcc and icc use. This implements the variadic and concrete versions as builtins and has Sema do the disambiguation. There are probably a bunch of details to finish up, but this seems like a large monotonic step forward :)

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@71212 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Parent: fb1e3310da
Commit: 5caa370ea6
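At a glance, the user-visible effect: the __sync_* builtins now accept any supported pointee width, and Sema rewrites each call to the matching size-suffixed builtin. A minimal sketch of the intended behavior (hypothetical names, not from the patch; assumes a target where short is 2 bytes, int is 4, and long long is 8):

/* sync_overload_sketch.c -- illustrates the rewrite described in the commit message. */
short s;
int i;
long long ll;

void sketch(void) {
  __sync_fetch_and_add(&s, 1);   /* Sema retargets the call to __sync_fetch_and_add_2 */
  __sync_fetch_and_add(&i, 1);   /* ...to __sync_fetch_and_add_4 */
  __sync_fetch_and_add(&ll, 1);  /* ...to __sync_fetch_and_add_8 */
}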
@@ -39,10 +39,11 @@
// . -> "...". This may only occur at the end of the function list.
//
// Types may be prefixed with the following modifiers:
// L   -> long (e.g. Li for 'long int')
// LL  -> long long
// S   -> signed
// U   -> unsigned
// L   -> long (e.g. Li for 'long int')
// LL  -> long long
// LLL -> __int128_t (e.g. LLLi)
// S   -> signed
// U   -> unsigned
//
// Types may be postfixed with the following modifiers:
// * -> pointer
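To make the encoding concrete, here are a few decoded signature strings of the sort used in the hunk below (read the return type first, then each parameter; 'b', which appears in the compare-and-swap entries, is the encoding for bool, a modifier this excerpt of the legend does not show):

  "ii*i"        ->  int f(int *, int)
  "LLiLLi*LLi." ->  long long f(long long *, long long, ...)
  "bi*ii."      ->  bool f(int *, int, int, ...)
  "v."          ->  void f(...)   (the deliberately bogus variadic prototype Sema rewrites)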
@@ -196,37 +197,139 @@ BUILTIN(__builtin_shufflevector, "v." , "nc")

BUILTIN(__builtin_alloca, "v*z" , "n")

// Atomic operators builtin.
// "Overloaded" Atomic operator builtins. These are overloaded to support data
// types of i8, i16, i32, i64, and i128. The front-end sees calls to the
// non-suffixed version of these (which has a bogus type) and transforms them to
// the right overloaded version in Sema (plus casts).

// FIXME: These should be overloaded for i8, i16, i32, and i64.
BUILTIN(__sync_fetch_and_add,"ii*i", "n")
BUILTIN(__sync_fetch_and_sub,"ii*i", "n")
BUILTIN(__sync_fetch_and_or,"ii*i", "n")
BUILTIN(__sync_fetch_and_and,"ii*i", "n")
BUILTIN(__sync_fetch_and_xor,"ii*i", "n")
BUILTIN(__sync_fetch_and_nand,"ii*i", "n")
// FIXME: These assume that char -> i8, short -> i16, int -> i32,
// long long -> i64.

// GCC does not support these, they are a Clang extension.
BUILTIN(__sync_fetch_and_min,"ii*i", "n")
BUILTIN(__sync_fetch_and_max,"ii*i", "n")
BUILTIN(__sync_fetch_and_umin,"UiUi*Ui", "n")
BUILTIN(__sync_fetch_and_umax,"UiUi*Ui", "n")
BUILTIN(__sync_fetch_and_add, "v.", "")
BUILTIN(__sync_fetch_and_add_1, "cc*c.", "n")
BUILTIN(__sync_fetch_and_add_2, "ss*s.", "n")
BUILTIN(__sync_fetch_and_add_4, "ii*i.", "n")
BUILTIN(__sync_fetch_and_add_8, "LLiLLi*LLi.", "n")
BUILTIN(__sync_fetch_and_add_16, "LLLiLLLi*LLLi.", "n")

BUILTIN(__sync_add_and_fetch,"ii*i", "n")
BUILTIN(__sync_sub_and_fetch,"ii*i", "n")
BUILTIN(__sync_or_and_fetch,"ii*i", "n")
BUILTIN(__sync_and_and_fetch,"ii*i", "n")
BUILTIN(__sync_xor_and_fetch,"ii*i", "n")
BUILTIN(__sync_nand_and_fetch,"ii*i", "n")
BUILTIN(__sync_fetch_and_sub, "v.", "")
BUILTIN(__sync_fetch_and_sub_1, "cc*c.", "n")
BUILTIN(__sync_fetch_and_sub_2, "ss*s.", "n")
BUILTIN(__sync_fetch_and_sub_4, "ii*i.", "n")
BUILTIN(__sync_fetch_and_sub_8, "LLiLLi*LLi.", "n")
BUILTIN(__sync_fetch_and_sub_16, "LLLiLLLi*LLLi.", "n")

BUILTIN(__sync_bool_compare_and_swap,"ii*ii", "n")
BUILTIN(__sync_val_compare_and_swap,"ii*ii", "n")
BUILTIN(__sync_lock_test_and_set,"ii*i", "n")
BUILTIN(__sync_lock_release,"ii*i", "n")
BUILTIN(__sync_fetch_and_or, "v.", "")
BUILTIN(__sync_fetch_and_or_1, "cc*c.", "n")
BUILTIN(__sync_fetch_and_or_2, "ss*s.", "n")
BUILTIN(__sync_fetch_and_or_4, "ii*i.", "n")
BUILTIN(__sync_fetch_and_or_8, "LLiLLi*LLi.", "n")
BUILTIN(__sync_fetch_and_or_16, "LLLiLLLi*LLLi.", "n")

BUILTIN(__sync_fetch_and_and, "v.", "")
BUILTIN(__sync_fetch_and_and_1, "cc*c.", "n")
BUILTIN(__sync_fetch_and_and_2, "ss*s.", "n")
BUILTIN(__sync_fetch_and_and_4, "ii*i.", "n")
BUILTIN(__sync_fetch_and_and_8, "LLiLLi*LLi.", "n")
BUILTIN(__sync_fetch_and_and_16, "LLLiLLLi*LLLi.", "n")

BUILTIN(__sync_fetch_and_xor, "v.", "")
BUILTIN(__sync_fetch_and_xor_1, "cc*c.", "n")
BUILTIN(__sync_fetch_and_xor_2, "ss*s.", "n")
BUILTIN(__sync_fetch_and_xor_4, "ii*i.", "n")
BUILTIN(__sync_fetch_and_xor_8, "LLiLLi*LLi.", "n")
BUILTIN(__sync_fetch_and_xor_16, "LLLiLLLi*LLLi.", "n")

BUILTIN(__sync_fetch_and_nand, "v.", "")
BUILTIN(__sync_fetch_and_nand_1, "cc*c.", "n")
BUILTIN(__sync_fetch_and_nand_2, "ss*s.", "n")
BUILTIN(__sync_fetch_and_nand_4, "ii*i.", "n")
BUILTIN(__sync_fetch_and_nand_8, "LLiLLi*LLi.", "n")
BUILTIN(__sync_fetch_and_nand_16, "LLLiLLLi*LLLi.", "n")

BUILTIN(__sync_add_and_fetch, "v.", "")
BUILTIN(__sync_add_and_fetch_1, "cc*c.", "n")
BUILTIN(__sync_add_and_fetch_2, "ss*s.", "n")
BUILTIN(__sync_add_and_fetch_4, "ii*i.", "n")
BUILTIN(__sync_add_and_fetch_8, "LLiLLi*LLi.", "n")
BUILTIN(__sync_add_and_fetch_16, "LLLiLLLi*LLLi.", "n")

BUILTIN(__sync_sub_and_fetch, "v.", "")
BUILTIN(__sync_sub_and_fetch_1, "cc*c.", "n")
BUILTIN(__sync_sub_and_fetch_2, "ss*s.", "n")
BUILTIN(__sync_sub_and_fetch_4, "ii*i.", "n")
BUILTIN(__sync_sub_and_fetch_8, "LLiLLi*LLi.", "n")
BUILTIN(__sync_sub_and_fetch_16, "LLLiLLLi*LLLi.", "n")

BUILTIN(__sync_or_and_fetch, "v.", "")
BUILTIN(__sync_or_and_fetch_1, "cc*c.", "n")
BUILTIN(__sync_or_and_fetch_2, "ss*s.", "n")
BUILTIN(__sync_or_and_fetch_4, "ii*i.", "n")
BUILTIN(__sync_or_and_fetch_8, "LLiLLi*LLi.", "n")
BUILTIN(__sync_or_and_fetch_16, "LLLiLLLi*LLLi.", "n")

BUILTIN(__sync_and_and_fetch, "v.", "")
BUILTIN(__sync_and_and_fetch_1, "cc*c.", "n")
BUILTIN(__sync_and_and_fetch_2, "ss*s.", "n")
BUILTIN(__sync_and_and_fetch_4, "ii*i.", "n")
BUILTIN(__sync_and_and_fetch_8, "LLiLLi*LLi.", "n")
BUILTIN(__sync_and_and_fetch_16, "LLLiLLLi*LLLi.", "n")

BUILTIN(__sync_xor_and_fetch, "v.", "")
BUILTIN(__sync_xor_and_fetch_1, "cc*c.", "n")
BUILTIN(__sync_xor_and_fetch_2, "ss*s.", "n")
BUILTIN(__sync_xor_and_fetch_4, "ii*i.", "n")
BUILTIN(__sync_xor_and_fetch_8, "LLiLLi*LLi.", "n")
BUILTIN(__sync_xor_and_fetch_16, "LLLiLLLi*LLLi.", "n")

BUILTIN(__sync_nand_and_fetch, "v.", "")
BUILTIN(__sync_nand_and_fetch_1, "cc*c.", "n")
BUILTIN(__sync_nand_and_fetch_2, "ss*s.", "n")
BUILTIN(__sync_nand_and_fetch_4, "ii*i.", "n")
BUILTIN(__sync_nand_and_fetch_8, "LLiLLi*LLi.", "n")
BUILTIN(__sync_nand_and_fetch_16, "LLLiLLLi*LLLi.", "n")

BUILTIN(__sync_bool_compare_and_swap, "v.", "")
BUILTIN(__sync_bool_compare_and_swap_1, "bc*cc.", "n")
BUILTIN(__sync_bool_compare_and_swap_2, "bs*ss.", "n")
BUILTIN(__sync_bool_compare_and_swap_4, "bi*ii.", "n")
BUILTIN(__sync_bool_compare_and_swap_8, "bLLi*LLi.", "n")
BUILTIN(__sync_bool_compare_and_swap_16, "bLLLi*LLLiLLLi.", "n")

BUILTIN(__sync_val_compare_and_swap, "v.", "")
BUILTIN(__sync_val_compare_and_swap_1, "cc*cc.", "n")
BUILTIN(__sync_val_compare_and_swap_2, "ss*ss.", "n")
BUILTIN(__sync_val_compare_and_swap_4, "ii*ii.", "n")
BUILTIN(__sync_val_compare_and_swap_8, "LLiLLi*LLi.", "n")
BUILTIN(__sync_val_compare_and_swap_16, "LLLiLLLi*LLLiLLLi.", "n")

BUILTIN(__sync_lock_test_and_set, "v.", "")
BUILTIN(__sync_lock_test_and_set_1, "cc*c.", "n")
BUILTIN(__sync_lock_test_and_set_2, "ss*s.", "n")
BUILTIN(__sync_lock_test_and_set_4, "ii*i.", "n")
BUILTIN(__sync_lock_test_and_set_8, "LLiLLi*LLi.", "n")
BUILTIN(__sync_lock_test_and_set_16, "LLLiLLLi*LLLi.", "n")

BUILTIN(__sync_lock_release, "v.", "")
BUILTIN(__sync_lock_release_1, "vc*.", "n")
BUILTIN(__sync_lock_release_2, "vs*.", "n")
BUILTIN(__sync_lock_release_4, "vi*.", "n")
BUILTIN(__sync_lock_release_8, "vLLi*.", "n")
BUILTIN(__sync_lock_release_16, "vLLLi*.", "n")

// Non-overloaded atomic builtins.
BUILTIN(__sync_synchronize, "v.", "n")

// LLVM instruction builtin
// LLVM instruction builtin [Clang extension].
BUILTIN(__builtin_llvm_memory_barrier,"vbbbbb", "n")
// GCC does not support these, they are a Clang extension.
BUILTIN(__sync_fetch_and_min, "ii*i", "n")
BUILTIN(__sync_fetch_and_max, "ii*i", "n")
BUILTIN(__sync_fetch_and_umin, "UiUi*Ui", "n")
BUILTIN(__sync_fetch_and_umax, "UiUi*Ui", "n")

// Builtin library functions
LIBBUILTIN(alloca, "v*z", "f", "stdlib.h")
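The min/max builtins retained at the end of the list are a Clang extension and stay non-overloaded (int and unsigned only, as their signatures show). A hedged usage sketch:

int val = 7;
unsigned uval = 7;
void minmax_sketch(void) {
  __sync_fetch_and_min(&val, 3);     /* atomically: val = min(val, 3); returns the old val */
  __sync_fetch_and_max(&val, 9);     /* atomically: val = max(val, 9) */
  __sync_fetch_and_umin(&uval, 3u);  /* same, with unsigned comparison */
  __sync_fetch_and_umax(&uval, 9u);
}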
@@ -1310,6 +1310,16 @@ def err_typecheck_call_too_many_args : Error<
  "too many arguments to %select{function|block|method}0 call">;
def warn_call_wrong_number_of_arguments : Warning<
  "too %select{few|many}0 arguments in call to %1">;
def err_atomic_builtin_must_be_pointer : Error<
  "first argument to atomic builtin must be a pointer (%0 invalid)">;
def err_atomic_builtin_must_be_pointer_intptr : Error<
  "first argument to atomic builtin must be a pointer to integer or pointer"
  " (%0 invalid)">;
def err_atomic_builtin_pointer_size : Error<
  "first argument to atomic builtin must be a pointer to 1,2,4,8 or 16 byte "
  "type (%0 invalid)">;

def err_deleted_function_use : Error<"attempt to use a deleted function">;

def err_cannot_pass_objc_interface_to_vararg : Error<
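Hedged examples of code that should trigger the first two new diagnostics (hypothetical snippets, not from the patch's test suite); the third diagnostic fires only when the pointee's size is not 1, 2, 4, 8, or 16 bytes:

float f;
void diag_sketch(void) {
  __sync_fetch_and_add(42, 1);  /* err_atomic_builtin_must_be_pointer: first arg is not a pointer */
  __sync_fetch_and_add(&f, 1);  /* err_atomic_builtin_must_be_pointer_intptr: float pointee */
}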
@@ -315,9 +315,52 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
    return RValue::get(EmitScalarExpr(E->getArg(0)));
  }
  case Builtin::BI__sync_fetch_and_add:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_add, E);
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
    assert(0 && "Shouldn't make it through sema");
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_and, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E);

  // Clang extensions: not overloaded yet.
  case Builtin::BI__sync_fetch_and_min:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_min, E);
  case Builtin::BI__sync_fetch_and_max:
@@ -326,30 +369,49 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umin, E);
  case Builtin::BI__sync_fetch_and_umax:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umax, E);
  case Builtin::BI__sync_fetch_and_and:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_and, E);
  case Builtin::BI__sync_fetch_and_or:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_or, E);
  case Builtin::BI__sync_fetch_and_xor:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E);

  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_and, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_xor, E,
                                llvm::Instruction::Xor);

  case Builtin::BI__sync_val_compare_and_swap: {
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
  {
    const llvm::Type *ResType[2];
    ResType[0] = ConvertType(E->getType());
    ResType[1] = ConvertType(E->getArg(0)->getType());
@@ -360,10 +422,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                                        EmitScalarExpr(E->getArg(2))));
  }

  case Builtin::BI__sync_bool_compare_and_swap: {
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
  {
    const llvm::Type *ResType[2];
    ResType[0] = ConvertType(E->getType());
    ResType[1] = ConvertType(E->getArg(0)->getType());
    ResType[0] = ConvertType(E->getArg(1)->getType());
    ResType[1] = llvm::PointerType::getUnqual(ResType[0]);
    Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
    Value *OldVal = EmitScalarExpr(E->getArg(1));
    Value *PrevVal = Builder.CreateCall3(AtomF,
@@ -375,9 +442,18 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
    return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
  }

  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);

  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16:
    assert(0 && "FIXME: Implement");

  // Library functions with special handling.
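Taken together, these cases lower each builtin to an LLVM atomic intrinsic (the same @llvm.atomic.* symbols the updated test file greps for below); the *_and_fetch forms re-apply the operation to the returned old value via EmitBinaryAtomicPost, and the two compare-and-swap flavors differ only in whether the old value or a zero-extended success flag is returned. A hedged C restatement of the resulting semantics (hypothetical names, not from the patch):

int v = 10, x = 0;
void semantics_sketch(void) {
  int old  = __sync_fetch_and_add(&v, 5);  /* returns 10; v == 15; lowers to @llvm.atomic.load.add.i32 */
  int new_ = __sync_add_and_fetch(&v, 5);  /* returns 20, i.e. the fetched value plus the operand */
  int prev = __sync_val_compare_and_swap(&x, 0, 1);  /* returns the prior value of x */
  if (__sync_bool_compare_and_swap(&x, 1, 2)) {      /* returns whether the swap happened */
    /* swap succeeded: x is now 2 */
  }
  int was = __sync_lock_test_and_set(&x, 7);         /* atomic swap: @llvm.atomic.swap.i32 */
}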
@@ -2590,6 +2590,7 @@ private:
  bool SemaBuiltinPrefetch(CallExpr *TheCall);
  bool SemaBuiltinObjectSize(CallExpr *TheCall);
  bool SemaBuiltinLongjmp(CallExpr *TheCall);
  bool SemaBuiltinAtomicOverloaded(CallExpr *TheCall);
  bool SemaCheckStringLiteral(const Expr *E, const CallExpr *TheCall,
                              bool HasVAListArg, unsigned format_idx,
                              unsigned firstDataArg);
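The SemaBuiltinAtomicOverloaded hook declared here (implemented in SemaChecking.cpp below) performs the rewrite. Tracing one lookup through the tables that follow: for __sync_val_compare_and_swap on a short *, the builtin switch yields BuiltinIndex 10 and NumFixed 2; a 2-byte pointee gives SizeIndex 1; and BuiltinIndices[10][1] names __sync_val_compare_and_swap_2, which becomes the call's new callee and result type. A hedged source-level view:

short v;
void trace_sketch(void) {
  /* Type-checked as if declared: short __sync_val_compare_and_swap_2(short *, short, short, ...) */
  short prev = __sync_val_compare_and_swap(&v, (short)0, (short)1);
}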
@@ -139,6 +139,23 @@ Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall) {
    if (SemaBuiltinLongjmp(TheCall))
      return ExprError();
    return move(TheCallResult);
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
    if (SemaBuiltinAtomicOverloaded(TheCall))
      return ExprError();
    return move(TheCallResult);
  }

  // FIXME: This mechanism should be abstracted to be less fragile and
@@ -162,6 +179,173 @@ Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall) {
  return move(TheCallResult);
}

/// SemaBuiltinAtomicOverloaded - We have a call to a function like
/// __sync_fetch_and_add, which is an overloaded function based on the pointer
/// type of its first argument. The main ActOnCallExpr routines have already
/// promoted the types of arguments because all of these calls are prototyped as
/// void(...).
///
/// This function goes through and does final semantic checking for these
/// builtins.
bool Sema::SemaBuiltinAtomicOverloaded(CallExpr *TheCall) {
  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());

  // Ensure that we have at least one argument to do type inference from.
  if (TheCall->getNumArgs() < 1)
    return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
      << 0 << TheCall->getCallee()->getSourceRange();

  // Inspect the first argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  Expr *FirstArg = TheCall->getArg(0);
  if (!FirstArg->getType()->isPointerType())
    return Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
      << FirstArg->getType() << FirstArg->getSourceRange();

  QualType ValType = FirstArg->getType()->getAsPointerType()->getPointeeType();
  if (!ValType->isIntegerType() && !ValType->isPointerType() &&
      !ValType->isBlockPointerType())
    return Diag(DRE->getLocStart(),
                diag::err_atomic_builtin_must_be_pointer_intptr)
      << FirstArg->getType() << FirstArg->getSourceRange();

  // We need to figure out which concrete builtin this maps onto. For example,
  // __sync_fetch_and_add with a 2 byte object turns into
  // __sync_fetch_and_add_2.
#define BUILTIN_ROW(x) \
  { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
    Builtin::BI##x##_8, Builtin::BI##x##_16 }

  static const unsigned BuiltinIndices[][5] = {
    BUILTIN_ROW(__sync_fetch_and_add),
    BUILTIN_ROW(__sync_fetch_and_sub),
    BUILTIN_ROW(__sync_fetch_and_or),
    BUILTIN_ROW(__sync_fetch_and_and),
    BUILTIN_ROW(__sync_fetch_and_xor),

    BUILTIN_ROW(__sync_add_and_fetch),
    BUILTIN_ROW(__sync_sub_and_fetch),
    BUILTIN_ROW(__sync_and_and_fetch),
    BUILTIN_ROW(__sync_or_and_fetch),
    BUILTIN_ROW(__sync_xor_and_fetch),

    BUILTIN_ROW(__sync_val_compare_and_swap),
    BUILTIN_ROW(__sync_bool_compare_and_swap),
    BUILTIN_ROW(__sync_lock_test_and_set),
    BUILTIN_ROW(__sync_lock_release)
  };
#undef BUILTIN_ROW

  // Determine the index of the size.
  unsigned SizeIndex;
  switch (Context.getTypeSize(ValType)/8) {
  case 1: SizeIndex = 0; break;
  case 2: SizeIndex = 1; break;
  case 4: SizeIndex = 2; break;
  case 8: SizeIndex = 3; break;
  case 16: SizeIndex = 4; break;
  default:
    return Diag(DRE->getLocStart(), diag::err_atomic_builtin_pointer_size)
      << FirstArg->getType() << FirstArg->getSourceRange();
  }

  // Each of these builtins has one pointer argument, followed by some number of
  // values (0, 1 or 2) followed by a potentially empty varargs list of stuff
  // that we ignore. Find out which row of BuiltinIndices to read from as well
  // as the number of fixed args.
  unsigned BuiltinID = FDecl->getBuiltinID(Context);
  unsigned BuiltinIndex, NumFixed = 1;
  switch (BuiltinID) {
  default: assert(0 && "Unknown overloaded atomic builtin!");
  case Builtin::BI__sync_fetch_and_add: BuiltinIndex = 0; break;
  case Builtin::BI__sync_fetch_and_sub: BuiltinIndex = 1; break;
  case Builtin::BI__sync_fetch_and_or:  BuiltinIndex = 2; break;
  case Builtin::BI__sync_fetch_and_and: BuiltinIndex = 3; break;
  case Builtin::BI__sync_fetch_and_xor: BuiltinIndex = 4; break;

  case Builtin::BI__sync_add_and_fetch: BuiltinIndex = 5; break;
  case Builtin::BI__sync_sub_and_fetch: BuiltinIndex = 6; break;
  case Builtin::BI__sync_and_and_fetch: BuiltinIndex = 7; break;
  case Builtin::BI__sync_or_and_fetch:  BuiltinIndex = 8; break;
  case Builtin::BI__sync_xor_and_fetch: BuiltinIndex = 9; break;

  case Builtin::BI__sync_val_compare_and_swap:
    BuiltinIndex = 10;
    NumFixed = 2;
    break;
  case Builtin::BI__sync_bool_compare_and_swap:
    BuiltinIndex = 11;
    NumFixed = 2;
    break;
  case Builtin::BI__sync_lock_test_and_set: BuiltinIndex = 12; break;
  case Builtin::BI__sync_lock_release:
    BuiltinIndex = 13;
    NumFixed = 0;
    break;
  }

  // Now that we know how many fixed arguments we expect, first check that we
  // have at least that many.
  if (TheCall->getNumArgs() < 1+NumFixed)
    return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
      << 0 << TheCall->getCallee()->getSourceRange();

  // Next, walk the valid ones promoting to the right type.
  for (unsigned i = 0; i != NumFixed; ++i) {
    Expr *Arg = TheCall->getArg(i+1);

    // If the argument is an implicit cast, then there was a promotion due to
    // "...", just remove it now.
    if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
      Arg = ICE->getSubExpr();
      ICE->setSubExpr(0);
      ICE->Destroy(Context);
      TheCall->setArg(i+1, Arg);
    }

    // GCC does an implicit conversion to the pointer or integer ValType. This
    // can fail in some cases (1i -> int**), check for this error case now.
    if (CheckCastTypes(Arg->getSourceRange(), ValType, Arg))
      return true;

    // Okay, we have something that *can* be converted to the right type. Check
    // to see if there is a potentially weird extension going on here. This can
    // happen when you do an atomic operation on something like a char* and
    // pass in 42. The 42 gets converted to char. This is even more strange
    // for things like 45.123 -> char, etc.
    // FIXME: Do this check.
    ImpCastExprToType(Arg, ValType, false);
    TheCall->setArg(i+1, Arg);
  }

  // Okay, if we get here, everything is good. Get the decl for the concrete
  // builtin.
  unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
  const char *NewBuiltinName = Context.BuiltinInfo.GetName(NewBuiltinID);
  IdentifierInfo *NewBuiltinII = PP.getIdentifierInfo(NewBuiltinName);
  FunctionDecl *NewBuiltinDecl =
    cast<FunctionDecl>(LazilyCreateBuiltin(NewBuiltinII, NewBuiltinID,
                                           TUScope, false, DRE->getLocStart()));
  // Switch the DeclRefExpr to refer to the new decl.
  DRE->setDecl(NewBuiltinDecl);
  DRE->setType(NewBuiltinDecl->getType());

  // Set the callee in the CallExpr.
  // FIXME: This leaks the original parens and implicit casts.
  Expr *PromotedCall = DRE;
  UsualUnaryConversions(PromotedCall);
  TheCall->setCallee(PromotedCall);

  // Change the result type of the call to match the result type of the decl.
  TheCall->setType(NewBuiltinDecl->getResultType());
  return false;
}

/// CheckObjCString - Checks that the argument to the builtin
/// CFString constructor is correct
/// FIXME: GCC currently emits the following warning:
@@ -1,15 +1,15 @@
// RUN: clang-cc %s -emit-llvm -o - > %t1 &&
// RUN: grep @llvm.atomic.load.add.i32 %t1 | count 3 &&
// RUN: grep @llvm.atomic.load.sub.i32 %t1 | count 3 &&
// RUN: grep @llvm.atomic.load.sub.i8 %t1 | count 2 &&
// RUN: grep @llvm.atomic.load.min.i32 %t1 &&
// RUN: grep @llvm.atomic.load.max.i32 %t1 &&
// RUN: grep @llvm.atomic.load.umin.i32 %t1 &&
// RUN: grep @llvm.atomic.load.umax.i32 %t1 &&
// RUN: grep @llvm.atomic.swap.i32 %t1 &&
// RUN: grep @llvm.atomic.cmp.swap.i32 %t1 | count 3 &&
// RUN: grep @llvm.atomic.load.and.i32 %t1 | count 3 &&
// RUN: grep @llvm.atomic.load.or.i32 %t1 | count 3 &&
// RUN: grep @llvm.atomic.load.xor.i32 %t1 | count 3
// RUN: grep @llvm.atomic.load.and.i32 %t1 | count 2 &&
// RUN: grep @llvm.atomic.load.or.i8 %t1 &&
// RUN: grep @llvm.atomic.load.xor.i8 %t1

int atomic(void)
@@ -17,11 +17,12 @@ int atomic(void)
  // nonsensical test for sync functions
  int old;
  int val = 1;
  char valc = 1;
  unsigned int uval = 1;
  int cmp = 0;

  old = __sync_fetch_and_add(&val, 1);
  old = __sync_fetch_and_sub(&val, 2);
  old = __sync_fetch_and_sub(&valc, 2);
  old = __sync_fetch_and_min(&val, 3);
  old = __sync_fetch_and_max(&val, 4);
  old = __sync_fetch_and_umin(&uval, 5u);
@@ -35,9 +36,9 @@ int atomic(void)

  old = __sync_add_and_fetch(&val, 1);
  old = __sync_sub_and_fetch(&val, 2);
  old = __sync_and_and_fetch(&val, 3);
  old = __sync_or_and_fetch(&val, 4);
  old = __sync_xor_and_fetch(&val, 5);
  old = __sync_and_and_fetch(&valc, 3);
  old = __sync_or_and_fetch(&valc, 4);
  old = __sync_xor_and_fetch(&valc, 5);

  return old;
}
@@ -24,7 +24,7 @@ int test6(float a, long double b) {

#define CFSTR __builtin___CFStringMakeConstantString
void cfstring() {
void test7() {
  CFSTR("\242");
  CFSTR("\0"); // expected-warning {{ CFString literal contains NUL character }}
  CFSTR(242); // expected-error {{ CFString literal is not a string constant }} expected-warning {{incompatible integer to pointer conversion}}
@@ -35,8 +35,18 @@ void cfstring() {
typedef __attribute__(( ext_vector_type(16) )) unsigned char uchar16;

// rdar://5905347
unsigned char foo( short v ) {
unsigned char test8( short v ) {
  uchar16 c;
  return __builtin_ia32_vec_ext_v4si( c ); // expected-error {{too few arguments to function}}
}

// atomics.

unsigned char test9(short v) {
  unsigned i, old;

  old = __sync_fetch_and_add(); // expected-error {{too few arguments to function call}}
  old = __sync_fetch_and_add(&old); // expected-error {{too few arguments to function call}}
  old = __sync_fetch_and_add((int**)0, 42i); // expected-error {{operand of type '_Complex int' cannot be cast to a pointer type}} expected-warning {{imaginary constants are an extension}}
}