Mirror of https://github.com/microsoft/clang-1.git
Fix a bug in the emission of __real/__imag l-values on scalar operands.
Fix a bug in the emission of complex compound assignment l-values.
Introduce a method to emit an expression whose value isn't relevant.
Make that method evaluate its operand as an l-value if it is one.
Fixes our volatile compliance in C++.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@120931 91177308-0d34-0410-b5e6-96231b3b80d8
Parent: 560bf12e97
Commit: 2a41637a99
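For illustration, here is a minimal sketch (not part of the patch) of the kinds of C++ statements the change affects; the test updates at the end of this diff exercise exactly these forms:

    volatile int i, j, k;
    volatile _Complex int ci;

    void f() {
      i;            // discarded l-value: should not emit a load in C++
      (i, j);       // ignored comma: neither operand needs to be read
      __real i;     // __real/__imag on a scalar is just the scalar's l-value
      ci += ci;     // complex compound assignment emitted as an l-value
    }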
@@ -78,6 +78,16 @@ llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
   return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
 }
 
+/// EmitIgnoredExpr - Emit code to compute the specified expression,
+/// ignoring the result.
+void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
+  if (E->isRValue())
+    return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);
+
+  // Just emit it as an l-value and drop the result.
+  EmitLValue(E);
+}
+
 /// EmitAnyExpr - Emit code to compute the specified expression which
 /// can have any type. The result is returned as an RValue struct.
 /// If this is an aggregate expression, AggSlot indicates where the
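The new helper splits on the value category of the ignored expression. A rough sketch of the two paths, with illustrative callers that are not part of the patch:

    volatile int v;
    int g();

    void h() {
      g();   // r-value: EmitIgnoredExpr falls back to EmitAnyExpr with an ignored slot
      v;     // l-value: EmitIgnoredExpr only calls EmitLValue, so no volatile load is emitted
    }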
@@ -500,7 +510,9 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
   case Expr::BinaryOperatorClass:
     return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
   case Expr::CompoundAssignOperatorClass:
-    return EmitCompoundAssignOperatorLValue(cast<CompoundAssignOperator>(E));
+    if (!E->getType()->isAnyComplexType())
+      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
+    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
   case Expr::CallExprClass:
   case Expr::CXXMemberCallExprClass:
   case Expr::CXXOperatorCallExprClass:
@@ -1227,9 +1239,22 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
   case UO_Real:
   case UO_Imag: {
     LValue LV = EmitLValue(E->getSubExpr());
+    assert(LV.isSimple() && "real/imag on non-ordinary l-value");
+    llvm::Value *Addr = LV.getAddress();
+
+    // real and imag are valid on scalars. This is a faster way of
+    // testing that.
+    if (!cast<llvm::PointerType>(Addr->getType())
+           ->getElementType()->isStructTy()) {
+      assert(E->getSubExpr()->getType()->isArithmeticType());
+      return LV;
+    }
+
+    assert(E->getSubExpr()->getType()->isAnyComplexType());
+
     unsigned Idx = E->getOpcode() == UO_Imag;
     return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
                                                   Idx, "idx"),
                           ExprTy);
   }
   case UO_PreInc:
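Per the commit message, __real and __imag are also valid on scalar operands; in that case the l-value of the operand is returned directly instead of a struct GEP into a complex pair. A small sketch of the two cases, mirroring the test updates below (types assumed for illustration):

    volatile int i;            // scalar: __real i is just the l-value of i
    volatile _Complex int ci;  // complex: __imag ci GEPs to element 1 of the {int, int} pair

    void f() {
      __real i;   // no load should be emitted for the discarded result
      __imag ci;  // address of the imaginary component; still no load when discarded
    }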
@@ -1914,7 +1939,7 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
 LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
   // Comma expressions just emit their LHS then their RHS as an l-value.
   if (E->getOpcode() == BO_Comma) {
-    EmitAnyExpr(E->getLHS());
+    EmitIgnoredExpr(E->getLHS());
     EnsureInsertPoint();
     return EmitLValue(E->getRHS());
   }
@@ -1923,14 +1948,9 @@ LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
       E->getOpcode() == BO_PtrMemI)
     return EmitPointerToDataMemberBinaryExpr(E);
 
-  assert(E->isAssignmentOp() && "unexpected binary l-value");
+  assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
 
   if (!hasAggregateLLVMType(E->getType())) {
-    if (E->isCompoundAssignmentOp())
-      return EmitCompoundAssignOperatorLValue(cast<CompoundAssignOperator>(E));
-
-    assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
-
     // Emit the LHS as an l-value.
     LValue LV = EmitLValue(E->getLHS());
     // Store the value through the l-value.
@@ -1941,9 +1961,6 @@ LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
   if (E->getType()->isAnyComplexType())
     return EmitComplexAssignmentLValue(E);
 
-  // The compound assignment operators are not used for aggregates.
-  assert(E->getOpcode() == BO_Assign && "aggregate compound assignment?");
-
   return EmitAggExprToLValue(E);
 }
 
@@ -362,7 +362,7 @@ void AggExprEmitter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
 }
 
 void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
-  CGF.EmitAnyExpr(E->getLHS(), AggValueSlot::ignored(), true);
+  CGF.EmitIgnoredExpr(E->getLHS());
   Visit(E->getRHS());
 }
 
@@ -547,7 +547,7 @@ AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV, QualType T) {
     CGF.EmitAggExpr(E, AggValueSlot::forAddr(LV.getAddress(), false, true,
                                              false, Dest.isZeroed()));
   } else {
-    CGF.EmitStoreThroughLValue(CGF.EmitAnyExpr(E), LV, T);
+    CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV, T);
   }
 }
 
@@ -634,7 +634,7 @@ ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
 }
 
 ComplexPairTy ComplexExprEmitter::VisitBinComma(const BinaryOperator *E) {
-  CGF.EmitStmt(E->getLHS());
+  CGF.EmitIgnoredExpr(E->getLHS());
   CGF.EnsureInsertPoint();
   return Visit(E->getRHS());
 }
@@ -764,14 +764,15 @@ ComplexPairTy CodeGenFunction::LoadComplexFromAddr(llvm::Value *SrcAddr,
 }
 
 LValue CodeGenFunction::EmitComplexAssignmentLValue(const BinaryOperator *E) {
-  ComplexPairTy(ComplexExprEmitter::*Op)(const ComplexExprEmitter::BinOpInfo &);
+  assert(E->getOpcode() == BO_Assign);
   ComplexPairTy Val; // ignored
+  return ComplexExprEmitter(*this).EmitBinAssignLValue(E, Val);
+}
 
-  switch (E->getOpcode()) {
-  case BO_Assign:
-    return ComplexExprEmitter(*this).EmitBinAssignLValue(E, Val);
+LValue CodeGenFunction::
+EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E) {
+  ComplexPairTy(ComplexExprEmitter::*Op)(const ComplexExprEmitter::BinOpInfo &);
 
+  switch (E->getOpcode()) {
   case BO_MulAssign: Op = &ComplexExprEmitter::EmitBinMul; break;
   case BO_DivAssign: Op = &ComplexExprEmitter::EmitBinDiv; break;
   case BO_SubAssign: Op = &ComplexExprEmitter::EmitBinSub; break;
@@ -782,6 +783,6 @@ LValue CodeGenFunction::EmitComplexAssignmentLValue(const BinaryOperator *E) {
     Op = 0;
   }
 
-  return ComplexExprEmitter(*this).EmitCompoundAssignLValue(
-                                   cast<CompoundAssignOperator>(E), Op, Val);
+  ComplexPairTy Val; // ignored
+  return ComplexExprEmitter(*this).EmitCompoundAssignLValue(E, Op, Val);
 }
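With this split, simple complex assignment and complex compound assignment get separate entry points, and the compound form is now emitted as an l-value. A sketch of the C++ behaviour the tests below rely on:

    volatile _Complex int ci;

    void f() {
      ci += ci;          // routed through EmitComplexCompoundAssignmentLValue; no re-load of the result
      (ci += ci) + ci;   // in C++ the compound assignment is an l-value, so the '+'
                         // re-reads it (the extra volatile load the test notes mention)
    }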
@@ -1139,10 +1139,7 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
     return Builder.CreatePtrToInt(Src, ConvertType(DestTy));
   }
   case CK_ToVoid: {
-    if (!E->isRValue())
-      CGF.EmitLValue(E);
-    else
-      CGF.EmitAnyExpr(E, AggValueSlot::ignored(), true);
+    CGF.EmitIgnoredExpr(E);
     return 0;
   }
   case CK_VectorSplat: {
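Casts to void now funnel through the same helper instead of open-coding the r-value/l-value distinction. For example (assumed declarations; the second statement comes from the test updates below):

    volatile int i;

    void f() {
      (void)i;            // discarded l-value: no load is required
      (void)(i, (i = i)); // only the assignment's load and store remain; the discarded LHS 'i' is not read
    }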
@@ -1464,7 +1461,7 @@ ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
     } else {
       // C99 6.5.3.4p2: If the argument is an expression of type
       // VLA, it is evaluated.
-      CGF.EmitAnyExpr(E->getArgumentExpr());
+      CGF.EmitIgnoredExpr(E->getArgumentExpr());
     }
 
     return CGF.GetVLASize(VAT);
@@ -2308,7 +2305,7 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
 }
 
 Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
-  CGF.EmitStmt(E->getLHS());
+  CGF.EmitIgnoredExpr(E->getLHS());
   CGF.EnsureInsertPoint();
   return Visit(E->getRHS());
 }
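All three emitters' comma visitors now ignore the left operand the same way. The effect on volatile operands, as checked by the tests below:

    volatile int i, j, k;

    void f() {
      (i, j);     // neither operand's value is needed, so no loads are emitted now
      i = i, k;   // the assignment still loads and stores i, but k is no longer read
    }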
@@ -2578,7 +2575,7 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
 }
 
 
-LValue CodeGenFunction::EmitCompoundAssignOperatorLValue(
+LValue CodeGenFunction::EmitCompoundAssignmentLValue(
                                               const CompoundAssignOperator *E) {
   ScalarExprEmitter Scalar(*this);
   Value *Result = 0;
@@ -386,8 +386,7 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
                                 FunctionType::ExtInfo()),
             GetCopyStructFn, ReturnValueSlot(), Args);
   } else if (PID->getSetterCXXAssignment()) {
-    EmitAnyExpr(PID->getSetterCXXAssignment(), AggValueSlot::ignored(), true);
-
+    EmitIgnoredExpr(PID->getSetterCXXAssignment());
   } else {
     // FIXME: Find a clean way to avoid AST node creation.
     SourceLocation Loc = PD->getLocation();
@@ -69,13 +69,27 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
   EmitStopPoint(S);
 
   switch (S->getStmtClass()) {
-  default:
-    // Must be an expression in a stmt context. Emit the value (to get
-    // side-effects) and ignore the result.
-    if (!isa<Expr>(S))
-      ErrorUnsupported(S, "statement");
-
-    EmitAnyExpr(cast<Expr>(S), AggValueSlot::ignored(), true);
+  case Stmt::NoStmtClass:
+  case Stmt::CXXCatchStmtClass:
+  case Stmt::SwitchCaseClass:
+    llvm_unreachable("invalid statement class to emit generically");
+  case Stmt::NullStmtClass:
+  case Stmt::CompoundStmtClass:
+  case Stmt::DeclStmtClass:
+  case Stmt::LabelStmtClass:
+  case Stmt::GotoStmtClass:
+  case Stmt::BreakStmtClass:
+  case Stmt::ContinueStmtClass:
+  case Stmt::DefaultStmtClass:
+  case Stmt::CaseStmtClass:
+    llvm_unreachable("should have emitted these statements as simple");
+
+#define STMT(Type, Base)
+#define ABSTRACT_STMT(Op)
+#define EXPR(Type, Base) \
+  case Stmt::Type##Class:
+#include "clang/AST/StmtNodes.inc"
+    EmitIgnoredExpr(cast<Expr>(S));
 
     // Expression emitters don't handle unreachable blocks yet, so look for one
     // explicitly here. This handles the common case of a call to a noreturn
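The old default: case is replaced by an explicit enumeration: STMT expands to nothing, EXPR expands to a case label, and including StmtNodes.inc therefore produces one label per concrete expression class, all falling through to EmitIgnoredExpr. Roughly what the preprocessor generates (abridged, for illustration only):

    case Stmt::BinaryOperatorClass:
    case Stmt::CallExprClass:
    case Stmt::DeclRefExprClass:
      // ...one case label for every expression class...
      EmitIgnoredExpr(cast<Expr>(S));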
@@ -87,6 +101,7 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
       }
     }
+    break;
 
   case Stmt::IndirectGotoStmtClass:
     EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
 
@@ -1048,6 +1048,9 @@ public:
   /// expression and compare the result against zero, returning an Int1Ty value.
   llvm::Value *EvaluateExprAsBool(const Expr *E);
 
+  /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
+  void EmitIgnoredExpr(const Expr *E);
+
   /// EmitAnyExpr - Emit code to compute the specified expression which can have
   /// any type. The result is returned as an RValue struct. If this is an
   /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
@@ -1404,10 +1407,11 @@ public:
 
   /// Emit an l-value for an assignment (simple or compound) of complex type.
   LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
+  LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
 
   // Note: only availabe for agg return types
   LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
-  LValue EmitCompoundAssignOperatorLValue(const CompoundAssignOperator *E);
+  LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
   // Note: only available for agg return types
   LValue EmitCallExprLValue(const CallExpr *E);
   // Note: only available for agg return types
@@ -22,9 +22,8 @@ void test() {
 
   asm("nop"); // CHECK: call void asm
 
-  // FIXME: should not load
+  // should not load
   i;
   // CHECK-NEXT: volatile load [[INT]]* @i
-
   (float)(ci);
   // CHECK-NEXT: volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
@@ -47,7 +46,6 @@ void test() {
   // CHECK-NEXT: [[T:%.*]] = volatile load [[INT]]* @j
   // CHECK-NEXT: volatile store [[INT]] [[T]], [[INT]]* @i
 
-  // FIXME: extra load at end!
   ci+=ci;
   // CHECK-NEXT: [[R1:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
   // CHECK-NEXT: [[I1:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
@@ -58,8 +56,6 @@ void test() {
   // CHECK-NEXT: [[I:%.*]] = add [[INT]] [[I2]], [[I1]]
   // CHECK-NEXT: volatile store [[INT]] [[R]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
   // CHECK-NEXT: volatile store [[INT]] [[I]], [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
-  // CHECK-NEXT: [[R1:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 0)
-  // CHECK-NEXT: [[I1:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
 
   // Note that C++ requires an extra volatile load over C from the LHS of the '+'.
   (ci += ci) + ci;
@@ -112,9 +108,7 @@ void test() {
   // CHECK-NEXT: add [[INT]]
   // CHECK-NEXT: add [[INT]]
 
-  // FIXME: should not load
   __real i;
-  // CHECK-NEXT: volatile load
 
   +ci;
   // CHECK-NEXT: volatile load
@@ -149,31 +143,28 @@ void test() {
   // CHECK-NEXT: volatile load
   // CHECK-NEXT: volatile store
 
-  // FIXME: shouldn't get these extra loads here, or the phi
+  // FIXME: the phi-equivalent is unnecessary
   k ? (i=i) : (j=j);
   // CHECK-NEXT: volatile load
   // CHECK-NEXT: icmp
   // CHECK-NEXT: br i1
   // CHECK: volatile load
   // CHECK-NEXT: volatile store
-  // CHECK-NEXT: volatile load
+  // CHECK-NEXT: store [[INT]]* @i
   // CHECK-NEXT: br label
   // CHECK: volatile load
   // CHECK-NEXT: volatile store
-  // CHECK-NEXT: volatile load
+  // CHECK-NEXT: store [[INT]]* @j
   // CHECK-NEXT: br label
-  // CHECK: phi
+  // CHECK: load [[INT]]**
 
   (void)(i,(i=i));
   // CHECK-NEXT: volatile load
-  // CHECK-NEXT: volatile load
   // CHECK-NEXT: volatile store
 
-  // FIXME: should not load k
   i=i,k;
   // CHECK-NEXT: volatile load [[INT]]* @i
   // CHECK-NEXT: volatile store {{.*}}, [[INT]]* @i
-  // CHECK-NEXT: volatile load [[INT]]* @k
 
   (i=j,k=j);
   // CHECK-NEXT: volatile load [[INT]]* @j
@@ -181,16 +172,11 @@ void test() {
   // CHECK-NEXT: volatile load [[INT]]* @j
   // CHECK-NEXT: volatile store {{.*}}, [[INT]]* @k
 
-  // FIXME: should not load 'k'
   (i=j,k);
   // CHECK-NEXT: volatile load [[INT]]* @j
   // CHECK-NEXT: volatile store {{.*}}, [[INT]]* @i
-  // CHECK-NEXT: volatile load [[INT]]* @k
 
-  // FIXME: should not load either
   (i,j);
-  // CHECK-NEXT: volatile load [[INT]]* @i
-  // CHECK-NEXT: volatile load [[INT]]* @j
 
   // Extra load in C++.
   i=c=k;
@@ -207,10 +193,7 @@ void test() {
   // CHECK-NEXT: add nsw [[INT]]
   // CHECK-NEXT: volatile store
 
-  // FIXME: should not load!
   ci;
-  // CHECK-NEXT: volatile load {{.*}} @ci, i32 0, i32 0
-  // CHECK-NEXT: volatile load {{.*}} @ci, i32 0, i32 1
 
   asm("nop"); // CHECK-NEXT: call void asm
 
@@ -225,18 +208,14 @@ void test() {
   // CHECK-NEXT: icmp ne
   // CHECK-NEXT: or i1
 
-  // FIXME: should not load!
   ci=ci;
   // CHECK-NEXT: volatile load
   // CHECK-NEXT: volatile load
   // CHECK-NEXT: volatile store
   // CHECK-NEXT: volatile store
-  // CHECK-NEXT: volatile load
-  // CHECK-NEXT: volatile load
 
   asm("nop"); // CHECK-NEXT: call void asm
 
-  // FIXME: should not load at end
+  // Extra load in C++.
   ci=ci=ci;
   // CHECK-NEXT: volatile load
@@ -247,8 +226,6 @@ void test() {
   // CHECK-NEXT: volatile load
   // CHECK-NEXT: volatile store
   // CHECK-NEXT: volatile store
-  // CHECK-NEXT: volatile load
-  // CHECK-NEXT: volatile load
 
   __imag ci = __imag ci = __imag ci;
   // CHECK-NEXT: [[T:%.*]] = volatile load [[INT]]* getelementptr inbounds ([[CINT]]* @ci, i32 0, i32 1)
@@ -261,7 +238,6 @@ void test() {
   // CHECK-NEXT: volatile store
 
   __imag i;
-  // CHECK-NEXT: volatile load
 
   // ============================================================
   // FIXME: Test cases we get wrong.
@@ -338,13 +314,11 @@ void test() {
 
   // A use. gcc treats this a not a use, that's probably a bug due to tree
   // folding ignoring volatile.
-  // FIXME: extra load at end
   __real (ci=ci);
   // CHECK-NEXT: volatile load
   // CHECK-NEXT: volatile load
   // CHECK-NEXT: volatile store
   // CHECK-NEXT: volatile store
-  // CHECK-NEXT: volatile load
 
   // A use.
   i + 0;
@@ -367,17 +341,15 @@ void test() {
   // CHECK-NEXT: volatile load
   // CHECK-NEXT: add
 
-  // FIXME: extra load of 'i'
   (i,j)=k;
   // CHECK-NEXT: volatile load [[INT]]* @k
-  // CHECK-NEXT: volatile load [[INT]]* @i
   // CHECK-NEXT: volatile store {{.*}}, [[INT]]* @j
 
-  // FIXME: extra load of 'j'
   (j=k,i)=i;
-  // CHECK-NEXT: volatile load [[INT]]* @i
   // CHECK-NEXT: volatile load [[INT]]* @k
   // CHECK-NEXT: volatile store {{.*}}, [[INT]]* @j
-  // CHECK-NEXT: volatile load [[INT]]* @j
+  // CHECK-NEXT: volatile load [[INT]]* @i
   // CHECK-NEXT: volatile store {{.*}}, [[INT]]* @i
 
   // CHECK-NEXT: ret void
 }
@@ -27,8 +27,6 @@ namespace test1 {
   // CHECK: define void @_ZN5test14testEv()
   void test() {
     // CHECK: [[TMP:%.*]] = load i32** @_ZN5test11xE, align 8
-    // *** FIXME: no! bad! should not be loaded! ***
-    // CHECK-NEXT: [[TMP1:%.*]] = volatile load i32* [[TMP]]
     // CHECK-NEXT: ret void
     *x;
   }