Mirror of https://github.com/microsoft/clang-1.git
Revert r153613 as it's causing large compile-time regressions on the nightly testers.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@153660 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Parent: ecae5965cf
Commit: 649b4a1a9b
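Background on what is being reverted: r153613 threaded an IsCompleteObject flag through AggValueSlot and gave EmitAggregateCopy a destIsCompleteObject parameter, so that copying a non-POD C++ aggregate into what might be a base-class subobject would write only the type's data size instead of its full sizeof, protecting fields a derived class lays out in the base's tail padding (PR12204). This revert restores the simpler interfaces and the full-size memcpy. Below is a minimal standalone sketch of the layout issue, not taken from the commit, assuming an LP64 target with the Itanium C++ ABI; the 9- and 16-byte figures match the FileCheck test removed at the bottom of this diff.

#include <cstdio>

// Same shape as test2::A in the removed test below.
struct A {
  A() {}        // non-POD, so a derived class may reuse A's tail padding
  void *ptr;    // bytes 0-7
  char c;       // byte 8; bytes 9-15 are tail padding in a complete A
};

struct B : A {
  char tail[7]; // under the assumed ABI these land at offsets 9-15
};

int main() {
  // A full 16-byte copy into the A subobject of a B clobbers B::tail;
  // a 9-byte "data size" copy does not.
  std::printf("sizeof(A)=%zu sizeof(B)=%zu\n", sizeof(A), sizeof(B));
  return 0;
}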
@@ -734,8 +734,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
           AggValueSlot::forAddr(blockField, Align, Qualifiers(),
                                 AggValueSlot::IsDestructed,
                                 AggValueSlot::DoesNotNeedGCBarriers,
-                                AggValueSlot::IsNotAliased,
-                                AggValueSlot::IsCompleteObject);
+                                AggValueSlot::IsNotAliased);
       EmitAggExpr(copyExpr, Slot);
     } else {
       EmitSynthesizedCXXCopyCtor(blockField, src, copyExpr);
@@ -1875,8 +1875,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
         if (Align > AI->getAlignment())
           AI->setAlignment(Align);
         Args.push_back(AI);
-        EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified(),
-                          /*destIsCompleteObject*/ true);
+        EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
 
         // Validate argument match.
         checkArgMatches(AI, IRArgNo, IRFuncTy);
@@ -401,8 +401,7 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
     AggValueSlot::forAddr(V, Alignment, Qualifiers(),
                           AggValueSlot::IsDestructed,
                           AggValueSlot::DoesNotNeedGCBarriers,
-                          AggValueSlot::IsNotAliased,
-                          AggValueSlot::IsNotCompleteObject);
+                          AggValueSlot::IsNotAliased);
 
   CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);
 
@@ -450,8 +449,7 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
       AggValueSlot::forLValue(LV,
                               AggValueSlot::IsDestructed,
                               AggValueSlot::DoesNotNeedGCBarriers,
-                              AggValueSlot::IsNotAliased,
-                              AggValueSlot::IsCompleteObject);
+                              AggValueSlot::IsNotAliased);
 
     CGF.EmitAggExpr(Init, Slot);
   }
@@ -591,8 +589,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
 
       // Copy the aggregate.
       CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
-                            LHS.isVolatileQualified(),
-                            /*destIsCompleteObject*/ true);
+                            LHS.isVolatileQualified());
       return;
     }
   }
@@ -1374,10 +1371,7 @@ CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor
     AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
                           AggValueSlot::IsDestructed,
                           AggValueSlot::DoesNotNeedGCBarriers,
-                          AggValueSlot::IsNotAliased,
-                          CurGD.getCtorType() == Ctor_Complete
-                            ? AggValueSlot::IsCompleteObject
-                            : AggValueSlot::IsNotCompleteObject);
+                          AggValueSlot::IsNotAliased);
 
   EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);
 
@@ -1092,10 +1092,9 @@ void CodeGenFunction::EmitExprAsInit(const Expr *init,
   } else {
     // TODO: how can we delay here if D is captured by its initializer?
     EmitAggExpr(init, AggValueSlot::forLValue(lvalue,
-                                              AggValueSlot::IsDestructed,
+                                        AggValueSlot::IsDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers,
-                                              AggValueSlot::IsNotAliased,
-                                              AggValueSlot::IsCompleteObject));
+                                        AggValueSlot::IsNotAliased));
     MaybeEmitStdInitializerListCleanup(lvalue.getAddress(), init);
   }
 }
@@ -46,11 +46,9 @@ static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
   } else if (type->isAnyComplexType()) {
     CGF.EmitComplexExprIntoAddr(Init, DeclPtr, lv.isVolatile());
   } else {
-    CGF.EmitAggExpr(Init, AggValueSlot::forLValue(lv,
-                                                  AggValueSlot::IsDestructed,
-                                          AggValueSlot::DoesNotNeedGCBarriers,
-                                                  AggValueSlot::IsNotAliased,
-                                              AggValueSlot::IsCompleteObject));
+    CGF.EmitAggExpr(Init, AggValueSlot::forLValue(lv,AggValueSlot::IsDestructed,
+                                          AggValueSlot::DoesNotNeedGCBarriers,
+                                          AggValueSlot::IsNotAliased));
   }
 }
 
@@ -373,7 +373,8 @@ static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *e,
   // evaluated but before the exception is caught. But the best way
   // to handle that is to teach EmitAggExpr to do the final copy
   // differently if it can't be elided.
-  CGF.EmitAnyExprToMem(e, typedAddr, e->getType().getQualifiers());
+  CGF.EmitAnyExprToMem(e, typedAddr, e->getType().getQualifiers(),
+                       /*IsInit*/ true);
 
   // Deactivate the cleanup block.
   CGF.DeactivateCleanupBlock(cleanup, cast<llvm::Instruction>(typedAddr));
@@ -1047,8 +1048,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
   if (!copyExpr) {
     llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
     llvm::Value *adjustedExn = CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy);
-    CGF.EmitAggregateCopy(ParamAddr, adjustedExn, CatchType,
-                          /*volatile*/ false, 0, /*destIsCompleteObject*/ true);
+    CGF.EmitAggregateCopy(ParamAddr, adjustedExn, CatchType);
     return;
   }
 
@@ -1076,8 +1076,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
     AggValueSlot::forAddr(ParamAddr, Alignment, Qualifiers(),
                           AggValueSlot::IsNotDestructed,
                           AggValueSlot::DoesNotNeedGCBarriers,
-                          AggValueSlot::IsNotAliased,
-                          AggValueSlot::IsCompleteObject));
+                          AggValueSlot::IsNotAliased));
 
   // Leave the terminate scope.
   CGF.EHStack.popTerminate();
@@ -133,17 +133,17 @@ RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
 /// location.
 void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                        llvm::Value *Location,
-                                       Qualifiers Quals) {
+                                       Qualifiers Quals,
+                                       bool IsInit) {
   // FIXME: This function should take an LValue as an argument.
   if (E->getType()->isAnyComplexType()) {
     EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
   } else if (hasAggregateLLVMType(E->getType())) {
     CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
     EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
-                                         AggValueSlot::IsDestructed,
+                                         AggValueSlot::IsDestructed_t(IsInit),
                                          AggValueSlot::DoesNotNeedGCBarriers,
-                                         AggValueSlot::IsNotAliased,
-                                         AggValueSlot::IsCompleteObject));
+                                         AggValueSlot::IsAliased_t(!IsInit)));
   } else {
     RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
     LValue LV = MakeAddrLValue(Location, E->getType());
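The restored EmitAnyExprToMem folds its single IsInit flag into two slot flags, as the hunk above shows: AggValueSlot::IsDestructed_t(IsInit) and AggValueSlot::IsAliased_t(!IsInit). A standalone model of just that mapping (not clang code; the enumerators mirror the ones declared in CGValue.h):

#include <cassert>

enum IsDestructed_t { IsNotDestructed, IsDestructed };
enum IsAliased_t { IsNotAliased, IsAliased };

struct SlotFlags {
  IsDestructed_t Destructed;
  IsAliased_t Aliased;
};

// Mirrors the two casts in the restored call.
static SlotFlags flagsFor(bool IsInit) {
  return { IsDestructed_t(IsInit), IsAliased_t(!IsInit) };
}

int main() {
  assert(flagsFor(true).Destructed == IsDestructed);     // init owns cleanup
  assert(flagsFor(true).Aliased == IsNotAliased);        // fresh memory
  assert(flagsFor(false).Destructed == IsNotDestructed);
  assert(flagsFor(false).Aliased == IsAliased);          // plain store may alias
  return 0;
}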
@@ -366,8 +366,7 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
     AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
                                     Qualifiers(), isDestructed,
                                     AggValueSlot::DoesNotNeedGCBarriers,
-                                    AggValueSlot::IsNotAliased,
-                                    AggValueSlot::IsCompleteObject);
+                                    AggValueSlot::IsNotAliased);
   }
 
   if (InitializedDecl) {
@@ -2152,7 +2151,8 @@ LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
   const Expr *InitExpr = E->getInitializer();
   LValue Result = MakeAddrLValue(DeclPtr, E->getType());
 
-  EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers());
+  EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
+                   /*Init*/ true);
 
   return Result;
 }
@@ -2283,7 +2283,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
     // as a value, copy it into a temporary, and return an lvalue referring to
     // that temporary.
     llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
-    EmitAnyExprToMem(E, V, E->getType().getQualifiers());
+    EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false);
     return MakeAddrLValue(V, E->getType());
   }
 
@@ -2754,7 +2754,8 @@ EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
 static llvm::Value *
 EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
   llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
-  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers());
+  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
+                       /*Init*/ true);
   return DeclPtr;
 }
 
@@ -179,8 +179,7 @@ public:
 
   void VisitVAArgExpr(VAArgExpr *E);
 
-  void EmitInitializationToLValue(Expr *E, LValue Address,
-                         AggValueSlot::IsCompleteObject_t isCompleteObject);
+  void EmitInitializationToLValue(Expr *E, LValue Address);
   void EmitNullInitializationToLValue(LValue Address);
   //  case Expr::ChooseExprClass:
   void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
@@ -280,7 +279,7 @@ void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore,
   // is volatile, unless copy has volatile for both source and destination..
   CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(),
                         Dest.isVolatile()|Src.isVolatileQualified(),
-                        Alignment, Dest.isCompleteObject());
+                        Alignment);
 }
 
 /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
@@ -442,8 +441,7 @@ void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
       EmitStdInitializerList(element, initList);
     } else {
       LValue elementLV = CGF.MakeAddrLValue(element, elementType);
-      EmitInitializationToLValue(E->getInit(i), elementLV,
-                                 AggValueSlot::IsCompleteObject);
+      EmitInitializationToLValue(E->getInit(i), elementLV);
     }
   }
 
@@ -490,8 +488,7 @@ void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
     // Emit the actual filler expression.
     LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
     if (filler)
-      EmitInitializationToLValue(filler, elementLV,
-                                 AggValueSlot::IsCompleteObject);
+      EmitInitializationToLValue(filler, elementLV);
     else
       EmitNullInitializationToLValue(elementLV);
 
@@ -570,8 +567,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
     llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
                                                  CGF.ConvertType(PtrTy));
     EmitInitializationToLValue(E->getSubExpr(),
-                               CGF.MakeAddrLValue(CastPtr, Ty),
-                               Dest.isCompleteObject());
+                               CGF.MakeAddrLValue(CastPtr, Ty));
     break;
   }
 
@@ -679,29 +675,6 @@ void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
   EmitFinalDestCopy(E, LV);
 }
 
-/// Quickly check whether the object looks like it might be a complete
-/// object.
-static AggValueSlot::IsCompleteObject_t isCompleteObject(const Expr *E) {
-  E = E->IgnoreParens();
-
-  QualType objectType;
-  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
-    objectType = DRE->getDecl()->getType();
-  } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
-    objectType = ME->getMemberDecl()->getType();
-  } else {
-    // Be conservative.
-    return AggValueSlot::MayNotBeCompleteObject;
-  }
-
-  // The expression refers directly to some sort of object.
-  // If that object has reference type, be conservative.
-  if (objectType->isReferenceType())
-    return AggValueSlot::MayNotBeCompleteObject;
-
-  return AggValueSlot::IsCompleteObject;
-}
-
 void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
   // For an assignment to work, the value on the right has
   // to be compatible with the value on the left.
@@ -709,8 +682,7 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
                                                  E->getRHS()->getType())
          && "Invalid assignment");
 
-  if (const DeclRefExpr *DRE
-        = dyn_cast<DeclRefExpr>(E->getLHS()->IgnoreParens()))
+  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->getLHS()))
     if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
       if (VD->hasAttr<BlocksAttr>() &&
           E->getRHS()->HasSideEffects(CGF.getContext())) {
@@ -720,20 +692,18 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
         LValue LHS = CGF.EmitLValue(E->getLHS());
         Dest = AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                                        needsGC(E->getLHS()->getType()),
-                                       AggValueSlot::IsAliased,
-                                       AggValueSlot::IsCompleteObject);
+                                       AggValueSlot::IsAliased);
         EmitFinalDestCopy(E, RHS, true);
         return;
       }
 
   LValue LHS = CGF.EmitLValue(E->getLHS());
 
   // Codegen the RHS so that it stores directly into the LHS.
   AggValueSlot LHSSlot =
     AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                             needsGC(E->getLHS()->getType()),
-                            AggValueSlot::IsAliased,
-                            isCompleteObject(E->getLHS()));
+                            AggValueSlot::IsAliased);
   CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
   EmitFinalDestCopy(E, LHS, true);
 }
@@ -866,8 +836,7 @@ static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
 
 
 void
-AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV,
-                          AggValueSlot::IsCompleteObject_t isCompleteObject) {
+AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
   QualType type = LV.getType();
   // FIXME: Ignore result?
   // FIXME: Are initializers affected by volatile?
@@ -885,7 +854,6 @@ AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV,
                                                AggValueSlot::IsDestructed,
                                       AggValueSlot::DoesNotNeedGCBarriers,
                                                AggValueSlot::IsNotAliased,
-                                               isCompleteObject,
                                                Dest.isZeroed()));
   } else if (LV.isSimple()) {
     CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
@@ -1001,8 +969,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
     LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);
     if (NumInitElements) {
       // Store the initializer into the field
-      EmitInitializationToLValue(E->getInit(0), FieldLoc,
-                                 AggValueSlot::IsCompleteObject);
+      EmitInitializationToLValue(E->getInit(0), FieldLoc);
     } else {
       // Default-initialize to null.
       EmitNullInitializationToLValue(FieldLoc);
@@ -1044,8 +1011,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
 
     if (curInitIndex < NumInitElements) {
       // Store the initializer into the field.
-      EmitInitializationToLValue(E->getInit(curInitIndex++), LV,
-                                 AggValueSlot::IsCompleteObject);
+      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
     } else {
       // We're out of initalizers; default-initialize to null
      EmitNullInitializationToLValue(LV);
@@ -1220,94 +1186,30 @@ LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
   LValue LV = MakeAddrLValue(Temp, E->getType());
   EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
                                          AggValueSlot::DoesNotNeedGCBarriers,
-                                         AggValueSlot::IsNotAliased,
-                                         AggValueSlot::IsCompleteObject));
+                                         AggValueSlot::IsNotAliased));
   return LV;
 }
 
-void CodeGenFunction::EmitAggregateCopy(llvm::Value *dest, llvm::Value *src,
-                                        QualType type,
-                                        bool isVolatile, unsigned alignment,
-                                        bool destIsCompleteObject) {
-  assert(!type->isAnyComplexType() && "Shouldn't happen for complex");
+void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
+                                        llvm::Value *SrcPtr, QualType Ty,
+                                        bool isVolatile, unsigned Alignment) {
+  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
 
-  // Get size and alignment info for this type. Note that the type
-  // might include an alignment attribute, so we can't just rely on
-  // the layout.
-  // FIXME: Do we need to handle VLAs here?
-  std::pair<CharUnits, CharUnits> typeInfo =
-    getContext().getTypeInfoInChars(type);
-
-  // If we weren't given an alignment, use the natural alignment.
-  if (!alignment) alignment = typeInfo.second.getQuantity();
-
-  CharUnits sizeToCopy = typeInfo.first;
-
-  // There's some special logic that applies to C++ classes:
   if (getContext().getLangOpts().CPlusPlus) {
-    if (const RecordType *RT = type->getAs<RecordType>()) {
-      // First, we want to assert that we're not doing this to
-      // something with a non-trivial operator/constructor.
-      CXXRecordDecl *record = cast<CXXRecordDecl>(RT->getDecl());
-      assert((record->hasTrivialCopyConstructor() ||
-              record->hasTrivialCopyAssignment() ||
-              record->hasTrivialMoveConstructor() ||
-              record->hasTrivialMoveAssignment()) &&
+    if (const RecordType *RT = Ty->getAs<RecordType>()) {
+      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
+      assert((Record->hasTrivialCopyConstructor() ||
+              Record->hasTrivialCopyAssignment() ||
+              Record->hasTrivialMoveConstructor() ||
+              Record->hasTrivialMoveAssignment()) &&
              "Trying to aggregate-copy a type without a trivial copy "
             "constructor or assignment operator");
-
-      // Second, we want to ignore empty classes.
-      if (record->isEmpty())
+      // Ignore empty classes in C++.
+      if (Record->isEmpty())
         return;
-
-      // Third, if it's possible that the destination might not be a
-      // complete object, then we need to make sure we only copy the
-      // data size, not the full sizeof, so that we don't overwrite
-      // subclass fields in the tailing padding. It's generally going
-      // to be more efficient to copy the sizeof, since we can use
-      // larger stores.
-      //
-      // Unions and final classes can never be base classes.
-      if (!destIsCompleteObject && !record->isUnion() &&
-          !record->hasAttr<FinalAttr>()) {
-        const ASTRecordLayout &layout
-          = getContext().getASTRecordLayout(record);
-        sizeToCopy = layout.getDataSize();
-      }
     }
   }
 
-  llvm::PointerType *DPT = cast<llvm::PointerType>(dest->getType());
-  llvm::Type *DBP =
-    llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
-  dest = Builder.CreateBitCast(dest, DBP);
-
-  llvm::PointerType *SPT = cast<llvm::PointerType>(src->getType());
-  llvm::Type *SBP =
-    llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
-  src = Builder.CreateBitCast(src, SBP);
-
-  llvm::Value *sizeVal =
-    llvm::ConstantInt::get(CGM.SizeTy, sizeToCopy.getQuantity());
-
-  // Don't do any of the memmove_collectable tests if GC isn't set.
-  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
-    // fall through
-  } else if (const RecordType *RT = type->getAs<RecordType>()) {
-    if (RT->getDecl()->hasObjectMember()) {
-      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, dest, src, sizeVal);
-      return;
-    }
-  } else if (type->isArrayType()) {
-    QualType baseType = getContext().getBaseElementType(type);
-    if (const RecordType *RT = baseType->getAs<RecordType>()) {
-      if (RT->getDecl()->hasObjectMember()) {
-        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, dest, src,sizeVal);
-        return;
-      }
-    }
-  }
-
   // Aggregate assignment turns into llvm.memcpy. This is almost valid per
   // C99 6.5.16.1p3, which states "If the value being stored in an object is
   // read from another object that overlaps in anyway the storage of the first
@@ -1318,8 +1220,71 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *dest, llvm::Value *src,
   // equal, but other compilers do this optimization, and almost every memcpy
   // implementation handles this case safely. If there is a libc that does not
   // safely handle this, we can add a target hook.
 
+  // Get size and alignment info for this aggregate.
+  std::pair<CharUnits, CharUnits> TypeInfo =
+    getContext().getTypeInfoInChars(Ty);
+
+  if (!Alignment)
+    Alignment = TypeInfo.second.getQuantity();
+
+  // FIXME: Handle variable sized types.
+
+  // FIXME: If we have a volatile struct, the optimizer can remove what might
+  // appear to be `extra' memory ops:
+  //
+  // volatile struct { int i; } a, b;
+  //
+  // int main() {
+  //   a = b;
+  //   a = b;
+  // }
+  //
+  // we need to use a different call here. We use isVolatile to indicate when
+  // either the source or the destination is volatile.
+
+  llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
+  llvm::Type *DBP =
+    llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
+  DestPtr = Builder.CreateBitCast(DestPtr, DBP);
+
+  llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
+  llvm::Type *SBP =
+    llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
+  SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);
+
+  // Don't do any of the memmove_collectable tests if GC isn't set.
+  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
+    // fall through
+  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
+    RecordDecl *Record = RecordTy->getDecl();
+    if (Record->hasObjectMember()) {
+      CharUnits size = TypeInfo.first;
+      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+      llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
+      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
+                                                    SizeVal);
+      return;
+    }
+  } else if (Ty->isArrayType()) {
+    QualType BaseType = getContext().getBaseElementType(Ty);
+    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
+      if (RecordTy->getDecl()->hasObjectMember()) {
+        CharUnits size = TypeInfo.first;
+        llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+        llvm::Value *SizeVal =
+          llvm::ConstantInt::get(SizeTy, size.getQuantity());
+        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
+                                                      SizeVal);
+        return;
+      }
+    }
+  }
+
-  Builder.CreateMemCpy(dest, src, sizeVal, alignment, isVolatile);
+  Builder.CreateMemCpy(DestPtr, SrcPtr,
+                       llvm::ConstantInt::get(IntPtrTy,
+                                              TypeInfo.first.getQuantity()),
+                       Alignment, isVolatile);
 }
 
 void CodeGenFunction::MaybeEmitStdInitializerListCleanup(llvm::Value *loc,
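The two EmitAggregateCopy hunks above switch between two size rules: the reverted-out version consulted the record layout and, when the destination might not be a complete object, trimmed the memcpy to the data size; the restored version always copies the full type size. A standalone sketch of just the size decision (not clang code; Layout stands in for ASTRecordLayout, and the 16/9 figures again assume the LP64 layout of the removed test's struct A):

#include <cassert>
#include <cstddef>

struct Layout {
  std::size_t Size;     // full sizeof, including tail padding
  std::size_t DataSize; // bytes a derived class may not reuse
};

// r153613 rule (removed by this revert): trim when the destination may be
// a base subobject.
static std::size_t copySizeR153613(const Layout &L, bool destIsCompleteObject) {
  return destIsCompleteObject ? L.Size : L.DataSize;
}

// Restored rule: always the full size, which permits wider stores.
static std::size_t copySizeRestored(const Layout &L) {
  return L.Size;
}

int main() {
  const Layout A = {16, 9}; // struct A { void *ptr; char c; } on LP64
  assert(copySizeR153613(A, /*destIsCompleteObject=*/false) == 9);
  assert(copySizeR153613(A, /*destIsCompleteObject=*/true) == 16);
  assert(copySizeRestored(A) == 16);
  return 0;
}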
@@ -781,8 +781,7 @@ static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
     = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
                             AggValueSlot::IsDestructed,
                             AggValueSlot::DoesNotNeedGCBarriers,
-                            AggValueSlot::IsNotAliased,
-                            AggValueSlot::IsCompleteObject);
+                            AggValueSlot::IsNotAliased);
   CGF.EmitAggExpr(Init, Slot);
 
   CGF.MaybeEmitStdInitializerListCleanup(NewPtr, Init);
@@ -887,8 +887,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
     // The return value slot is guaranteed to not be aliased, but
     // that's not necessarily the same as "on the stack", so
     // we still potentially need objc_memmove_collectable.
-    EmitAggregateCopy(ReturnValue, LV.getAddress(), ivarType,
-                      /*volatile*/ false, 0, /*destIsCompleteObject*/ true);
+    EmitAggregateCopy(ReturnValue, LV.getAddress(), ivarType);
   } else {
     llvm::Value *value;
     if (propType->isReferenceType()) {
@@ -1313,8 +1312,7 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
       EmitAggExpr(IvarInit->getInit(),
                   AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
                                           AggValueSlot::DoesNotNeedGCBarriers,
-                                          AggValueSlot::IsNotAliased,
-                                          AggValueSlot::IsCompleteObject));
+                                          AggValueSlot::IsNotAliased));
     }
     // constructor returns 'self'.
     CodeGenTypes &Types = CGM.getTypes();
@@ -2939,8 +2937,7 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
     AggValueSlot::forAddr(DV.getScalarVal(), Alignment, Qualifiers(),
                           AggValueSlot::IsDestructed,
                           AggValueSlot::DoesNotNeedGCBarriers,
-                          AggValueSlot::IsNotAliased,
-                          AggValueSlot::IsCompleteObject));
+                          AggValueSlot::IsNotAliased));
 
   FinishFunction();
   HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
@@ -722,8 +722,7 @@ void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
   if (RV.isScalar()) {
     Builder.CreateStore(RV.getScalarVal(), ReturnValue);
   } else if (RV.isAggregate()) {
-    EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty,
-                      /*volatile*/ false, 0, /*destIsCompleteObject*/ true);
+    EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty);
   } else {
     StoreComplexToAddr(RV.getComplexVal(), ReturnValue, false);
   }
@@ -770,8 +769,7 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
     EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Alignment, Qualifiers(),
                                           AggValueSlot::IsDestructed,
                                           AggValueSlot::DoesNotNeedGCBarriers,
-                                          AggValueSlot::IsNotAliased,
-                                          AggValueSlot::IsCompleteObject));
+                                          AggValueSlot::IsNotAliased));
   }
 
   EmitBranchThroughCleanup(ReturnBlock);
@@ -318,22 +318,22 @@ class AggValueSlot {
   // Qualifiers
   Qualifiers Quals;
 
-  unsigned Alignment : 16;
+  unsigned short Alignment;
 
   /// DestructedFlag - This is set to true if some external code is
   /// responsible for setting up a destructor for the slot. Otherwise
   /// the code which constructs it should push the appropriate cleanup.
-  unsigned DestructedFlag : 1;
+  bool DestructedFlag : 1;
 
   /// ObjCGCFlag - This is set to true if writing to the memory in the
   /// slot might require calling an appropriate Objective-C GC
   /// barrier. The exact interaction here is unnecessarily mysterious.
-  unsigned ObjCGCFlag : 1;
+  bool ObjCGCFlag : 1;
 
   /// ZeroedFlag - This is set to true if the memory in the slot is
   /// known to be zero before the assignment into it. This means that
   /// zero fields don't need to be set.
-  unsigned ZeroedFlag : 1;
+  bool ZeroedFlag : 1;
 
   /// AliasedFlag - This is set to true if the slot might be aliased
   /// and it's not undefined behavior to access it through such an
@@ -347,32 +347,19 @@ class AggValueSlot {
   /// over. Since it's invalid in general to memcpy a non-POD C++
   /// object, it's important that this flag never be set when
   /// evaluating an expression which constructs such an object.
-  unsigned AliasedFlag : 1;
-
-  /// CompleteObjectFlag - This is set to true if the slot is known to
-  /// be a complete object. When emitting an aggregate copy of a
-  /// non-POD C++ struct to a location which may not be a complete
-  /// object, only the data size of the type can be copied in order to
-  /// prevent unrelated fields from being overwritten.
-  unsigned CompleteObjectFlag : 1;
+  bool AliasedFlag : 1;
 
 public:
   enum IsAliased_t { IsNotAliased, IsAliased };
   enum IsDestructed_t { IsNotDestructed, IsDestructed };
   enum IsZeroed_t { IsNotZeroed, IsZeroed };
-  enum IsCompleteObject_t {
-    IsNotCompleteObject,
-    MayNotBeCompleteObject = IsNotCompleteObject,
-    IsCompleteObject
-  };
   enum NeedsGCBarriers_t { DoesNotNeedGCBarriers, NeedsGCBarriers };
 
   /// ignored - Returns an aggregate value slot indicating that the
   /// aggregate value is being ignored.
   static AggValueSlot ignored() {
     return forAddr(0, CharUnits(), Qualifiers(), IsNotDestructed,
-                   DoesNotNeedGCBarriers, IsNotAliased,
-                   IsCompleteObject);
+                   DoesNotNeedGCBarriers, IsNotAliased);
   }
 
   /// forAddr - Make a slot for an aggregate value.
@@ -390,7 +377,6 @@ public:
                               IsDestructed_t isDestructed,
                               NeedsGCBarriers_t needsGC,
                               IsAliased_t isAliased,
-                              IsCompleteObject_t isCompleteObject,
                               IsZeroed_t isZeroed = IsNotZeroed) {
     AggValueSlot AV;
     AV.Addr = addr;
@@ -400,18 +386,15 @@ public:
     AV.ObjCGCFlag = needsGC;
     AV.ZeroedFlag = isZeroed;
     AV.AliasedFlag = isAliased;
-    AV.CompleteObjectFlag = isCompleteObject;
     return AV;
   }
 
   static AggValueSlot forLValue(LValue LV, IsDestructed_t isDestructed,
                                 NeedsGCBarriers_t needsGC,
                                 IsAliased_t isAliased,
-                                IsCompleteObject_t isCompleteObject,
                                 IsZeroed_t isZeroed = IsNotZeroed) {
     return forAddr(LV.getAddress(), LV.getAlignment(),
-                   LV.getQuals(), isDestructed, needsGC, isAliased,
-                   isCompleteObject, isZeroed);
+                   LV.getQuals(), isDestructed, needsGC, isAliased, isZeroed);
   }
 
   IsDestructed_t isExternallyDestructed() const {
@@ -451,10 +434,6 @@ public:
     return IsAliased_t(AliasedFlag);
   }
 
-  IsCompleteObject_t isCompleteObject() const {
-    return IsCompleteObject_t(CompleteObjectFlag);
-  }
-
   // FIXME: Alignment?
   RValue asRValue() const {
     return RValue::getAggregate(getAddr(), isVolatile());
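For orientation while reading the many call-site hunks, this is the restored factory shape in miniature: a standalone model rather than the real CGValue.h API, taking four flags plus an optional IsZeroed, with the IsCompleteObject_t parameter gone entirely.

enum IsDestructed_t { IsNotDestructed, IsDestructed };
enum NeedsGCBarriers_t { DoesNotNeedGCBarriers, NeedsGCBarriers };
enum IsAliased_t { IsNotAliased, IsAliased };
enum IsZeroed_t { IsNotZeroed, IsZeroed };

struct Slot {
  IsDestructed_t Destructed;
  NeedsGCBarriers_t ObjCGC;
  IsAliased_t Aliased;
  IsZeroed_t Zeroed;

  // Restored parameter order; r153613 had inserted an IsCompleteObject_t
  // argument between isAliased and isZeroed.
  static Slot make(IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC,
                   IsAliased_t isAliased, IsZeroed_t isZeroed = IsNotZeroed) {
    return Slot{isDestructed, needsGC, isAliased, isZeroed};
  }
};

int main() {
  Slot S = Slot::make(IsDestructed, DoesNotNeedGCBarriers, IsNotAliased);
  return S.Zeroed == IsNotZeroed ? 0 : 1;
}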
@@ -1596,9 +1596,7 @@ public:
                                  T.getQualifiers(),
                                  AggValueSlot::IsNotDestructed,
                                  AggValueSlot::DoesNotNeedGCBarriers,
-                                 AggValueSlot::IsNotAliased,
-                                 AggValueSlot::IsCompleteObject,
-                                 AggValueSlot::IsNotZeroed);
+                                 AggValueSlot::IsNotAliased);
   }
 
   /// Emit a cast to void* in the appropriate address space.
@@ -1630,10 +1628,9 @@ public:
   RValue EmitAnyExprToTemp(const Expr *E);
 
   /// EmitAnyExprToMem - Emits the code necessary to evaluate an
-  /// arbitrary expression as an initialization of the given memory
-  /// location.
+  /// arbitrary expression into the given memory location.
   void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
-                        Qualifiers Quals);
+                        Qualifiers Quals, bool IsInitializer);
 
   /// EmitExprAsInit - Emits the code necessary to initialize a
   /// location in memory with the given initializer.
@@ -1644,12 +1641,9 @@ public:
   ///
   /// \param isVolatile - True iff either the source or the destination is
   /// volatile.
-  /// \param destIsCompleteObject - True if the destination is known to be
-  /// a complete object.
   void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
                          QualType EltTy, bool isVolatile=false,
-                         unsigned alignment = 0,
-                         bool destIsCompleteObject = false);
+                         unsigned Alignment = 0);
 
   /// StartBlock - Start new block named N. If insert block is a dummy block
   /// then reuse it.
@@ -28,24 +28,3 @@ namespace test1 {
 
   A<int> a;
 }
-
-// PR12204
-namespace test2 {
-  struct A {
-    A() {} // make this non-POD to enable tail layout
-    void *ptr;
-    char c;
-  };
-
-  void test(A &out) {
-    out = A();
-  }
-}
-// CHECK: define void @_ZN5test24testERNS_1AE(
-// CHECK:      [[OUT:%.*]] = alloca [[A:%.*]]*, align 8
-// CHECK-NEXT: [[TMP:%.*]] = alloca [[A]], align 8
-// CHECK:      [[REF:%.*]] = load [[A]]** [[OUT]], align 8
-// CHECK-NEXT: call void @_ZN5test21AC1Ev([[A]]* [[TMP]])
-// CHECK-NEXT: [[T0:%.*]] = bitcast [[A]]* [[REF]] to i8*
-// CHECK-NEXT: [[T1:%.*]] = bitcast [[A]]* [[TMP]] to i8*
-// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[T0]], i8* [[T1]], i64 9, i32 8, i1 false)