Update for LLVM API change, and contextify a bunch of related stuff.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@75705 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Owen Anderson 2009-07-14 23:10:40 +00:00
Parent b4aa4845b0
Commit a1cf15f468
20 changed files with 916 additions and 815 deletions
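
For reference, a minimal sketch (not part of the commit) of the migration pattern applied throughout the diff below: IR constants and derived types are now requested from the owning llvm::LLVMContext instead of through static factory methods. The helper names, the Flags parameter, and the header paths are illustrative assumptions about the 2009 tree, not code taken from this change.

// Sketch only: helpers and arguments are hypothetical; header layout assumed.
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"

static llvm::Constant *makeFlagsConstant(llvm::LLVMContext &VMContext,
                                         unsigned Flags) {
  // Old API: static factory, implicitly tied to a global context.
  //   return llvm::ConstantInt::get(llvm::Type::Int32Ty, Flags);
  // New API: the context owns and uniques constants and derived types.
  return VMContext.getConstantInt(llvm::Type::Int32Ty, Flags);
}

static const llvm::Type *makeBytePtrTy(llvm::LLVMContext &VMContext) {
  // Old: llvm::PointerType::getUnqual(llvm::Type::Int8Ty)
  return VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
}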

View file

@ -17,6 +17,7 @@
namespace llvm {
class Type;
class Value;
class LLVMContext;
}
namespace clang {
@ -128,7 +129,8 @@ namespace clang {
virtual ~ABIInfo();
virtual void computeInfo(CodeGen::CGFunctionInfo &FI,
ASTContext &Ctx) const = 0;
ASTContext &Ctx,
llvm::LLVMContext &VMContext) const = 0;
/// EmitVAArg - Emit the target dependent code to load a value of
/// \arg Ty from the va_list pointed to by \arg VAListAddr.

View file

@ -29,15 +29,17 @@ BuildDescriptorBlockDecl(bool BlockHasCopyDispose, uint64_t Size,
llvm::Constant *C;
std::vector<llvm::Constant*> Elts;
llvm::LLVMContext &VMContext = CGM.getLLVMContext();
// reserved
C = llvm::ConstantInt::get(UnsignedLongTy, 0);
C = VMContext.getConstantInt(UnsignedLongTy, 0);
Elts.push_back(C);
// Size
// FIXME: What is the right way to say this doesn't fit? We should give
// a user diagnostic in that case. Better fix would be to change the
// API to size_t.
C = llvm::ConstantInt::get(UnsignedLongTy, Size);
C = VMContext.getConstantInt(UnsignedLongTy, Size);
Elts.push_back(C);
if (BlockHasCopyDispose) {
@ -48,7 +50,7 @@ BuildDescriptorBlockDecl(bool BlockHasCopyDispose, uint64_t Size,
Elts.push_back(BuildDestroyHelper(Ty, NoteForHelper));
}
C = llvm::ConstantStruct::get(Elts);
C = VMContext.getConstantStruct(Elts);
C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
llvm::GlobalValue::InternalLinkage,
@ -140,17 +142,17 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
// __isa
C = CGM.getNSConcreteStackBlock();
C = llvm::ConstantExpr::getBitCast(C, PtrToInt8Ty);
C = VMContext.getConstantExprBitCast(C, PtrToInt8Ty);
Elts[0] = C;
// __flags
const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
CGM.getTypes().ConvertType(CGM.getContext().IntTy));
C = llvm::ConstantInt::get(IntTy, flags);
C = VMContext.getConstantInt(IntTy, flags);
Elts[1] = C;
// __reserved
C = llvm::ConstantInt::get(IntTy, 0);
C = VMContext.getConstantInt(IntTy, 0);
Elts[2] = C;
if (subBlockDeclRefDecls.size() == 0) {
@ -159,9 +161,9 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
// Optimize to being a global block.
Elts[0] = CGM.getNSConcreteGlobalBlock();
Elts[1] = llvm::ConstantInt::get(IntTy, flags|BLOCK_IS_GLOBAL);
Elts[1] = VMContext.getConstantInt(IntTy, flags|BLOCK_IS_GLOBAL);
C = llvm::ConstantStruct::get(Elts);
C = VMContext.getConstantStruct(Elts);
char Name[32];
sprintf(Name, "__block_holder_tmp_%d", CGM.getGlobalUniqueCount());
@ -169,7 +171,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
llvm::GlobalValue::InternalLinkage,
C, Name);
QualType BPT = BE->getType();
C = llvm::ConstantExpr::getBitCast(C, ConvertType(BPT));
C = VMContext.getConstantExprBitCast(C, ConvertType(BPT));
return C;
}
@ -184,12 +186,12 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
QualType Ty = E->getType();
if (BDRE && BDRE->isByRef()) {
uint64_t Align = getContext().getDeclAlignInBytes(BDRE->getDecl());
Types[i+5] = llvm::PointerType::get(BuildByRefType(Ty, Align), 0);
Types[i+5] = VMContext.getPointerType(BuildByRefType(Ty, Align), 0);
} else
Types[i+5] = ConvertType(Ty);
}
llvm::StructType *Ty = llvm::StructType::get(Types, true);
llvm::StructType *Ty = VMContext.getStructType(Types, true);
llvm::AllocaInst *A = CreateTempAlloca(Ty);
A->setAlignment(subBlockAlign);
@ -265,10 +267,10 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
llvm::Value *BlockLiteral = LoadBlockStruct();
Loc = Builder.CreateGEP(BlockLiteral,
llvm::ConstantInt::get(llvm::Type::Int64Ty,
VMContext.getConstantInt(llvm::Type::Int64Ty,
offset),
"block.literal");
Ty = llvm::PointerType::get(Ty, 0);
Ty = VMContext.getPointerType(Ty, 0);
Loc = Builder.CreateBitCast(Loc, Ty);
Loc = Builder.CreateLoad(Loc, false);
// Loc = Builder.CreateBitCast(Loc, Ty);
@ -310,7 +312,7 @@ const llvm::Type *BlockModule::getBlockDescriptorType() {
// unsigned long reserved;
// unsigned long block_size;
// };
BlockDescriptorType = llvm::StructType::get(UnsignedLongTy,
BlockDescriptorType = VMContext.getStructType(UnsignedLongTy,
UnsignedLongTy,
NULL);
@ -325,7 +327,7 @@ const llvm::Type *BlockModule::getGenericBlockLiteralType() {
return GenericBlockLiteralType;
const llvm::Type *BlockDescPtrTy =
llvm::PointerType::getUnqual(getBlockDescriptorType());
VMContext.getPointerTypeUnqual(getBlockDescriptorType());
const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
getTypes().ConvertType(getContext().IntTy));
@ -337,7 +339,7 @@ const llvm::Type *BlockModule::getGenericBlockLiteralType() {
// void (*__invoke)(void *);
// struct __block_descriptor *__descriptor;
// };
GenericBlockLiteralType = llvm::StructType::get(PtrToInt8Ty,
GenericBlockLiteralType = VMContext.getStructType(PtrToInt8Ty,
IntTy,
IntTy,
PtrToInt8Ty,
@ -355,7 +357,7 @@ const llvm::Type *BlockModule::getGenericExtendedBlockLiteralType() {
return GenericExtendedBlockLiteralType;
const llvm::Type *BlockDescPtrTy =
llvm::PointerType::getUnqual(getBlockDescriptorType());
VMContext.getPointerTypeUnqual(getBlockDescriptorType());
const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
getTypes().ConvertType(getContext().IntTy));
@ -369,7 +371,7 @@ const llvm::Type *BlockModule::getGenericExtendedBlockLiteralType() {
// void *__copy_func_helper_decl;
// void *__destroy_func_decl;
// };
GenericExtendedBlockLiteralType = llvm::StructType::get(PtrToInt8Ty,
GenericExtendedBlockLiteralType = VMContext.getStructType(PtrToInt8Ty,
IntTy,
IntTy,
PtrToInt8Ty,
@ -392,7 +394,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E) {
// Get a pointer to the generic block literal.
const llvm::Type *BlockLiteralTy =
llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType());
VMContext.getPointerTypeUnqual(CGM.getGenericBlockLiteralType());
// Bitcast the callee to a block literal.
llvm::Value *BlockLiteral =
@ -403,7 +405,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E) {
BlockLiteral =
Builder.CreateBitCast(BlockLiteral,
llvm::PointerType::getUnqual(llvm::Type::Int8Ty),
VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty),
"tmp");
// Add the block literal.
@ -429,7 +431,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E) {
const llvm::Type *BlockFTy =
CGM.getTypes().GetFunctionType(FnInfo, false);
const llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
const llvm::Type *BlockFTyPtr = VMContext.getPointerTypeUnqual(BlockFTy);
Func = Builder.CreateBitCast(Func, BlockFTyPtr);
// And call the block.
@ -453,18 +455,18 @@ llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
llvm::Value *BlockLiteral = LoadBlockStruct();
llvm::Value *V = Builder.CreateGEP(BlockLiteral,
llvm::ConstantInt::get(llvm::Type::Int64Ty,
VMContext.getConstantInt(llvm::Type::Int64Ty,
offset),
"block.literal");
if (E->isByRef()) {
bool needsCopyDispose = BlockRequiresCopying(E->getType());
uint64_t Align = getContext().getDeclAlignInBytes(E->getDecl());
const llvm::Type *PtrStructTy
= llvm::PointerType::get(BuildByRefType(E->getType(), Align), 0);
= VMContext.getPointerType(BuildByRefType(E->getType(), Align), 0);
// The block literal will need a copy/destroy helper.
BlockHasCopyDispose = true;
Ty = PtrStructTy;
Ty = llvm::PointerType::get(Ty, 0);
Ty = VMContext.getPointerType(Ty, 0);
V = Builder.CreateBitCast(V, Ty);
V = Builder.CreateLoad(V, false);
V = Builder.CreateStructGEP(V, 1, "forwarding");
@ -472,7 +474,7 @@ llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
V = Builder.CreateBitCast(V, PtrStructTy);
V = Builder.CreateStructGEP(V, needsCopyDispose*2 + 4, "x");
} else {
Ty = llvm::PointerType::get(Ty, 0);
Ty = VMContext.getPointerType(Ty, 0);
V = Builder.CreateBitCast(V, Ty);
}
return V;
@ -507,10 +509,11 @@ BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
// block literal struct.
uint64_t BlockLiteralSize =
TheTargetData.getTypeStoreSizeInBits(getGenericBlockLiteralType()) / 8;
DescriptorFields[1] = llvm::ConstantInt::get(UnsignedLongTy,BlockLiteralSize);
DescriptorFields[1] =
VMContext.getConstantInt(UnsignedLongTy,BlockLiteralSize);
llvm::Constant *DescriptorStruct =
llvm::ConstantStruct::get(&DescriptorFields[0], 2);
VMContext.getConstantStruct(&DescriptorFields[0], 2);
llvm::GlobalVariable *Descriptor =
new llvm::GlobalVariable(getModule(), DescriptorStruct->getType(), true,
@ -539,7 +542,7 @@ BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
// Flags
LiteralFields[1] =
llvm::ConstantInt::get(IntTy, BLOCK_IS_GLOBAL | BLOCK_HAS_DESCRIPTOR);
VMContext.getConstantInt(IntTy, BLOCK_IS_GLOBAL | BLOCK_HAS_DESCRIPTOR);
// Reserved
LiteralFields[2] = getModule().getContext().getNullValue(IntTy);
@ -551,7 +554,7 @@ BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
LiteralFields[4] = Descriptor;
llvm::Constant *BlockLiteralStruct =
llvm::ConstantStruct::get(&LiteralFields[0], 5);
VMContext.getConstantStruct(&LiteralFields[0], 5);
llvm::GlobalVariable *BlockLiteral =
new llvm::GlobalVariable(getModule(), BlockLiteralStruct->getType(), true,
@ -685,7 +688,7 @@ uint64_t BlockFunction::getBlockOffset(const BlockDeclRefExpr *BDRE) {
uint64_t Pad = BlockOffset - OldOffset;
if (Pad) {
llvm::ArrayType::get(llvm::Type::Int8Ty, Pad);
VMContext.getArrayType(llvm::Type::Int8Ty, Pad);
QualType PadTy = getContext().getConstantArrayType(getContext().CharTy,
llvm::APInt(32, Pad),
ArrayType::Normal, 0);
@ -749,13 +752,13 @@ GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
if (NoteForHelperp) {
std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;
PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
PtrPtrT = VMContext.getPointerType(VMContext.getPointerType(T, 0), 0);
SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
SrcObj = Builder.CreateLoad(SrcObj);
llvm::Value *DstObj = CGF.GetAddrOfLocalVar(Dst);
llvm::Type *PtrPtrT;
PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
PtrPtrT = VMContext.getPointerType(VMContext.getPointerType(T, 0), 0);
DstObj = Builder.CreateBitCast(DstObj, PtrPtrT);
DstObj = Builder.CreateLoad(DstObj);
@ -768,13 +771,13 @@ GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
llvm::Value *Srcv = SrcObj;
Srcv = Builder.CreateStructGEP(Srcv, index);
Srcv = Builder.CreateBitCast(Srcv,
llvm::PointerType::get(PtrToInt8Ty, 0));
VMContext.getPointerType(PtrToInt8Ty, 0));
Srcv = Builder.CreateLoad(Srcv);
llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index);
Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty);
llvm::Value *N = llvm::ConstantInt::get(llvm::Type::Int32Ty, flag);
llvm::Value *N = VMContext.getConstantInt(llvm::Type::Int32Ty, flag);
llvm::Value *F = getBlockObjectAssign();
Builder.CreateCall3(F, Dstv, Srcv, N);
}
@ -783,7 +786,7 @@ GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
CGF.FinishFunction();
return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
return VMContext.getConstantExprBitCast(Fn, PtrToInt8Ty);
}
llvm::Constant *BlockFunction::
@ -829,7 +832,7 @@ GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
llvm::Type *PtrPtrT;
PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
PtrPtrT = VMContext.getPointerType(VMContext.getPointerType(T, 0), 0);
SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
SrcObj = Builder.CreateLoad(SrcObj);
@ -842,7 +845,7 @@ GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
llvm::Value *Srcv = SrcObj;
Srcv = Builder.CreateStructGEP(Srcv, index);
Srcv = Builder.CreateBitCast(Srcv,
llvm::PointerType::get(PtrToInt8Ty, 0));
VMContext.getPointerType(PtrToInt8Ty, 0));
Srcv = Builder.CreateLoad(Srcv);
BuildBlockRelease(Srcv, flag);
@ -852,7 +855,7 @@ GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
CGF.FinishFunction();
return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
return VMContext.getConstantExprBitCast(Fn, PtrToInt8Ty);
}
llvm::Constant *BlockFunction::BuildCopyHelper(const llvm::StructType *T,
@ -910,7 +913,7 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
// dst->x
llvm::Value *V = CGF.GetAddrOfLocalVar(Dst);
V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
V = Builder.CreateBitCast(V, VMContext.getPointerType(T, 0));
V = Builder.CreateLoad(V);
V = Builder.CreateStructGEP(V, 6, "x");
llvm::Value *DstObj = Builder.CreateBitCast(V, PtrToInt8Ty);
@ -920,18 +923,18 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
V = Builder.CreateLoad(V);
V = Builder.CreateBitCast(V, T);
V = Builder.CreateStructGEP(V, 6, "x");
V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
V = Builder.CreateBitCast(V, VMContext.getPointerType(PtrToInt8Ty, 0));
llvm::Value *SrcObj = Builder.CreateLoad(V);
flag |= BLOCK_BYREF_CALLER;
llvm::Value *N = llvm::ConstantInt::get(llvm::Type::Int32Ty, flag);
llvm::Value *N = VMContext.getConstantInt(llvm::Type::Int32Ty, flag);
llvm::Value *F = getBlockObjectAssign();
Builder.CreateCall3(F, DstObj, SrcObj, N);
CGF.FinishFunction();
return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
return VMContext.getConstantExprBitCast(Fn, PtrToInt8Ty);
}
llvm::Constant *
@ -972,17 +975,17 @@ BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
llvm::Value *V = CGF.GetAddrOfLocalVar(Src);
V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
V = Builder.CreateBitCast(V, VMContext.getPointerType(T, 0));
V = Builder.CreateLoad(V);
V = Builder.CreateStructGEP(V, 6, "x");
V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
V = Builder.CreateBitCast(V, VMContext.getPointerType(PtrToInt8Ty, 0));
V = Builder.CreateLoad(V);
flag |= BLOCK_BYREF_CALLER;
BuildBlockRelease(V, flag);
CGF.FinishFunction();
return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
return VMContext.getConstantExprBitCast(Fn, PtrToInt8Ty);
}
llvm::Constant *BlockFunction::BuildbyrefCopyHelper(const llvm::Type *T,
@ -1025,7 +1028,7 @@ llvm::Value *BlockFunction::getBlockObjectDispose() {
const llvm::Type *ResultType = llvm::Type::VoidTy;
ArgTys.push_back(PtrToInt8Ty);
ArgTys.push_back(llvm::Type::Int32Ty);
FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
FTy = VMContext.getFunctionType(ResultType, ArgTys, false);
CGM.BlockObjectDispose
= CGM.CreateRuntimeFunction(FTy, "_Block_object_dispose");
}
@ -1040,7 +1043,7 @@ llvm::Value *BlockFunction::getBlockObjectAssign() {
ArgTys.push_back(PtrToInt8Ty);
ArgTys.push_back(PtrToInt8Ty);
ArgTys.push_back(llvm::Type::Int32Ty);
FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
FTy = VMContext.getFunctionType(ResultType, ArgTys, false);
CGM.BlockObjectAssign
= CGM.CreateRuntimeFunction(FTy, "_Block_object_assign");
}
@ -1051,7 +1054,7 @@ void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) {
llvm::Value *F = getBlockObjectDispose();
llvm::Value *N;
V = Builder.CreateBitCast(V, PtrToInt8Ty);
N = llvm::ConstantInt::get(llvm::Type::Int32Ty, flag);
N = VMContext.getConstantInt(llvm::Type::Int32Ty, flag);
Builder.CreateCall2(F, V, N);
}
@ -1059,8 +1062,8 @@ ASTContext &BlockFunction::getContext() const { return CGM.getContext(); }
BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf,
CGBuilderTy &B)
: CGM(cgm), CGF(cgf), Builder(B) {
PtrToInt8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
: CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()), Builder(B) {
PtrToInt8Ty = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
BlockHasCopyDispose = false;
}

View file

@ -16,6 +16,7 @@
#include "CodeGenTypes.h"
#include "clang/AST/Type.h"
#include "llvm/Module.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "clang/Basic/TargetInfo.h"
@ -38,6 +39,7 @@ namespace llvm {
class TargetData;
class FunctionType;
class Value;
class LLVMContext;
}
namespace clang {
@ -63,6 +65,7 @@ class BlockModule : public BlockBase {
const llvm::TargetData &TheTargetData;
CodeGenTypes &Types;
CodeGenModule &CGM;
llvm::LLVMContext &VMContext;
ASTContext &getContext() const { return Context; }
llvm::Module &getModule() const { return TheModule; }
@ -104,7 +107,7 @@ public:
BlockModule(ASTContext &C, llvm::Module &M, const llvm::TargetData &TD,
CodeGenTypes &T, CodeGenModule &CodeGen)
: Context(C), TheModule(M), TheTargetData(TD), Types(T),
CGM(CodeGen),
CGM(CodeGen), VMContext(M.getContext()),
NSConcreteGlobalBlock(0), NSConcreteStackBlock(0), BlockDescriptorType(0),
GenericBlockLiteralType(0), GenericExtendedBlockLiteralType(0),
BlockObjectAssign(0), BlockObjectDispose(0) {
@ -118,6 +121,9 @@ class BlockFunction : public BlockBase {
CodeGenFunction &CGF;
ASTContext &getContext() const;
protected:
llvm::LLVMContext &VMContext;
public:
const llvm::Type *PtrToInt8Ty;
struct HelperInfo {

View file

@ -63,9 +63,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Expr::EvalResult Result;
if (E->Evaluate(Result, CGM.getContext())) {
if (Result.Val.isInt())
return RValue::get(llvm::ConstantInt::get(Result.Val.getInt()));
return RValue::get(VMContext.getConstantInt(Result.Val.getInt()));
else if (Result.Val.isFloat())
return RValue::get(llvm::ConstantFP::get(Result.Val.getFloat()));
return RValue::get(VMContext.getConstantFP(Result.Val.getFloat()));
}
switch (BuiltinID) {
@ -77,7 +77,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_va_end: {
Value *ArgValue = EmitVAListRef(E->getArg(0));
const llvm::Type *DestType =
llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
if (ArgValue->getType() != DestType)
ArgValue = Builder.CreateBitCast(ArgValue, DestType,
ArgValue->getNameStart());
@ -91,7 +91,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *SrcPtr = EmitVAListRef(E->getArg(1));
const llvm::Type *Type =
llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
DstPtr = Builder.CreateBitCast(DstPtr, Type);
SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
@ -104,7 +104,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
Value *CmpResult =
Builder.CreateICmpSGE(ArgValue,
getLLVMContext().getNullValue(ArgValue->getType()),
VMContext.getNullValue(ArgValue->getType()),
"abscond");
Value *Result =
Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");
@ -150,8 +150,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
const llvm::Type *ResultType = ConvertType(E->getType());
Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
ConstantInt::get(ArgType, 1), "tmp");
Value *Zero = getLLVMContext().getNullValue(ArgType);
VMContext.getConstantInt(ArgType, 1), "tmp");
Value *Zero = VMContext.getNullValue(ArgType);
Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
if (Result->getType() != ResultType)
@ -169,7 +169,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
const llvm::Type *ResultType = ConvertType(E->getType());
Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp");
Value *Result = Builder.CreateAnd(Tmp, ConstantInt::get(ArgType, 1),
Value *Result = Builder.CreateAnd(Tmp, VMContext.getConstantInt(ArgType, 1),
"tmp");
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, "cast");
@ -206,15 +206,16 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
const llvm::Type *ResType = ConvertType(E->getType());
// bool UseSubObject = TypeArg.getZExtValue() & 1;
bool UseMinimum = TypeArg.getZExtValue() & 2;
return RValue::get(ConstantInt::get(ResType, UseMinimum ? 0 : -1LL));
return RValue::get(
VMContext.getConstantInt(ResType, UseMinimum ? 0 : -1LL));
}
case Builtin::BI__builtin_prefetch: {
Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
// FIXME: Technically these constants should of type 'int', yes?
RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
ConstantInt::get(llvm::Type::Int32Ty, 0);
VMContext.getConstantInt(llvm::Type::Int32Ty, 0);
Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
ConstantInt::get(llvm::Type::Int32Ty, 3);
VMContext.getConstantInt(llvm::Type::Int32Ty, 3);
Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0);
return RValue::get(Builder.CreateCall3(F, Address, RW, Locality));
}
@ -279,9 +280,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_bzero: {
Value *Address = EmitScalarExpr(E->getArg(0));
Builder.CreateCall4(CGM.getMemSetFn(), Address,
llvm::ConstantInt::get(llvm::Type::Int8Ty, 0),
VMContext.getConstantInt(llvm::Type::Int8Ty, 0),
EmitScalarExpr(E->getArg(1)),
llvm::ConstantInt::get(llvm::Type::Int32Ty, 1));
VMContext.getConstantInt(llvm::Type::Int32Ty, 1));
return RValue::get(Address);
}
case Builtin::BI__builtin_memcpy: {
@ -289,7 +290,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Builder.CreateCall4(CGM.getMemCpyFn(), Address,
EmitScalarExpr(E->getArg(1)),
EmitScalarExpr(E->getArg(2)),
llvm::ConstantInt::get(llvm::Type::Int32Ty, 1));
VMContext.getConstantInt(llvm::Type::Int32Ty, 1));
return RValue::get(Address);
}
case Builtin::BI__builtin_memmove: {
@ -297,7 +298,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Builder.CreateCall4(CGM.getMemMoveFn(), Address,
EmitScalarExpr(E->getArg(1)),
EmitScalarExpr(E->getArg(2)),
llvm::ConstantInt::get(llvm::Type::Int32Ty, 1));
VMContext.getConstantInt(llvm::Type::Int32Ty, 1));
return RValue::get(Address);
}
case Builtin::BI__builtin_memset: {
@ -306,7 +307,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
llvm::Type::Int8Ty),
EmitScalarExpr(E->getArg(2)),
llvm::ConstantInt::get(llvm::Type::Int32Ty, 1));
VMContext.getConstantInt(llvm::Type::Int32Ty, 1));
return RValue::get(Address);
}
case Builtin::BI__builtin_return_address: {
@ -480,7 +481,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
{
const llvm::Type *ResType[2];
ResType[0]= ConvertType(E->getArg(1)->getType());
ResType[1] = llvm::PointerType::getUnqual(ResType[0]);
ResType[1] = VMContext.getPointerTypeUnqual(ResType[0]);
Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
Value *OldVal = EmitScalarExpr(E->getArg(1));
Value *PrevVal = Builder.CreateCall3(AtomF,
@ -506,14 +507,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Ptr = EmitScalarExpr(E->getArg(0));
const llvm::Type *ElTy =
cast<llvm::PointerType>(Ptr->getType())->getElementType();
Builder.CreateStore(getLLVMContext().getNullValue(ElTy), Ptr, true);
Builder.CreateStore(VMContext.getNullValue(ElTy), Ptr, true);
return RValue::get(0);
}
case Builtin::BI__sync_synchronize: {
Value *C[5];
C[0] = C[1] = C[2] = C[3] = llvm::ConstantInt::get(llvm::Type::Int1Ty, 1);
C[4] = ConstantInt::get(llvm::Type::Int1Ty, 0);
C[0] = C[1] = C[2] = C[3] = VMContext.getConstantInt(llvm::Type::Int1Ty, 1);
C[4] = VMContext.getConstantInt(llvm::Type::Int1Ty, 0);
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5);
return RValue::get(0);
}
@ -603,7 +604,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// Unknown builtin, for now just dump it out and return undef.
if (hasAggregateLLVMType(E->getType()))
return RValue::getAggregate(CreateTempAlloca(ConvertType(E->getType())));
return RValue::get(UndefValue::get(ConvertType(E->getType())));
return RValue::get(VMContext.getUndef(ConvertType(E->getType())));
}
Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
@ -635,9 +636,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_psrlqi128:
case X86::BI__builtin_ia32_psrlwi128: {
Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::Int64Ty, "zext");
const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::Int64Ty, 2);
llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty),
const llvm::Type *Ty = VMContext.getVectorType(llvm::Type::Int64Ty, 2);
llvm::Value *Zero = VMContext.getConstantInt(llvm::Type::Int32Ty, 0);
Ops[1] = Builder.CreateInsertElement(VMContext.getUndef(Ty),
Ops[1], Zero, "insert");
Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast");
const char *name = 0;
@ -690,7 +691,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_psrlqi:
case X86::BI__builtin_ia32_psrlwi: {
Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::Int64Ty, "zext");
const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::Int64Ty, 1);
const llvm::Type *Ty = VMContext.getVectorType(llvm::Type::Int64Ty, 1);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
const char *name = 0;
Intrinsic::ID ID = Intrinsic::not_intrinsic;
@ -742,16 +743,16 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss");
}
case X86::BI__builtin_ia32_ldmxcsr: {
llvm::Type *PtrTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
Value *One = llvm::ConstantInt::get(llvm::Type::Int32Ty, 1);
llvm::Type *PtrTy = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
Value *One = VMContext.getConstantInt(llvm::Type::Int32Ty, 1);
Value *Tmp = Builder.CreateAlloca(llvm::Type::Int32Ty, One, "tmp");
Builder.CreateStore(Ops[0], Tmp);
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
Builder.CreateBitCast(Tmp, PtrTy));
}
case X86::BI__builtin_ia32_stmxcsr: {
llvm::Type *PtrTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
Value *One = llvm::ConstantInt::get(llvm::Type::Int32Ty, 1);
llvm::Type *PtrTy = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
Value *One = VMContext.getConstantInt(llvm::Type::Int32Ty, 1);
Value *Tmp = Builder.CreateAlloca(llvm::Type::Int32Ty, One, "tmp");
One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
Builder.CreateBitCast(Tmp, PtrTy));
@ -768,15 +769,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_storehps:
case X86::BI__builtin_ia32_storelps: {
const llvm::Type *EltTy = llvm::Type::Int64Ty;
llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy);
llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
llvm::Type *PtrTy = VMContext.getPointerTypeUnqual(EltTy);
llvm::Type *VecTy = VMContext.getVectorType(EltTy, 2);
// cast val v2i64
Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
// extract (0, 1)
unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, Index);
llvm::Value *Idx = VMContext.getConstantInt(llvm::Type::Int32Ty, Index);
Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");
// cast pointer to i64 & store

View file

@ -40,16 +40,16 @@ CodeGenFunction::GenerateStaticCXXBlockVarDeclInit(const VarDecl &D,
llvm::GlobalValue *GuardV =
new llvm::GlobalVariable(CGM.getModule(), llvm::Type::Int64Ty, false,
GV->getLinkage(),
getLLVMContext().getNullValue(llvm::Type::Int64Ty),
VMContext.getNullValue(llvm::Type::Int64Ty),
GuardVName.c_str());
// Load the first byte of the guard variable.
const llvm::Type *PtrTy = llvm::PointerType::get(llvm::Type::Int8Ty, 0);
const llvm::Type *PtrTy = VMContext.getPointerType(llvm::Type::Int8Ty, 0);
llvm::Value *V = Builder.CreateLoad(Builder.CreateBitCast(GuardV, PtrTy),
"tmp");
// Compare it against 0.
llvm::Value *nullValue = getLLVMContext().getNullValue(llvm::Type::Int8Ty);
llvm::Value *nullValue = VMContext.getNullValue(llvm::Type::Int8Ty);
llvm::Value *ICmp = Builder.CreateICmpEQ(V, nullValue , "tobool");
llvm::BasicBlock *InitBlock = createBasicBlock("init");
@ -70,7 +70,7 @@ CodeGenFunction::GenerateStaticCXXBlockVarDeclInit(const VarDecl &D,
EmitAggExpr(Init, GV, D.getType().isVolatileQualified());
}
Builder.CreateStore(llvm::ConstantInt::get(llvm::Type::Int8Ty, 1),
Builder.CreateStore(VMContext.getConstantInt(llvm::Type::Int8Ty, 1),
Builder.CreateBitCast(GuardV, PtrTy));
EmitBlock(EndBlock);
@ -191,7 +191,7 @@ CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
if (E->isArray()) {
ErrorUnsupported(E, "new[] expression");
return llvm::UndefValue::get(ConvertType(E->getType()));
return VMContext.getUndef(ConvertType(E->getType()));
}
QualType AllocType = E->getAllocatedType();
@ -203,7 +203,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// The allocation size is the first argument.
QualType SizeTy = getContext().getSizeType();
llvm::Value *AllocSize =
llvm::ConstantInt::get(ConvertType(SizeTy),
VMContext.getConstantInt(ConvertType(SizeTy),
getContext().getTypeSize(AllocType) / 8);
NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));
@ -267,7 +267,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
llvm::Value *IsNull =
Builder.CreateICmpEQ(NewPtr,
getLLVMContext().getNullValue(NewPtr->getType()),
VMContext.getNullValue(NewPtr->getType()),
"isnull");
Builder.CreateCondBr(IsNull, NewNull, NewNotNull);
@ -308,7 +308,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
PHI->reserveOperandSpace(2);
PHI->addIncoming(NewPtr, NewNotNull);
PHI->addIncoming(getLLVMContext().getNullValue(NewPtr->getType()), NewNull);
PHI->addIncoming(VMContext.getNullValue(NewPtr->getType()), NewNull);
NewPtr = PHI;
}

View file

@ -118,7 +118,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
FunctionInfos.InsertNode(FI, InsertPos);
// Compute ABI information.
getABIInfo().computeInfo(*FI, getContext());
getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext());
return *FI;
}

View file

@ -106,7 +106,7 @@ CodeGenFunction::CreateStaticBlockVarDecl(const VarDecl &D,
const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
return new llvm::GlobalVariable(CGM.getModule(), LTy,
Ty.isConstant(getContext()), Linkage,
getLLVMContext().getNullValue(LTy), Name,
VMContext.getNullValue(LTy), Name,
0, D.isThreadSpecified(),
Ty.getAddressSpace());
}
@ -161,7 +161,7 @@ void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D) {
// Replace all uses of the old global with the new global
llvm::Constant *NewPtrForOldDecl =
llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
VMContext.getConstantExprBitCast(GV, OldGV->getType());
OldGV->replaceAllUsesWith(NewPtrForOldDecl);
// Erase the old global, since it is no longer used.
@ -194,8 +194,8 @@ void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D) {
// RAUW's the GV uses of this constant will be invalid.
const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
const llvm::Type *LPtrTy =
llvm::PointerType::get(LTy, D.getType().getAddressSpace());
DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy);
VMContext.getPointerType(LTy, D.getType().getAddressSpace());
DMEntry = VMContext.getConstantExprBitCast(GV, LPtrTy);
// Emit global variable debug descriptor for static vars.
CGDebugInfo *DI = getDebugInfo();
@ -225,7 +225,7 @@ const llvm::Type *CodeGenFunction::BuildByRefType(QualType Ty,
bool needsCopyDispose = BlockRequiresCopying(Ty);
std::vector<const llvm::Type *> Types(needsCopyDispose*2+5);
const llvm::PointerType *PtrToInt8Ty
= llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
= VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
Types[0] = PtrToInt8Ty;
Types[1] = PtrToInt8Ty;
Types[2] = llvm::Type::Int32Ty;
@ -238,7 +238,7 @@ const llvm::Type *CodeGenFunction::BuildByRefType(QualType Ty,
assert((Align <= unsigned(Target.getPointerAlign(0))/8)
&& "Can't align more than pointer yet");
Types[needsCopyDispose*2 + 4] = LTy;
return llvm::StructType::get(Types, false);
return VMContext.getStructType(Types, false);
}
/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
@ -279,7 +279,8 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
} else {
if (!DidCallStackSave) {
// Save the stack.
const llvm::Type *LTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
const llvm::Type *LTy =
VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
@ -302,7 +303,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
// Get the element type.
const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
const llvm::Type *LElemPtrTy =
llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());
VMContext.getPointerType(LElemTy, D.getType().getAddressSpace());
llvm::Value *VLASize = EmitVLASize(Ty);
@ -358,7 +359,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
}
if (isByRef) {
const llvm::PointerType *PtrToInt8Ty
= llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
= VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
@ -385,19 +386,19 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
int isa = 0;
if (flag&BLOCK_FIELD_IS_WEAK)
isa = 1;
V = llvm::ConstantInt::get(llvm::Type::Int32Ty, isa);
V = VMContext.getConstantInt(llvm::Type::Int32Ty, isa);
V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
Builder.CreateStore(V, isa_field);
V = Builder.CreateBitCast(DeclPtr, PtrToInt8Ty, "forwarding");
Builder.CreateStore(V, forwarding_field);
V = llvm::ConstantInt::get(llvm::Type::Int32Ty, flags);
V = VMContext.getConstantInt(llvm::Type::Int32Ty, flags);
Builder.CreateStore(V, flags_field);
const llvm::Type *V1;
V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
V = llvm::ConstantInt::get(llvm::Type::Int32Ty,
V = VMContext.getConstantInt(llvm::Type::Int32Ty,
(CGM.getTargetData().getTypeStoreSizeInBits(V1)
/ 8));
Builder.CreateStore(V, size_field);

View file

@ -31,7 +31,7 @@ llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty,
const char *Name) {
if (!Builder.isNamePreserving())
Name = "";
return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
return new llvm::AllocaInst(VMContext, Ty, 0, Name, AllocaInsertPt);
}
/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
@ -121,13 +121,13 @@ RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
return RValue::get(0);
} else if (const ComplexType *CTy = Ty->getAsComplexType()) {
const llvm::Type *EltTy = ConvertType(CTy->getElementType());
llvm::Value *U = llvm::UndefValue::get(EltTy);
llvm::Value *U = VMContext.getUndef(EltTy);
return RValue::getComplex(std::make_pair(U, U));
} else if (hasAggregateLLVMType(Ty)) {
const llvm::Type *LTy = llvm::PointerType::getUnqual(ConvertType(Ty));
return RValue::getAggregate(llvm::UndefValue::get(LTy));
const llvm::Type *LTy = VMContext.getPointerTypeUnqual(ConvertType(Ty));
return RValue::getAggregate(VMContext.getUndef(LTy));
} else {
return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
return RValue::get(VMContext.getUndef(ConvertType(Ty)));
}
}
@ -140,8 +140,8 @@ RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
const char *Name) {
ErrorUnsupported(E, Name);
llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
return LValue::MakeAddr(llvm::UndefValue::get(Ty),
llvm::Type *Ty = VMContext.getPointerTypeUnqual(ConvertType(E->getType()));
return LValue::MakeAddr(VMContext.getUndef(Ty),
E->getType().getCVRQualifiers(),
getContext().getObjCGCAttrKind(E->getType()));
}
@ -253,7 +253,7 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
if (DstPtr->getElementType() != SrcTy) {
const llvm::Type *MemTy =
llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
VMContext.getPointerType(SrcTy, DstPtr->getAddressSpace());
Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
}
}
@ -328,19 +328,19 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
// Shift to proper location.
if (StartBit)
Val = Builder.CreateLShr(Val, llvm::ConstantInt::get(EltTy, StartBit),
Val = Builder.CreateLShr(Val, VMContext.getConstantInt(EltTy, StartBit),
"bf.lo");
// Mask off unused bits.
llvm::Constant *LowMask =
llvm::ConstantInt::get(llvm::APInt::getLowBitsSet(EltTySize, LowBits));
VMContext.getConstantInt(llvm::APInt::getLowBitsSet(EltTySize, LowBits));
Val = Builder.CreateAnd(Val, LowMask, "bf.lo.cleared");
// Fetch the high bits if necessary.
if (LowBits < BitfieldSize) {
unsigned HighBits = BitfieldSize - LowBits;
llvm::Value *HighPtr =
Builder.CreateGEP(Ptr, llvm::ConstantInt::get(llvm::Type::Int32Ty, 1),
Builder.CreateGEP(Ptr, VMContext.getConstantInt(llvm::Type::Int32Ty, 1),
"bf.ptr.hi");
llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
LV.isVolatileQualified(),
@ -348,18 +348,18 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
// Mask off unused bits.
llvm::Constant *HighMask =
llvm::ConstantInt::get(llvm::APInt::getLowBitsSet(EltTySize, HighBits));
VMContext.getConstantInt(llvm::APInt::getLowBitsSet(EltTySize, HighBits));
HighVal = Builder.CreateAnd(HighVal, HighMask, "bf.lo.cleared");
// Shift to proper location and or in to bitfield value.
HighVal = Builder.CreateShl(HighVal,
llvm::ConstantInt::get(EltTy, LowBits));
VMContext.getConstantInt(EltTy, LowBits));
Val = Builder.CreateOr(Val, HighVal, "bf.val");
}
// Sign extend if necessary.
if (LV.isBitfieldSigned()) {
llvm::Value *ExtraBits = llvm::ConstantInt::get(EltTy,
llvm::Value *ExtraBits = VMContext.getConstantInt(EltTy,
EltTySize - BitfieldSize);
Val = Builder.CreateAShr(Builder.CreateShl(Val, ExtraBits),
ExtraBits, "bf.val.sext");
@ -396,7 +396,7 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
const VectorType *ExprVT = ExprType->getAsVectorType();
if (!ExprVT) {
unsigned InIdx = getAccessedFieldNo(0, Elts);
llvm::Value *Elt = llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx);
llvm::Value *Elt = VMContext.getConstantInt(llvm::Type::Int32Ty, InIdx);
return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp"));
}
@ -406,12 +406,12 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
llvm::SmallVector<llvm::Constant*, 4> Mask;
for (unsigned i = 0; i != NumResultElts; ++i) {
unsigned InIdx = getAccessedFieldNo(i, Elts);
Mask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx));
Mask.push_back(VMContext.getConstantInt(llvm::Type::Int32Ty, InIdx));
}
llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
llvm::Value *MaskV = VMContext.getConstantVector(&Mask[0], Mask.size());
Vec = Builder.CreateShuffleVector(Vec,
llvm::UndefValue::get(Vec->getType()),
VMContext.getUndef(Vec->getType()),
MaskV, "tmp");
return RValue::get(Vec);
}
@ -501,7 +501,7 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
llvm::Value *SrcVal = Src.getScalarVal();
llvm::Value *NewVal = Builder.CreateIntCast(SrcVal, EltTy, false, "tmp");
llvm::Constant *Mask =
llvm::ConstantInt::get(llvm::APInt::getLowBitsSet(EltTySize, BitfieldSize));
VMContext.getConstantInt(llvm::APInt::getLowBitsSet(EltTySize, BitfieldSize));
NewVal = Builder.CreateAnd(NewVal, Mask, "bf.value");
// Return the new value of the bit-field, if requested.
@ -514,7 +514,7 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// Sign extend if necessary.
if (Dst.isBitfieldSigned()) {
unsigned SrcTySize = CGM.getTargetData().getTypeSizeInBits(SrcTy);
llvm::Value *ExtraBits = llvm::ConstantInt::get(SrcTy,
llvm::Value *ExtraBits = VMContext.getConstantInt(SrcTy,
SrcTySize - BitfieldSize);
SrcTrunc = Builder.CreateAShr(Builder.CreateShl(SrcTrunc, ExtraBits),
ExtraBits, "bf.reload.sext");
@ -532,14 +532,14 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// Compute the mask for zero-ing the low part of this bitfield.
llvm::Constant *InvMask =
llvm::ConstantInt::get(~llvm::APInt::getBitsSet(EltTySize, StartBit,
VMContext.getConstantInt(~llvm::APInt::getBitsSet(EltTySize, StartBit,
StartBit + LowBits));
// Compute the new low part as
// LowVal = (LowVal & InvMask) | (NewVal << StartBit),
// with the shift of NewVal implicitly stripping the high bits.
llvm::Value *NewLowVal =
Builder.CreateShl(NewVal, llvm::ConstantInt::get(EltTy, StartBit),
Builder.CreateShl(NewVal, VMContext.getConstantInt(EltTy, StartBit),
"bf.value.lo");
LowVal = Builder.CreateAnd(LowVal, InvMask, "bf.prev.lo.cleared");
LowVal = Builder.CreateOr(LowVal, NewLowVal, "bf.new.lo");
@ -551,7 +551,7 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
if (LowBits < BitfieldSize) {
unsigned HighBits = BitfieldSize - LowBits;
llvm::Value *HighPtr =
Builder.CreateGEP(Ptr, llvm::ConstantInt::get(llvm::Type::Int32Ty, 1),
Builder.CreateGEP(Ptr, VMContext.getConstantInt(llvm::Type::Int32Ty, 1),
"bf.ptr.hi");
llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
Dst.isVolatileQualified(),
@ -559,14 +559,15 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// Compute the mask for zero-ing the high part of this bitfield.
llvm::Constant *InvMask =
llvm::ConstantInt::get(~llvm::APInt::getLowBitsSet(EltTySize, HighBits));
VMContext.getConstantInt(~llvm::APInt::getLowBitsSet(EltTySize,
HighBits));
// Compute the new high part as
// HighVal = (HighVal & InvMask) | (NewVal lshr LowBits),
// where the high bits of NewVal have already been cleared and the
// shift stripping the low bits.
llvm::Value *NewHighVal =
Builder.CreateLShr(NewVal, llvm::ConstantInt::get(EltTy, LowBits),
Builder.CreateLShr(NewVal, VMContext.getConstantInt(EltTy, LowBits),
"bf.value.high");
HighVal = Builder.CreateAnd(HighVal, InvMask, "bf.prev.hi.cleared");
HighVal = Builder.CreateOr(HighVal, NewHighVal, "bf.new.hi");
@ -610,12 +611,12 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
llvm::SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
for (unsigned i = 0; i != NumSrcElts; ++i) {
unsigned InIdx = getAccessedFieldNo(i, Elts);
Mask[InIdx] = llvm::ConstantInt::get(llvm::Type::Int32Ty, i);
Mask[InIdx] = VMContext.getConstantInt(llvm::Type::Int32Ty, i);
}
llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
llvm::Value *MaskV = VMContext.getConstantVector(&Mask[0], Mask.size());
Vec = Builder.CreateShuffleVector(SrcVal,
llvm::UndefValue::get(Vec->getType()),
VMContext.getUndef(Vec->getType()),
MaskV, "tmp");
}
else if (NumDstElts > NumSrcElts) {
@ -626,26 +627,26 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
llvm::SmallVector<llvm::Constant*, 4> ExtMask;
unsigned i;
for (i = 0; i != NumSrcElts; ++i)
ExtMask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, i));
ExtMask.push_back(VMContext.getConstantInt(llvm::Type::Int32Ty, i));
for (; i != NumDstElts; ++i)
ExtMask.push_back(llvm::UndefValue::get(llvm::Type::Int32Ty));
llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0],
ExtMask.push_back(VMContext.getUndef(llvm::Type::Int32Ty));
llvm::Value *ExtMaskV = VMContext.getConstantVector(&ExtMask[0],
ExtMask.size());
llvm::Value *ExtSrcVal =
Builder.CreateShuffleVector(SrcVal,
llvm::UndefValue::get(SrcVal->getType()),
VMContext.getUndef(SrcVal->getType()),
ExtMaskV, "tmp");
// build identity
llvm::SmallVector<llvm::Constant*, 4> Mask;
for (unsigned i = 0; i != NumDstElts; ++i) {
Mask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, i));
Mask.push_back(VMContext.getConstantInt(llvm::Type::Int32Ty, i));
}
// modify when what gets shuffled in
for (unsigned i = 0; i != NumSrcElts; ++i) {
unsigned Idx = getAccessedFieldNo(i, Elts);
Mask[Idx] =llvm::ConstantInt::get(llvm::Type::Int32Ty, i+NumDstElts);
Mask[Idx] = VMContext.getConstantInt(llvm::Type::Int32Ty, i+NumDstElts);
}
llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
llvm::Value *MaskV = VMContext.getConstantVector(&Mask[0], Mask.size());
Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp");
}
else {
@ -655,7 +656,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
} else {
// If the Src is a scalar (not a vector) it must be updating one element.
unsigned InIdx = getAccessedFieldNo(0, Elts);
llvm::Value *Elt = llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx);
llvm::Value *Elt = VMContext.getConstantInt(llvm::Type::Int32Ty, InIdx);
Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp");
}
@ -689,7 +690,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
bool needsCopyDispose = BlockRequiresCopying(VD->getType());
const llvm::Type *PtrStructTy = V->getType();
const llvm::Type *Ty = PtrStructTy;
Ty = llvm::PointerType::get(Ty, 0);
Ty = VMContext.getPointerType(Ty, 0);
V = Builder.CreateStructGEP(V, 1, "forwarding");
V = Builder.CreateBitCast(V, Ty);
V = Builder.CreateLoad(V, false);
@ -864,7 +865,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// Extend or truncate the index type to 32 or 64-bits.
unsigned IdxBitwidth = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
if (IdxBitwidth != LLVMPointerWidth)
Idx = Builder.CreateIntCast(Idx, llvm::IntegerType::get(LLVMPointerWidth),
Idx = Builder.CreateIntCast(Idx, VMContext.getIntegerType(LLVMPointerWidth),
IdxSigned, "idxprom");
// We know that the pointer points to a type of the correct size,
@ -880,18 +881,18 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
uint64_t BaseTypeSize = getContext().getTypeSize(BaseType) / 8;
Idx = Builder.CreateUDiv(Idx,
llvm::ConstantInt::get(Idx->getType(),
VMContext.getConstantInt(Idx->getType(),
BaseTypeSize));
Address = Builder.CreateGEP(Base, Idx, "arrayidx");
} else if (const ObjCInterfaceType *OIT =
dyn_cast<ObjCInterfaceType>(E->getType())) {
llvm::Value *InterfaceSize =
llvm::ConstantInt::get(Idx->getType(),
VMContext.getConstantInt(Idx->getType(),
getContext().getTypeSize(OIT) / 8);
Idx = Builder.CreateMul(Idx, InterfaceSize);
llvm::Type *i8PTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
llvm::Type *i8PTy = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy),
Idx, "arrayidx");
Address = Builder.CreateBitCast(Address, Base->getType());
@ -913,13 +914,14 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
}
static
llvm::Constant *GenerateConstantVector(llvm::SmallVector<unsigned, 4> &Elts) {
llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
llvm::SmallVector<unsigned, 4> &Elts) {
llvm::SmallVector<llvm::Constant *, 4> CElts;
for (unsigned i = 0, e = Elts.size(); i != e; ++i)
CElts.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, Elts[i]));
CElts.push_back(VMContext.getConstantInt(llvm::Type::Int32Ty, Elts[i]));
return llvm::ConstantVector::get(&CElts[0], CElts.size());
return VMContext.getConstantVector(&CElts[0], CElts.size());
}
LValue CodeGenFunction::
@ -942,7 +944,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
E->getEncodedElementAccess(Indices);
if (Base.isSimple()) {
llvm::Constant *CV = GenerateConstantVector(Indices);
llvm::Constant *CV = GenerateConstantVector(VMContext, Indices);
return LValue::MakeExtVectorElt(Base.getAddress(), CV,
Base.getQualifiers());
}
@ -953,11 +955,11 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
if (isa<llvm::ConstantAggregateZero>(BaseElts))
CElts.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));
CElts.push_back(VMContext.getConstantInt(llvm::Type::Int32Ty, 0));
else
CElts.push_back(BaseElts->getOperand(Indices[i]));
}
llvm::Constant *CV = llvm::ConstantVector::get(&CElts[0], CElts.size());
llvm::Constant *CV = VMContext.getConstantVector(&CElts[0], CElts.size());
return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV,
Base.getQualifiers());
}
@ -1020,10 +1022,10 @@ LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
cast<llvm::PointerType>(BaseValue->getType());
unsigned AS = BaseTy->getAddressSpace();
BaseValue = Builder.CreateBitCast(BaseValue,
llvm::PointerType::get(FieldTy, AS),
VMContext.getPointerType(FieldTy, AS),
"tmp");
llvm::Value *V = Builder.CreateGEP(BaseValue,
llvm::ConstantInt::get(llvm::Type::Int32Ty, idx),
VMContext.getConstantInt(llvm::Type::Int32Ty, idx),
"tmp");
CodeGenTypes::BitFieldInfo bitFieldInfo =
@ -1052,7 +1054,7 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
cast<llvm::PointerType>(BaseValue->getType());
unsigned AS = BaseTy->getAddressSpace();
V = Builder.CreateBitCast(V,
llvm::PointerType::get(FieldTy, AS),
VMContext.getPointerType(FieldTy, AS),
"tmp");
}
if (Field->getType()->isReferenceType())

View file

@ -542,7 +542,7 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
// equal, but other compilers do this optimization, and almost every memcpy
// implementation handles this case safely. If there is a libc that does not
// safely handle this, we can add a target hook.
const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
const llvm::Type *BP = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
if (DestPtr->getType() != BP)
DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
if (SrcPtr->getType() != BP)
@ -552,7 +552,7 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
// FIXME: Handle variable sized types.
const llvm::Type *IntPtr = llvm::IntegerType::get(LLVMPointerWidth);
const llvm::Type *IntPtr = VMContext.getIntegerType(LLVMPointerWidth);
// FIXME: If we have a volatile struct, the optimizer can remove what might
// appear to be `extra' memory ops:
@ -569,7 +569,7 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
Builder.CreateCall4(CGM.getMemCpyFn(),
DestPtr, SrcPtr,
// TypeInfo.first describes size in bits.
llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
llvm::ConstantInt::get(llvm::Type::Int32Ty,
VMContext.getConstantInt(IntPtr, TypeInfo.first/8),
VMContext.getConstantInt(llvm::Type::Int32Ty,
TypeInfo.second/8));
}

View file

@ -367,6 +367,8 @@ ComplexPairTy ComplexExprEmitter::EmitCast(Expr *Op, QualType DestTy) {
ComplexPairTy ComplexExprEmitter::VisitPrePostIncDec(const UnaryOperator *E,
bool isInc, bool isPre) {
llvm::LLVMContext &VMContext = CGF.getLLVMContext();
LValue LV = CGF.EmitLValue(E->getSubExpr());
ComplexPairTy InVal = EmitLoadOfComplex(LV.getAddress(),
LV.isVolatileQualified());
@ -374,7 +376,7 @@ ComplexPairTy ComplexExprEmitter::VisitPrePostIncDec(const UnaryOperator *E,
llvm::Value *NextVal;
if (isa<llvm::IntegerType>(InVal.first->getType())) {
uint64_t AmountVal = isInc ? 1 : -1;
NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
NextVal = VMContext.getConstantInt(InVal.first->getType(), AmountVal, true);
// Add the inc/dec to the real part.
NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
@ -384,7 +386,7 @@ ComplexPairTy ComplexExprEmitter::VisitPrePostIncDec(const UnaryOperator *E,
llvm::APFloat FVal(CGF.getContext().getFloatTypeSemantics(ElemTy), 1);
if (!isInc)
FVal.changeSign();
NextVal = llvm::ConstantFP::get(FVal);
NextVal = VMContext.getConstantFP(FVal);
// Add the inc/dec to the real part.
NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");

View file

@ -31,9 +31,10 @@ class VISIBILITY_HIDDEN ConstExprEmitter :
public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
CodeGenModule &CGM;
CodeGenFunction *CGF;
llvm::LLVMContext &VMContext;
public:
ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
: CGM(cgm), CGF(cgf) {
: CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
}
//===--------------------------------------------------------------------===//
@ -106,18 +107,18 @@ public:
// Initialize remaining array elements.
// FIXME: This doesn't handle member pointers correctly!
for (; i < NumElements; ++i)
Elts.push_back(CGM.getLLVMContext().getNullValue(ElemTy));
Elts.push_back(VMContext.getNullValue(ElemTy));
if (RewriteType) {
// FIXME: Try to avoid packing the array
std::vector<const llvm::Type*> Types;
for (unsigned i = 0; i < Elts.size(); ++i)
Types.push_back(Elts[i]->getType());
const llvm::StructType *SType = llvm::StructType::get(Types, true);
return llvm::ConstantStruct::get(SType, Elts);
const llvm::StructType *SType = VMContext.getStructType(Types, true);
return VMContext.getConstantStruct(SType, Elts);
}
return llvm::ConstantArray::get(AType, Elts);
return VMContext.getConstantArray(AType, Elts);
}
void InsertBitfieldIntoStruct(std::vector<llvm::Constant*>& Elts,
@ -169,8 +170,9 @@ public:
unsigned curBits = std::min(8 - (fieldOffset & 7), bitsToInsert);
unsigned byte = V.getLoBits(curBits).getZExtValue() << (fieldOffset & 7);
do {
llvm::Constant* byteC = llvm::ConstantInt::get(llvm::Type::Int8Ty, byte);
Elts[i] = CGM.getLLVMContext().getConstantExprOr(Elts[i], byteC);
llvm::Constant* byteC =
VMContext.getConstantInt(llvm::Type::Int8Ty, byte);
Elts[i] = VMContext.getConstantExprOr(Elts[i], byteC);
++i;
V = V.lshr(curBits);
bitsToInsert -= curBits;
@ -193,7 +195,7 @@ public:
// FIXME: This doesn't handle member pointers correctly!
for (unsigned i = 0; i < SType->getNumElements(); ++i) {
const llvm::Type *FieldTy = SType->getElementType(i);
Elts.push_back(CGM.getLLVMContext().getNullValue(FieldTy));
Elts.push_back(VMContext.getNullValue(FieldTy));
}
// Copy initializer elements. Skip padding fields.
@ -223,10 +225,10 @@ public:
std::vector<const llvm::Type*> Types;
for (unsigned i = 0; i < Elts.size(); ++i)
Types.push_back(Elts[i]->getType());
SType = llvm::StructType::get(Types, true);
SType = VMContext.getStructType(Types, true);
}
return llvm::ConstantStruct::get(SType, Elts);
return VMContext.getConstantStruct(SType, Elts);
}
llvm::Constant *EmitUnion(llvm::Constant *C, const llvm::Type *Ty) {
@ -242,15 +244,15 @@ public:
unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty);
while (CurSize < TotalSize) {
Elts.push_back(CGM.getLLVMContext().getNullValue(llvm::Type::Int8Ty));
Elts.push_back(VMContext.getNullValue(llvm::Type::Int8Ty));
Types.push_back(llvm::Type::Int8Ty);
CurSize++;
}
// This always generates a packed struct
// FIXME: Try to generate an unpacked struct when we can
llvm::StructType* STy = llvm::StructType::get(Types, true);
return llvm::ConstantStruct::get(STy, Elts);
llvm::StructType* STy = VMContext.getStructType(Types, true);
return VMContext.getConstantStruct(STy, Elts);
}
llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
@ -268,20 +270,20 @@ public:
Field != FieldEnd; ++Field)
assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
return CGM.getLLVMContext().getNullValue(Ty);
return VMContext.getNullValue(Ty);
}
if (curField->isBitField()) {
// Create a dummy struct for bit-field insertion
unsigned NumElts = CGM.getTargetData().getTypeAllocSize(Ty);
llvm::Constant* NV =
CGM.getLLVMContext().getNullValue(llvm::Type::Int8Ty);
VMContext.getNullValue(llvm::Type::Int8Ty);
std::vector<llvm::Constant*> Elts(NumElts, NV);
InsertBitfieldIntoStruct(Elts, curField, ILE->getInit(0));
const llvm::ArrayType *RetTy =
llvm::ArrayType::get(NV->getType(), NumElts);
return llvm::ConstantArray::get(RetTy, Elts);
VMContext.getArrayType(NV->getType(), NumElts);
return VMContext.getConstantArray(RetTy, Elts);
}
llvm::Constant *InitElem;
@ -315,9 +317,9 @@ public:
}
for (; i < NumElements; ++i)
Elts.push_back(CGM.getLLVMContext().getNullValue(ElemTy));
Elts.push_back(VMContext.getNullValue(ElemTy));
return llvm::ConstantVector::get(VType, Elts);
return VMContext.getConstantVector(VType, Elts);
}
llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
@ -358,7 +360,7 @@ public:
// This must be a string initializing an array in a static initializer.
// Don't emit it as the address of the string, emit the string data itself
// as an inline array.
return llvm::ConstantArray::get(CGM.GetStringForStringLiteral(E), false);
return VMContext.getConstantArray(CGM.GetStringForStringLiteral(E), false);
}
llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
@ -372,7 +374,7 @@ public:
// Resize the string to the right size, adding zeros at the end, or
// truncating as needed.
Str.resize(CAT->getSize().getZExtValue(), '\0');
return llvm::ConstantArray::get(Str, false);
return VMContext.getConstantArray(Str, false);
}
llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
@ -426,7 +428,7 @@ public:
case Expr::ObjCStringLiteralClass: {
ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
llvm::Constant *C = CGM.getObjCRuntime().GenerateConstantString(SL);
return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
return VMContext.getConstantExprBitCast(C, ConvertType(E->getType()));
}
case Expr::PredefinedExprClass: {
// __func__/__FUNCTION__ -> "". __PRETTY_FUNCTION__ -> "top level".
@ -440,8 +442,8 @@ public:
case Expr::AddrLabelExprClass: {
assert(CGF && "Invalid address of label expression outside function.");
unsigned id = CGF->GetIDForAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
llvm::Constant *C = llvm::ConstantInt::get(llvm::Type::Int32Ty, id);
return llvm::ConstantExpr::getIntToPtr(C, ConvertType(E->getType()));
llvm::Constant *C = VMContext.getConstantInt(llvm::Type::Int32Ty, id);
return VMContext.getConstantExprIntToPtr(C, ConvertType(E->getType()));
}
case Expr::CallExprClass: {
CallExpr* CE = cast<CallExpr>(E);
@ -492,7 +494,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
case APValue::LValue: {
const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
llvm::Constant *Offset =
llvm::ConstantInt::get(llvm::Type::Int64Ty,
VMContext.getConstantInt(llvm::Type::Int64Ty,
Result.Val.getLValueOffset());
llvm::Constant *C;
@ -502,59 +504,59 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
// Apply offset if necessary.
if (!Offset->isNullValue()) {
const llvm::Type *Type =
llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type);
Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1);
C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
llvm::Constant *Casted = VMContext.getConstantExprBitCast(C, Type);
Casted = VMContext.getConstantExprGetElementPtr(Casted, &Offset, 1);
C = VMContext.getConstantExprBitCast(Casted, C->getType());
}
// Convert to the appropriate type; this could be an lvalue for
// an integer.
if (isa<llvm::PointerType>(DestTy))
return llvm::ConstantExpr::getBitCast(C, DestTy);
return VMContext.getConstantExprBitCast(C, DestTy);
return llvm::ConstantExpr::getPtrToInt(C, DestTy);
return VMContext.getConstantExprPtrToInt(C, DestTy);
} else {
C = Offset;
// Convert to the appropriate type; this could be an lvalue for
// an integer.
if (isa<llvm::PointerType>(DestTy))
return llvm::ConstantExpr::getIntToPtr(C, DestTy);
return VMContext.getConstantExprIntToPtr(C, DestTy);
// If the types don't match this should only be a truncate.
if (C->getType() != DestTy)
return llvm::ConstantExpr::getTrunc(C, DestTy);
return VMContext.getConstantExprTrunc(C, DestTy);
return C;
}
}
case APValue::Int: {
llvm::Constant *C = llvm::ConstantInt::get(Result.Val.getInt());
llvm::Constant *C = VMContext.getConstantInt(Result.Val.getInt());
if (C->getType() == llvm::Type::Int1Ty) {
const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
C = llvm::ConstantExpr::getZExt(C, BoolTy);
C = VMContext.getConstantExprZExt(C, BoolTy);
}
return C;
}
case APValue::ComplexInt: {
llvm::Constant *Complex[2];
Complex[0] = llvm::ConstantInt::get(Result.Val.getComplexIntReal());
Complex[1] = llvm::ConstantInt::get(Result.Val.getComplexIntImag());
Complex[0] = VMContext.getConstantInt(Result.Val.getComplexIntReal());
Complex[1] = VMContext.getConstantInt(Result.Val.getComplexIntImag());
return llvm::ConstantStruct::get(Complex, 2);
return VMContext.getConstantStruct(Complex, 2);
}
case APValue::Float:
return llvm::ConstantFP::get(Result.Val.getFloat());
return VMContext.getConstantFP(Result.Val.getFloat());
case APValue::ComplexFloat: {
llvm::Constant *Complex[2];
Complex[0] = llvm::ConstantFP::get(Result.Val.getComplexFloatReal());
Complex[1] = llvm::ConstantFP::get(Result.Val.getComplexFloatImag());
Complex[0] = VMContext.getConstantFP(Result.Val.getComplexFloatReal());
Complex[1] = VMContext.getConstantFP(Result.Val.getComplexFloatImag());
return llvm::ConstantStruct::get(Complex, 2);
return VMContext.getConstantStruct(Complex, 2);
}
case APValue::Vector: {
llvm::SmallVector<llvm::Constant *, 4> Inits;
@ -563,11 +565,11 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
for (unsigned i = 0; i != NumElts; ++i) {
APValue &Elt = Result.Val.getVectorElt(i);
if (Elt.isInt())
Inits.push_back(llvm::ConstantInt::get(Elt.getInt()));
Inits.push_back(VMContext.getConstantInt(Elt.getInt()));
else
Inits.push_back(llvm::ConstantFP::get(Elt.getFloat()));
Inits.push_back(VMContext.getConstantFP(Elt.getFloat()));
}
return llvm::ConstantVector::get(&Inits[0], Inits.size());
return VMContext.getConstantVector(&Inits[0], Inits.size());
}
}
}
@ -575,7 +577,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
if (C && C->getType() == llvm::Type::Int1Ty) {
const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
C = llvm::ConstantExpr::getZExt(C, BoolTy);
C = VMContext.getConstantExprZExt(C, BoolTy);
}
return C;
}
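
For readers following the EmitConstantExpr hunks above, here is a minimal standalone sketch of the pattern this patch applies throughout: constants that used to come from static factories such as llvm::ConstantInt::get and llvm::ConstantExpr::getZExt are now requested from the LLVMContext. The helper name and include list are illustrative only and are not part of the change.

    #include "llvm/LLVMContext.h"
    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"

    // Hypothetical helper: widen an i1 constant to the in-memory bool type,
    // using the contextified API exercised by the hunks above.
    static llvm::Constant *EmitBoolMemConstant(llvm::LLVMContext &VMContext,
                                               bool B,
                                               const llvm::Type *MemBoolTy) {
      // Previously: llvm::ConstantInt::get(llvm::Type::Int1Ty, B)
      llvm::Constant *C = VMContext.getConstantInt(llvm::Type::Int1Ty, B);
      // Previously: llvm::ConstantExpr::getZExt(C, MemBoolTy)
      return VMContext.getConstantExprZExt(C, MemBoolTy);
    }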

View file

@ -48,10 +48,12 @@ class VISIBILITY_HIDDEN ScalarExprEmitter
CodeGenFunction &CGF;
CGBuilderTy &Builder;
bool IgnoreResultAssign;
llvm::LLVMContext &VMContext;
public:
ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
: CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira) {
: CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
VMContext(cgf.getLLVMContext()) {
}
//===--------------------------------------------------------------------===//
@ -106,32 +108,32 @@ public:
// Leaves.
Value *VisitIntegerLiteral(const IntegerLiteral *E) {
return llvm::ConstantInt::get(E->getValue());
return VMContext.getConstantInt(E->getValue());
}
Value *VisitFloatingLiteral(const FloatingLiteral *E) {
return llvm::ConstantFP::get(E->getValue());
return VMContext.getConstantFP(E->getValue());
}
Value *VisitCharacterLiteral(const CharacterLiteral *E) {
return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
return VMContext.getConstantInt(ConvertType(E->getType()), E->getValue());
}
Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
return VMContext.getConstantInt(ConvertType(E->getType()), E->getValue());
}
Value *VisitCXXZeroInitValueExpr(const CXXZeroInitValueExpr *E) {
return CGF.getLLVMContext().getNullValue(ConvertType(E->getType()));
return VMContext.getNullValue(ConvertType(E->getType()));
}
Value *VisitGNUNullExpr(const GNUNullExpr *E) {
return CGF.getLLVMContext().getNullValue(ConvertType(E->getType()));
return VMContext.getNullValue(ConvertType(E->getType()));
}
Value *VisitTypesCompatibleExpr(const TypesCompatibleExpr *E) {
return llvm::ConstantInt::get(ConvertType(E->getType()),
return VMContext.getConstantInt(ConvertType(E->getType()),
CGF.getContext().typesAreCompatible(
E->getArgType1(), E->getArgType2()));
}
Value *VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E);
Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
llvm::Value *V =
llvm::ConstantInt::get(llvm::Type::Int32Ty,
VMContext.getConstantInt(llvm::Type::Int32Ty,
CGF.GetIDForAddrOfLabel(E->getLabel()));
return Builder.CreateIntToPtr(V, ConvertType(E->getType()));
@ -140,7 +142,7 @@ public:
// l-values.
Value *VisitDeclRefExpr(DeclRefExpr *E) {
if (const EnumConstantDecl *EC = dyn_cast<EnumConstantDecl>(E->getDecl()))
return llvm::ConstantInt::get(EC->getInitVal());
return VMContext.getConstantInt(EC->getInitVal());
return EmitLoadOfLValue(E);
}
Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
@ -197,20 +199,20 @@ public:
const llvm::Type *ElementType = VType->getElementType();
// Emit individual vector element stores.
llvm::Value *V = llvm::UndefValue::get(VType);
llvm::Value *V = VMContext.getUndef(VType);
// Emit initializers
unsigned i;
for (i = 0; i < NumInitElements; ++i) {
Value *NewV = Visit(E->getInit(i));
Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i);
Value *Idx = VMContext.getConstantInt(llvm::Type::Int32Ty, i);
V = Builder.CreateInsertElement(V, NewV, Idx);
}
// Emit remaining default initializers
for (/* Do not initialize i*/; i < NumVectorElements; ++i) {
Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i);
llvm::Value *NewV = CGF.getLLVMContext().getNullValue(ElementType);
Value *Idx = VMContext.getConstantInt(llvm::Type::Int32Ty, i);
llvm::Value *NewV = VMContext.getNullValue(ElementType);
V = Builder.CreateInsertElement(V, NewV, Idx);
}
@ -218,7 +220,7 @@ public:
}
Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
return CGF.getLLVMContext().getNullValue(ConvertType(E->getType()));
return VMContext.getNullValue(ConvertType(E->getType()));
}
Value *VisitImplicitCastExpr(const ImplicitCastExpr *E);
Value *VisitCastExpr(const CastExpr *E) {
@ -384,7 +386,7 @@ Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
if (SrcType->isRealFloatingType()) {
// Compare against 0.0 for fp scalars.
llvm::Value *Zero = CGF.getLLVMContext().getNullValue(Src->getType());
llvm::Value *Zero = VMContext.getNullValue(Src->getType());
return Builder.CreateFCmpUNE(Src, Zero, "tobool");
}
@ -407,7 +409,7 @@ Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
}
// Compare against an integer or pointer null.
llvm::Value *Zero = CGF.getLLVMContext().getNullValue(Src->getType());
llvm::Value *Zero = VMContext.getNullValue(Src->getType());
return Builder.CreateICmpNE(Src, Zero, "tobool");
}
@ -442,7 +444,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
// First, convert to the correct width so that we control the kind of
// extension.
const llvm::Type *MiddleTy = llvm::IntegerType::get(CGF.LLVMPointerWidth);
const llvm::Type *MiddleTy = VMContext.getIntegerType(CGF.LLVMPointerWidth);
bool InputSigned = SrcType->isSignedIntegerType();
llvm::Value* IntResult =
Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
@ -463,17 +465,17 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy);
// Insert the element in element zero of an undef vector
llvm::Value *UnV = llvm::UndefValue::get(DstTy);
llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
llvm::Value *UnV = VMContext.getUndef(DstTy);
llvm::Value *Idx = VMContext.getConstantInt(llvm::Type::Int32Ty, 0);
UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
// Splat the element across to all elements
llvm::SmallVector<llvm::Constant*, 16> Args;
unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
for (unsigned i = 0; i < NumElements; i++)
Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));
Args.push_back(VMContext.getConstantInt(llvm::Type::Int32Ty, 0));
llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
llvm::Constant *Mask = VMContext.getConstantVector(&Args[0], NumElements);
llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
return Yay;
}
@ -542,7 +544,7 @@ Value *ScalarExprEmitter::VisitExpr(Expr *E) {
CGF.ErrorUnsupported(E, "scalar expression");
if (E->getType()->isVoidType())
return 0;
return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
return VMContext.getUndef(CGF.ConvertType(E->getType()));
}
Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
@ -552,7 +554,7 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
}
Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
Value* SV = llvm::ConstantVector::get(indices.begin(), indices.size());
Value* SV = VMContext.getConstantVector(indices.begin(), indices.size());
return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
}
@ -685,11 +687,13 @@ Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E,
Value *NextVal;
if (const llvm::PointerType *PT =
dyn_cast<llvm::PointerType>(InVal->getType())) {
llvm::Constant *Inc =llvm::ConstantInt::get(llvm::Type::Int32Ty, AmountVal);
llvm::Constant *Inc =
VMContext.getConstantInt(llvm::Type::Int32Ty, AmountVal);
if (!isa<llvm::FunctionType>(PT->getElementType())) {
NextVal = Builder.CreateGEP(InVal, Inc, "ptrincdec");
} else {
const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
const llvm::Type *i8Ty =
VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
@ -700,24 +704,24 @@ Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E,
// Bool = ((int)Bool+1) != 0
// An interesting aspect of this is that increment is always true.
// Decrement does not have this property.
NextVal = llvm::ConstantInt::getTrue();
NextVal = VMContext.getConstantIntTrue();
} else if (isa<llvm::IntegerType>(InVal->getType())) {
NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);
NextVal = VMContext.getConstantInt(InVal->getType(), AmountVal);
NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
} else {
// Add the inc/dec to the real part.
if (InVal->getType() == llvm::Type::FloatTy)
NextVal =
llvm::ConstantFP::get(llvm::APFloat(static_cast<float>(AmountVal)));
VMContext.getConstantFP(llvm::APFloat(static_cast<float>(AmountVal)));
else if (InVal->getType() == llvm::Type::DoubleTy)
NextVal =
llvm::ConstantFP::get(llvm::APFloat(static_cast<double>(AmountVal)));
VMContext.getConstantFP(llvm::APFloat(static_cast<double>(AmountVal)));
else {
llvm::APFloat F(static_cast<float>(AmountVal));
bool ignored;
F.convert(CGF.Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
&ignored);
NextVal = llvm::ConstantFP::get(F);
NextVal = VMContext.getConstantFP(F);
}
NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec");
}
@ -787,7 +791,7 @@ ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
// constant folding logic so we don't have to duplicate it here.
Expr::EvalResult Result;
E->Evaluate(Result, CGF.getContext());
return llvm::ConstantInt::get(Result.Val.getInt());
return VMContext.getConstantInt(Result.Val.getInt());
}
Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
@ -807,7 +811,7 @@ Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
CGF.EmitLValue(Op);
else
CGF.EmitScalarExpr(Op, true);
return CGF.getLLVMContext().getNullValue(ConvertType(E->getType()));
return VMContext.getNullValue(ConvertType(E->getType()));
}
Value *ScalarExprEmitter::VisitUnaryOffsetOf(const UnaryOperator *E)
@ -844,7 +848,7 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
// (Note that we do actually need the imaginary part of the RHS for
// multiplication and division.)
CGF.ErrorUnsupported(E, "complex compound assignment");
return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
return VMContext.getUndef(CGF.ConvertType(E->getType()));
}
// Emit the RHS first. __block variables need to have the rhs evaluated
@ -956,18 +960,18 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
handerArgTypes.push_back(llvm::Type::Int64Ty);
handerArgTypes.push_back(llvm::Type::Int8Ty);
handerArgTypes.push_back(llvm::Type::Int8Ty);
llvm::FunctionType *handlerTy = llvm::FunctionType::get(llvm::Type::Int64Ty,
llvm::FunctionType *handlerTy = VMContext.getFunctionType(llvm::Type::Int64Ty,
handerArgTypes, false);
llvm::Value *handlerFunction =
CGF.CGM.getModule().getOrInsertGlobal("__overflow_handler",
llvm::PointerType::getUnqual(handlerTy));
VMContext.getPointerTypeUnqual(handlerTy));
handlerFunction = Builder.CreateLoad(handlerFunction);
llvm::Value *handlerResult = Builder.CreateCall4(handlerFunction,
Builder.CreateSExt(Ops.LHS, llvm::Type::Int64Ty),
Builder.CreateSExt(Ops.RHS, llvm::Type::Int64Ty),
llvm::ConstantInt::get(llvm::Type::Int8Ty, OpID),
llvm::ConstantInt::get(llvm::Type::Int8Ty,
VMContext.getConstantInt(llvm::Type::Int8Ty, OpID),
VMContext.getConstantInt(llvm::Type::Int8Ty,
cast<llvm::IntegerType>(opTy)->getBitWidth()));
handlerResult = Builder.CreateTrunc(handlerResult, opTy);
@ -1024,7 +1028,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
if (Width < CGF.LLVMPointerWidth) {
// Zero or sign extend the pointer value based on whether the index is
// signed or not.
const llvm::Type *IdxType = llvm::IntegerType::get(CGF.LLVMPointerWidth);
const llvm::Type *IdxType = VMContext.getIntegerType(CGF.LLVMPointerWidth);
if (IdxExp->getType()->isSignedIntegerType())
Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
else
@ -1035,10 +1039,10 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
// type.
if (const ObjCInterfaceType *OIT = dyn_cast<ObjCInterfaceType>(ElementType)) {
llvm::Value *InterfaceSize =
llvm::ConstantInt::get(Idx->getType(),
VMContext.getConstantInt(Idx->getType(),
CGF.getContext().getTypeSize(OIT) / 8);
Idx = Builder.CreateMul(Idx, InterfaceSize);
const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
const llvm::Type *i8Ty = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
return Builder.CreateBitCast(Res, Ptr->getType());
@ -1048,7 +1052,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
// extensions. The GNU void* casts amount to no-ops since our void*
// type is i8*, but this is future proof.
if (ElementType->isVoidType() || ElementType->isFunctionType()) {
const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
const llvm::Type *i8Ty = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
return Builder.CreateBitCast(Res, Ptr->getType());
@ -1086,7 +1090,8 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
if (Width < CGF.LLVMPointerWidth) {
// Zero or sign extend the pointer value based on whether the index is
// signed or not.
const llvm::Type *IdxType = llvm::IntegerType::get(CGF.LLVMPointerWidth);
const llvm::Type *IdxType =
VMContext.getIntegerType(CGF.LLVMPointerWidth);
if (Ops.E->getRHS()->getType()->isSignedIntegerType())
Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
else
@ -1099,10 +1104,11 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
if (const ObjCInterfaceType *OIT =
dyn_cast<ObjCInterfaceType>(LHSElementType)) {
llvm::Value *InterfaceSize =
llvm::ConstantInt::get(Idx->getType(),
VMContext.getConstantInt(Idx->getType(),
CGF.getContext().getTypeSize(OIT) / 8);
Idx = Builder.CreateMul(Idx, InterfaceSize);
const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
const llvm::Type *i8Ty =
VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
Value *Res = Builder.CreateGEP(LHSCasted, Idx, "add.ptr");
return Builder.CreateBitCast(Res, Ops.LHS->getType());
@ -1112,7 +1118,8 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
// extensions. The GNU void* casts amount to no-ops since our
// void* type is i8*, but this is future proof.
if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
const llvm::Type *i8Ty =
VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr");
return Builder.CreateBitCast(Res, Ops.LHS->getType());
@ -1148,12 +1155,12 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
// better code. See PR2247.
if (llvm::isPowerOf2_64(ElementSize)) {
Value *ShAmt =
llvm::ConstantInt::get(ResultType, llvm::Log2_64(ElementSize));
VMContext.getConstantInt(ResultType, llvm::Log2_64(ElementSize));
return Builder.CreateAShr(BytesBetween, ShAmt, "sub.ptr.shr");
}
// Otherwise, do a full sdiv.
Value *BytesPerElt = llvm::ConstantInt::get(ResultType, ElementSize);
Value *BytesPerElt = VMContext.getConstantInt(ResultType, ElementSize);
return Builder.CreateSDiv(BytesBetween, BytesPerElt, "sub.ptr.div");
}
}
@ -1278,7 +1285,7 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
// 0 && RHS: If it is safe, just elide the RHS, and return 0.
if (!CGF.ContainsLabel(E->getRHS()))
return CGF.getLLVMContext().getNullValue(CGF.LLVMIntTy);
return VMContext.getNullValue(CGF.LLVMIntTy);
}
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
@ -1294,7 +1301,7 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
PN->reserveOperandSpace(2); // Normal case, two inputs.
for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
PI != PE; ++PI)
PN->addIncoming(llvm::ConstantInt::getFalse(), *PI);
PN->addIncoming(VMContext.getConstantIntFalse(), *PI);
CGF.PushConditionalTempDestruction();
CGF.EmitBlock(RHSBlock);
@ -1325,7 +1332,7 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
// 1 || RHS: If it is safe, just elide the RHS, and return 1.
if (!CGF.ContainsLabel(E->getRHS()))
return llvm::ConstantInt::get(CGF.LLVMIntTy, 1);
return VMContext.getConstantInt(CGF.LLVMIntTy, 1);
}
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
@ -1341,7 +1348,7 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
PN->reserveOperandSpace(2); // Normal case, two inputs.
for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
PI != PE; ++PI)
PN->addIncoming(llvm::ConstantInt::getTrue(), *PI);
PN->addIncoming(VMContext.getConstantIntTrue(), *PI);
CGF.PushConditionalTempDestruction();
@ -1565,13 +1572,13 @@ Value *CodeGenFunction::EmitShuffleVector(Value* V1, Value *V2, ...) {
int n = va_arg(va, int);
assert(n >= 0 && n < (int)NumElements * 2 &&
"Vector shuffle index out of bounds!");
Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, n));
Args.push_back(VMContext.getConstantInt(llvm::Type::Int32Ty, n));
}
const char *Name = va_arg(va, const char *);
va_end(va);
llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
llvm::Constant *Mask = VMContext.getConstantVector(&Args[0], NumElements);
return Builder.CreateShuffleVector(V1, V2, Mask, Name);
}
@ -1579,11 +1586,11 @@ Value *CodeGenFunction::EmitShuffleVector(Value* V1, Value *V2, ...) {
llvm::Value *CodeGenFunction::EmitVector(llvm::Value * const *Vals,
unsigned NumVals, bool isSplat) {
llvm::Value *Vec
= llvm::UndefValue::get(llvm::VectorType::get(Vals[0]->getType(), NumVals));
= VMContext.getUndef(VMContext.getVectorType(Vals[0]->getType(), NumVals));
for (unsigned i = 0, e = NumVals; i != e; ++i) {
llvm::Value *Val = isSplat ? Vals[0] : Vals[i];
llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i);
llvm::Value *Idx = VMContext.getConstantInt(llvm::Type::Int32Ty, i);
Vec = Builder.CreateInsertElement(Vec, Val, Idx, "tmp");
}
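
As a companion to the scalar-conversion hunks above, here is a hedged, self-contained sketch of the splat sequence in its contextified form; the function name and includes are assumptions for illustration. The scalar is inserted into lane 0 of an undef vector, and a shuffle with an all-zero mask copies lane 0 into every destination lane.

    #include "llvm/LLVMContext.h"
    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Support/IRBuilder.h"

    // Hypothetical helper: splat Elt across all lanes of a vector of type VTy.
    static llvm::Value *EmitSplat(llvm::LLVMContext &VMContext,
                                  llvm::IRBuilder<> &Builder,
                                  llvm::Value *Elt,
                                  const llvm::VectorType *VTy) {
      // Start from an undef vector and insert the element at index 0.
      llvm::Value *Vec = VMContext.getUndef(VTy);
      llvm::Value *Idx = VMContext.getConstantInt(llvm::Type::Int32Ty, 0);
      Vec = Builder.CreateInsertElement(Vec, Elt, Idx, "tmp");

      // An all-zero shuffle mask reads lane 0 for every destination lane.
      llvm::SmallVector<llvm::Constant*, 16> Mask;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
        Mask.push_back(VMContext.getConstantInt(llvm::Type::Int32Ty, 0));
      llvm::Constant *MaskV = VMContext.getConstantVector(&Mask[0], Mask.size());
      return Builder.CreateShuffleVector(Vec, Vec, MaskV, "splat");
    }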

View file

@ -28,7 +28,7 @@ llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
{
llvm::Constant *C = CGM.getObjCRuntime().GenerateConstantString(E);
// FIXME: This bitcast should just be made an invariant on the Runtime.
return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
return VMContext.getConstantExprBitCast(C, ConvertType(E->getType()));
}
/// Emit a selector.
@ -179,7 +179,7 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
llvm::Value *Offset = EmitIvarOffset(IMP->getClassInterface(), Ivar);
llvm::Value *True =
llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
VMContext.getConstantInt(Types.ConvertType(getContext().BoolTy), 1);
CallArgList Args;
Args.push_back(std::make_pair(RValue::get(SelfAsId), IdTy));
Args.push_back(std::make_pair(RValue::get(CmdVal), Cmd->getType()));
@ -262,9 +262,9 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
Builder.CreateBitCast(Builder.CreateLoad(Arg, "arg"),
Types.ConvertType(IdTy));
llvm::Value *True =
llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
VMContext.getConstantInt(Types.ConvertType(getContext().BoolTy), 1);
llvm::Value *False =
llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 0);
VMContext.getConstantInt(Types.ConvertType(getContext().BoolTy), 0);
CallArgList Args;
Args.push_back(std::make_pair(RValue::get(SelfAsId), IdTy));
Args.push_back(std::make_pair(RValue::get(CmdVal), Cmd->getType()));
@ -471,7 +471,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
getContext().getPointerType(ItemsTy)));
const llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems);
llvm::Constant *Count = VMContext.getConstantInt(UnsignedLongLTy, NumItems);
Args.push_back(std::make_pair(RValue::get(Count),
getContext().UnsignedLongTy));
@ -488,7 +488,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
llvm::BasicBlock *SetStartMutations = createBasicBlock("setstartmutations");
llvm::Value *Limit = Builder.CreateLoad(LimitPtr);
llvm::Value *Zero = getLLVMContext().getNullValue(UnsignedLongLTy);
llvm::Value *Zero = VMContext.getNullValue(UnsignedLongLTy);
llvm::Value *IsZero = Builder.CreateICmpEQ(Limit, Zero, "iszero");
Builder.CreateCondBr(IsZero, NoElements, SetStartMutations);
@ -574,7 +574,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// Increment the counter.
Counter = Builder.CreateAdd(Counter,
llvm::ConstantInt::get(UnsignedLongLTy, 1));
VMContext.getConstantInt(UnsignedLongLTy, 1));
Builder.CreateStore(Counter, CounterPtr);
llvm::BasicBlock *LoopEnd = createBasicBlock("loopend");
@ -618,7 +618,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
LValue LV = EmitLValue(cast<Expr>(S.getElement()));
// Set the value to null.
Builder.CreateStore(getLLVMContext().getNullValue(ConvertType(ElementTy)),
Builder.CreateStore(VMContext.getNullValue(ConvertType(ElementTy)),
LV.getAddress());
}
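
For the Objective-C getter/setter hunks above, a tiny hedged helper (the name is chosen for illustration and is not part of the patch) shows how the BOOL arguments passed to objc_getProperty/objc_setProperty are now materialized through the context rather than via llvm::ConstantInt::get.

    #include "llvm/LLVMContext.h"
    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"

    // Hypothetical helper: build a BOOL constant (0 or 1) of the lowered
    // Objective-C BOOL type via the LLVMContext.
    static llvm::Value *EmitObjCBool(llvm::LLVMContext &VMContext,
                                     const llvm::Type *BoolLLVMTy, bool V) {
      // Previously: llvm::ConstantInt::get(BoolLLVMTy, V)
      return VMContext.getConstantInt(BoolLLVMTy, V ? 1 : 0);
    }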

View file

@ -70,6 +70,7 @@ private:
// Some zeros used for GEPs in lots of places.
llvm::Constant *Zeros[2];
llvm::Constant *NULLPtr;
llvm::LLVMContext &VMContext;
private:
llvm::Constant *GenerateIvarList(
const llvm::SmallVectorImpl<llvm::Constant *> &IvarNames,
@ -204,24 +205,24 @@ static std::string SymbolNameForMethod(const std::string &ClassName, const
CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
: CGM(cgm), TheModule(CGM.getModule()), ClassPtrAlias(0),
MetaClassPtrAlias(0) {
MetaClassPtrAlias(0), VMContext(cgm.getLLVMContext()) {
IntTy = cast<llvm::IntegerType>(
CGM.getTypes().ConvertType(CGM.getContext().IntTy));
LongTy = cast<llvm::IntegerType>(
CGM.getTypes().ConvertType(CGM.getContext().LongTy));
Zeros[0] = llvm::ConstantInt::get(LongTy, 0);
Zeros[0] = VMContext.getConstantInt(LongTy, 0);
Zeros[1] = Zeros[0];
NULLPtr = llvm::ConstantPointerNull::get(
llvm::PointerType::getUnqual(llvm::Type::Int8Ty));
NULLPtr = VMContext.getConstantPointerNull(
VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty));
// C string type. Used in lots of places.
PtrToInt8Ty =
llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
// Get the selector Type.
SelectorTy = cast<llvm::PointerType>(
CGM.getTypes().ConvertType(CGM.getContext().getObjCSelType()));
PtrToIntTy = llvm::PointerType::getUnqual(IntTy);
PtrToIntTy = VMContext.getPointerTypeUnqual(IntTy);
PtrTy = PtrToInt8Ty;
// Object type
@ -232,7 +233,7 @@ CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
std::vector<const llvm::Type*> IMPArgs;
IMPArgs.push_back(IdTy);
IMPArgs.push_back(SelectorTy);
IMPTy = llvm::FunctionType::get(IdTy, IMPArgs, true);
IMPTy = VMContext.getFunctionType(IdTy, IMPArgs, true);
}
// This has to perform the lookup every time, since posing and related
// techniques can modify the name -> class mapping.
@ -244,7 +245,7 @@ llvm::Value *CGObjCGNU::GetClass(CGBuilderTy &Builder,
std::vector<const llvm::Type*> Params(1, PtrToInt8Ty);
llvm::Constant *ClassLookupFn =
CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy,
CGM.CreateRuntimeFunction(VMContext.getFunctionType(IdTy,
Params,
true),
"objc_lookup_class");
@ -254,7 +255,7 @@ llvm::Value *CGObjCGNU::GetClass(CGBuilderTy &Builder,
llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel) {
llvm::GlobalAlias *&US = UntypedSelectors[Sel.getAsString()];
if (US == 0)
US = new llvm::GlobalAlias(llvm::PointerType::getUnqual(SelectorTy),
US = new llvm::GlobalAlias(VMContext.getPointerTypeUnqual(SelectorTy),
llvm::GlobalValue::InternalLinkage,
".objc_untyped_selector_alias",
NULL, &TheModule);
@ -280,7 +281,7 @@ llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
// If it isn't, cache it.
llvm::GlobalAlias *Sel = new llvm::GlobalAlias(
llvm::PointerType::getUnqual(SelectorTy),
VMContext.getPointerTypeUnqual(SelectorTy),
llvm::GlobalValue::InternalLinkage, SelName,
NULL, &TheModule);
TypedSelectors[Selector] = Sel;
@ -290,21 +291,21 @@ llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
llvm::Constant *CGObjCGNU::MakeConstantString(const std::string &Str,
const std::string &Name) {
llvm::Constant * ConstStr = llvm::ConstantArray::get(Str);
llvm::Constant * ConstStr = VMContext.getConstantArray(Str);
ConstStr = new llvm::GlobalVariable(TheModule, ConstStr->getType(), true,
llvm::GlobalValue::InternalLinkage,
ConstStr, Name);
return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros, 2);
return VMContext.getConstantExprGetElementPtr(ConstStr, Zeros, 2);
}
llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::StructType *Ty,
std::vector<llvm::Constant*> &V, const std::string &Name) {
llvm::Constant *C = llvm::ConstantStruct::get(Ty, V);
llvm::Constant *C = VMContext.getConstantStruct(Ty, V);
return new llvm::GlobalVariable(TheModule, Ty, false,
llvm::GlobalValue::InternalLinkage, C, Name);
}
llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::ArrayType *Ty,
std::vector<llvm::Constant*> &V, const std::string &Name) {
llvm::Constant *C = llvm::ConstantArray::get(Ty, V);
llvm::Constant *C = VMContext.getConstantArray(Ty, V);
return new llvm::GlobalVariable(TheModule, Ty, false,
llvm::GlobalValue::InternalLinkage, C, Name);
}
@ -319,12 +320,12 @@ llvm::Constant *CGObjCGNU::GenerateConstantString(const ObjCStringLiteral *SL) {
std::vector<llvm::Constant*> Ivars;
Ivars.push_back(NULLPtr);
Ivars.push_back(MakeConstantString(Str));
Ivars.push_back(llvm::ConstantInt::get(IntTy, Str.size()));
Ivars.push_back(VMContext.getConstantInt(IntTy, Str.size()));
llvm::Constant *ObjCStr = MakeGlobal(
llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, IntTy, NULL),
VMContext.getStructType(PtrToInt8Ty, PtrToInt8Ty, IntTy, NULL),
Ivars, ".objc_str");
ConstantStrings.push_back(
llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty));
VMContext.getConstantExprBitCast(ObjCStr, PtrToInt8Ty));
return ObjCStr;
}
@ -361,10 +362,10 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
std::vector<const llvm::Type*> Params;
Params.push_back(PtrTy);
if (IsClassMessage) {
classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
classLookupFunction = CGM.CreateRuntimeFunction(VMContext.getFunctionType(
IdTy, Params, true), "objc_get_meta_class");
} else {
classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
classLookupFunction = CGM.CreateRuntimeFunction(VMContext.getFunctionType(
IdTy, Params, true), "objc_get_class");
}
ReceiverClass = CGF.Builder.CreateCall(classLookupFunction,
@ -393,13 +394,14 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
}
// Cast the pointer to a simplified version of the class structure
ReceiverClass = CGF.Builder.CreateBitCast(ReceiverClass,
llvm::PointerType::getUnqual(llvm::StructType::get(IdTy, IdTy, NULL)));
VMContext.getPointerTypeUnqual(
VMContext.getStructType(IdTy, IdTy, NULL)));
// Get the superclass pointer
ReceiverClass = CGF.Builder.CreateStructGEP(ReceiverClass, 1);
// Load the superclass pointer
ReceiverClass = CGF.Builder.CreateLoad(ReceiverClass);
// Construct the structure used to look up the IMP
llvm::StructType *ObjCSuperTy = llvm::StructType::get(Receiver->getType(),
llvm::StructType *ObjCSuperTy = VMContext.getStructType(Receiver->getType(),
IdTy, NULL);
llvm::Value *ObjCSuper = CGF.Builder.CreateAlloca(ObjCSuperTy);
@ -409,11 +411,11 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
// Get the IMP
std::vector<const llvm::Type*> Params;
Params.push_back(llvm::PointerType::getUnqual(ObjCSuperTy));
Params.push_back(VMContext.getPointerTypeUnqual(ObjCSuperTy));
Params.push_back(SelectorTy);
llvm::Constant *lookupFunction =
CGM.CreateRuntimeFunction(llvm::FunctionType::get(
llvm::PointerType::getUnqual(impType), Params, true),
CGM.CreateRuntimeFunction(VMContext.getFunctionType(
VMContext.getPointerTypeUnqual(impType), Params, true),
"objc_msg_lookup_super");
llvm::Value *lookupArgs[] = {ObjCSuper, cmd};
@ -463,19 +465,19 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
if (isa<ObjCMethodDecl>(CGF.CurFuncDecl)) {
self = CGF.LoadObjCSelf();
} else {
self = llvm::ConstantPointerNull::get(IdTy);
self = VMContext.getConstantPointerNull(IdTy);
}
Params.push_back(self->getType());
llvm::Constant *lookupFunction =
CGM.CreateRuntimeFunction(llvm::FunctionType::get(
llvm::PointerType::getUnqual(impType), Params, true),
CGM.CreateRuntimeFunction(VMContext.getFunctionType(
VMContext.getPointerTypeUnqual(impType), Params, true),
"objc_msg_lookup_sender");
imp = CGF.Builder.CreateCall3(lookupFunction, Receiver, cmd, self);
} else {
llvm::Constant *lookupFunction =
CGM.CreateRuntimeFunction(llvm::FunctionType::get(
llvm::PointerType::getUnqual(impType), Params, true),
CGM.CreateRuntimeFunction(VMContext.getFunctionType(
VMContext.getPointerTypeUnqual(impType), Params, true),
"objc_msg_lookup");
imp = CGF.Builder.CreateCall2(lookupFunction, Receiver, cmd);
@ -492,10 +494,10 @@ llvm::Constant *CGObjCGNU::GenerateMethodList(const std::string &ClassName,
const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes,
bool isClassMethodList) {
// Get the method structure type.
llvm::StructType *ObjCMethodTy = llvm::StructType::get(
llvm::StructType *ObjCMethodTy = VMContext.getStructType(
PtrToInt8Ty, // Really a selector, but the runtime creates it for us.
PtrToInt8Ty, // Method types
llvm::PointerType::getUnqual(IMPTy), //Method pointer
VMContext.getPointerTypeUnqual(IMPTy), //Method pointer
NULL);
std::vector<llvm::Constant*> Methods;
std::vector<llvm::Constant*> Elements;
@ -507,27 +509,27 @@ llvm::Constant *CGObjCGNU::GenerateMethodList(const std::string &ClassName,
isClassMethodList))) {
llvm::Constant *C =
CGM.GetAddrOfConstantCString(MethodSels[i].getAsString());
Elements.push_back(llvm::ConstantExpr::getGetElementPtr(C, Zeros, 2));
Elements.push_back(VMContext.getConstantExprGetElementPtr(C, Zeros, 2));
Elements.push_back(
llvm::ConstantExpr::getGetElementPtr(MethodTypes[i], Zeros, 2));
Method = llvm::ConstantExpr::getBitCast(Method,
llvm::PointerType::getUnqual(IMPTy));
VMContext.getConstantExprGetElementPtr(MethodTypes[i], Zeros, 2));
Method = VMContext.getConstantExprBitCast(Method,
VMContext.getPointerTypeUnqual(IMPTy));
Elements.push_back(Method);
Methods.push_back(llvm::ConstantStruct::get(ObjCMethodTy, Elements));
Methods.push_back(VMContext.getConstantStruct(ObjCMethodTy, Elements));
}
}
// Array of method structures
llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodTy,
llvm::ArrayType *ObjCMethodArrayTy = VMContext.getArrayType(ObjCMethodTy,
Methods.size());
llvm::Constant *MethodArray = llvm::ConstantArray::get(ObjCMethodArrayTy,
llvm::Constant *MethodArray = VMContext.getConstantArray(ObjCMethodArrayTy,
Methods);
// Structure containing list pointer, array and array count
llvm::SmallVector<const llvm::Type*, 16> ObjCMethodListFields;
llvm::PATypeHolder OpaqueNextTy = llvm::OpaqueType::get();
llvm::Type *NextPtrTy = llvm::PointerType::getUnqual(OpaqueNextTy);
llvm::StructType *ObjCMethodListTy = llvm::StructType::get(NextPtrTy,
llvm::PATypeHolder OpaqueNextTy = VMContext.getOpaqueType();
llvm::Type *NextPtrTy = VMContext.getPointerTypeUnqual(OpaqueNextTy);
llvm::StructType *ObjCMethodListTy = VMContext.getStructType(NextPtrTy,
IntTy,
ObjCMethodArrayTy,
NULL);
@ -537,9 +539,9 @@ llvm::Constant *CGObjCGNU::GenerateMethodList(const std::string &ClassName,
ObjCMethodListTy = llvm::cast<llvm::StructType>(OpaqueNextTy.get());
Methods.clear();
Methods.push_back(llvm::ConstantPointerNull::get(
llvm::PointerType::getUnqual(ObjCMethodListTy)));
Methods.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty,
Methods.push_back(VMContext.getConstantPointerNull(
VMContext.getPointerTypeUnqual(ObjCMethodListTy)));
Methods.push_back(VMContext.getConstantInt(llvm::Type::Int32Ty,
MethodTypes.size()));
Methods.push_back(MethodArray);
@ -553,7 +555,7 @@ llvm::Constant *CGObjCGNU::GenerateIvarList(
const llvm::SmallVectorImpl<llvm::Constant *> &IvarTypes,
const llvm::SmallVectorImpl<llvm::Constant *> &IvarOffsets) {
// Get the method structure type.
llvm::StructType *ObjCIvarTy = llvm::StructType::get(
llvm::StructType *ObjCIvarTy = VMContext.getStructType(
PtrToInt8Ty,
PtrToInt8Ty,
IntTy,
@ -562,24 +564,24 @@ llvm::Constant *CGObjCGNU::GenerateIvarList(
std::vector<llvm::Constant*> Elements;
for (unsigned int i = 0, e = IvarNames.size() ; i < e ; i++) {
Elements.clear();
Elements.push_back( llvm::ConstantExpr::getGetElementPtr(IvarNames[i],
Elements.push_back( VMContext.getConstantExprGetElementPtr(IvarNames[i],
Zeros, 2));
Elements.push_back( llvm::ConstantExpr::getGetElementPtr(IvarTypes[i],
Elements.push_back( VMContext.getConstantExprGetElementPtr(IvarTypes[i],
Zeros, 2));
Elements.push_back(IvarOffsets[i]);
Ivars.push_back(llvm::ConstantStruct::get(ObjCIvarTy, Elements));
Ivars.push_back(VMContext.getConstantStruct(ObjCIvarTy, Elements));
}
// Array of method structures
llvm::ArrayType *ObjCIvarArrayTy = llvm::ArrayType::get(ObjCIvarTy,
llvm::ArrayType *ObjCIvarArrayTy = VMContext.getArrayType(ObjCIvarTy,
IvarNames.size());
Elements.clear();
Elements.push_back(llvm::ConstantInt::get(IntTy, (int)IvarNames.size()));
Elements.push_back(llvm::ConstantArray::get(ObjCIvarArrayTy, Ivars));
Elements.push_back(VMContext.getConstantInt(IntTy, (int)IvarNames.size()));
Elements.push_back(VMContext.getConstantArray(ObjCIvarArrayTy, Ivars));
// Structure containing array and array count
llvm::StructType *ObjCIvarListTy = llvm::StructType::get(IntTy,
llvm::StructType *ObjCIvarListTy = VMContext.getStructType(IntTy,
ObjCIvarArrayTy,
NULL);
@ -601,7 +603,7 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
// Set up the class structure
// Note: Several of these are char*s when they should be ids. This is
// because the runtime performs this translation on load.
llvm::StructType *ClassTy = llvm::StructType::get(
llvm::StructType *ClassTy = VMContext.getStructType(
PtrToInt8Ty, // class_pointer
PtrToInt8Ty, // super_class
PtrToInt8Ty, // name
@ -617,23 +619,23 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
PtrTy, // protocols
PtrTy, // gc_object_type
NULL);
llvm::Constant *Zero = llvm::ConstantInt::get(LongTy, 0);
llvm::Constant *Zero = VMContext.getConstantInt(LongTy, 0);
llvm::Constant *NullP =
llvm::ConstantPointerNull::get(PtrTy);
VMContext.getConstantPointerNull(PtrTy);
// Fill in the structure
std::vector<llvm::Constant*> Elements;
Elements.push_back(llvm::ConstantExpr::getBitCast(MetaClass, PtrToInt8Ty));
Elements.push_back(VMContext.getConstantExprBitCast(MetaClass, PtrToInt8Ty));
Elements.push_back(SuperClass);
Elements.push_back(MakeConstantString(Name, ".class_name"));
Elements.push_back(Zero);
Elements.push_back(llvm::ConstantInt::get(LongTy, info));
Elements.push_back(VMContext.getConstantInt(LongTy, info));
Elements.push_back(InstanceSize);
Elements.push_back(IVars);
Elements.push_back(Methods);
Elements.push_back(NullP);
Elements.push_back(NullP);
Elements.push_back(NullP);
Elements.push_back(llvm::ConstantExpr::getBitCast(Protocols, PtrTy));
Elements.push_back(VMContext.getConstantExprBitCast(Protocols, PtrTy));
Elements.push_back(NullP);
// Create an instance of the structure
return MakeGlobal(ClassTy, Elements, SymbolNameForClass(Name));
@ -643,7 +645,7 @@ llvm::Constant *CGObjCGNU::GenerateProtocolMethodList(
const llvm::SmallVectorImpl<llvm::Constant *> &MethodNames,
const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes) {
// Get the method structure type.
llvm::StructType *ObjCMethodDescTy = llvm::StructType::get(
llvm::StructType *ObjCMethodDescTy = VMContext.getStructType(
PtrToInt8Ty, // Really a selector, but the runtime does the casting for us.
PtrToInt8Ty,
NULL);
@ -651,28 +653,29 @@ llvm::Constant *CGObjCGNU::GenerateProtocolMethodList(
std::vector<llvm::Constant*> Elements;
for (unsigned int i = 0, e = MethodTypes.size() ; i < e ; i++) {
Elements.clear();
Elements.push_back( llvm::ConstantExpr::getGetElementPtr(MethodNames[i],
Elements.push_back(VMContext.getConstantExprGetElementPtr(MethodNames[i],
Zeros, 2));
Elements.push_back(
llvm::ConstantExpr::getGetElementPtr(MethodTypes[i], Zeros, 2));
Methods.push_back(llvm::ConstantStruct::get(ObjCMethodDescTy, Elements));
VMContext.getConstantExprGetElementPtr(MethodTypes[i], Zeros, 2));
Methods.push_back(VMContext.getConstantStruct(ObjCMethodDescTy, Elements));
}
llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodDescTy,
llvm::ArrayType *ObjCMethodArrayTy = VMContext.getArrayType(ObjCMethodDescTy,
MethodNames.size());
llvm::Constant *Array = llvm::ConstantArray::get(ObjCMethodArrayTy, Methods);
llvm::StructType *ObjCMethodDescListTy = llvm::StructType::get(
llvm::Constant *Array = VMContext.getConstantArray(ObjCMethodArrayTy,
Methods);
llvm::StructType *ObjCMethodDescListTy = VMContext.getStructType(
IntTy, ObjCMethodArrayTy, NULL);
Methods.clear();
Methods.push_back(llvm::ConstantInt::get(IntTy, MethodNames.size()));
Methods.push_back(VMContext.getConstantInt(IntTy, MethodNames.size()));
Methods.push_back(Array);
return MakeGlobal(ObjCMethodDescListTy, Methods, ".objc_method_list");
}
// Create the protocol list structure used in classes, categories and so on
llvm::Constant *CGObjCGNU::GenerateProtocolList(
const llvm::SmallVectorImpl<std::string> &Protocols) {
llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
llvm::ArrayType *ProtocolArrayTy = VMContext.getArrayType(PtrToInt8Ty,
Protocols.size());
llvm::StructType *ProtocolListTy = llvm::StructType::get(
llvm::StructType *ProtocolListTy = VMContext.getStructType(
PtrTy, //Should be a recursive pointer, but it's always NULL here.
LongTy,//FIXME: Should be size_t
ProtocolArrayTy,
@ -683,15 +686,15 @@ llvm::Constant *CGObjCGNU::GenerateProtocolList(
llvm::Constant *protocol = ExistingProtocols[*iter];
if (!protocol)
protocol = GenerateEmptyProtocol(*iter);
llvm::Constant *Ptr =
llvm::ConstantExpr::getBitCast(protocol, PtrToInt8Ty);
llvm::Constant *Ptr = VMContext.getConstantExprBitCast(protocol,
PtrToInt8Ty);
Elements.push_back(Ptr);
}
llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
llvm::Constant * ProtocolArray = VMContext.getConstantArray(ProtocolArrayTy,
Elements);
Elements.clear();
Elements.push_back(NULLPtr);
Elements.push_back(llvm::ConstantInt::get(LongTy, Protocols.size()));
Elements.push_back(VMContext.getConstantInt(LongTy, Protocols.size()));
Elements.push_back(ProtocolArray);
return MakeGlobal(ProtocolListTy, Elements, ".objc_protocol_list");
}
@ -701,7 +704,7 @@ llvm::Value *CGObjCGNU::GenerateProtocolRef(CGBuilderTy &Builder,
llvm::Value *protocol = ExistingProtocols[PD->getNameAsString()];
const llvm::Type *T =
CGM.getTypes().ConvertType(CGM.getContext().getObjCProtoType());
return Builder.CreateBitCast(protocol, llvm::PointerType::getUnqual(T));
return Builder.CreateBitCast(protocol, VMContext.getPointerTypeUnqual(T));
}
llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
@ -716,7 +719,7 @@ llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
GenerateProtocolMethodList(EmptyConstantVector, EmptyConstantVector);
// Protocols are objects containing lists of the methods implemented and
// protocols adopted.
llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
llvm::StructType *ProtocolTy = VMContext.getStructType(IdTy,
PtrToInt8Ty,
ProtocolList->getType(),
InstanceMethodList->getType(),
@ -725,8 +728,8 @@ llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
std::vector<llvm::Constant*> Elements;
// The isa pointer must be set to a magic number so the runtime knows it's
// the correct layout.
Elements.push_back(llvm::ConstantExpr::getIntToPtr(
llvm::ConstantInt::get(llvm::Type::Int32Ty, ProtocolVersion), IdTy));
Elements.push_back(VMContext.getConstantExprIntToPtr(
VMContext.getConstantInt(llvm::Type::Int32Ty, ProtocolVersion), IdTy));
Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
Elements.push_back(ProtocolList);
Elements.push_back(InstanceMethodList);
@ -771,7 +774,7 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
GenerateProtocolMethodList(ClassMethodNames, ClassMethodTypes);
// Protocols are objects containing lists of the methods implemented and
// protocols adopted.
llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
llvm::StructType *ProtocolTy = VMContext.getStructType(IdTy,
PtrToInt8Ty,
ProtocolList->getType(),
InstanceMethodList->getType(),
@ -780,14 +783,14 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
std::vector<llvm::Constant*> Elements;
// The isa pointer must be set to a magic number so the runtime knows it's
// the correct layout.
Elements.push_back(llvm::ConstantExpr::getIntToPtr(
llvm::ConstantInt::get(llvm::Type::Int32Ty, ProtocolVersion), IdTy));
Elements.push_back(VMContext.getConstantExprIntToPtr(
VMContext.getConstantInt(llvm::Type::Int32Ty, ProtocolVersion), IdTy));
Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
Elements.push_back(ProtocolList);
Elements.push_back(InstanceMethodList);
Elements.push_back(ClassMethodList);
ExistingProtocols[ProtocolName] =
llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolTy, Elements,
VMContext.getConstantExprBitCast(MakeGlobal(ProtocolTy, Elements,
".objc_protocol"), IdTy);
}
@ -830,18 +833,18 @@ void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
Elements.push_back(MakeConstantString(CategoryName));
Elements.push_back(MakeConstantString(ClassName));
// Instance method list
Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
Elements.push_back(VMContext.getConstantExprBitCast(GenerateMethodList(
ClassName, CategoryName, InstanceMethodSels, InstanceMethodTypes,
false), PtrTy));
// Class method list
Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
Elements.push_back(VMContext.getConstantExprBitCast(GenerateMethodList(
ClassName, CategoryName, ClassMethodSels, ClassMethodTypes, true),
PtrTy));
// Protocol list
Elements.push_back(llvm::ConstantExpr::getBitCast(
Elements.push_back(VMContext.getConstantExprBitCast(
GenerateProtocolList(Protocols), PtrTy));
Categories.push_back(llvm::ConstantExpr::getBitCast(
MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, PtrTy,
Categories.push_back(VMContext.getConstantExprBitCast(
MakeGlobal(VMContext.getStructType(PtrToInt8Ty, PtrToInt8Ty, PtrTy,
PtrTy, PtrTy, NULL), Elements), PtrTy));
}
@ -866,10 +869,10 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
std::string classSymbolName = "__objc_class_name_" + ClassName;
if (llvm::GlobalVariable *symbol =
TheModule.getGlobalVariable(classSymbolName)) {
symbol->setInitializer(llvm::ConstantInt::get(LongTy, 0));
symbol->setInitializer(VMContext.getConstantInt(LongTy, 0));
} else {
new llvm::GlobalVariable(TheModule, LongTy, false,
llvm::GlobalValue::ExternalLinkage, llvm::ConstantInt::get(LongTy, 0),
llvm::GlobalValue::ExternalLinkage, VMContext.getConstantInt(LongTy, 0),
classSymbolName);
}
@ -907,7 +910,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
Offset = ComputeIvarBaseOffset(CGM, ClassDecl, *iter);
}
IvarOffsets.push_back(
llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset));
VMContext.getConstantInt(llvm::Type::Int32Ty, Offset));
}
// Collect information about instance methods
@ -964,7 +967,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
if (!SuperClassName.empty()) {
SuperClass = MakeConstantString(SuperClassName, ".super_class_name");
} else {
SuperClass = llvm::ConstantPointerNull::get(PtrToInt8Ty);
SuperClass = VMContext.getConstantPointerNull(PtrToInt8Ty);
}
// Empty vector used to construct empty method lists
llvm::SmallVector<llvm::Constant*, 1> empty;
@ -984,23 +987,23 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
llvm::Constant *ClassStruct =
GenerateClassStructure(MetaClassStruct, SuperClass, 0x1L,
ClassName.c_str(), 0,
llvm::ConstantInt::get(LongTy, instanceSize), IvarList,
VMContext.getConstantInt(LongTy, instanceSize), IvarList,
MethodList, GenerateProtocolList(Protocols));
// Resolve the class aliases, if they exist.
if (ClassPtrAlias) {
ClassPtrAlias->setAliasee(
llvm::ConstantExpr::getBitCast(ClassStruct, IdTy));
VMContext.getConstantExprBitCast(ClassStruct, IdTy));
ClassPtrAlias = 0;
}
if (MetaClassPtrAlias) {
MetaClassPtrAlias->setAliasee(
llvm::ConstantExpr::getBitCast(MetaClassStruct, IdTy));
VMContext.getConstantExprBitCast(MetaClassStruct, IdTy));
MetaClassPtrAlias = 0;
}
// Add class structure to list to be added to the symtab later
ClassStruct = llvm::ConstantExpr::getBitCast(ClassStruct, PtrToInt8Ty);
ClassStruct = VMContext.getConstantExprBitCast(ClassStruct, PtrToInt8Ty);
Classes.push_back(ClassStruct);
}
@ -1020,8 +1023,8 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
const llvm::Type *SelStructPtrTy = SelectorTy;
bool isSelOpaque = false;
if (SelStructTy == 0) {
SelStructTy = llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, NULL);
SelStructPtrTy = llvm::PointerType::getUnqual(SelStructTy);
SelStructTy = VMContext.getStructType(PtrToInt8Ty, PtrToInt8Ty, NULL);
SelStructPtrTy = VMContext.getPointerTypeUnqual(SelStructTy);
isSelOpaque = true;
}
@ -1034,29 +1037,30 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
llvm::Constant *Statics = NULLPtr;
// Generate statics list:
if (ConstantStrings.size()) {
llvm::ArrayType *StaticsArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
llvm::ArrayType *StaticsArrayTy = VMContext.getArrayType(PtrToInt8Ty,
ConstantStrings.size() + 1);
ConstantStrings.push_back(NULLPtr);
Elements.push_back(MakeConstantString("NSConstantString",
".objc_static_class_name"));
Elements.push_back(llvm::ConstantArray::get(StaticsArrayTy,
Elements.push_back(VMContext.getConstantArray(StaticsArrayTy,
ConstantStrings));
llvm::StructType *StaticsListTy =
llvm::StructType::get(PtrToInt8Ty, StaticsArrayTy, NULL);
llvm::Type *StaticsListPtrTy = llvm::PointerType::getUnqual(StaticsListTy);
VMContext.getStructType(PtrToInt8Ty, StaticsArrayTy, NULL);
llvm::Type *StaticsListPtrTy =
VMContext.getPointerTypeUnqual(StaticsListTy);
Statics = MakeGlobal(StaticsListTy, Elements, ".objc_statics");
llvm::ArrayType *StaticsListArrayTy =
llvm::ArrayType::get(StaticsListPtrTy, 2);
llvm::ArrayType *StaticsListArrayTy =
VMContext.getArrayType(StaticsListPtrTy, 2);
Elements.clear();
Elements.push_back(Statics);
Elements.push_back(TheModule.getContext().getNullValue(StaticsListPtrTy));
Elements.push_back(VMContext.getNullValue(StaticsListPtrTy));
Statics = MakeGlobal(StaticsListArrayTy, Elements, ".objc_statics_ptr");
Statics = llvm::ConstantExpr::getBitCast(Statics, PtrTy);
Statics = VMContext.getConstantExprBitCast(Statics, PtrTy);
}
// Array of classes, categories, and constant objects
llvm::ArrayType *ClassListTy = llvm::ArrayType::get(PtrToInt8Ty,
llvm::ArrayType *ClassListTy = VMContext.getArrayType(PtrToInt8Ty,
Classes.size() + Categories.size() + 2);
llvm::StructType *SymTabTy = llvm::StructType::get(LongTy, SelStructPtrTy,
llvm::StructType *SymTabTy = VMContext.getStructType(LongTy, SelStructPtrTy,
llvm::Type::Int16Ty,
llvm::Type::Int16Ty,
ClassListTy, NULL);
@ -1070,7 +1074,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
Elements.push_back(MakeConstantString(iter->first.first, ".objc_sel_name"));
Elements.push_back(MakeConstantString(iter->first.second,
".objc_sel_types"));
Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
Selectors.push_back(VMContext.getConstantStruct(SelStructTy, Elements));
Elements.clear();
}
for (llvm::StringMap<llvm::GlobalAlias*>::iterator
@ -1079,19 +1083,19 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
Elements.push_back(
MakeConstantString(iter->getKeyData(), ".objc_sel_name"));
Elements.push_back(NULLPtr);
Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
Selectors.push_back(VMContext.getConstantStruct(SelStructTy, Elements));
Elements.clear();
}
Elements.push_back(NULLPtr);
Elements.push_back(NULLPtr);
Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
Selectors.push_back(VMContext.getConstantStruct(SelStructTy, Elements));
Elements.clear();
// Number of static selectors
Elements.push_back(llvm::ConstantInt::get(LongTy, Selectors.size() ));
Elements.push_back(VMContext.getConstantInt(LongTy, Selectors.size() ));
llvm::Constant *SelectorList = MakeGlobal(
llvm::ArrayType::get(SelStructTy, Selectors.size()), Selectors,
VMContext.getArrayType(SelStructTy, Selectors.size()), Selectors,
".objc_selector_list");
Elements.push_back(llvm::ConstantExpr::getBitCast(SelectorList,
Elements.push_back(VMContext.getConstantExprBitCast(SelectorList,
SelStructPtrTy));
// Now that all of the static selectors exist, create pointers to them.
@ -1100,16 +1104,16 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
iter=TypedSelectors.begin(), iterEnd =TypedSelectors.end();
iter != iterEnd; ++iter) {
llvm::Constant *Idxs[] = {Zeros[0],
llvm::ConstantInt::get(llvm::Type::Int32Ty, index++), Zeros[0]};
VMContext.getConstantInt(llvm::Type::Int32Ty, index++), Zeros[0]};
llvm::Constant *SelPtr = new llvm::GlobalVariable(TheModule, SelStructPtrTy,
true, llvm::GlobalValue::InternalLinkage,
llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2),
VMContext.getConstantExprGetElementPtr(SelectorList, Idxs, 2),
".objc_sel_ptr");
// If selectors are defined as an opaque type, cast the pointer to this
// type.
if (isSelOpaque) {
SelPtr = llvm::ConstantExpr::getBitCast(SelPtr,
llvm::PointerType::getUnqual(SelectorTy));
SelPtr = VMContext.getConstantExprBitCast(SelPtr,
VMContext.getPointerTypeUnqual(SelectorTy));
}
(*iter).second->setAliasee(SelPtr);
}
@ -1117,50 +1121,51 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
iter=UntypedSelectors.begin(), iterEnd = UntypedSelectors.end();
iter != iterEnd; iter++) {
llvm::Constant *Idxs[] = {Zeros[0],
llvm::ConstantInt::get(llvm::Type::Int32Ty, index++), Zeros[0]};
VMContext.getConstantInt(llvm::Type::Int32Ty, index++), Zeros[0]};
llvm::Constant *SelPtr = new llvm::GlobalVariable(TheModule, SelStructPtrTy,
true, llvm::GlobalValue::InternalLinkage,
llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2),
VMContext.getConstantExprGetElementPtr(SelectorList, Idxs, 2),
".objc_sel_ptr");
// If selectors are defined as an opaque type, cast the pointer to this
// type.
if (isSelOpaque) {
SelPtr = llvm::ConstantExpr::getBitCast(SelPtr,
llvm::PointerType::getUnqual(SelectorTy));
SelPtr = VMContext.getConstantExprBitCast(SelPtr,
VMContext.getPointerTypeUnqual(SelectorTy));
}
(*iter).second->setAliasee(SelPtr);
}
// Number of classes defined.
Elements.push_back(llvm::ConstantInt::get(llvm::Type::Int16Ty,
Elements.push_back(VMContext.getConstantInt(llvm::Type::Int16Ty,
Classes.size()));
// Number of categories defined
Elements.push_back(llvm::ConstantInt::get(llvm::Type::Int16Ty,
Elements.push_back(VMContext.getConstantInt(llvm::Type::Int16Ty,
Categories.size()));
// Create an array of classes, then categories, then static object instances
Classes.insert(Classes.end(), Categories.begin(), Categories.end());
// NULL-terminated list of static object instances (mainly constant strings)
Classes.push_back(Statics);
Classes.push_back(NULLPtr);
llvm::Constant *ClassList = llvm::ConstantArray::get(ClassListTy, Classes);
llvm::Constant *ClassList = VMContext.getConstantArray(ClassListTy, Classes);
Elements.push_back(ClassList);
// Construct the symbol table
llvm::Constant *SymTab= MakeGlobal(SymTabTy, Elements);
// The symbol table is contained in a module which has some version-checking
// constants
llvm::StructType * ModuleTy = llvm::StructType::get(LongTy, LongTy,
PtrToInt8Ty, llvm::PointerType::getUnqual(SymTabTy), NULL);
llvm::StructType * ModuleTy = VMContext.getStructType(LongTy, LongTy,
PtrToInt8Ty, VMContext.getPointerTypeUnqual(SymTabTy), NULL);
Elements.clear();
// Runtime version used for compatibility checking.
if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
Elements.push_back(llvm::ConstantInt::get(LongTy,
Elements.push_back(VMContext.getConstantInt(LongTy,
NonFragileRuntimeVersion));
} else {
Elements.push_back(llvm::ConstantInt::get(LongTy, RuntimeVersion));
Elements.push_back(VMContext.getConstantInt(LongTy, RuntimeVersion));
}
// sizeof(ModuleTy)
llvm::TargetData td = llvm::TargetData::TargetData(&TheModule);
Elements.push_back(llvm::ConstantInt::get(LongTy, td.getTypeSizeInBits(ModuleTy)/8));
Elements.push_back(VMContext.getConstantInt(LongTy,
td.getTypeSizeInBits(ModuleTy)/8));
//FIXME: Should be the path to the file where this module was declared
Elements.push_back(NULLPtr);
Elements.push_back(SymTab);
@ -1169,16 +1174,16 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
// Create the load function calling the runtime entry point with the module
// structure
llvm::Function * LoadFunction = llvm::Function::Create(
llvm::FunctionType::get(llvm::Type::VoidTy, false),
VMContext.getFunctionType(llvm::Type::VoidTy, false),
llvm::GlobalValue::InternalLinkage, ".objc_load_function",
&TheModule);
llvm::BasicBlock *EntryBB = llvm::BasicBlock::Create("entry", LoadFunction);
CGBuilderTy Builder(TheModule.getContext());
CGBuilderTy Builder(VMContext);
Builder.SetInsertPoint(EntryBB);
std::vector<const llvm::Type*> Params(1,
llvm::PointerType::getUnqual(ModuleTy));
llvm::Value *Register = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
VMContext.getPointerTypeUnqual(ModuleTy));
llvm::Value *Register = CGM.CreateRuntimeFunction(VMContext.getFunctionType(
llvm::Type::VoidTy, Params, true), "__objc_exec_class");
Builder.CreateCall(Register, Module);
Builder.CreateRetVoid();
@ -1219,7 +1224,7 @@ llvm::Function *CGObjCGNU::GetPropertyGetFunction() {
Params.push_back(BoolTy);
// void objc_getProperty (id, SEL, ptrdiff_t, bool)
const llvm::FunctionType *FTy =
llvm::FunctionType::get(IdTy, Params, false);
VMContext.getFunctionType(IdTy, Params, false);
return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
"objc_getProperty"));
}
@ -1237,7 +1242,7 @@ llvm::Function *CGObjCGNU::GetPropertySetFunction() {
Params.push_back(BoolTy);
// void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool)
const llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::VoidTy, Params, false);
VMContext.getFunctionType(llvm::Type::VoidTy, Params, false);
return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
"objc_setProperty"));
}
@ -1245,7 +1250,7 @@ llvm::Function *CGObjCGNU::GetPropertySetFunction() {
llvm::Function *CGObjCGNU::EnumerationMutationFunction() {
std::vector<const llvm::Type*> Params(1, IdTy);
return cast<llvm::Function>(CGM.CreateRuntimeFunction(
llvm::FunctionType::get(llvm::Type::VoidTy, Params, true),
VMContext.getFunctionType(llvm::Type::VoidTy, Params, true),
"objc_enumerationMutation"));
}
@ -1253,14 +1258,14 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
const Stmt &S) {
// Pointer to the personality function
llvm::Constant *Personality =
CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::Int32Ty,
CGM.CreateRuntimeFunction(VMContext.getFunctionType(llvm::Type::Int32Ty,
true),
"__gnu_objc_personality_v0");
Personality = llvm::ConstantExpr::getBitCast(Personality, PtrTy);
Personality = VMContext.getConstantExprBitCast(Personality, PtrTy);
std::vector<const llvm::Type*> Params;
Params.push_back(PtrTy);
llvm::Value *RethrowFn =
CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
CGM.CreateRuntimeFunction(VMContext.getFunctionType(llvm::Type::VoidTy,
Params, false), "_Unwind_Resume_or_Rethrow");
bool isTry = isa<ObjCAtTryStmt>(S);
@ -1276,7 +1281,7 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
if (!isTry) {
std::vector<const llvm::Type*> Args(1, IdTy);
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
VMContext.getFunctionType(llvm::Type::VoidTy, Args, false);
llvm::Value *SyncEnter = CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
llvm::Value *SyncArg =
CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
@ -1363,7 +1368,7 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// We use a cleanup unless there was already a catch all.
if (!HasCatchAll) {
ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));
ESelArgs.push_back(VMContext.getConstantInt(llvm::Type::Int32Ty, 0));
Handlers.push_back(std::make_pair((const ParmVarDecl*) 0, (const Stmt*) 0));
}
@ -1428,7 +1433,7 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
ESelArgs.clear();
ESelArgs.push_back(Exc);
ESelArgs.push_back(Personality);
ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));
ESelArgs.push_back(VMContext.getConstantInt(llvm::Type::Int32Ty, 0));
CGF.Builder.CreateCall(llvm_eh_selector, ESelArgs.begin(), ESelArgs.end(),
"selector");
CGF.Builder.CreateCall(llvm_eh_typeid_for,
@ -1452,7 +1457,7 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// @synchronized.
std::vector<const llvm::Type*> Args(1, IdTy);
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
VMContext.getFunctionType(llvm::Type::VoidTy, Args, false);
llvm::Value *SyncExit = CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
llvm::Value *SyncArg =
CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
@ -1482,7 +1487,7 @@ void CGObjCGNU::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
std::vector<const llvm::Type*> Args(1, IdTy);
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
VMContext.getFunctionType(llvm::Type::VoidTy, Args, false);
llvm::Value *ThrowFn =
CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
@ -1567,7 +1572,7 @@ llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
if (!IvarOffsetGV) {
uint64_t Offset = ComputeIvarBaseOffset(CGM, ID, Ivar);
llvm::ConstantInt *OffsetGuess =
llvm::ConstantInt::get(LongTy, Offset, "ivar");
VMContext.getConstantInt(LongTy, Offset, "ivar");
IvarOffsetGV = new llvm::GlobalVariable(TheModule, LongTy, false,
llvm::GlobalValue::CommonLinkage, OffsetGuess, Name);
}
@ -1610,7 +1615,7 @@ llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
false, "ivar");
}
uint64_t Offset = ComputeIvarBaseOffset(CGF.CGM, Interface, Ivar);
return llvm::ConstantInt::get(LongTy, Offset, "ivar");
return VMContext.getConstantInt(LongTy, Offset, "ivar");
}
CodeGen::CGObjCRuntime *CodeGen::CreateGNUObjCRuntime(CodeGen::CodeGenModule &CGM){
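Taken together, the CGObjCGNU hunks above apply one mechanical rewrite: the static factory calls on llvm::Constant and llvm::Type subclasses become member calls on the LLVMContext that owns the module. A condensed sketch of the mapping, assuming the interim context methods this revision introduces and the CGObjCGNU fields visible above (CGM, LongTy, SelStructTy, SelStructPtrTy):

llvm::LLVMContext &VMContext = CGM.getLLVMContext();
// was llvm::ConstantInt::get(LongTy, N)
llvm::Constant *Count = VMContext.getConstantInt(LongTy, Selectors.size());
// was llvm::ConstantStruct::get(SelStructTy, Elements)
llvm::Constant *Entry = VMContext.getConstantStruct(SelStructTy, Elements);
// was llvm::ConstantExpr::getBitCast(Entry, SelStructPtrTy)
llvm::Constant *Cast = VMContext.getConstantExprBitCast(Entry, SelStructPtrTy);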

File diff suppressed because it is too large

View File

@ -156,7 +156,7 @@ void CodeGenFunction::StartFunction(const Decl *D, QualType RetTy,
// Create a marker to make it easy to insert allocas into the entryblock
// later. Don't create this with the builder, because we don't want it
// folded.
llvm::Value *Undef = llvm::UndefValue::get(llvm::Type::Int32Ty);
llvm::Value *Undef = VMContext.getUndef(llvm::Type::Int32Ty);
AllocaInsertPt = new llvm::BitCastInst(Undef, llvm::Type::Int32Ty, "",
EntryBB);
if (Builder.isNamePreserving())
@ -399,7 +399,7 @@ unsigned CodeGenFunction::GetIDForAddrOfLabel(const LabelStmt *L) {
}
void CodeGenFunction::EmitMemSetToZero(llvm::Value *DestPtr, QualType Ty) {
const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
const llvm::Type *BP = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
if (DestPtr->getType() != BP)
DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
@ -411,13 +411,13 @@ void CodeGenFunction::EmitMemSetToZero(llvm::Value *DestPtr, QualType Ty) {
return;
// FIXME: Handle variable sized types.
const llvm::Type *IntPtr = llvm::IntegerType::get(LLVMPointerWidth);
const llvm::Type *IntPtr = VMContext.getIntegerType(LLVMPointerWidth);
Builder.CreateCall4(CGM.getMemSetFn(), DestPtr,
getLLVMContext().getNullValue(llvm::Type::Int8Ty),
// TypeInfo.first describes size in bits.
llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
llvm::ConstantInt::get(llvm::Type::Int32Ty,
VMContext.getConstantInt(IntPtr, TypeInfo.first/8),
VMContext.getConstantInt(llvm::Type::Int32Ty,
TypeInfo.second/8));
}
@ -443,7 +443,7 @@ void CodeGenFunction::EmitIndirectSwitches() {
I->setSuccessor(0, Default);
for (std::map<const LabelStmt*,unsigned>::iterator LI = LabelIDs.begin(),
LE = LabelIDs.end(); LI != LE; ++LI) {
I->addCase(llvm::ConstantInt::get(llvm::Type::Int32Ty,
I->addCase(VMContext.getConstantInt(llvm::Type::Int32Ty,
LI->second),
getBasicBlockForLabel(LI->first));
}
@ -477,7 +477,7 @@ llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty)
if (ElemTy->isVariableArrayType())
ElemSize = EmitVLASize(ElemTy);
else {
ElemSize = llvm::ConstantInt::get(SizeTy,
ElemSize = VMContext.getConstantInt(SizeTy,
getContext().getTypeSize(ElemTy) / 8);
}
@ -596,13 +596,13 @@ CodeGenFunction::CleanupBlockInfo CodeGenFunction::PopCleanupBlock()
// Check if we already have a destination for this block.
if (Dest == SI->getDefaultDest())
ID = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
ID = VMContext.getConstantInt(llvm::Type::Int32Ty, 0);
else {
ID = SI->findCaseDest(Dest);
if (!ID) {
// No code found, get a new unique one by using the number of
// switch successors.
ID = llvm::ConstantInt::get(llvm::Type::Int32Ty,
ID = VMContext.getConstantInt(llvm::Type::Int32Ty,
SI->getNumSuccessors());
SI->addCase(ID, Dest);
}
@ -619,7 +619,7 @@ CodeGenFunction::CleanupBlockInfo CodeGenFunction::PopCleanupBlock()
llvm::BasicBlock *CleanupPad = createBasicBlock("cleanup.pad", CurFn);
// Create a unique case ID.
llvm::ConstantInt *ID = llvm::ConstantInt::get(llvm::Type::Int32Ty,
llvm::ConstantInt *ID = VMContext.getConstantInt(llvm::Type::Int32Ty,
SI->getNumSuccessors());
// Store the jump destination before the branch instruction.
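A point worth keeping in mind for the CodeGenFunction changes: constants are uniqued per LLVMContext rather than globally, so the i32 case IDs, null bytes and undef markers built here are shared within this module's context but isolated from any other context. A tiny sketch of that property, assuming the interim getConstantInt behaves like the usual LLVM constant uniquing:

llvm::Constant *A = VMContext.getConstantInt(llvm::Type::Int32Ty, 0);
llvm::Constant *B = VMContext.getConstantInt(llvm::Type::Int32Ty, 0);
assert(A == B && "equal constants in one context are the same object");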

View File

@ -294,7 +294,7 @@ public:
llvm::BasicBlock *getInvokeDest() { return InvokeDest; }
void setInvokeDest(llvm::BasicBlock *B) { InvokeDest = B; }
llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
llvm::LLVMContext &getLLVMContext() { return VMContext; }
//===--------------------------------------------------------------------===//
// Objective-C

View File

@ -40,7 +40,8 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CompileOptions &compileOpts,
: BlockModule(C, M, TD, Types, *this), Context(C),
Features(C.getLangOptions()), CompileOpts(compileOpts), TheModule(M),
TheTargetData(TD), Diags(diags), Types(C, M, TD), Runtime(0),
MemCpyFn(0), MemMoveFn(0), MemSetFn(0), CFConstantStringClassRef(0) {
MemCpyFn(0), MemMoveFn(0), MemSetFn(0), CFConstantStringClassRef(0),
VMContext(M.getContext()) {
if (!Features.ObjC1)
Runtime = 0;
@ -195,30 +196,31 @@ void CodeGenModule::AddGlobalDtor(llvm::Function * Dtor, int Priority) {
void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) {
// Ctor function type is void()*.
llvm::FunctionType* CtorFTy =
llvm::FunctionType::get(llvm::Type::VoidTy,
VMContext.getFunctionType(llvm::Type::VoidTy,
std::vector<const llvm::Type*>(),
false);
llvm::Type *CtorPFTy = llvm::PointerType::getUnqual(CtorFTy);
llvm::Type *CtorPFTy = VMContext.getPointerTypeUnqual(CtorFTy);
// Get the type of a ctor entry, { i32, void ()* }.
llvm::StructType* CtorStructTy =
llvm::StructType::get(llvm::Type::Int32Ty,
llvm::PointerType::getUnqual(CtorFTy), NULL);
VMContext.getStructType(llvm::Type::Int32Ty,
VMContext.getPointerTypeUnqual(CtorFTy), NULL);
// Construct the constructor and destructor arrays.
std::vector<llvm::Constant*> Ctors;
for (CtorList::const_iterator I = Fns.begin(), E = Fns.end(); I != E; ++I) {
std::vector<llvm::Constant*> S;
S.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, I->second, false));
S.push_back(llvm::ConstantExpr::getBitCast(I->first, CtorPFTy));
Ctors.push_back(llvm::ConstantStruct::get(CtorStructTy, S));
S.push_back(
VMContext.getConstantInt(llvm::Type::Int32Ty, I->second, false));
S.push_back(VMContext.getConstantExprBitCast(I->first, CtorPFTy));
Ctors.push_back(VMContext.getConstantStruct(CtorStructTy, S));
}
if (!Ctors.empty()) {
llvm::ArrayType *AT = llvm::ArrayType::get(CtorStructTy, Ctors.size());
llvm::ArrayType *AT = VMContext.getArrayType(CtorStructTy, Ctors.size());
new llvm::GlobalVariable(TheModule, AT, false,
llvm::GlobalValue::AppendingLinkage,
llvm::ConstantArray::get(AT, Ctors),
VMContext.getConstantArray(AT, Ctors),
GlobalName);
}
}
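For reference, the entries EmitCtorList builds land in the usual appending-linkage ctor array. A rough sketch of what one priority-65535 entry produces, where the global name llvm.global_ctors and the ctor symbol _GLOBAL__I_a are illustrative rather than taken from this hunk:

// Emitted IR (sketch):
//   @llvm.global_ctors = appending global [1 x { i32, void ()* }]
//                        [{ i32 65535, void ()* @_GLOBAL__I_a }]
llvm::ArrayType *AT = VMContext.getArrayType(CtorStructTy, Ctors.size());
new llvm::GlobalVariable(TheModule, AT, false,
                         llvm::GlobalValue::AppendingLinkage,
                         VMContext.getConstantArray(AT, Ctors),
                         "llvm.global_ctors");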
@ -229,7 +231,7 @@ void CodeGenModule::EmitAnnotations() {
// Create a new global variable for the ConstantStruct in the Module.
llvm::Constant *Array =
llvm::ConstantArray::get(llvm::ArrayType::get(Annotations[0]->getType(),
VMContext.getConstantArray(VMContext.getArrayType(Annotations[0]->getType(),
Annotations.size()),
Annotations);
llvm::GlobalValue *gv =
@ -418,26 +420,27 @@ void CodeGenModule::EmitLLVMUsed() {
if (LLVMUsed.empty() && !Runtime)
return;
llvm::Type *i8PTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
llvm::Type *i8PTy = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
// Convert LLVMUsed to what ConstantArray needs.
std::vector<llvm::Constant*> UsedArray;
UsedArray.resize(LLVMUsed.size());
for (unsigned i = 0, e = LLVMUsed.size(); i != e; ++i) {
UsedArray[i] =
llvm::ConstantExpr::getBitCast(cast<llvm::Constant>(&*LLVMUsed[i]), i8PTy);
VMContext.getConstantExprBitCast(cast<llvm::Constant>(&*LLVMUsed[i]),
i8PTy);
}
if (Runtime)
Runtime->MergeMetadataGlobals(UsedArray);
if (UsedArray.empty())
return;
llvm::ArrayType *ATy = llvm::ArrayType::get(i8PTy, UsedArray.size());
llvm::ArrayType *ATy = VMContext.getArrayType(i8PTy, UsedArray.size());
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(getModule(), ATy, false,
llvm::GlobalValue::AppendingLinkage,
llvm::ConstantArray::get(ATy, UsedArray),
VMContext.getConstantArray(ATy, UsedArray),
"llvm.used");
GV->setSection("llvm.metadata");
@ -484,9 +487,9 @@ llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
// get [N x i8] constants for the annotation string, and the filename string
// which are the 2nd and 3rd elements of the global annotation structure.
const llvm::Type *SBP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
llvm::Constant *anno = llvm::ConstantArray::get(AA->getAnnotation(), true);
llvm::Constant *unit = llvm::ConstantArray::get(M->getModuleIdentifier(),
const llvm::Type *SBP = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
llvm::Constant *anno = VMContext.getConstantArray(AA->getAnnotation(), true);
llvm::Constant *unit = VMContext.getConstantArray(M->getModuleIdentifier(),
true);
// Get the two global values corresponding to the ConstantArrays we just
@ -504,12 +507,12 @@ llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
// Create the ConstantStruct for the global annotation.
llvm::Constant *Fields[4] = {
llvm::ConstantExpr::getBitCast(GV, SBP),
llvm::ConstantExpr::getBitCast(annoGV, SBP),
llvm::ConstantExpr::getBitCast(unitGV, SBP),
llvm::ConstantInt::get(llvm::Type::Int32Ty, LineNo)
VMContext.getConstantExprBitCast(GV, SBP),
VMContext.getConstantExprBitCast(annoGV, SBP),
VMContext.getConstantExprBitCast(unitGV, SBP),
VMContext.getConstantInt(llvm::Type::Int32Ty, LineNo)
};
return llvm::ConstantStruct::get(Fields, 4, false);
return VMContext.getConstantStruct(Fields, 4, false);
}
bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
@ -622,8 +625,8 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(const char *MangledName,
return Entry;
// Make sure the result is of the correct type.
const llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
return llvm::ConstantExpr::getBitCast(Entry, PTy);
const llvm::Type *PTy = VMContext.getPointerTypeUnqual(Ty);
return VMContext.getConstantExprBitCast(Entry, PTy);
}
// This is the first use or definition of a mangled name. If there is a
@ -649,7 +652,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(const char *MangledName,
// sure not to try to set attributes.
bool IsIncompleteFunction = false;
if (!isa<llvm::FunctionType>(Ty)) {
Ty = llvm::FunctionType::get(llvm::Type::VoidTy,
Ty = VMContext.getFunctionType(llvm::Type::VoidTy,
std::vector<const llvm::Type*>(), false);
IsIncompleteFunction = true;
}
@ -702,7 +705,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMGlobal(const char *MangledName,
return Entry;
// Make sure the result is of the correct type.
return llvm::ConstantExpr::getBitCast(Entry, Ty);
return VMContext.getConstantExprBitCast(Entry, Ty);
}
// This is the first use or definition of a mangled name. If there is a
@ -757,7 +760,7 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
Ty = getTypes().ConvertTypeForMem(ASTTy);
const llvm::PointerType *PTy =
llvm::PointerType::get(Ty, ASTTy.getAddressSpace());
VMContext.getPointerType(Ty, ASTTy.getAddressSpace());
return GetOrCreateLLVMGlobal(getMangledName(D), PTy, D);
}
@ -768,7 +771,7 @@ CodeGenModule::CreateRuntimeVariable(const llvm::Type *Ty,
const char *Name) {
// Convert Name to be a uniqued string from the IdentifierInfo table.
Name = getContext().Idents.get(Name).getName();
return GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), 0);
return GetOrCreateLLVMGlobal(Name, VMContext.getPointerTypeUnqual(Ty), 0);
}
void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
@ -810,7 +813,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
if (!Init) {
ErrorUnsupported(D, "static initializer");
QualType T = D->getInit()->getType();
Init = llvm::UndefValue::get(getTypes().ConvertType(T));
Init = VMContext.getUndef(getTypes().ConvertType(T));
}
}
@ -848,7 +851,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
// Replace all uses of the old global with the new global
llvm::Constant *NewPtrForOldDecl =
llvm::ConstantExpr::getBitCast(GV, Entry->getType());
VMContext.getConstantExprBitCast(GV, Entry->getType());
Entry->replaceAllUsesWith(NewPtrForOldDecl);
// Erase the old global, since it is no longer used.
@ -974,7 +977,7 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
// Just create the same type as was lowered by ConvertType
// but strip off the varargs bit.
std::vector<const llvm::Type*> Args(Ty->param_begin(), Ty->param_end());
Ty = llvm::FunctionType::get(Ty->getReturnType(), Args, false);
Ty = VMContext.getFunctionType(Ty->getReturnType(), Args, false);
}
}
@ -1018,7 +1021,7 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
// Replace uses of F with the Function we will endow with a body.
if (!Entry->use_empty()) {
llvm::Constant *NewPtrForOldDecl =
llvm::ConstantExpr::getBitCast(NewFn, Entry->getType());
VMContext.getConstantExprBitCast(NewFn, Entry->getType());
Entry->replaceAllUsesWith(NewPtrForOldDecl);
}
@ -1058,7 +1061,7 @@ void CodeGenModule::EmitAliasDefinition(const ValueDecl *D) {
Aliasee = GetOrCreateLLVMFunction(AliaseeName, DeclTy, GlobalDecl());
else
Aliasee = GetOrCreateLLVMGlobal(AliaseeName,
llvm::PointerType::getUnqual(DeclTy), 0);
VMContext.getPointerTypeUnqual(DeclTy), 0);
// Create the new alias itself, but don't set a name yet.
llvm::GlobalValue *GA =
@ -1086,7 +1089,7 @@ void CodeGenModule::EmitAliasDefinition(const ValueDecl *D) {
//
// Remove it and replace uses of it with the alias.
Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GA,
Entry->replaceAllUsesWith(VMContext.getConstantExprBitCast(GA,
Entry->getType()));
Entry->eraseFromParent();
}
@ -1237,7 +1240,7 @@ GetAddrOfConstantCFString(const StringLiteral *Literal) {
if (!CFConstantStringClassRef) {
const llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
Ty = llvm::ArrayType::get(Ty, 0);
Ty = VMContext.getArrayType(Ty, 0);
// FIXME: This is fairly broken if __CFConstantStringClassReference is
// already defined, in that it will get renamed and the user will most
@ -1250,7 +1253,7 @@ GetAddrOfConstantCFString(const StringLiteral *Literal) {
// Decay array -> ptr
CFConstantStringClassRef =
llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
VMContext.getConstantExprGetElementPtr(GV, Zeros, 2);
}
QualType CFTy = getContext().getCFConstantStringType();
@ -1273,14 +1276,14 @@ GetAddrOfConstantCFString(const StringLiteral *Literal) {
NextField = *Field++;
const llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
appendFieldAndPadding(*this, Fields, CurField, NextField,
isUTF16 ? llvm::ConstantInt::get(Ty, 0x07d0)
: llvm::ConstantInt::get(Ty, 0x07C8),
isUTF16 ? VMContext.getConstantInt(Ty, 0x07d0)
: VMContext.getConstantInt(Ty, 0x07C8),
CFRD, STy);
// String pointer.
CurField = NextField;
NextField = *Field++;
llvm::Constant *C = llvm::ConstantArray::get(str);
llvm::Constant *C = VMContext.getConstantArray(str);
const char *Sect, *Prefix;
bool isConstant;
@ -1307,7 +1310,7 @@ GetAddrOfConstantCFString(const StringLiteral *Literal) {
GV->setAlignment(Align);
}
appendFieldAndPadding(*this, Fields, CurField, NextField,
llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2),
VMContext.getConstantExprGetElementPtr(GV, Zeros, 2),
CFRD, STy);
// String length.
@ -1315,10 +1318,10 @@ GetAddrOfConstantCFString(const StringLiteral *Literal) {
NextField = 0;
Ty = getTypes().ConvertType(getContext().LongTy);
appendFieldAndPadding(*this, Fields, CurField, NextField,
llvm::ConstantInt::get(Ty, StringLength), CFRD, STy);
VMContext.getConstantInt(Ty, StringLength), CFRD, STy);
// The struct.
C = llvm::ConstantStruct::get(STy, Fields);
C = VMContext.getConstantStruct(STy, Fields);
GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
llvm::GlobalVariable::InternalLinkage, C,
getContext().Target.getCFStringSymbolPrefix());
@ -1376,7 +1379,7 @@ static llvm::Constant *GenerateStringLiteral(const std::string &str,
CodeGenModule &CGM,
const char *GlobalName) {
// Create Constant for this string literal. Don't add a '\0'.
llvm::Constant *C = llvm::ConstantArray::get(str, false);
llvm::Constant *C = CGM.getLLVMContext().getConstantArray(str, false);
// Create a global variable for this string
return new llvm::GlobalVariable(CGM.getModule(), C->getType(), constant,

View File

@ -176,6 +176,8 @@ class CodeGenModule : public BlockModule {
/// CFConstantStringClassRef - Cached reference to the class for constant
/// strings. This value has type int * but is actually an Obj-C class pointer.
llvm::Constant *CFConstantStringClassRef;
llvm::LLVMContext &VMContext;
public:
CodeGenModule(ASTContext &C, const CompileOptions &CompileOpts,
llvm::Module &M, const llvm::TargetData &TD, Diagnostic &Diags);
@ -204,7 +206,7 @@ public:
CodeGenTypes &getTypes() { return Types; }
Diagnostic &getDiags() const { return Diags; }
const llvm::TargetData &getTargetData() const { return TheTargetData; }
llvm::LLVMContext &getLLVMContext() { return TheModule.getContext(); }
llvm::LLVMContext &getLLVMContext() { return VMContext; }
/// getDeclVisibilityMode - Compute the visibility of the decl \arg D.
LangOptions::VisibilityMode getDeclVisibilityMode(const Decl *D) const;

View File

@ -183,16 +183,20 @@ namespace {
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
ABIArgInfo classifyReturnType(QualType RetTy,
ASTContext &Context) const;
ASTContext &Context,
llvm::LLVMContext &VMContext) const;
ABIArgInfo classifyArgumentType(QualType RetTy,
ASTContext &Context) const;
ASTContext &Context,
llvm::LLVMContext &VMContext) const;
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
llvm::LLVMContext &VMContext) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
VMContext);
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
it->info = classifyArgumentType(it->type, Context);
it->info = classifyArgumentType(it->type, Context, VMContext);
}
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@ -215,16 +219,20 @@ class X86_32ABIInfo : public ABIInfo {
public:
ABIArgInfo classifyReturnType(QualType RetTy,
ASTContext &Context) const;
ASTContext &Context,
llvm::LLVMContext &VMContext) const;
ABIArgInfo classifyArgumentType(QualType RetTy,
ASTContext &Context) const;
ASTContext &Context,
llvm::LLVMContext &VMContext) const;
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
llvm::LLVMContext &VMContext) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
VMContext);
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
it->info = classifyArgumentType(it->type, Context);
it->info = classifyArgumentType(it->type, Context, VMContext);
}
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@ -286,7 +294,8 @@ bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
}
ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
ASTContext &Context) const {
ASTContext &Context,
llvm::LLVMContext &VMContext) const {
if (RetTy->isVoidType()) {
return ABIArgInfo::getIgnore();
} else if (const VectorType *VT = RetTy->getAsVectorType()) {
@ -298,14 +307,15 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
// registers and we need to make sure to pick a type the LLVM
// backend will like.
if (Size == 128)
return ABIArgInfo::getCoerce(llvm::VectorType::get(llvm::Type::Int64Ty,
return
ABIArgInfo::getCoerce(VMContext.getVectorType(llvm::Type::Int64Ty,
2));
// Always return in register if it fits in a general purpose
// register, or if it is 64 bits and has a single element.
if ((Size == 8 || Size == 16 || Size == 32) ||
(Size == 64 && VT->getNumElements() == 1))
return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));
return ABIArgInfo::getCoerce(VMContext.getIntegerType(Size));
return ABIArgInfo::getIndirect(0);
}
@ -329,7 +339,8 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
// bit-fields can adjust that to be larger than the single
// element type.
uint64_t Size = Context.getTypeSize(RetTy);
return ABIArgInfo::getCoerce(llvm::IntegerType::get((unsigned) Size));
return ABIArgInfo::getCoerce(
VMContext.getIntegerType((unsigned) Size));
} else if (BT->getKind() == BuiltinType::Float) {
assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
"Unexpect single element structure size!");
@ -343,7 +354,7 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
// FIXME: It would be really nice if this could come out as the proper
// pointer type.
llvm::Type *PtrTy =
llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
return ABIArgInfo::getCoerce(PtrTy);
} else if (SeltTy->isVectorType()) {
// 64- and 128-bit vectors are never returned in a
@ -352,7 +363,7 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
if (Size == 64 || Size == 128)
return ABIArgInfo::getIndirect(0);
return classifyReturnType(QualType(SeltTy, 0), Context);
return classifyReturnType(QualType(SeltTy, 0), Context, VMContext);
}
}
@ -360,7 +371,7 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
// in a register.
if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) {
uint64_t Size = Context.getTypeSize(RetTy);
return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));
return ABIArgInfo::getCoerce(VMContext.getIntegerType(Size));
}
return ABIArgInfo::getIndirect(0);
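A worked instance of the in-register return path just above: assuming shouldReturnTypeInRegister accepts the type, a four-byte aggregate such as struct { short lo, hi; } has Size == 32 and is returned coerced to a context-built i32 instead of through a hidden sret pointer.

// Sketch, with the values for struct { short lo, hi; } on i386:
uint64_t Size = Context.getTypeSize(RetTy);                   // 32
return ABIArgInfo::getCoerce(VMContext.getIntegerType(Size)); // coerce to i32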
@ -381,7 +392,8 @@ unsigned X86_32ABIInfo::getIndirectArgumentAlignment(QualType Ty,
}
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
ASTContext &Context) const {
ASTContext &Context,
llvm::LLVMContext &VMContext) const {
// FIXME: Set alignment on indirect arguments.
if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
// Structures with flexible arrays are always indirect.
@ -412,22 +424,23 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
llvm::LLVMContext &VMContext = CGF.getLLVMContext();
const llvm::Type *BP = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
const llvm::Type *BPP = VMContext.getPointerTypeUnqual(BP);
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
"ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
llvm::Type *PTy =
llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
VMContext.getPointerTypeUnqual(CGF.ConvertType(Ty));
llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
uint64_t Offset =
llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
llvm::Value *NextAddr =
Builder.CreateGEP(Addr,
llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset),
VMContext.getConstantInt(llvm::Type::Int32Ty, Offset),
"ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
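The only arithmetic in the i386 va_arg lowering above is the slot round-up: the cursor advances by the argument size rounded up to the 4-byte stack slot, so a 6-byte argument moves ap forward by 8 and a plain int by 4. A minimal sketch of that step:

// e.g. getTypeSize(Ty) == 48 bits -> 6 bytes -> rounded up to 8
uint64_t Offset =
  llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
llvm::Value *Step = VMContext.getConstantInt(llvm::Type::Int32Ty, Offset);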
@ -502,15 +515,18 @@ class X86_64ABIInfo : public ABIInfo {
ASTContext &Context) const;
ABIArgInfo classifyReturnType(QualType RetTy,
ASTContext &Context) const;
ASTContext &Context,
llvm::LLVMContext &VMContext) const;
ABIArgInfo classifyArgumentType(QualType Ty,
ASTContext &Context,
llvm::LLVMContext &VMContext,
unsigned &neededInt,
unsigned &neededSSE) const;
public:
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const;
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
llvm::LLVMContext &VMContext) const;
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
@ -814,7 +830,8 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
}
ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
ASTContext &Context) const {
ASTContext &Context,
llvm::LLVMContext &VMContext) const {
// AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
// classification algorithm.
X86_64ABIInfo::Class Lo, Hi;
@ -859,7 +876,7 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
// %st1.
case ComplexX87:
assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
ResType = llvm::StructType::get(llvm::Type::X86_FP80Ty,
ResType = VMContext.getStructType(llvm::Type::X86_FP80Ty,
llvm::Type::X86_FP80Ty,
NULL);
break;
@ -876,10 +893,10 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
case NoClass: break;
case Integer:
ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
ResType = VMContext.getStructType(ResType, llvm::Type::Int64Ty, NULL);
break;
case SSE:
ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
ResType = VMContext.getStructType(ResType, llvm::Type::DoubleTy, NULL);
break;
// AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
@ -888,7 +905,7 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
// SSEUP should always be preceded by SSE, just widen.
case SSEUp:
assert(Lo == SSE && "Unexpected SSEUp classification.");
ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
ResType = VMContext.getVectorType(llvm::Type::DoubleTy, 2);
break;
// AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
@ -899,7 +916,7 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
// preceded by X87. In such situations we follow gcc and pass the
// extra bits in an SSE reg.
if (Lo != X87)
ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
ResType = VMContext.getStructType(ResType, llvm::Type::DoubleTy, NULL);
break;
}
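A worked instance of the merge above: for struct S { long i; double d; } the classifier gives Lo = Integer and Hi = SSE, so the low eightbyte is an i64 and the SSE case widens the coercion type to a two-element struct, which the backend then returns in RAX:XMM0.

// Sketch, assuming the Integer case earlier in this function already set
// ResType to i64:
ResType = VMContext.getStructType(ResType, llvm::Type::DoubleTy, NULL); // { i64, double }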
@ -907,6 +924,7 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
}
ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
llvm::LLVMContext &VMContext,
unsigned &neededInt,
unsigned &neededSSE) const {
X86_64ABIInfo::Class Lo, Hi;
@ -968,7 +986,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
case NoClass: break;
case Integer:
ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
ResType = VMContext.getStructType(ResType, llvm::Type::Int64Ty, NULL);
++neededInt;
break;
@ -976,7 +994,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
// memory), except in situations involving unions.
case X87Up:
case SSE:
ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
ResType = VMContext.getStructType(ResType, llvm::Type::DoubleTy, NULL);
++neededSSE;
break;
@ -985,15 +1003,17 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
// register.
case SSEUp:
assert(Lo == SSE && "Unexpected SSEUp classification.");
ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
ResType = VMContext.getVectorType(llvm::Type::DoubleTy, 2);
break;
}
return getCoerceResult(Ty, ResType, Context);
}
void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
llvm::LLVMContext &VMContext) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
Context, VMContext);
// Keep track of the number of assigned registers.
unsigned freeIntRegs = 6, freeSSERegs = 8;
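The computeInfo implementations in this file only receive the context as a parameter; the caller, which is not part of this diff, is the one that has to fetch it from whoever owns the module. A hypothetical sketch of such a call site (getABIInfo() and CGM are stand-ins, not names taken from this patch):

const ABIInfo &Info = getABIInfo();
Info.computeInfo(FI, Context, CGM.getLLVMContext());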
@ -1008,7 +1028,8 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it) {
unsigned neededInt, neededSSE;
it->info = classifyArgumentType(it->type, Context, neededInt, neededSSE);
it->info = classifyArgumentType(it->type, Context, VMContext,
neededInt, neededSSE);
// AMD64-ABI 3.2.3p3: If there are no registers available for any
// eightbyte of an argument, the whole argument is passed on the
@ -1026,6 +1047,7 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
QualType Ty,
CodeGenFunction &CGF) {
llvm::LLVMContext &VMContext = CGF.getLLVMContext();
llvm::Value *overflow_arg_area_p =
CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
llvm::Value *overflow_arg_area =
@ -1040,11 +1062,11 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
// shouldn't ever matter in practice.
// overflow_arg_area = (overflow_arg_area + 15) & ~15;
llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty, 15);
llvm::Value *Offset = VMContext.getConstantInt(llvm::Type::Int32Ty, 15);
overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
llvm::Type::Int64Ty);
llvm::Value *Mask = llvm::ConstantInt::get(llvm::Type::Int64Ty, ~15LL);
llvm::Value *Mask = VMContext.getConstantInt(llvm::Type::Int64Ty, ~15LL);
overflow_arg_area =
CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
overflow_arg_area->getType(),
@ -1055,7 +1077,7 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
llvm::Value *Res =
CGF.Builder.CreateBitCast(overflow_arg_area,
llvm::PointerType::getUnqual(LTy));
VMContext.getPointerTypeUnqual(LTy));
// AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
// l->overflow_arg_area + sizeof(type).
@ -1063,7 +1085,7 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
// an 8 byte boundary.
uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
llvm::Value *Offset = VMContext.getConstantInt(llvm::Type::Int32Ty,
(SizeInBytes + 7) & ~7);
overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
"overflow_arg_area.next");
@ -1075,6 +1097,8 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
llvm::LLVMContext &VMContext = CGF.getLLVMContext();
// Assume that va_list type is correct; should be pointer to LLVM type:
// struct {
// i32 gp_offset;
@ -1083,7 +1107,7 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// i8* reg_save_area;
// };
unsigned neededInt, neededSSE;
ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(),
ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(), VMContext,
neededInt, neededSSE);
// AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
@ -1110,7 +1134,7 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
InRegs =
CGF.Builder.CreateICmpULE(gp_offset,
llvm::ConstantInt::get(llvm::Type::Int32Ty,
VMContext.getConstantInt(llvm::Type::Int32Ty,
48 - neededInt * 8),
"fits_in_gp");
}
@ -1120,7 +1144,7 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
llvm::Value *FitsInFP =
CGF.Builder.CreateICmpULE(fp_offset,
llvm::ConstantInt::get(llvm::Type::Int32Ty,
VMContext.getConstantInt(llvm::Type::Int32Ty,
176 - neededSSE * 16),
"fits_in_fp");
InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
@ -1159,8 +1183,8 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
const llvm::Type *TyHi = ST->getElementType(1);
assert((TyLo->isFloatingPoint() ^ TyHi->isFloatingPoint()) &&
"Unexpected ABI info for mixed regs");
const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
const llvm::Type *PTyLo = VMContext.getPointerTypeUnqual(TyLo);
const llvm::Type *PTyHi = VMContext.getPointerTypeUnqual(TyHi);
llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
llvm::Value *RegLoAddr = TyLo->isFloatingPoint() ? FPAddr : GPAddr;
@ -1171,16 +1195,17 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
RegAddr = CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(LTy));
RegAddr = CGF.Builder.CreateBitCast(Tmp,
VMContext.getPointerTypeUnqual(LTy));
} else if (neededInt) {
RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
RegAddr = CGF.Builder.CreateBitCast(RegAddr,
llvm::PointerType::getUnqual(LTy));
VMContext.getPointerTypeUnqual(LTy));
} else {
if (neededSSE == 1) {
RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
RegAddr = CGF.Builder.CreateBitCast(RegAddr,
llvm::PointerType::getUnqual(LTy));
VMContext.getPointerTypeUnqual(LTy));
} else {
assert(neededSSE == 2 && "Invalid number of needed registers!");
// SSE registers are spaced 16 bytes apart in the register save
@ -1188,10 +1213,10 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
llvm::Value *RegAddrHi =
CGF.Builder.CreateGEP(RegAddrLo,
llvm::ConstantInt::get(llvm::Type::Int32Ty, 16));
VMContext.getConstantInt(llvm::Type::Int32Ty, 16));
const llvm::Type *DblPtrTy =
llvm::PointerType::getUnqual(llvm::Type::DoubleTy);
const llvm::StructType *ST = llvm::StructType::get(llvm::Type::DoubleTy,
VMContext.getPointerTypeUnqual(llvm::Type::DoubleTy);
const llvm::StructType *ST = VMContext.getStructType(llvm::Type::DoubleTy,
llvm::Type::DoubleTy,
NULL);
llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
@ -1202,7 +1227,7 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
DblPtrTy));
CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
RegAddr = CGF.Builder.CreateBitCast(Tmp,
llvm::PointerType::getUnqual(LTy));
VMContext.getPointerTypeUnqual(LTy));
}
}
@ -1210,13 +1235,13 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// l->gp_offset = l->gp_offset + num_gp * 8
// l->fp_offset = l->fp_offset + num_fp * 16.
if (neededInt) {
llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
llvm::Value *Offset = VMContext.getConstantInt(llvm::Type::Int32Ty,
neededInt * 8);
CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
gp_offset_p);
}
if (neededSSE) {
llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
llvm::Value *Offset = VMContext.getConstantInt(llvm::Type::Int32Ty,
neededSSE * 16);
CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
fp_offset_p);
@ -1243,16 +1268,20 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// ABI Info for PIC16
class PIC16ABIInfo : public ABIInfo {
ABIArgInfo classifyReturnType(QualType RetTy,
ASTContext &Context) const;
ASTContext &Context,
llvm::LLVMContext &VMContext) const;
ABIArgInfo classifyArgumentType(QualType RetTy,
ASTContext &Context) const;
ASTContext &Context,
llvm::LLVMContext &VMContext) const;
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
llvm::LLVMContext &VMContext) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
VMContext);
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
it->info = classifyArgumentType(it->type, Context);
it->info = classifyArgumentType(it->type, Context, VMContext);
}
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@ -1261,7 +1290,8 @@ class PIC16ABIInfo : public ABIInfo {
};
ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy,
ASTContext &Context) const {
ASTContext &Context,
llvm::LLVMContext &VMContext) const {
if (RetTy->isVoidType()) {
return ABIArgInfo::getIgnore();
} else {
@ -1270,7 +1300,8 @@ ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy,
}
ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
ASTContext &Context) const {
ASTContext &Context,
llvm::LLVMContext &VMContext) const {
return ABIArgInfo::getDirect();
}
@ -1281,27 +1312,33 @@ llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
class ARMABIInfo : public ABIInfo {
ABIArgInfo classifyReturnType(QualType RetTy,
ASTContext &Context) const;
ASTContext &Context,
llvm::LLVMContext &VMContext) const;
ABIArgInfo classifyArgumentType(QualType RetTy,
ASTContext &Context) const;
ASTContext &Context,
llvm::LLVMContext &VMContext) const;
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const;
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
llvm::LLVMContext &VMContext) const;
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
};
void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
llvm::LLVMContext &VMContext) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
VMContext);
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it) {
it->info = classifyArgumentType(it->type, Context);
it->info = classifyArgumentType(it->type, Context, VMContext);
}
}
ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
ASTContext &Context) const {
ASTContext &Context,
llvm::LLVMContext &VMContext) const {
if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
return (Ty->isPromotableIntegerType() ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
@ -1319,13 +1356,14 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
SizeRegs = (Context.getTypeSize(Ty) + 31) / 32;
}
std::vector<const llvm::Type*> LLVMFields;
LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
const llvm::Type* STy = llvm::StructType::get(LLVMFields, true);
LLVMFields.push_back(VMContext.getArrayType(ElemTy, SizeRegs));
const llvm::Type* STy = VMContext.getStructType(LLVMFields, true);
return ABIArgInfo::getCoerce(STy);
}
ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
ASTContext &Context) const {
ASTContext &Context,
llvm::LLVMContext &VMContext) const {
if (RetTy->isVoidType()) {
return ABIArgInfo::getIgnore();
} else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
@ -1343,23 +1381,25 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
llvm::LLVMContext &VMContext = CGF.getLLVMContext();
// FIXME: Need to handle alignment
const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
const llvm::Type *BP = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
const llvm::Type *BPP = VMContext.getPointerTypeUnqual(BP);
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
"ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
llvm::Type *PTy =
llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
VMContext.getPointerTypeUnqual(CGF.ConvertType(Ty));
llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
uint64_t Offset =
llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
llvm::Value *NextAddr =
Builder.CreateGEP(Addr,
llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset),
VMContext.getConstantInt(llvm::Type::Int32Ty, Offset),
"ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
@ -1367,7 +1407,8 @@ llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
}
ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
ASTContext &Context) const {
ASTContext &Context,
llvm::LLVMContext &VMContext) const {
if (RetTy->isVoidType()) {
return ABIArgInfo::getIgnore();
} else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
@ -1379,7 +1420,8 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
}
ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
ASTContext &Context) const {
ASTContext &Context,
llvm::LLVMContext &VMContext) const {
if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
return ABIArgInfo::getIndirect(0);
} else {