At one point there were going to be lexer and parser tokens.

Since that point is now long gone, we should rename LexerToken to
Token, as it is the only kind of token we have.


git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@40105 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Chris Lattner 2007-07-20 16:59:19 +00:00
Родитель 25bdb51276
Коммит d217773f10
26 изменённых файлов: 279 добавлений и 279 удалений

Просмотреть файл

@ -91,7 +91,7 @@ static void FindExpectedDiags(Preprocessor &PP, unsigned MainFileID,
// Enter the cave.
PP.EnterSourceFile(MainFileID, 0, true);
LexerToken Tok;
Token Tok;
do {
PP.Lex(Tok);

Просмотреть файл

@ -128,9 +128,9 @@ public:
virtual void Ident(SourceLocation Loc, const std::string &str);
void HandleFirstTokOnLine(LexerToken &Tok);
void HandleFirstTokOnLine(Token &Tok);
void MoveToLine(SourceLocation Loc);
bool AvoidConcat(const LexerToken &PrevTok, const LexerToken &Tok);
bool AvoidConcat(const Token &PrevTok, const Token &Tok);
};
}
@ -250,7 +250,7 @@ void PrintPPOutputPPCallbacks::Ident(SourceLocation Loc, const std::string &S) {
/// HandleFirstTokOnLine - When emitting a preprocessed file in -E mode, this
/// is called for the first token on each new line.
void PrintPPOutputPPCallbacks::HandleFirstTokOnLine(LexerToken &Tok) {
void PrintPPOutputPPCallbacks::HandleFirstTokOnLine(Token &Tok) {
// Figure out what line we went to and insert the appropriate number of
// newline characters.
MoveToLine(Tok.getLocation());
@ -281,7 +281,7 @@ struct UnknownPragmaHandler : public PragmaHandler {
UnknownPragmaHandler(const char *prefix, PrintPPOutputPPCallbacks *callbacks)
: PragmaHandler(0), Prefix(prefix), Callbacks(callbacks) {}
virtual void HandlePragma(Preprocessor &PP, LexerToken &PragmaTok) {
virtual void HandlePragma(Preprocessor &PP, Token &PragmaTok) {
// Figure out what line we went to and insert the appropriate number of
// newline characters.
Callbacks->MoveToLine(PragmaTok.getLocation());
@ -311,8 +311,8 @@ struct UnknownPragmaHandler : public PragmaHandler {
/// the resulting output won't have incorrect concatenations going on. Examples
/// include "..", which we print with a space between, because we don't want to
/// track enough to tell "x.." from "...".
bool PrintPPOutputPPCallbacks::AvoidConcat(const LexerToken &PrevTok,
const LexerToken &Tok) {
bool PrintPPOutputPPCallbacks::AvoidConcat(const Token &PrevTok,
const Token &Tok) {
char Buffer[256];
// If we haven't emitted a token on this line yet, PrevTok isn't useful to
@ -394,7 +394,7 @@ void clang::DoPrintPreprocessedInput(unsigned MainFileID, Preprocessor &PP,
InitOutputBuffer();
LexerToken Tok, PrevTok;
Token Tok, PrevTok;
char Buffer[256];
PrintPPOutputPPCallbacks *Callbacks = new PrintPPOutputPPCallbacks(PP);
PP.setPPCallbacks(Callbacks);

Просмотреть файл

@ -111,7 +111,7 @@ unsigned TextDiagnosticPrinter::GetTokenLength(SourceLocation Loc) {
// Create a lexer starting at the beginning of this token.
Lexer TheLexer(Loc, *ThePreprocessor, StrData);
LexerToken TheTok;
Token TheTok;
TheLexer.LexRawToken(TheTok);
return TheTok.getLength();
}

Просмотреть файл

@ -754,7 +754,7 @@ static unsigned InitializePreprocessor(Preprocessor &PP,
PP.EnterSourceFile(FileID, 0);
// Lex the file, which will read all the macros.
LexerToken Tok;
Token Tok;
PP.Lex(Tok);
assert(Tok.getKind() == tok::eof && "Didn't read entire file!");
@ -775,7 +775,7 @@ static void ProcessInputFile(Preprocessor &PP, unsigned MainFileID,
fprintf(stderr, "Unexpected program action!\n");
return;
case DumpTokens: { // Token dump mode.
LexerToken Tok;
Token Tok;
// Start parsing the specified input file.
PP.EnterSourceFile(MainFileID, 0, true);
do {
@ -786,7 +786,7 @@ static void ProcessInputFile(Preprocessor &PP, unsigned MainFileID,
break;
}
case RunPreprocessorOnly: { // Just lex as fast as we can, no output.
LexerToken Tok;
Token Tok;
// Start parsing the specified input file.
PP.EnterSourceFile(MainFileID, 0, true);
do {

Просмотреть файл

@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
// This file implements the Lexer and LexerToken interfaces.
// This file implements the Lexer and Token interfaces.
//
//===----------------------------------------------------------------------===//
//
@ -248,7 +248,7 @@ static char DecodeTrigraphChar(const char *CP, Lexer *L) {
/// be updated to match.
///
char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
LexerToken *Tok) {
Token *Tok) {
// If we have a slash, look for an escaped newline.
if (Ptr[0] == '\\') {
++Size;
@ -264,7 +264,7 @@ Slash:
++SizeTmp;
if (Ptr[SizeTmp-1] == '\n' || Ptr[SizeTmp-1] == '\r') {
// Remember that this token needs to be cleaned.
if (Tok) Tok->setFlag(LexerToken::NeedsCleaning);
if (Tok) Tok->setFlag(Token::NeedsCleaning);
// Warn if there was whitespace between the backslash and newline.
if (SizeTmp != 1 && Tok)
@ -294,7 +294,7 @@ Slash:
// a trigraph warning. If so, and if trigraphs are enabled, return it.
if (char C = DecodeTrigraphChar(Ptr+2, Tok ? this : 0)) {
// Remember that this token needs to be cleaned.
if (Tok) Tok->setFlag(LexerToken::NeedsCleaning);
if (Tok) Tok->setFlag(Token::NeedsCleaning);
Ptr += 3;
Size += 3;
@ -372,7 +372,7 @@ Slash:
// Helper methods for lexing.
//===----------------------------------------------------------------------===//
void Lexer::LexIdentifier(LexerToken &Result, const char *CurPtr) {
void Lexer::LexIdentifier(Token &Result, const char *CurPtr) {
// Match [_A-Za-z0-9]*, we have already matched [_A-Za-z$]
unsigned Size;
unsigned char C = *CurPtr++;
@ -436,7 +436,7 @@ FinishIdentifier:
/// LexNumericConstant - Lex the remainder of an integer or floating point
/// constant. From[-1] is the first character lexed. Return the end of the
/// constant.
void Lexer::LexNumericConstant(LexerToken &Result, const char *CurPtr) {
void Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
unsigned Size;
char C = getCharAndSize(CurPtr, Size);
char PrevCh = 0;
@ -463,7 +463,7 @@ void Lexer::LexNumericConstant(LexerToken &Result, const char *CurPtr) {
/// LexStringLiteral - Lex the remainder of a string literal, after having lexed
/// either " or L".
void Lexer::LexStringLiteral(LexerToken &Result, const char *CurPtr, bool Wide){
void Lexer::LexStringLiteral(Token &Result, const char *CurPtr, bool Wide){
const char *NulCharacter = 0; // Does this string contain the \0 character?
char C = getAndAdvanceChar(CurPtr, Result);
@ -495,7 +495,7 @@ void Lexer::LexStringLiteral(LexerToken &Result, const char *CurPtr, bool Wide){
/// LexAngledStringLiteral - Lex the remainder of an angled string literal,
/// after having lexed the '<' character. This is used for #include filenames.
void Lexer::LexAngledStringLiteral(LexerToken &Result, const char *CurPtr) {
void Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
const char *NulCharacter = 0; // Does this string contain the \0 character?
char C = getAndAdvanceChar(CurPtr, Result);
@ -528,7 +528,7 @@ void Lexer::LexAngledStringLiteral(LexerToken &Result, const char *CurPtr) {
/// LexCharConstant - Lex the remainder of a character constant, after having
/// lexed either ' or L'.
void Lexer::LexCharConstant(LexerToken &Result, const char *CurPtr) {
void Lexer::LexCharConstant(Token &Result, const char *CurPtr) {
const char *NulCharacter = 0; // Does this character contain the \0 character?
// Handle the common case of 'x' and '\y' efficiently.
@ -576,7 +576,7 @@ void Lexer::LexCharConstant(LexerToken &Result, const char *CurPtr) {
/// SkipWhitespace - Efficiently skip over a series of whitespace characters.
/// Update BufferPtr to point to the next non-whitespace character and return.
void Lexer::SkipWhitespace(LexerToken &Result, const char *CurPtr) {
void Lexer::SkipWhitespace(Token &Result, const char *CurPtr) {
// Whitespace - Skip it, then return the token after the whitespace.
unsigned char Char = *CurPtr; // Skip consequtive spaces efficiently.
while (1) {
@ -596,16 +596,16 @@ void Lexer::SkipWhitespace(LexerToken &Result, const char *CurPtr) {
// ok, but handle newline.
// The returned token is at the start of the line.
Result.setFlag(LexerToken::StartOfLine);
Result.setFlag(Token::StartOfLine);
// No leading whitespace seen so far.
Result.clearFlag(LexerToken::LeadingSpace);
Result.clearFlag(Token::LeadingSpace);
Char = *++CurPtr;
}
// If this isn't immediately after a newline, there is leading space.
char PrevChar = CurPtr[-1];
if (PrevChar != '\n' && PrevChar != '\r')
Result.setFlag(LexerToken::LeadingSpace);
Result.setFlag(Token::LeadingSpace);
// If the next token is obviously a // or /* */ comment, skip it efficiently
// too (without going through the big switch stmt).
@ -625,7 +625,7 @@ void Lexer::SkipWhitespace(LexerToken &Result, const char *CurPtr) {
// SkipBCPLComment - We have just read the // characters from input. Skip until
// we find the newline character that terminates the comment. Then update
/// BufferPtr and return.
bool Lexer::SkipBCPLComment(LexerToken &Result, const char *CurPtr) {
bool Lexer::SkipBCPLComment(Token &Result, const char *CurPtr) {
// If BCPL comments aren't explicitly enabled for this language, emit an
// extension warning.
if (!Features.BCPLComment) {
@ -704,15 +704,15 @@ bool Lexer::SkipBCPLComment(LexerToken &Result, const char *CurPtr) {
++CurPtr;
// The next returned token is at the start of the line.
Result.setFlag(LexerToken::StartOfLine);
Result.setFlag(Token::StartOfLine);
// No leading whitespace seen so far.
Result.clearFlag(LexerToken::LeadingSpace);
Result.clearFlag(Token::LeadingSpace);
// It is common for the tokens immediately after a // comment to be
// whitespace (indentation for the next line). Instead of going through the
// big switch, handle it efficiently now.
if (isWhitespace(*CurPtr)) {
Result.setFlag(LexerToken::LeadingSpace);
Result.setFlag(Token::LeadingSpace);
SkipWhitespace(Result, CurPtr+1);
return true;
}
@ -723,7 +723,7 @@ bool Lexer::SkipBCPLComment(LexerToken &Result, const char *CurPtr) {
/// SaveBCPLComment - If in save-comment mode, package up this BCPL comment in
/// an appropriate way and return it.
bool Lexer::SaveBCPLComment(LexerToken &Result, const char *CurPtr) {
bool Lexer::SaveBCPLComment(Token &Result, const char *CurPtr) {
Result.setKind(tok::comment);
FormTokenWithChars(Result, CurPtr);
@ -812,7 +812,7 @@ static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
/// because they cannot cause the comment to end. The only thing that can
/// happen is the comment could end with an escaped newline between the */ end
/// of comment.
bool Lexer::SkipBlockComment(LexerToken &Result, const char *CurPtr) {
bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr) {
// Scan one character past where we should, looking for a '/' character. Once
// we find it, check to see if it was preceded by a *. This common
// optimization helps people who like to put a lot of * characters in their
@ -907,14 +907,14 @@ bool Lexer::SkipBlockComment(LexerToken &Result, const char *CurPtr) {
// whitespace. Instead of going through the big switch, handle it
// efficiently now.
if (isHorizontalWhitespace(*CurPtr)) {
Result.setFlag(LexerToken::LeadingSpace);
Result.setFlag(Token::LeadingSpace);
SkipWhitespace(Result, CurPtr+1);
return true;
}
// Otherwise, just return so that the next character will be lexed as a token.
BufferPtr = CurPtr;
Result.setFlag(LexerToken::LeadingSpace);
Result.setFlag(Token::LeadingSpace);
return true;
}
@ -924,7 +924,7 @@ bool Lexer::SkipBlockComment(LexerToken &Result, const char *CurPtr) {
/// LexIncludeFilename - After the preprocessor has parsed a #include, lex and
/// (potentially) macro expand the filename.
void Lexer::LexIncludeFilename(LexerToken &FilenameTok) {
void Lexer::LexIncludeFilename(Token &FilenameTok) {
assert(ParsingPreprocessorDirective &&
ParsingFilename == false &&
"Must be in a preprocessing directive!");
@ -949,7 +949,7 @@ std::string Lexer::ReadToEndOfLine() {
assert(ParsingPreprocessorDirective && ParsingFilename == false &&
"Must be in a preprocessing directive!");
std::string Result;
LexerToken Tmp;
Token Tmp;
// CurPtr - Cache BufferPtr in an automatic variable.
const char *CurPtr = BufferPtr;
@ -987,7 +987,7 @@ std::string Lexer::ReadToEndOfLine() {
/// condition, reporting diagnostics and handling other edge cases as required.
/// This returns true if Result contains a token, false if PP.Lex should be
/// called again.
bool Lexer::LexEndOfFile(LexerToken &Result, const char *CurPtr) {
bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
// If we hit the end of the file while parsing a preprocessor directive,
// end the preprocessor directive first. The next token returned will
// then be the end of file.
@ -1046,7 +1046,7 @@ unsigned Lexer::isNextPPTokenLParen() {
// Save state that can be changed while lexing so that we can restore it.
const char *TmpBufferPtr = BufferPtr;
LexerToken Tok;
Token Tok;
Tok.startToken();
LexTokenInternal(Tok);
@ -1069,10 +1069,10 @@ unsigned Lexer::isNextPPTokenLParen() {
/// preprocessing token, not a normal token, as such, it is an internal
/// interface. It assumes that the Flags of result have been cleared before
/// calling this.
void Lexer::LexTokenInternal(LexerToken &Result) {
void Lexer::LexTokenInternal(Token &Result) {
LexNextToken:
// New token, can't need cleaning yet.
Result.clearFlag(LexerToken::NeedsCleaning);
Result.clearFlag(Token::NeedsCleaning);
Result.setIdentifierInfo(0);
// CurPtr - Cache BufferPtr in an automatic variable.
@ -1084,7 +1084,7 @@ LexNextToken:
while ((*CurPtr == ' ') || (*CurPtr == '\t'))
++CurPtr;
BufferPtr = CurPtr;
Result.setFlag(LexerToken::LeadingSpace);
Result.setFlag(Token::LeadingSpace);
}
unsigned SizeTmp, SizeTmp2; // Temporaries for use in cases below.
@ -1104,7 +1104,7 @@ LexNextToken:
}
Diag(CurPtr-1, diag::null_in_file);
Result.setFlag(LexerToken::LeadingSpace);
Result.setFlag(Token::LeadingSpace);
SkipWhitespace(Result, CurPtr);
goto LexNextToken; // GCC isn't tail call eliminating.
case '\n':
@ -1125,16 +1125,16 @@ LexNextToken:
break;
}
// The returned token is at the start of the line.
Result.setFlag(LexerToken::StartOfLine);
Result.setFlag(Token::StartOfLine);
// No leading whitespace seen so far.
Result.clearFlag(LexerToken::LeadingSpace);
Result.clearFlag(Token::LeadingSpace);
SkipWhitespace(Result, CurPtr);
goto LexNextToken; // GCC isn't tail call eliminating.
case ' ':
case '\t':
case '\f':
case '\v':
Result.setFlag(LexerToken::LeadingSpace);
Result.setFlag(Token::LeadingSpace);
SkipWhitespace(Result, CurPtr);
goto LexNextToken; // GCC isn't tail call eliminating.
@ -1346,7 +1346,7 @@ LexNextToken:
// want us starting at the beginning of the line again. If so, set
// the StartOfLine flag.
if (IsAtStartOfLine) {
Result.setFlag(LexerToken::StartOfLine);
Result.setFlag(Token::StartOfLine);
IsAtStartOfLine = false;
}
goto LexNextToken; // GCC isn't tail call eliminating.
@ -1475,7 +1475,7 @@ LexNextToken:
// want us starting at the beginning of the line again. If so, set
// the StartOfLine flag.
if (IsAtStartOfLine) {
Result.setFlag(LexerToken::StartOfLine);
Result.setFlag(Token::StartOfLine);
IsAtStartOfLine = false;
}
goto LexNextToken; // GCC isn't tail call eliminating.

Просмотреть файл

@ -535,7 +535,7 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
/// hex-digit hex-digit hex-digit hex-digit
///
StringLiteralParser::
StringLiteralParser(const LexerToken *StringToks, unsigned NumStringToks,
StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
Preprocessor &pp, TargetInfo &t)
: PP(pp), Target(t) {
// Scan all of the string portions, remember the max individual token length,

Просмотреть файл

@ -25,21 +25,21 @@ using namespace clang;
/// MacroArgs ctor function - This destroys the vector passed in.
MacroArgs *MacroArgs::create(const MacroInfo *MI,
const LexerToken *UnexpArgTokens,
const Token *UnexpArgTokens,
unsigned NumToks, bool VarargsElided) {
assert(MI->isFunctionLike() &&
"Can't have args for an object-like macro!");
// Allocate memory for the MacroArgs object with the lexer tokens at the end.
MacroArgs *Result = (MacroArgs*)malloc(sizeof(MacroArgs) +
NumToks*sizeof(LexerToken));
NumToks*sizeof(Token));
// Construct the macroargs object.
new (Result) MacroArgs(NumToks, VarargsElided);
// Copy the actual unexpanded tokens to immediately after the result ptr.
if (NumToks)
memcpy(const_cast<LexerToken*>(Result->getUnexpArgument(0)),
UnexpArgTokens, NumToks*sizeof(LexerToken));
memcpy(const_cast<Token*>(Result->getUnexpArgument(0)),
UnexpArgTokens, NumToks*sizeof(Token));
return Result;
}
@ -57,7 +57,7 @@ void MacroArgs::destroy() {
/// getArgLength - Given a pointer to an expanded or unexpanded argument,
/// return the number of tokens, not counting the EOF, that make up the
/// argument.
unsigned MacroArgs::getArgLength(const LexerToken *ArgPtr) {
unsigned MacroArgs::getArgLength(const Token *ArgPtr) {
unsigned NumArgTokens = 0;
for (; ArgPtr->getKind() != tok::eof; ++ArgPtr)
++NumArgTokens;
@ -67,11 +67,11 @@ unsigned MacroArgs::getArgLength(const LexerToken *ArgPtr) {
/// getUnexpArgument - Return the unexpanded tokens for the specified formal.
///
const LexerToken *MacroArgs::getUnexpArgument(unsigned Arg) const {
const Token *MacroArgs::getUnexpArgument(unsigned Arg) const {
// The unexpanded argument tokens start immediately after the MacroArgs object
// in memory.
const LexerToken *Start = (const LexerToken *)(this+1);
const LexerToken *Result = Start;
const Token *Start = (const Token *)(this+1);
const Token *Result = Start;
// Scan to find Arg.
for (; Arg; ++Result) {
assert(Result < Start+NumUnexpArgTokens && "Invalid arg #");
@ -84,7 +84,7 @@ const LexerToken *MacroArgs::getUnexpArgument(unsigned Arg) const {
/// ArgNeedsPreexpansion - If we can prove that the argument won't be affected
/// by pre-expansion, return false. Otherwise, conservatively return true.
bool MacroArgs::ArgNeedsPreexpansion(const LexerToken *ArgTok) const {
bool MacroArgs::ArgNeedsPreexpansion(const Token *ArgTok) const {
// If there are no identifiers in the argument list, or if the identifiers are
// known to not be macros, pre-expansion won't modify it.
for (; ArgTok->getKind() != tok::eof; ++ArgTok)
@ -99,7 +99,7 @@ bool MacroArgs::ArgNeedsPreexpansion(const LexerToken *ArgTok) const {
/// getPreExpArgument - Return the pre-expanded form of the specified
/// argument.
const std::vector<LexerToken> &
const std::vector<Token> &
MacroArgs::getPreExpArgument(unsigned Arg, Preprocessor &PP) {
assert(Arg < NumUnexpArgTokens && "Invalid argument number!");
@ -107,10 +107,10 @@ MacroArgs::getPreExpArgument(unsigned Arg, Preprocessor &PP) {
if (PreExpArgTokens.empty())
PreExpArgTokens.resize(NumUnexpArgTokens);
std::vector<LexerToken> &Result = PreExpArgTokens[Arg];
std::vector<Token> &Result = PreExpArgTokens[Arg];
if (!Result.empty()) return Result;
const LexerToken *AT = getUnexpArgument(Arg);
const Token *AT = getUnexpArgument(Arg);
unsigned NumToks = getArgLength(AT)+1; // Include the EOF.
// Otherwise, we have to pre-expand this argument, populating Result. To do
@ -121,7 +121,7 @@ MacroArgs::getPreExpArgument(unsigned Arg, Preprocessor &PP) {
// Lex all of the macro-expanded tokens into Result.
do {
Result.push_back(LexerToken());
Result.push_back(Token());
PP.Lex(Result.back());
} while (Result.back().getKind() != tok::eof);
@ -139,20 +139,20 @@ MacroArgs::getPreExpArgument(unsigned Arg, Preprocessor &PP) {
/// tokens into the literal string token that should be produced by the C #
/// preprocessor operator.
///
static LexerToken StringifyArgument(const LexerToken *ArgToks,
static Token StringifyArgument(const Token *ArgToks,
Preprocessor &PP, bool Charify = false) {
LexerToken Tok;
Token Tok;
Tok.startToken();
Tok.setKind(tok::string_literal);
const LexerToken *ArgTokStart = ArgToks;
const Token *ArgTokStart = ArgToks;
// Stringify all the tokens.
std::string Result = "\"";
// FIXME: Optimize this loop to not use std::strings.
bool isFirst = true;
for (; ArgToks->getKind() != tok::eof; ++ArgToks) {
const LexerToken &Tok = *ArgToks;
const Token &Tok = *ArgToks;
if (!isFirst && (Tok.hasLeadingSpace() || Tok.isAtStartOfLine()))
Result += ' ';
isFirst = false;
@ -214,7 +214,7 @@ static LexerToken StringifyArgument(const LexerToken *ArgToks,
/// getStringifiedArgument - Compute, cache, and return the specified argument
/// that has been 'stringified' as required by the # operator.
const LexerToken &MacroArgs::getStringifiedArgument(unsigned ArgNo,
const Token &MacroArgs::getStringifiedArgument(unsigned ArgNo,
Preprocessor &PP) {
assert(ArgNo < NumUnexpArgTokens && "Invalid argument number!");
if (StringifiedArgs.empty()) {
@ -233,7 +233,7 @@ const LexerToken &MacroArgs::getStringifiedArgument(unsigned ArgNo,
/// Create a macro expander for the specified macro with the specified actual
/// arguments. Note that this ctor takes ownership of the ActualArgs pointer.
void MacroExpander::Init(LexerToken &Tok, MacroArgs *Actuals) {
void MacroExpander::Init(Token &Tok, MacroArgs *Actuals) {
// If the client is reusing a macro expander, make sure to free any memory
// associated with it.
destroy();
@ -262,7 +262,7 @@ void MacroExpander::Init(LexerToken &Tok, MacroArgs *Actuals) {
/// Create a macro expander for the specified token stream. This does not
/// take ownership of the specified token vector.
void MacroExpander::Init(const LexerToken *TokArray, unsigned NumToks) {
void MacroExpander::Init(const Token *TokArray, unsigned NumToks) {
// If the client is reusing a macro expander, make sure to free any memory
// associated with it.
destroy();
@ -298,7 +298,7 @@ void MacroExpander::destroy() {
/// Expand the arguments of a function-like macro so that we can quickly
/// return preexpanded tokens from MacroTokens.
void MacroExpander::ExpandFunctionArguments() {
llvm::SmallVector<LexerToken, 128> ResultToks;
llvm::SmallVector<Token, 128> ResultToks;
// Loop through the MacroTokens tokens, expanding them into ResultToks. Keep
// track of whether we change anything. If not, no need to keep them. If so,
@ -314,12 +314,12 @@ void MacroExpander::ExpandFunctionArguments() {
// If we found the stringify operator, get the argument stringified. The
// preprocessor already verified that the following token is a macro name
// when the #define was parsed.
const LexerToken &CurTok = MacroTokens[i];
const Token &CurTok = MacroTokens[i];
if (CurTok.getKind() == tok::hash || CurTok.getKind() == tok::hashat) {
int ArgNo = Macro->getArgumentNum(MacroTokens[i+1].getIdentifierInfo());
assert(ArgNo != -1 && "Token following # is not an argument?");
LexerToken Res;
Token Res;
if (CurTok.getKind() == tok::hash) // Stringify
Res = ActualArgs->getStringifiedArgument(ArgNo, PP);
else {
@ -330,7 +330,7 @@ void MacroExpander::ExpandFunctionArguments() {
// The stringified/charified string leading space flag gets set to match
// the #/#@ operator.
if (CurTok.hasLeadingSpace() || NextTokGetsSpace)
Res.setFlag(LexerToken::LeadingSpace);
Res.setFlag(Token::LeadingSpace);
ResultToks.push_back(Res);
MadeChange = true;
@ -348,7 +348,7 @@ void MacroExpander::ExpandFunctionArguments() {
ResultToks.push_back(CurTok);
if (NextTokGetsSpace) {
ResultToks.back().setFlag(LexerToken::LeadingSpace);
ResultToks.back().setFlag(Token::LeadingSpace);
NextTokGetsSpace = false;
}
continue;
@ -368,11 +368,11 @@ void MacroExpander::ExpandFunctionArguments() {
// argument and substitute the expanded tokens into the result. This is
// C99 6.10.3.1p1.
if (!PasteBefore && !PasteAfter) {
const LexerToken *ResultArgToks;
const Token *ResultArgToks;
// Only preexpand the argument if it could possibly need it. This
// avoids some work in common cases.
const LexerToken *ArgTok = ActualArgs->getUnexpArgument(ArgNo);
const Token *ArgTok = ActualArgs->getUnexpArgument(ArgNo);
if (ActualArgs->ArgNeedsPreexpansion(ArgTok))
ResultArgToks = &ActualArgs->getPreExpArgument(ArgNo, PP)[0];
else
@ -387,7 +387,7 @@ void MacroExpander::ExpandFunctionArguments() {
// If any tokens were substituted from the argument, the whitespace
// before the first token should match the whitespace of the arg
// identifier.
ResultToks[FirstResult].setFlagValue(LexerToken::LeadingSpace,
ResultToks[FirstResult].setFlagValue(Token::LeadingSpace,
CurTok.hasLeadingSpace() ||
NextTokGetsSpace);
NextTokGetsSpace = false;
@ -401,7 +401,7 @@ void MacroExpander::ExpandFunctionArguments() {
// Okay, we have a token that is either the LHS or RHS of a paste (##)
// argument. It gets substituted as its non-pre-expanded tokens.
const LexerToken *ArgToks = ActualArgs->getUnexpArgument(ArgNo);
const Token *ArgToks = ActualArgs->getUnexpArgument(ArgNo);
unsigned NumToks = MacroArgs::getArgLength(ArgToks);
if (NumToks) { // Not an empty argument?
ResultToks.append(ArgToks, ArgToks+NumToks);
@ -409,7 +409,7 @@ void MacroExpander::ExpandFunctionArguments() {
// If the next token was supposed to get leading whitespace, ensure it has
// it now.
if (NextTokGetsSpace) {
ResultToks[ResultToks.size()-NumToks].setFlag(LexerToken::LeadingSpace);
ResultToks[ResultToks.size()-NumToks].setFlag(Token::LeadingSpace);
NextTokGetsSpace = false;
}
continue;
@ -451,16 +451,16 @@ void MacroExpander::ExpandFunctionArguments() {
if (MadeChange) {
// This is deleted in the dtor.
NumMacroTokens = ResultToks.size();
LexerToken *Res = new LexerToken[ResultToks.size()];
Token *Res = new Token[ResultToks.size()];
if (NumMacroTokens)
memcpy(Res, &ResultToks[0], NumMacroTokens*sizeof(LexerToken));
memcpy(Res, &ResultToks[0], NumMacroTokens*sizeof(Token));
MacroTokens = Res;
}
}
/// Lex - Lex and return a token from this macro stream.
///
void MacroExpander::Lex(LexerToken &Tok) {
void MacroExpander::Lex(Token &Tok) {
// Lexing off the end of the macro, pop this macro off the expansion stack.
if (isAtEnd()) {
// If this is a macro (not a token stream), mark the macro enabled now
@ -503,8 +503,8 @@ void MacroExpander::Lex(LexerToken &Tok) {
// If this is the first token, set the lexical properties of the token to
// match the lexical properties of the macro identifier.
if (isFirstToken) {
Tok.setFlagValue(LexerToken::StartOfLine , AtStartOfLine);
Tok.setFlagValue(LexerToken::LeadingSpace, HasLeadingSpace);
Tok.setFlagValue(Token::StartOfLine , AtStartOfLine);
Tok.setFlagValue(Token::LeadingSpace, HasLeadingSpace);
}
// Handle recursive expansion!
@ -517,7 +517,7 @@ void MacroExpander::Lex(LexerToken &Tok) {
/// PasteTokens - Tok is the LHS of a ## operator, and CurToken is the ##
/// operator. Read the ## and RHS, and paste the LHS/RHS together. If there
/// is another ## after it, chomp it iteratively. Return the result as Tok.
void MacroExpander::PasteTokens(LexerToken &Tok) {
void MacroExpander::PasteTokens(Token &Tok) {
llvm::SmallVector<char, 128> Buffer;
do {
// Consume the ## operator.
@ -526,7 +526,7 @@ void MacroExpander::PasteTokens(LexerToken &Tok) {
assert(!isAtEnd() && "No token on the RHS of a paste operator!");
// Get the RHS token.
const LexerToken &RHS = MacroTokens[CurToken];
const Token &RHS = MacroTokens[CurToken];
bool isInvalid = false;
@ -556,7 +556,7 @@ void MacroExpander::PasteTokens(LexerToken &Tok) {
SourceLocation ResultTokLoc = PP.CreateString(&Buffer[0], Buffer.size());
// Lex the resultant pasted token into Result.
LexerToken Result;
Token Result;
// Avoid testing /*, as the lexer would think it is the start of a comment
// and emit an error that it is unterminated.
@ -613,8 +613,8 @@ void MacroExpander::PasteTokens(LexerToken &Tok) {
// FIXME: Turn __VARRGS__ into "not a token"?
// Transfer properties of the LHS over to the Result.
Result.setFlagValue(LexerToken::StartOfLine , Tok.isAtStartOfLine());
Result.setFlagValue(LexerToken::LeadingSpace, Tok.hasLeadingSpace());
Result.setFlagValue(Token::StartOfLine , Tok.isAtStartOfLine());
Result.setFlagValue(Token::LeadingSpace, Tok.hasLeadingSpace());
// Finally, replace LHS with the result, consume the RHS, and iterate.
++CurToken;

Просмотреть файл

@ -50,8 +50,8 @@ bool MacroInfo::isIdenticalTo(const MacroInfo &Other, Preprocessor &PP) const {
// Check all the tokens.
for (unsigned i = 0, e = ReplacementTokens.size(); i != e; ++i) {
const LexerToken &A = ReplacementTokens[i];
const LexerToken &B = Other.ReplacementTokens[i];
const Token &A = ReplacementTokens[i];
const Token &B = Other.ReplacementTokens[i];
if (A.getKind() != B.getKind() ||
A.isAtStartOfLine() != B.isAtStartOfLine() ||
A.hasLeadingSpace() != B.hasLeadingSpace())

Просмотреть файл

@ -27,7 +27,7 @@
using namespace clang;
static bool EvaluateDirectiveSubExpr(llvm::APSInt &LHS, unsigned MinPrec,
LexerToken &PeekTok, bool ValueLive,
Token &PeekTok, bool ValueLive,
Preprocessor &PP);
/// DefinedTracker - This struct is used while parsing expressions to keep track
@ -60,7 +60,7 @@ struct DefinedTracker {
/// If ValueLive is false, then this value is being evaluated in a context where
/// the result is not used. As such, avoid diagnostics that relate to
/// evaluation.
static bool EvaluateValue(llvm::APSInt &Result, LexerToken &PeekTok,
static bool EvaluateValue(llvm::APSInt &Result, Token &PeekTok,
DefinedTracker &DT, bool ValueLive,
Preprocessor &PP) {
Result = 0;
@ -360,7 +360,7 @@ static unsigned getPrecedence(tok::TokenKind Kind) {
/// the result is not used. As such, avoid diagnostics that relate to
/// evaluation.
static bool EvaluateDirectiveSubExpr(llvm::APSInt &LHS, unsigned MinPrec,
LexerToken &PeekTok, bool ValueLive,
Token &PeekTok, bool ValueLive,
Preprocessor &PP) {
unsigned PeekPrec = getPrecedence(PeekTok.getKind());
// If this token isn't valid, report the error.
@ -393,7 +393,7 @@ static bool EvaluateDirectiveSubExpr(llvm::APSInt &LHS, unsigned MinPrec,
RHSIsLive = ValueLive;
// Consume the operator, saving the operator token for error reporting.
LexerToken OpToken = PeekTok;
Token OpToken = PeekTok;
PP.LexNonComment(PeekTok);
llvm::APSInt RHS(LHS.getBitWidth());
@ -607,7 +607,7 @@ static bool EvaluateDirectiveSubExpr(llvm::APSInt &LHS, unsigned MinPrec,
bool Preprocessor::
EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro) {
// Peek ahead one token.
LexerToken Tok;
Token Tok;
Lex(Tok);
// C99 6.10.1p3 - All expressions are evaluated as intmax_t or uintmax_t.

Просмотреть файл

@ -53,7 +53,7 @@ PragmaHandler *PragmaNamespace::FindHandler(const IdentifierInfo *Name,
return IgnoreNull ? 0 : NullHandler;
}
void PragmaNamespace::HandlePragma(Preprocessor &PP, LexerToken &Tok) {
void PragmaNamespace::HandlePragma(Preprocessor &PP, Token &Tok) {
// Read the 'namespace' that the directive is in, e.g. STDC. Do not macro
// expand it, the user can have a STDC #define, that should not affect this.
PP.LexUnexpandedToken(Tok);
@ -76,7 +76,7 @@ void Preprocessor::HandlePragmaDirective() {
++NumPragma;
// Invoke the first level of pragma handlers which reads the namespace id.
LexerToken Tok;
Token Tok;
PragmaHandlers->HandlePragma(*this, Tok);
// If the pragma handler didn't read the rest of the line, consume it now.
@ -87,7 +87,7 @@ void Preprocessor::HandlePragmaDirective() {
/// Handle_Pragma - Read a _Pragma directive, slice it up, process it, then
/// return the first token after the directive. The _Pragma token has just
/// been read into 'Tok'.
void Preprocessor::Handle_Pragma(LexerToken &Tok) {
void Preprocessor::Handle_Pragma(Token &Tok) {
// Remember the pragma token location.
SourceLocation PragmaLoc = Tok.getLocation();
@ -165,7 +165,7 @@ void Preprocessor::Handle_Pragma(LexerToken &Tok) {
/// HandlePragmaOnce - Handle #pragma once. OnceTok is the 'once'.
///
void Preprocessor::HandlePragmaOnce(LexerToken &OnceTok) {
void Preprocessor::HandlePragmaOnce(Token &OnceTok) {
if (isInPrimaryFile()) {
Diag(OnceTok, diag::pp_pragma_once_in_main_file);
return;
@ -180,8 +180,8 @@ void Preprocessor::HandlePragmaOnce(LexerToken &OnceTok) {
/// HandlePragmaPoison - Handle #pragma GCC poison. PoisonTok is the 'poison'.
///
void Preprocessor::HandlePragmaPoison(LexerToken &PoisonTok) {
LexerToken Tok;
void Preprocessor::HandlePragmaPoison(Token &PoisonTok) {
Token Tok;
while (1) {
// Read the next token to poison. While doing this, pretend that we are
@ -220,7 +220,7 @@ void Preprocessor::HandlePragmaPoison(LexerToken &PoisonTok) {
/// HandlePragmaSystemHeader - Implement #pragma GCC system_header. We know
/// that the whole directive has been parsed.
void Preprocessor::HandlePragmaSystemHeader(LexerToken &SysHeaderTok) {
void Preprocessor::HandlePragmaSystemHeader(Token &SysHeaderTok) {
if (isInPrimaryFile()) {
Diag(SysHeaderTok, diag::pp_pragma_sysheader_in_main_file);
return;
@ -242,8 +242,8 @@ void Preprocessor::HandlePragmaSystemHeader(LexerToken &SysHeaderTok) {
/// HandlePragmaDependency - Handle #pragma GCC dependency "foo" blah.
///
void Preprocessor::HandlePragmaDependency(LexerToken &DependencyTok) {
LexerToken FilenameTok;
void Preprocessor::HandlePragmaDependency(Token &DependencyTok) {
Token FilenameTok;
CurLexer->LexIncludeFilename(FilenameTok);
// If the token kind is EOM, the error has already been diagnosed.
@ -324,7 +324,7 @@ void Preprocessor::AddPragmaHandler(const char *Namespace,
namespace {
struct PragmaOnceHandler : public PragmaHandler {
PragmaOnceHandler(const IdentifierInfo *OnceID) : PragmaHandler(OnceID) {}
virtual void HandlePragma(Preprocessor &PP, LexerToken &OnceTok) {
virtual void HandlePragma(Preprocessor &PP, Token &OnceTok) {
PP.CheckEndOfDirective("#pragma once");
PP.HandlePragmaOnce(OnceTok);
}
@ -332,21 +332,21 @@ struct PragmaOnceHandler : public PragmaHandler {
struct PragmaPoisonHandler : public PragmaHandler {
PragmaPoisonHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {}
virtual void HandlePragma(Preprocessor &PP, LexerToken &PoisonTok) {
virtual void HandlePragma(Preprocessor &PP, Token &PoisonTok) {
PP.HandlePragmaPoison(PoisonTok);
}
};
struct PragmaSystemHeaderHandler : public PragmaHandler {
PragmaSystemHeaderHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {}
virtual void HandlePragma(Preprocessor &PP, LexerToken &SHToken) {
virtual void HandlePragma(Preprocessor &PP, Token &SHToken) {
PP.HandlePragmaSystemHeader(SHToken);
PP.CheckEndOfDirective("#pragma");
}
};
struct PragmaDependencyHandler : public PragmaHandler {
PragmaDependencyHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {}
virtual void HandlePragma(Preprocessor &PP, LexerToken &DepToken) {
virtual void HandlePragma(Preprocessor &PP, Token &DepToken) {
PP.HandlePragmaDependency(DepToken);
}
};

Просмотреть файл

@ -105,7 +105,7 @@ PPCallbacks::~PPCallbacks() {
}
/// Diag - Forwarding function for diagnostics. This emits a diagnostic at
/// the specified LexerToken's location, translating the token's start
/// the specified Token's location, translating the token's start
/// position in the current buffer into a SourcePosition object for rendering.
void Preprocessor::Diag(SourceLocation Loc, unsigned DiagID) {
Diags.Report(Loc, DiagID);
@ -116,7 +116,7 @@ void Preprocessor::Diag(SourceLocation Loc, unsigned DiagID,
Diags.Report(Loc, DiagID, &Msg, 1);
}
void Preprocessor::DumpToken(const LexerToken &Tok, bool DumpFlags) const {
void Preprocessor::DumpToken(const Token &Tok, bool DumpFlags) const {
std::cerr << tok::getTokenName(Tok.getKind()) << " '"
<< getSpelling(Tok) << "'";
@ -176,7 +176,7 @@ void Preprocessor::PrintStats() {
/// after trigraph expansion and escaped-newline folding. In particular, this
/// wants to get the true, uncanonicalized, spelling of things like digraphs
/// UCNs, etc.
std::string Preprocessor::getSpelling(const LexerToken &Tok) const {
std::string Preprocessor::getSpelling(const Token &Tok) const {
assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
// If this token contains nothing interesting, return it directly.
@ -209,7 +209,7 @@ std::string Preprocessor::getSpelling(const LexerToken &Tok) const {
/// to point to a constant buffer with the data already in it (avoiding a
/// copy). The caller is not allowed to modify the returned buffer pointer
/// if an internal buffer is returned.
unsigned Preprocessor::getSpelling(const LexerToken &Tok,
unsigned Preprocessor::getSpelling(const Token &Tok,
const char *&Buffer) const {
assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
@ -282,7 +282,7 @@ SourceLocation Preprocessor::AdvanceToTokenCharacter(SourceLocation TokStart,
if (CharNo != 0) {
// Create a lexer starting at this token position.
Lexer TheLexer(TokStart, *this, TokPtr);
LexerToken Tok;
Token Tok;
// Skip over characters the remaining characters.
const char *TokStartPtr = TokPtr;
for (; CharNo; --CharNo)
@ -426,7 +426,7 @@ void Preprocessor::EnterSourceFileWithLexer(Lexer *TheLexer,
/// EnterMacro - Add a Macro to the top of the include stack and start lexing
/// tokens from it instead of the current buffer.
void Preprocessor::EnterMacro(LexerToken &Tok, MacroArgs *Args) {
void Preprocessor::EnterMacro(Token &Tok, MacroArgs *Args) {
IncludeMacroStack.push_back(IncludeStackInfo(CurLexer, CurDirLookup,
CurMacroExpander));
CurLexer = 0;
@ -445,7 +445,7 @@ void Preprocessor::EnterMacro(LexerToken &Tok, MacroArgs *Args) {
/// that these tokens will be re-macro-expanded when/if expansion is enabled.
/// This method assumes that the specified stream of tokens has a permanent
/// owner somewhere, so they do not need to be copied.
void Preprocessor::EnterTokenStream(const LexerToken *Toks, unsigned NumToks) {
void Preprocessor::EnterTokenStream(const Token *Toks, unsigned NumToks) {
// Save our current state.
IncludeMacroStack.push_back(IncludeStackInfo(CurLexer, CurDirLookup,
CurMacroExpander));
@ -585,7 +585,7 @@ bool Preprocessor::isNextPPTokenLParen() {
if (Val != 1)
return false;
LexerToken Tok;
Token Tok;
LexUnexpandedToken(Tok);
assert(Tok.getKind() == tok::l_paren && "Error computing l-paren-ness?");
return true;
@ -593,7 +593,7 @@ bool Preprocessor::isNextPPTokenLParen() {
/// HandleMacroExpandedIdentifier - If an identifier token is read that is to be
/// expanded as a macro, handle it and return the next token as 'Identifier'.
bool Preprocessor::HandleMacroExpandedIdentifier(LexerToken &Identifier,
bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
MacroInfo *MI) {
// If this is a builtin macro, like __LINE__ or _Pragma, handle it specially.
@ -662,8 +662,8 @@ bool Preprocessor::HandleMacroExpandedIdentifier(LexerToken &Identifier,
// stuff like "! XX," -> "! ," and " XX," -> " ,", when XX is
// empty.
if (!Identifier.isAtStartOfLine()) {
if (IsAtStartOfLine) Identifier.setFlag(LexerToken::StartOfLine);
if (HadLeadingSpace) Identifier.setFlag(LexerToken::LeadingSpace);
if (IsAtStartOfLine) Identifier.setFlag(Token::StartOfLine);
if (HadLeadingSpace) Identifier.setFlag(Token::LeadingSpace);
}
++NumFastMacroExpanded;
return false;
@ -686,8 +686,8 @@ bool Preprocessor::HandleMacroExpandedIdentifier(LexerToken &Identifier,
Identifier = MI->getReplacementToken(0);
// Restore the StartOfLine/LeadingSpace markers.
Identifier.setFlagValue(LexerToken::StartOfLine , isAtStartOfLine);
Identifier.setFlagValue(LexerToken::LeadingSpace, hasLeadingSpace);
Identifier.setFlagValue(Token::StartOfLine , isAtStartOfLine);
Identifier.setFlagValue(Token::LeadingSpace, hasLeadingSpace);
// Update the tokens location to include both its logical and physical
// locations.
@ -698,7 +698,7 @@ bool Preprocessor::HandleMacroExpandedIdentifier(LexerToken &Identifier,
// If this is #define X X, we must mark the result as unexpandible.
if (IdentifierInfo *NewII = Identifier.getIdentifierInfo())
if (NewII->getMacroInfo() == MI)
Identifier.setFlag(LexerToken::DisableExpand);
Identifier.setFlag(Token::DisableExpand);
// Since this is not an identifier token, it can't be macro expanded, so
// we're done.
@ -718,21 +718,21 @@ bool Preprocessor::HandleMacroExpandedIdentifier(LexerToken &Identifier,
/// ReadFunctionLikeMacroArgs - After reading "MACRO(", this method is
/// invoked to read all of the actual arguments specified for the macro
/// invocation. This returns null on error.
MacroArgs *Preprocessor::ReadFunctionLikeMacroArgs(LexerToken &MacroName,
MacroArgs *Preprocessor::ReadFunctionLikeMacroArgs(Token &MacroName,
MacroInfo *MI) {
// The number of fixed arguments to parse.
unsigned NumFixedArgsLeft = MI->getNumArgs();
bool isVariadic = MI->isVariadic();
// Outer loop, while there are more arguments, keep reading them.
LexerToken Tok;
Token Tok;
Tok.setKind(tok::comma);
--NumFixedArgsLeft; // Start reading the first arg.
// ArgTokens - Build up a list of tokens that make up each argument. Each
// argument is separated by an EOF token. Use a SmallVector so we can avoid
// heap allocations in the common case.
llvm::SmallVector<LexerToken, 64> ArgTokens;
llvm::SmallVector<Token, 64> ArgTokens;
unsigned NumActuals = 0;
while (Tok.getKind() == tok::comma) {
@ -784,7 +784,7 @@ MacroArgs *Preprocessor::ReadFunctionLikeMacroArgs(LexerToken &MacroName,
Diag(Tok, diag::ext_empty_fnmacro_arg);
// Add a marker EOF token to the end of the token list for this argument.
LexerToken EOFTok;
Token EOFTok;
EOFTok.startToken();
EOFTok.setKind(tok::eof);
EOFTok.setLocation(Tok.getLocation());
@ -862,7 +862,7 @@ static void ComputeDATE_TIME(SourceLocation &DATELoc, SourceLocation &TIMELoc,
/// ExpandBuiltinMacro - If an identifier token is read that is to be expanded
/// as a builtin macro, handle it and return the next token as 'Tok'.
void Preprocessor::ExpandBuiltinMacro(LexerToken &Tok) {
void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
// Figure out which token this is.
IdentifierInfo *II = Tok.getIdentifierInfo();
assert(II && "Can't be a macro without id info!");
@ -878,7 +878,7 @@ void Preprocessor::ExpandBuiltinMacro(LexerToken &Tok) {
// Set up the return result.
Tok.setIdentifierInfo(0);
Tok.clearFlag(LexerToken::NeedsCleaning);
Tok.clearFlag(Token::NeedsCleaning);
if (II == Ident__LINE__) {
// __LINE__ expands to a simple numeric value.
@ -971,7 +971,7 @@ void Preprocessor::ExpandBuiltinMacro(LexerToken &Tok) {
/// LookUpIdentifierInfo - Given a tok::identifier token, look up the
/// identifier information for the token and install it into the token.
IdentifierInfo *Preprocessor::LookUpIdentifierInfo(LexerToken &Identifier,
IdentifierInfo *Preprocessor::LookUpIdentifierInfo(Token &Identifier,
const char *BufPtr) {
assert(Identifier.getKind() == tok::identifier && "Not an identifier!");
assert(Identifier.getIdentifierInfo() == 0 && "Identinfo already exists!");
@ -997,7 +997,7 @@ IdentifierInfo *Preprocessor::LookUpIdentifierInfo(LexerToken &Identifier,
/// HandleIdentifier - This callback is invoked when the lexer reads an
/// identifier. This callback looks up the identifier in the map and/or
/// potentially macro expands it or turns it into a named token (like 'for').
void Preprocessor::HandleIdentifier(LexerToken &Identifier) {
void Preprocessor::HandleIdentifier(Token &Identifier) {
assert(Identifier.getIdentifierInfo() &&
"Can't handle identifiers without identifier info!");
@ -1022,7 +1022,7 @@ void Preprocessor::HandleIdentifier(LexerToken &Identifier) {
// C99 6.10.3.4p2 says that a disabled macro may never again be
// expanded, even if it's in a context where it could be expanded in the
// future.
Identifier.setFlag(LexerToken::DisableExpand);
Identifier.setFlag(Token::DisableExpand);
}
}
} else if (II.isOtherTargetMacro() && !DisableMacroExpansion) {
@ -1055,7 +1055,7 @@ void Preprocessor::HandleIdentifier(LexerToken &Identifier) {
/// HandleEndOfFile - This callback is invoked when the lexer hits the end of
/// the current file. This either returns the EOF token or pops a level off
/// the include stack and keeps going.
bool Preprocessor::HandleEndOfFile(LexerToken &Result, bool isEndOfMacro) {
bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
assert(!CurMacroExpander &&
"Ending a file when currently in a macro!");
@ -1119,7 +1119,7 @@ bool Preprocessor::HandleEndOfFile(LexerToken &Result, bool isEndOfMacro) {
/// HandleEndOfMacro - This callback is invoked when the lexer hits the end of
/// the current macro expansion or token stream expansion.
bool Preprocessor::HandleEndOfMacro(LexerToken &Result) {
bool Preprocessor::HandleEndOfMacro(Token &Result) {
assert(CurMacroExpander && !CurLexer &&
"Ending a macro when currently in a #include file!");
@ -1142,7 +1142,7 @@ bool Preprocessor::HandleEndOfMacro(LexerToken &Result) {
/// DiscardUntilEndOfDirective - Read and discard all tokens remaining on the
/// current line until the tok::eom token is found.
void Preprocessor::DiscardUntilEndOfDirective() {
LexerToken Tmp;
Token Tmp;
do {
LexUnexpandedToken(Tmp);
} while (Tmp.getKind() != tok::eom);
@ -1160,7 +1160,7 @@ static bool isCXXNamedOperator(const std::string &Spelling) {
/// of the macro line if the macro name is invalid. isDefineUndef is 1 if
/// this is due to a a #define, 2 if #undef directive, 0 if it is something
/// else (e.g. #ifdef).
void Preprocessor::ReadMacroName(LexerToken &MacroNameTok, char isDefineUndef) {
void Preprocessor::ReadMacroName(Token &MacroNameTok, char isDefineUndef) {
// Read the token, don't allow macro expansion on it.
LexUnexpandedToken(MacroNameTok);
@ -1202,7 +1202,7 @@ void Preprocessor::ReadMacroName(LexerToken &MacroNameTok, char isDefineUndef) {
/// CheckEndOfDirective - Ensure that the next token is a tok::eom token. If
/// not, emit a diagnostic and consume up until the eom.
void Preprocessor::CheckEndOfDirective(const char *DirType) {
LexerToken Tmp;
Token Tmp;
Lex(Tmp);
// There should be no tokens after the directive, but we allow them as an
// extension.
@ -1238,7 +1238,7 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
// Enter raw mode to disable identifier lookup (and thus macro expansion),
// disabling warnings, etc.
CurLexer->LexingRawMode = true;
LexerToken Tok;
Token Tok;
while (1) {
CurLexer->Lex(Tok);
@ -1409,7 +1409,7 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
/// at the start of a line. This consumes the directive, modifies the
/// lexer/preprocessor state, and advances the lexer(s) so that the next token
/// read is the correct one.
void Preprocessor::HandleDirective(LexerToken &Result) {
void Preprocessor::HandleDirective(Token &Result) {
// FIXME: Traditional: # with whitespace before it not recognized by K&R?
// We just parsed a # character at the start of a line, so we're in directive
@ -1533,7 +1533,7 @@ TryAgain:
// Okay, we're done parsing the directive.
}
void Preprocessor::HandleUserDiagnosticDirective(LexerToken &Tok,
void Preprocessor::HandleUserDiagnosticDirective(Token &Tok,
bool isWarning) {
// Read the rest of the line raw. We do this because we don't want macros
// to be expanded and we don't require that the tokens be valid preprocessing
@ -1548,12 +1548,12 @@ void Preprocessor::HandleUserDiagnosticDirective(LexerToken &Tok,
/// HandleIdentSCCSDirective - Handle a #ident/#sccs directive.
///
void Preprocessor::HandleIdentSCCSDirective(LexerToken &Tok) {
void Preprocessor::HandleIdentSCCSDirective(Token &Tok) {
// Yes, this directive is an extension.
Diag(Tok, diag::ext_pp_ident_directive);
// Read the string argument.
LexerToken StrTok;
Token StrTok;
Lex(StrTok);
// If the token kind isn't a string, it's a malformed directive.
@ -1578,7 +1578,7 @@ void Preprocessor::HandleIdentSCCSDirective(LexerToken &Tok) {
/// caller is expected to provide a buffer that is large enough to hold the
/// spelling of the filename, but is also expected to handle the case when
/// this method decides to use a different buffer.
bool Preprocessor::GetIncludeFilenameSpelling(const LexerToken &FilenameTok,
bool Preprocessor::GetIncludeFilenameSpelling(const Token &FilenameTok,
const char *&BufStart,
const char *&BufEnd) {
// Get the text form of the filename.
@ -1625,11 +1625,11 @@ bool Preprocessor::GetIncludeFilenameSpelling(const LexerToken &FilenameTok,
/// file to be included from the lexer, then include it! This is a common
/// routine with functionality shared between #include, #include_next and
/// #import.
void Preprocessor::HandleIncludeDirective(LexerToken &IncludeTok,
void Preprocessor::HandleIncludeDirective(Token &IncludeTok,
const DirectoryLookup *LookupFrom,
bool isImport) {
LexerToken FilenameTok;
Token FilenameTok;
CurLexer->LexIncludeFilename(FilenameTok);
// If the token kind is EOM, the error has already been diagnosed.
@ -1682,7 +1682,7 @@ void Preprocessor::HandleIncludeDirective(LexerToken &IncludeTok,
/// HandleIncludeNextDirective - Implements #include_next.
///
void Preprocessor::HandleIncludeNextDirective(LexerToken &IncludeNextTok) {
void Preprocessor::HandleIncludeNextDirective(Token &IncludeNextTok) {
Diag(IncludeNextTok, diag::ext_pp_include_next_directive);
// #include_next is like #include, except that we start searching after
@ -1704,7 +1704,7 @@ void Preprocessor::HandleIncludeNextDirective(LexerToken &IncludeNextTok) {
/// HandleImportDirective - Implements #import.
///
void Preprocessor::HandleImportDirective(LexerToken &ImportTok) {
void Preprocessor::HandleImportDirective(Token &ImportTok) {
Diag(ImportTok, diag::ext_pp_import_directive);
return HandleIncludeDirective(ImportTok, 0, true);
@ -1721,7 +1721,7 @@ void Preprocessor::HandleImportDirective(LexerToken &ImportTok) {
bool Preprocessor::ReadMacroDefinitionArgList(MacroInfo *MI) {
llvm::SmallVector<IdentifierInfo*, 32> Arguments;
LexerToken Tok;
Token Tok;
while (1) {
LexUnexpandedToken(Tok);
switch (Tok.getKind()) {
@ -1808,11 +1808,11 @@ bool Preprocessor::ReadMacroDefinitionArgList(MacroInfo *MI) {
/// line then lets the caller lex the next real token. If 'isTargetSpecific' is
/// true, then this is a "#define_target", otherwise this is a "#define".
///
void Preprocessor::HandleDefineDirective(LexerToken &DefineTok,
void Preprocessor::HandleDefineDirective(Token &DefineTok,
bool isTargetSpecific) {
++NumDefined;
LexerToken MacroNameTok;
Token MacroNameTok;
ReadMacroName(MacroNameTok, 1);
// Error reading macro name? If so, diagnostic already issued.
@ -1831,7 +1831,7 @@ void Preprocessor::HandleDefineDirective(LexerToken &DefineTok,
MacroNameTok.getIdentifierInfo()->setIsOtherTargetMacro(false);
LexerToken Tok;
Token Tok;
LexUnexpandedToken(Tok);
// If this is a function-like macro definition, parse the argument list,
@ -1865,7 +1865,7 @@ void Preprocessor::HandleDefineDirective(LexerToken &DefineTok,
} else {
// This is a normal token with leading space. Clear the leading space
// marker on the first token to get proper expansion.
Tok.clearFlag(LexerToken::LeadingSpace);
Tok.clearFlag(Token::LeadingSpace);
}
// If this is a definition of a variadic C99 function-like macro, not using
@ -1967,8 +1967,8 @@ void Preprocessor::HandleDefineDirective(LexerToken &DefineTok,
}
/// HandleDefineOtherTargetDirective - Implements #define_other_target.
void Preprocessor::HandleDefineOtherTargetDirective(LexerToken &Tok) {
LexerToken MacroNameTok;
void Preprocessor::HandleDefineOtherTargetDirective(Token &Tok) {
Token MacroNameTok;
ReadMacroName(MacroNameTok, 1);
// Error reading macro name? If so, diagnostic already issued.
@ -1992,10 +1992,10 @@ void Preprocessor::HandleDefineOtherTargetDirective(LexerToken &Tok) {
/// HandleUndefDirective - Implements #undef.
///
void Preprocessor::HandleUndefDirective(LexerToken &UndefTok) {
void Preprocessor::HandleUndefDirective(Token &UndefTok) {
++NumUndefined;
LexerToken MacroNameTok;
Token MacroNameTok;
ReadMacroName(MacroNameTok, 2);
// Error reading macro name? If so, diagnostic already issued.
@ -2032,12 +2032,12 @@ void Preprocessor::HandleUndefDirective(LexerToken &UndefTok) {
/// if any tokens have been returned or pp-directives activated before this
/// #ifndef has been lexed.
///
void Preprocessor::HandleIfdefDirective(LexerToken &Result, bool isIfndef,
void Preprocessor::HandleIfdefDirective(Token &Result, bool isIfndef,
bool ReadAnyTokensBeforeDirective) {
++NumIf;
LexerToken DirectiveTok = Result;
Token DirectiveTok = Result;
LexerToken MacroNameTok;
Token MacroNameTok;
ReadMacroName(MacroNameTok);
// Error reading macro name? If so, diagnostic already issued.
@ -2092,7 +2092,7 @@ void Preprocessor::HandleIfdefDirective(LexerToken &Result, bool isIfndef,
/// HandleIfDirective - Implements the #if directive.
///
void Preprocessor::HandleIfDirective(LexerToken &IfToken,
void Preprocessor::HandleIfDirective(Token &IfToken,
bool ReadAnyTokensBeforeDirective) {
++NumIf;
@ -2120,7 +2120,7 @@ void Preprocessor::HandleIfDirective(LexerToken &IfToken,
/// HandleEndifDirective - Implements the #endif directive.
///
void Preprocessor::HandleEndifDirective(LexerToken &EndifToken) {
void Preprocessor::HandleEndifDirective(Token &EndifToken) {
++NumEndif;
// Check that this is the whole directive.
@ -2141,7 +2141,7 @@ void Preprocessor::HandleEndifDirective(LexerToken &EndifToken) {
}
void Preprocessor::HandleElseDirective(LexerToken &Result) {
void Preprocessor::HandleElseDirective(Token &Result) {
++NumElse;
// #else directive in a non-skipping conditional... start skipping.
@ -2164,7 +2164,7 @@ void Preprocessor::HandleElseDirective(LexerToken &Result) {
/*FoundElse*/true);
}
void Preprocessor::HandleElifDirective(LexerToken &ElifToken) {
void Preprocessor::HandleElifDirective(Token &ElifToken) {
++NumElse;
// #elif directive in a non-skipping conditional... start skipping.

Просмотреть файл

@ -1349,7 +1349,7 @@ void Parser::ParseBracketDeclarator(Declarator &D) {
ExprResult NumElements(false);
if (Tok.getKind() == tok::star) {
// Remember the '*' token, in case we have to un-get it.
LexerToken StarTok = Tok;
Token StarTok = Tok;
ConsumeToken();
// Check that the ']' token is present to avoid incorrectly parsing

Просмотреть файл

@ -195,7 +195,7 @@ Parser::ExprResult Parser::ParseConstantExpression() {
/// of part of an expression. For example, in "A[1]+B", we consumed "A" (which
/// is now in 'IdTok') and the current token is "[".
Parser::ExprResult Parser::
ParseExpressionWithLeadingIdentifier(const LexerToken &IdTok) {
ParseExpressionWithLeadingIdentifier(const Token &IdTok) {
// We know that 'IdTok' must correspond to this production:
// primary-expression: identifier
@ -226,7 +226,7 @@ ParseExpressionWithLeadingIdentifier(const LexerToken &IdTok) {
/// of part of an assignment-expression. For example, in "A[1]+B", we consumed
/// "A" (which is now in 'IdTok') and the current token is "[".
Parser::ExprResult Parser::
ParseAssignmentExprWithLeadingIdentifier(const LexerToken &IdTok) {
ParseAssignmentExprWithLeadingIdentifier(const Token &IdTok) {
// We know that 'IdTok' must correspond to this production:
// primary-expression: identifier
@ -258,7 +258,7 @@ ParseAssignmentExprWithLeadingIdentifier(const LexerToken &IdTok) {
/// expression. For example, in "*(int*)P+B", we consumed "*" (which is
/// now in 'StarTok') and the current token is "(".
Parser::ExprResult Parser::
ParseAssignmentExpressionWithLeadingStar(const LexerToken &StarTok) {
ParseAssignmentExpressionWithLeadingStar(const Token &StarTok) {
// We know that 'StarTok' must correspond to this production:
// unary-expression: unary-operator cast-expression
// where 'unary-operator' is '*'.
@ -303,7 +303,7 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, unsigned MinPrec) {
return LHS;
// Consume the operator, saving the operator token for error reporting.
LexerToken OpToken = Tok;
Token OpToken = Tok;
ConsumeToken();
// Special case handling for the ternary operator.
@ -696,7 +696,7 @@ Parser::ExprResult Parser::ParseSizeofAlignofExpression() {
assert((Tok.getKind() == tok::kw_sizeof ||
Tok.getKind() == tok::kw___alignof) &&
"Not a sizeof/alignof expression!");
LexerToken OpTok = Tok;
Token OpTok = Tok;
ConsumeToken();
// If the operand doesn't start with an '(', it must be an expression.
@ -924,7 +924,7 @@ Parser::ExprResult Parser::ParseStringLiteralExpression() {
// String concat. Note that keywords like __func__ and __FUNCTION__ are not
// considered to be strings for concatenation purposes.
llvm::SmallVector<LexerToken, 4> StringToks;
llvm::SmallVector<Token, 4> StringToks;
do {
StringToks.push_back(Tok);

Просмотреть файл

@ -115,7 +115,7 @@ Parser::ExprResult Parser::ParseInitializerWithPotentialDesignator() {
// assignment-expression or if it is an old-style structure field
// designator.
// TODO: Check that this is the first designator.
LexerToken Ident = Tok;
Token Ident = Tok;
ConsumeToken();
// If this is the gross GNU extension, handle it now.

Просмотреть файл

@ -177,7 +177,7 @@ Parser::StmtResult Parser::ParseIdentifierStatement(bool OnlyStatement) {
assert(Tok.getKind() == tok::identifier && Tok.getIdentifierInfo() &&
"Not an identifier!");
LexerToken IdentTok = Tok; // Save the whole token.
Token IdentTok = Tok; // Save the whole token.
ConsumeToken(); // eat the identifier.
// identifier ':' statement

Просмотреть файл

@ -32,7 +32,7 @@ namespace clang {
class QualType;
class LangOptions;
class DeclaratorChunk;
class LexerToken;
class Token;
class IntegerLiteral;
class ArrayType;
class LabelStmt;
@ -208,14 +208,14 @@ public:
bool HasTrailingLParen);
virtual ExprResult ParseSimplePrimaryExpr(SourceLocation Loc,
tok::TokenKind Kind);
virtual ExprResult ParseNumericConstant(const LexerToken &);
virtual ExprResult ParseCharacterConstant(const LexerToken &);
virtual ExprResult ParseNumericConstant(const Token &);
virtual ExprResult ParseCharacterConstant(const Token &);
virtual ExprResult ParseParenExpr(SourceLocation L, SourceLocation R,
ExprTy *Val);
/// ParseStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
virtual ExprResult ParseStringLiteral(const LexerToken *Toks, unsigned NumToks);
virtual ExprResult ParseStringLiteral(const Token *Toks, unsigned NumToks);
// Binary/Unary Operators. 'Tok' is the token for the operator.
virtual ExprResult ParseUnaryOp(SourceLocation OpLoc, tok::TokenKind Op,

Просмотреть файл

@ -31,7 +31,7 @@ using namespace clang;
/// string.
///
Action::ExprResult
Sema::ParseStringLiteral(const LexerToken *StringToks, unsigned NumStringToks) {
Sema::ParseStringLiteral(const Token *StringToks, unsigned NumStringToks) {
assert(NumStringToks && "Must have at least one string!");
StringLiteralParser Literal(StringToks, NumStringToks, PP, Context.Target);
@ -95,7 +95,7 @@ Sema::ExprResult Sema::ParseSimplePrimaryExpr(SourceLocation Loc,
}
}
Sema::ExprResult Sema::ParseCharacterConstant(const LexerToken &Tok) {
Sema::ExprResult Sema::ParseCharacterConstant(const Token &Tok) {
llvm::SmallString<16> CharBuffer;
CharBuffer.resize(Tok.getLength());
const char *ThisTokBegin = &CharBuffer[0];
@ -109,7 +109,7 @@ Sema::ExprResult Sema::ParseCharacterConstant(const LexerToken &Tok) {
Tok.getLocation());
}
Action::ExprResult Sema::ParseNumericConstant(const LexerToken &Tok) {
Action::ExprResult Sema::ParseNumericConstant(const Token &Tok) {
// fast path for a single digit (which is quite common). A single digit
// cannot have a trigraph, escaped newline, radix prefix, or type suffix.
if (Tok.getLength() == 1) {

Просмотреть файл

@ -14,7 +14,7 @@
#ifndef LLVM_CLANG_LEXER_H
#define LLVM_CLANG_LEXER_H
#include "clang/Lex/LexerToken.h"
#include "clang/Lex/Token.h"
#include "clang/Lex/MultipleIncludeOpt.h"
#include "clang/Basic/LangOptions.h"
#include <string>
@ -129,14 +129,14 @@ public:
/// return the tok::eof token. Return true if an error occurred and
/// compilation should terminate, false if normal. This implicitly involves
/// the preprocessor.
void Lex(LexerToken &Result) {
void Lex(Token &Result) {
// Start a new token.
Result.startToken();
// NOTE, any changes here should also change code after calls to
// Preprocessor::HandleDirective
if (IsAtStartOfLine) {
Result.setFlag(LexerToken::StartOfLine);
Result.setFlag(Token::StartOfLine);
IsAtStartOfLine = false;
}
@ -148,7 +148,7 @@ public:
/// LexRawToken - Switch the lexer to raw mode, lex a token into Result and
/// switch it back. Return true if the 'next character to read' pointer
/// points and the end of the lexer buffer, false otherwise.
bool LexRawToken(LexerToken &Result) {
bool LexRawToken(Token &Result) {
assert(!LexingRawMode && "Already in raw mode!");
LexingRawMode = true;
Lex(Result);
@ -184,14 +184,14 @@ private:
/// LexTokenInternal - Internal interface to lex a preprocessing token. Called
/// by Lex.
///
void LexTokenInternal(LexerToken &Result);
void LexTokenInternal(Token &Result);
/// FormTokenWithChars - When we lex a token, we have identified a span
/// starting at BufferPtr, going to TokEnd that forms the token. This method
/// takes that range and assigns it to the token as its location and size. In
/// addition, since tokens cannot overlap, this also updates BufferPtr to be
/// TokEnd.
void FormTokenWithChars(LexerToken &Result, const char *TokEnd) {
void FormTokenWithChars(Token &Result, const char *TokEnd) {
Result.setLocation(getSourceLocation(BufferPtr));
Result.setLength(TokEnd-BufferPtr);
BufferPtr = TokEnd;
@ -233,7 +233,7 @@ public:
/// advance over it, and return it. This is tricky in several cases. Here we
/// just handle the trivial case and fall-back to the non-inlined
/// getCharAndSizeSlow method to handle the hard case.
inline char getAndAdvanceChar(const char *&Ptr, LexerToken &Tok) {
inline char getAndAdvanceChar(const char *&Ptr, Token &Tok) {
// If this is not a trigraph and not a UCN or escaped newline, return
// quickly.
if (isObviouslySimpleCharacter(Ptr[0])) return *Ptr++;
@ -249,7 +249,7 @@ private:
/// and added to a given token, check to see if there are diagnostics that
/// need to be emitted or flags that need to be set on the token. If so, do
/// it.
const char *ConsumeChar(const char *Ptr, unsigned Size, LexerToken &Tok) {
const char *ConsumeChar(const char *Ptr, unsigned Size, Token &Tok) {
// Normal case, we consumed exactly one token. Just return it.
if (Size == 1)
return Ptr+Size;
@ -279,7 +279,7 @@ private:
/// getCharAndSizeSlow - Handle the slow/uncommon case of the getCharAndSize
/// method.
char getCharAndSizeSlow(const char *Ptr, unsigned &Size, LexerToken *Tok = 0);
char getCharAndSizeSlow(const char *Ptr, unsigned &Size, Token *Tok = 0);
/// getCharAndSizeNoWarn - Like the getCharAndSize method, but does not ever
/// emit a warning.
@ -343,22 +343,22 @@ private:
// Other lexer functions.
// Helper functions to lex the remainder of a token of the specific type.
void LexIdentifier (LexerToken &Result, const char *CurPtr);
void LexNumericConstant (LexerToken &Result, const char *CurPtr);
void LexStringLiteral (LexerToken &Result, const char *CurPtr,bool Wide);
void LexAngledStringLiteral(LexerToken &Result, const char *CurPtr);
void LexCharConstant (LexerToken &Result, const char *CurPtr);
bool LexEndOfFile (LexerToken &Result, const char *CurPtr);
void LexIdentifier (Token &Result, const char *CurPtr);
void LexNumericConstant (Token &Result, const char *CurPtr);
void LexStringLiteral (Token &Result, const char *CurPtr,bool Wide);
void LexAngledStringLiteral(Token &Result, const char *CurPtr);
void LexCharConstant (Token &Result, const char *CurPtr);
bool LexEndOfFile (Token &Result, const char *CurPtr);
void SkipWhitespace (LexerToken &Result, const char *CurPtr);
bool SkipBCPLComment (LexerToken &Result, const char *CurPtr);
bool SkipBlockComment (LexerToken &Result, const char *CurPtr);
bool SaveBCPLComment (LexerToken &Result, const char *CurPtr);
void SkipWhitespace (Token &Result, const char *CurPtr);
bool SkipBCPLComment (Token &Result, const char *CurPtr);
bool SkipBlockComment (Token &Result, const char *CurPtr);
bool SaveBCPLComment (Token &Result, const char *CurPtr);
/// LexIncludeFilename - After the preprocessor has parsed a #include, lex and
/// (potentially) macro expand the filename. If the sequence parsed is not
/// lexically legal, emit a diagnostic and return a result EOM token.
void LexIncludeFilename(LexerToken &Result);
void LexIncludeFilename(Token &Result);
};

Просмотреть файл

@ -26,7 +26,7 @@ namespace clang {
class Diagnostic;
class Preprocessor;
class LexerToken;
class Token;
class SourceLocation;
class TargetInfo;
@ -142,7 +142,7 @@ class StringLiteralParser {
llvm::SmallString<512> ResultBuf;
char *ResultPtr; // cursor
public:
StringLiteralParser(const LexerToken *StringToks, unsigned NumStringToks,
StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
Preprocessor &PP, TargetInfo &T);
bool hadError;
bool AnyWide;

Просмотреть файл

@ -20,7 +20,7 @@
namespace clang {
class MacroInfo;
class Preprocessor;
class LexerToken;
class Token;
/// MacroArgs - An instance of this class captures information about
/// the formal arguments specified to a function-like macro invocation.
@ -34,11 +34,11 @@ class MacroArgs {
/// PreExpArgTokens - Pre-expanded tokens for arguments that need them. Empty
/// if not yet computed. This includes the EOF marker at the end of the
/// stream.
std::vector<std::vector<LexerToken> > PreExpArgTokens;
std::vector<std::vector<Token> > PreExpArgTokens;
/// StringifiedArgs - This contains arguments in 'stringified' form. If the
/// stringified form of an argument has not yet been computed, this is empty.
std::vector<LexerToken> StringifiedArgs;
std::vector<Token> StringifiedArgs;
/// VarargsElided - True if this is a C99 style varargs macro invocation and
/// there was no argument specified for the "..." argument. If the argument
@ -54,7 +54,7 @@ public:
/// MacroArgs ctor function - Create a new MacroArgs object with the specified
/// macro and argument info.
static MacroArgs *create(const MacroInfo *MI,
const LexerToken *UnexpArgTokens,
const Token *UnexpArgTokens,
unsigned NumArgTokens, bool VarargsElided);
/// destroy - Destroy and deallocate the memory for this object.
@ -63,26 +63,26 @@ public:
/// ArgNeedsPreexpansion - If we can prove that the argument won't be affected
/// by pre-expansion, return false. Otherwise, conservatively return true.
bool ArgNeedsPreexpansion(const LexerToken *ArgTok) const;
bool ArgNeedsPreexpansion(const Token *ArgTok) const;
/// getUnexpArgument - Return a pointer to the first token of the unexpanded
/// token list for the specified formal.
///
const LexerToken *getUnexpArgument(unsigned Arg) const;
const Token *getUnexpArgument(unsigned Arg) const;
/// getArgLength - Given a pointer to an expanded or unexpanded argument,
/// return the number of tokens, not counting the EOF, that make up the
/// argument.
static unsigned getArgLength(const LexerToken *ArgPtr);
static unsigned getArgLength(const Token *ArgPtr);
/// getPreExpArgument - Return the pre-expanded form of the specified
/// argument.
const std::vector<LexerToken> &
const std::vector<Token> &
getPreExpArgument(unsigned Arg, Preprocessor &PP);
/// getStringifiedArgument - Compute, cache, and return the specified argument
/// that has been 'stringified' as required by the # operator.
const LexerToken &getStringifiedArgument(unsigned ArgNo, Preprocessor &PP);
const Token &getStringifiedArgument(unsigned ArgNo, Preprocessor &PP);
/// getNumArguments - Return the number of arguments passed into this macro
/// invocation.
@ -118,7 +118,7 @@ class MacroExpander {
/// MacroTokens - This is the pointer to an array of tokens that the macro is
/// defined to, with arguments expanded for function-like macros. If this is
/// a token stream, these are the tokens we are returning.
const LexerToken *MacroTokens;
const Token *MacroTokens;
/// NumMacroTokens - This is the length of the MacroTokens array.
///
@ -141,7 +141,7 @@ class MacroExpander {
public:
/// Create a macro expander for the specified macro with the specified actual
/// arguments. Note that this ctor takes ownership of the ActualArgs pointer.
MacroExpander(LexerToken &Tok, MacroArgs *ActualArgs, Preprocessor &pp)
MacroExpander(Token &Tok, MacroArgs *ActualArgs, Preprocessor &pp)
: Macro(0), ActualArgs(0), PP(pp) {
Init(Tok, ActualArgs);
}
@ -149,18 +149,18 @@ public:
/// Init - Initialize this macro expander to expand from the specified macro
/// with the specified argument information. Note that this ctor takes
/// ownership of the ActualArgs pointer.
void Init(LexerToken &Tok, MacroArgs *ActualArgs);
void Init(Token &Tok, MacroArgs *ActualArgs);
/// Create a macro expander for the specified token stream. This does not
/// take ownership of the specified token vector.
MacroExpander(const LexerToken *TokArray, unsigned NumToks, Preprocessor &pp)
MacroExpander(const Token *TokArray, unsigned NumToks, Preprocessor &pp)
: Macro(0), ActualArgs(0), PP(pp) {
Init(TokArray, NumToks);
}
/// Init - Initialize this macro expander with the specified token stream.
/// This does not take ownership of the specified token vector.
void Init(const LexerToken *TokArray, unsigned NumToks);
void Init(const Token *TokArray, unsigned NumToks);
~MacroExpander() { destroy(); }
@ -170,7 +170,7 @@ public:
unsigned isNextTokenLParen() const;
/// Lex - Lex and return a token from this macro stream.
void Lex(LexerToken &Tok);
void Lex(Token &Tok);
private:
void destroy();
@ -185,7 +185,7 @@ private:
/// operator. Read the ## and RHS, and paste the LHS/RHS together. If there
/// are is another ## after it, chomp it iteratively. Return the result as
/// Tok.
void PasteTokens(LexerToken &Tok);
void PasteTokens(Token &Tok);
/// Expand the arguments of a function-like macro so that we can quickly
/// return preexpanded tokens from MacroTokens.

Просмотреть файл

@ -14,7 +14,7 @@
#ifndef LLVM_CLANG_MACROINFO_H
#define LLVM_CLANG_MACROINFO_H
#include "clang/Lex/LexerToken.h"
#include "clang/Lex/Token.h"
#include "llvm/ADT/SmallVector.h"
#include <vector>
#include <cassert>
@ -39,7 +39,7 @@ class MacroInfo {
/// ReplacementTokens - This is the list of tokens that the macro is defined
/// to.
llvm::SmallVector<LexerToken, 8> ReplacementTokens;
llvm::SmallVector<Token, 8> ReplacementTokens;
/// IsFunctionLike - True if this macro is a function-like macro, false if it
/// is an object-like macro.
@ -165,18 +165,18 @@ public:
return ReplacementTokens.size();
}
const LexerToken &getReplacementToken(unsigned Tok) const {
const Token &getReplacementToken(unsigned Tok) const {
assert(Tok < ReplacementTokens.size() && "Invalid token #");
return ReplacementTokens[Tok];
}
typedef llvm::SmallVector<LexerToken, 8>::const_iterator tokens_iterator;
typedef llvm::SmallVector<Token, 8>::const_iterator tokens_iterator;
tokens_iterator tokens_begin() const { return ReplacementTokens.begin(); }
tokens_iterator tokens_end() const { return ReplacementTokens.end(); }
/// AddTokenToBody - Add the specified token to the replacement text for the
/// macro.
void AddTokenToBody(const LexerToken &Tok) {
void AddTokenToBody(const Token &Tok) {
ReplacementTokens.push_back(Tok);
}

Просмотреть файл

@ -19,7 +19,7 @@
namespace clang {
class Preprocessor;
class LexerToken;
class Token;
class IdentifierInfo;
class PragmaNamespace;
@ -39,7 +39,7 @@ public:
virtual ~PragmaHandler();
const IdentifierInfo *getName() const { return Name; }
virtual void HandlePragma(Preprocessor &PP, LexerToken &FirstToken) = 0;
virtual void HandlePragma(Preprocessor &PP, Token &FirstToken) = 0;
/// getIfNamespace - If this is a namespace, return it. This is equivalent to
/// using a dynamic_cast, but doesn't require RTTI.
@ -71,7 +71,7 @@ public:
Handlers.push_back(Handler);
}
virtual void HandlePragma(Preprocessor &PP, LexerToken &FirstToken);
virtual void HandlePragma(Preprocessor &PP, Token &FirstToken);
virtual PragmaNamespace *getIfNamespace() { return this; }
};

Просмотреть файл

@ -191,14 +191,14 @@ public:
/// EnterMacro - Add a Macro to the top of the include stack and start lexing
/// tokens from it instead of the current buffer. Args specifies the
/// tokens input to a function-like macro.
void EnterMacro(LexerToken &Identifier, MacroArgs *Args);
void EnterMacro(Token &Identifier, MacroArgs *Args);
/// EnterTokenStream - Add a "macro" context to the top of the include stack,
/// which will cause the lexer to start returning the specified tokens. Note
/// that these tokens will be re-macro-expanded when/if expansion is enabled.
/// This method assumes that the specified stream of tokens has a permanent
/// owner somewhere, so they do not need to be copied.
void EnterTokenStream(const LexerToken *Toks, unsigned NumToks);
void EnterTokenStream(const Token *Toks, unsigned NumToks);
/// RemoveTopOfLexerStack - Pop the current lexer/macro exp off the top of the
/// lexer stack. This should only be used in situations where the current
@ -207,7 +207,7 @@ public:
/// Lex - To lex a token from the preprocessor, just pull a token from the
/// current lexer or macro object.
void Lex(LexerToken &Result) {
void Lex(Token &Result) {
if (CurLexer)
CurLexer->Lex(Result);
else
@ -217,7 +217,7 @@ public:
/// LexNonComment - Lex a token. If it's a comment, keep lexing until we get
/// something not a comment. This is useful in -E -C mode where comments
/// would foul up preprocessor directive handling.
void LexNonComment(LexerToken &Result) {
void LexNonComment(Token &Result) {
do
Lex(Result);
while (Result.getKind() == tok::comment);
@ -225,7 +225,7 @@ public:
/// LexUnexpandedToken - This is just like Lex, but this disables macro
/// expansion of identifier tokens.
void LexUnexpandedToken(LexerToken &Result) {
void LexUnexpandedToken(Token &Result) {
// Disable macro expansion.
bool OldVal = DisableMacroExpansion;
DisableMacroExpansion = true;
@ -237,14 +237,14 @@ public:
}
/// Diag - Forwarding function for diagnostics. This emits a diagnostic at
/// the specified LexerToken's location, translating the token's start
/// the specified Token's location, translating the token's start
/// position in the current buffer into a SourcePosition object for rendering.
void Diag(SourceLocation Loc, unsigned DiagID);
void Diag(SourceLocation Loc, unsigned DiagID, const std::string &Msg);
void Diag(const LexerToken &Tok, unsigned DiagID) {
void Diag(const Token &Tok, unsigned DiagID) {
Diag(Tok.getLocation(), DiagID);
}
void Diag(const LexerToken &Tok, unsigned DiagID, const std::string &Msg) {
void Diag(const Token &Tok, unsigned DiagID, const std::string &Msg) {
Diag(Tok.getLocation(), DiagID, Msg);
}
@ -253,7 +253,7 @@ public:
/// after trigraph expansion and escaped-newline folding. In particular, this
/// wants to get the true, uncanonicalized, spelling of things like digraphs
/// UCNs, etc.
std::string getSpelling(const LexerToken &Tok) const;
std::string getSpelling(const Token &Tok) const;
/// getSpelling - This method is used to get the spelling of a token into a
/// preallocated buffer, instead of as an std::string. The caller is required
@ -265,7 +265,7 @@ public:
/// to point to a constant buffer with the data already in it (avoiding a
/// copy). The caller is not allowed to modify the returned buffer pointer
/// if an internal buffer is returned.
unsigned getSpelling(const LexerToken &Tok, const char *&Buffer) const;
unsigned getSpelling(const Token &Tok, const char *&Buffer) const;
/// CreateString - Plop the specified string into a scratch buffer and return
@ -276,7 +276,7 @@ public:
/// DumpToken - Print the token to stderr, used for debugging.
///
void DumpToken(const LexerToken &Tok, bool DumpFlags = false) const;
void DumpToken(const Token &Tok, bool DumpFlags = false) const;
void DumpMacro(const MacroInfo &MI) const;
/// AdvanceToTokenCharacter - Given a location that specifies the start of a
@ -302,32 +302,32 @@ public:
/// LookUpIdentifierInfo - Given a tok::identifier token, look up the
/// identifier information for the token and install it into the token.
IdentifierInfo *LookUpIdentifierInfo(LexerToken &Identifier,
IdentifierInfo *LookUpIdentifierInfo(Token &Identifier,
const char *BufPtr = 0);
/// HandleIdentifier - This callback is invoked when the lexer reads an
/// identifier and has filled in the tokens IdentifierInfo member. This
/// callback potentially macro expands it or turns it into a named token (like
/// 'for').
void HandleIdentifier(LexerToken &Identifier);
void HandleIdentifier(Token &Identifier);
/// HandleEndOfFile - This callback is invoked when the lexer hits the end of
/// the current file. This either returns the EOF token and returns true, or
/// pops a level off the include stack and returns false, at which point the
/// client should call lex again.
bool HandleEndOfFile(LexerToken &Result, bool isEndOfMacro = false);
bool HandleEndOfFile(Token &Result, bool isEndOfMacro = false);
/// HandleEndOfMacro - This callback is invoked when the lexer hits the end of
/// the current macro line. It returns true if Result is filled in with a
/// token, or false if Lex should be called again.
bool HandleEndOfMacro(LexerToken &Result);
bool HandleEndOfMacro(Token &Result);
/// HandleDirective - This callback is invoked when the lexer sees a # token
/// at the start of a line. This consumes the directive, modifies the
/// lexer/preprocessor state, and advances the lexer(s) so that the next token
/// read is the correct one.
void HandleDirective(LexerToken &Result);
void HandleDirective(Token &Result);
/// CheckEndOfDirective - Ensure that the next token is a tok::eom token. If
/// not, emit a diagnostic and consume up until the eom.
@ -341,7 +341,7 @@ private:
/// ReadMacroName - Lex and validate a macro name, which occurs after a
/// #define or #undef. This emits a diagnostic, sets the token kind to eom,
/// and discards the rest of the macro line if the macro name is invalid.
void ReadMacroName(LexerToken &MacroNameTok, char isDefineUndef = 0);
void ReadMacroName(Token &MacroNameTok, char isDefineUndef = 0);
/// ReadMacroDefinitionArgList - The ( starting an argument list of a macro
/// definition has just been read. Lex the rest of the arguments and the
@ -377,7 +377,7 @@ private:
/// HandleMacroExpandedIdentifier - If an identifier token is read that is to
/// be expanded as a macro, handle it and return the next token as 'Tok'. If
/// the macro should not be expanded return true, otherwise return false.
bool HandleMacroExpandedIdentifier(LexerToken &Tok, MacroInfo *MI);
bool HandleMacroExpandedIdentifier(Token &Tok, MacroInfo *MI);
/// isNextPPTokenLParen - Determine whether the next preprocessor token to be
/// lexed is a '('. If so, consume the token and return true, if not, this
@ -387,16 +387,16 @@ private:
/// ReadFunctionLikeMacroArgs - After reading "MACRO(", this method is
/// invoked to read all of the formal arguments specified for the macro
/// invocation. This returns null on error.
MacroArgs *ReadFunctionLikeMacroArgs(LexerToken &MacroName, MacroInfo *MI);
MacroArgs *ReadFunctionLikeMacroArgs(Token &MacroName, MacroInfo *MI);
/// ExpandBuiltinMacro - If an identifier token is read that is to be expanded
/// as a builtin macro, handle it and return the next token as 'Tok'.
void ExpandBuiltinMacro(LexerToken &Tok);
void ExpandBuiltinMacro(Token &Tok);
/// Handle_Pragma - Read a _Pragma directive, slice it up, process it, then
/// return the first token after the directive. The _Pragma token has just
/// been read into 'Tok'.
void Handle_Pragma(LexerToken &Tok);
void Handle_Pragma(Token &Tok);
/// EnterSourceFileWithLexer - Add a lexer to the top of the include stack and
@ -409,7 +409,7 @@ private:
/// caller is expected to provide a buffer that is large enough to hold the
/// spelling of the filename, but is also expected to handle the case when
/// this method decides to use a different buffer.
bool GetIncludeFilenameSpelling(const LexerToken &FNTok,
bool GetIncludeFilenameSpelling(const Token &FNTok,
const char *&BufStart, const char *&BufEnd);
/// LookupFile - Given a "foo" or <foo> reference, look up the indicated file,
@ -424,38 +424,38 @@ private:
/// should side-effect the current preprocessor object so that the next call
/// to Lex() will return the appropriate token next.
void HandleUserDiagnosticDirective(LexerToken &Tok, bool isWarning);
void HandleIdentSCCSDirective(LexerToken &Tok);
void HandleUserDiagnosticDirective(Token &Tok, bool isWarning);
void HandleIdentSCCSDirective(Token &Tok);
// File inclusion.
void HandleIncludeDirective(LexerToken &Tok,
void HandleIncludeDirective(Token &Tok,
const DirectoryLookup *LookupFrom = 0,
bool isImport = false);
void HandleIncludeNextDirective(LexerToken &Tok);
void HandleImportDirective(LexerToken &Tok);
void HandleIncludeNextDirective(Token &Tok);
void HandleImportDirective(Token &Tok);
// Macro handling.
void HandleDefineDirective(LexerToken &Tok, bool isTargetSpecific);
void HandleUndefDirective(LexerToken &Tok);
void HandleDefineOtherTargetDirective(LexerToken &Tok);
// HandleAssertDirective(LexerToken &Tok);
// HandleUnassertDirective(LexerToken &Tok);
void HandleDefineDirective(Token &Tok, bool isTargetSpecific);
void HandleUndefDirective(Token &Tok);
void HandleDefineOtherTargetDirective(Token &Tok);
// HandleAssertDirective(Token &Tok);
// HandleUnassertDirective(Token &Tok);
// Conditional Inclusion.
void HandleIfdefDirective(LexerToken &Tok, bool isIfndef,
void HandleIfdefDirective(Token &Tok, bool isIfndef,
bool ReadAnyTokensBeforeDirective);
void HandleIfDirective(LexerToken &Tok, bool ReadAnyTokensBeforeDirective);
void HandleEndifDirective(LexerToken &Tok);
void HandleElseDirective(LexerToken &Tok);
void HandleElifDirective(LexerToken &Tok);
void HandleIfDirective(Token &Tok, bool ReadAnyTokensBeforeDirective);
void HandleEndifDirective(Token &Tok);
void HandleElseDirective(Token &Tok);
void HandleElifDirective(Token &Tok);
// Pragmas.
void HandlePragmaDirective();
public:
void HandlePragmaOnce(LexerToken &OnceTok);
void HandlePragmaPoison(LexerToken &PoisonTok);
void HandlePragmaSystemHeader(LexerToken &SysHeaderTok);
void HandlePragmaDependency(LexerToken &DependencyTok);
void HandlePragmaOnce(Token &OnceTok);
void HandlePragmaPoison(Token &PoisonTok);
void HandlePragmaSystemHeader(Token &SysHeaderTok);
void HandlePragmaDependency(Token &DependencyTok);
};
} // end namespace clang

Просмотреть файл

@ -1,4 +1,4 @@
//===--- LexerToken.h - Token interface -------------------------*- C++ -*-===//
//===--- Token.h - Token interface ------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -7,12 +7,12 @@
//
//===----------------------------------------------------------------------===//
//
// This file defines the LexerToken interface.
// This file defines the Token interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LEXERTOKEN_H
#define LLVM_CLANG_LEXERTOKEN_H
#ifndef LLVM_CLANG_TOKEN_H
#define LLVM_CLANG_TOKEN_H
#include "clang/Basic/TokenKinds.h"
#include "clang/Basic/SourceLocation.h"
@ -21,11 +21,11 @@ namespace clang {
class IdentifierInfo;
/// LexerToken - This structure provides full information about a lexed token.
/// Token - This structure provides full information about a lexed token.
/// It is not intended to be space efficient, it is intended to return as much
/// information as possible about each returned token. This is expected to be
/// compressed into a smaller form if memory footprint is important.
class LexerToken {
class Token {
/// The location and length of the token text itself.
SourceLocation Loc;
unsigned Length;

Просмотреть файл

@ -27,7 +27,7 @@ namespace clang {
class Action;
// Lex.
class IdentifierInfo;
class LexerToken;
class Token;
/// Action - As the parser reads the input file and recognizes the productions
/// of the grammar, it invokes methods on this class to turn the parsed input
@ -279,12 +279,12 @@ public:
tok::TokenKind Kind) {
return 0;
}
virtual ExprResult ParseCharacterConstant(const LexerToken &) { return 0; }
virtual ExprResult ParseNumericConstant(const LexerToken &) { return 0; }
virtual ExprResult ParseCharacterConstant(const Token &) { return 0; }
virtual ExprResult ParseNumericConstant(const Token &) { return 0; }
/// ParseStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
virtual ExprResult ParseStringLiteral(const LexerToken *Toks, unsigned NumToks) {
virtual ExprResult ParseStringLiteral(const Token *Toks, unsigned NumToks) {
return 0;
}

Просмотреть файл

@ -32,7 +32,7 @@ class Parser {
/// Tok - The current token we are peeking head. All parsing methods assume
/// that this is valid.
LexerToken Tok;
Token Tok;
unsigned short ParenCount, BracketCount, BraceCount;
@ -219,7 +219,7 @@ private:
void Diag(SourceLocation Loc, unsigned DiagID,
const std::string &Msg = std::string());
void Diag(const LexerToken &Tok, unsigned DiagID,
void Diag(const Token &Tok, unsigned DiagID,
const std::string &M = std::string()) {
Diag(Tok.getLocation(), DiagID, M);
}
@ -275,9 +275,9 @@ private:
ExprResult ParseConstantExpression();
ExprResult ParseAssignmentExpression(); // Expr that doesn't include commas.
ExprResult ParseExpressionWithLeadingIdentifier(const LexerToken &Tok);
ExprResult ParseAssignmentExprWithLeadingIdentifier(const LexerToken &Tok);
ExprResult ParseAssignmentExpressionWithLeadingStar(const LexerToken &Tok);
ExprResult ParseExpressionWithLeadingIdentifier(const Token &Tok);
ExprResult ParseAssignmentExprWithLeadingIdentifier(const Token &Tok);
ExprResult ParseAssignmentExpressionWithLeadingStar(const Token &Tok);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, unsigned MinPrec);
ExprResult ParseCastExpression(bool isUnaryExpression);