Mirror of https://github.com/microsoft/clang-1.git
Fix PR5633 by making the preprocessor handle the case where we can
stat a file but mmap'ing it fails. In this case, we emit an error like:

  t.c:1:10: fatal error: error opening file '../../foo.h'

instead of "cannot find file".

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@90110 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Parent: 76ed1f76f9
Commit: 6e2901407b
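Condensed from the hunks below, the caller-side shape of the fix looks roughly like this (a sketch using identifiers from the diff, not the literal patch):

    // In Preprocessor::HandleIncludeDirective, entering the included file can
    // now fail; a failure is reported with the new err_pp_error_opening_file
    // diagnostic instead of being conflated with "file not found".
    if (EnterSourceFile(FID, CurDir))   // true => the file's buffer could not be loaded
      Diag(FilenameTok, diag::err_pp_error_opening_file)
        << std::string(SourceMgr.getFileEntryForID(FID)->getName());
    // User-visible result: t.c:1:10: fatal error: error opening file '../../foo.h'
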
@@ -170,6 +170,8 @@ def ext_pp_counter : Extension<
 def err_pp_invalid_directive : Error<"invalid preprocessing directive">;
 def err_pp_hash_error : Error<"#error%0">;
 def err_pp_file_not_found : Error<"'%0' file not found">, DefaultFatal;
+def err_pp_error_opening_file : Error<
+  "error opening file '%0'">, DefaultFatal;
 def err_pp_empty_filename : Error<"empty filename">;
 def err_pp_include_too_deep : Error<"#include nested too deeply">;
 def err_pp_expects_filename : Error<"expected \"FILENAME\" or <FILENAME>">;

@@ -78,7 +78,7 @@ public:
   /// with the specified preprocessor managing the lexing process. This lexer
   /// assumes that the associated file buffer and Preprocessor objects will
   /// outlive it, so it doesn't take ownership of either of them.
-  Lexer(FileID FID, Preprocessor &PP);
+  Lexer(FileID FID, const llvm::MemoryBuffer *InputBuffer, Preprocessor &PP);

   /// Lexer constructor - Create a new raw lexer object. This object is only
   /// suitable for calls to 'LexRawToken'. This lexer assumes that the text

@@ -89,7 +89,8 @@ public:
   /// Lexer constructor - Create a new raw lexer object. This object is only
   /// suitable for calls to 'LexRawToken'. This lexer assumes that the text
   /// range will outlive it, so it doesn't take ownership of it.
-  Lexer(FileID FID, const SourceManager &SM, const LangOptions &Features);
+  Lexer(FileID FID, const llvm::MemoryBuffer *InputBuffer,
+        const SourceManager &SM, const LangOptions &Features);

   /// Create_PragmaLexer: Lexer constructor - Create a new lexer object for
   /// _Pragma expansion. This has a variety of magic semantics that this method

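With both constructors now taking the buffer explicitly, a Lexer no longer fetches its own MemoryBuffer; every construction site touched by this patch follows the same idiom (a sketch of the pattern, using names from the hunks below):

    // The caller obtains the buffer from the SourceManager up front...
    const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
    // ...and hands it to the raw lexer (or to the Preprocessor-managed form,
    // Lexer(FID, FromFile, PP)) instead of the Lexer calling getBuffer itself.
    Lexer RawLex(FID, FromFile, SM, LangOpts);
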
@@ -329,9 +329,9 @@ public:
   void EnterMainSourceFile();

   /// EnterSourceFile - Add a source file to the top of the include stack and
-  /// start lexing tokens from it instead of the current buffer. If isMainFile
-  /// is true, this is the main file for the translation unit.
-  void EnterSourceFile(FileID CurFileID, const DirectoryLookup *Dir);
+  /// start lexing tokens from it instead of the current buffer. Return true
+  /// on failure.
+  bool EnterSourceFile(FileID CurFileID, const DirectoryLookup *Dir);

   /// EnterMacro - Add a Macro to the top of the include stack and start lexing
   /// tokens from it instead of the current buffer. Args specifies the

@@ -482,7 +482,8 @@ void PTHWriter::GeneratePTH(const std::string *MainFile) {
     if (!B) continue;

     FileID FID = SM.createFileID(FE, SourceLocation(), SrcMgr::C_User);
-    Lexer L(FID, SM, LOpts);
+    const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
+    Lexer L(FID, FromFile, SM, LOpts);
     PM.insert(FE, LexTokens(L));
   }

@@ -149,7 +149,8 @@ static void FindExpectedDiags(Preprocessor &PP,
   FileID FID = PP.getSourceManager().getMainFileID();

   // Create a lexer to lex all the tokens of the main file in raw mode.
-  Lexer RawLex(FID, PP.getSourceManager(), PP.getLangOptions());
+  const llvm::MemoryBuffer *FromFile = PP.getSourceManager().getBuffer(FID);
+  Lexer RawLex(FID, FromFile, PP.getSourceManager(), PP.getLangOptions());

   // Return comments as tokens, this is how we find expected diagnostics.
   RawLex.SetCommentRetentionState(true);

@@ -192,7 +192,8 @@ void DumpRawTokensAction::ExecuteAction() {
   SourceManager &SM = PP.getSourceManager();

   // Start lexing the specified input file.
-  Lexer RawLex(SM.getMainFileID(), SM, PP.getLangOptions());
+  const llvm::MemoryBuffer *FromFile = SM.getBuffer(SM.getMainFileID());
+  Lexer RawLex(SM.getMainFileID(), FromFile, SM, PP.getLangOptions());
   RawLex.SetKeepWhitespaceMode(true);

   Token RawTok;

@@ -65,7 +65,8 @@ static void LexRawTokensFromMainFile(Preprocessor &PP,

   // Create a lexer to lex all the tokens of the main file in raw mode. Even
   // though it is in raw mode, it will not return comments.
-  Lexer RawLex(SM.getMainFileID(), SM, PP.getLangOptions());
+  const llvm::MemoryBuffer *FromFile = SM.getBuffer(SM.getMainFileID());
+  Lexer RawLex(SM.getMainFileID(), FromFile, SM, PP.getLangOptions());

   // Switch on comment lexing because we really do want them.
   RawLex.SetCommentRetentionState(true);

@@ -164,12 +164,14 @@ static void FindExpectedDiags(Preprocessor &PP,
                               DiagList &ExpectedNotes) {
   // Create a raw lexer to pull all the comments out of the main file. We don't
   // want to look in #include'd headers for expected-error strings.
-  FileID FID = PP.getSourceManager().getMainFileID();
-  if (PP.getSourceManager().getMainFileID().isInvalid())
+  SourceManager &SM = PP.getSourceManager();
+  FileID FID = SM.getMainFileID();
+  if (SM.getMainFileID().isInvalid())
     return;

   // Create a lexer to lex all the tokens of the main file in raw mode.
-  Lexer RawLex(FID, PP.getSourceManager(), PP.getLangOptions());
+  const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
+  Lexer RawLex(FID, FromFile, SM, PP.getLangOptions());

   // Return comments as tokens, this is how we find expected diagnostics.
   RawLex.SetCommentRetentionState(true);

@@ -95,13 +95,11 @@ void Lexer::InitLexer(const char *BufStart, const char *BufPtr,
 /// with the specified preprocessor managing the lexing process. This lexer
 /// assumes that the associated file buffer and Preprocessor objects will
 /// outlive it, so it doesn't take ownership of either of them.
-Lexer::Lexer(FileID FID, Preprocessor &PP)
+Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *InputFile, Preprocessor &PP)
   : PreprocessorLexer(&PP, FID),
     FileLoc(PP.getSourceManager().getLocForStartOfFile(FID)),
     Features(PP.getLangOptions()) {
-
-  const llvm::MemoryBuffer *InputFile = PP.getSourceManager().getBuffer(FID);

   InitLexer(InputFile->getBufferStart(), InputFile->getBufferStart(),
             InputFile->getBufferEnd());

@@ -129,9 +127,9 @@ Lexer::Lexer(SourceLocation fileloc, const LangOptions &features,
 /// Lexer constructor - Create a new raw lexer object. This object is only
 /// suitable for calls to 'LexRawToken'. This lexer assumes that the text
 /// range will outlive it, so it doesn't take ownership of it.
-Lexer::Lexer(FileID FID, const SourceManager &SM, const LangOptions &features)
+Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *FromFile,
+             const SourceManager &SM, const LangOptions &features)
   : FileLoc(SM.getLocForStartOfFile(FID)), Features(features) {
-  const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);

   InitLexer(FromFile->getBufferStart(), FromFile->getBufferStart(),
             FromFile->getBufferEnd());

@@ -163,7 +161,8 @@ Lexer *Lexer::Create_PragmaLexer(SourceLocation SpellingLoc,

   // Create the lexer as if we were going to lex the file normally.
   FileID SpellingFID = SM.getFileID(SpellingLoc);
-  Lexer *L = new Lexer(SpellingFID, PP);
+  const llvm::MemoryBuffer *InputFile = SM.getBuffer(SpellingFID);
+  Lexer *L = new Lexer(SpellingFID, InputFile, PP);

   // Now that the lexer is created, change the start/end locations so that we
   // just lex the subsection of the file that we want. This is lexing from a

@@ -16,6 +16,7 @@
 #include "clang/Lex/HeaderSearch.h"
 #include "clang/Lex/MacroInfo.h"
 #include "clang/Lex/LexDiagnostic.h"
+#include "clang/Basic/FileManager.h"
 #include "clang/Basic/SourceManager.h"
 #include "llvm/ADT/APInt.h"
 using namespace clang;

@@ -1111,7 +1112,9 @@ void Preprocessor::HandleIncludeDirective(Token &IncludeTok,
   }

   // Finally, if all is good, enter the new file!
-  EnterSourceFile(FID, CurDir);
+  if (EnterSourceFile(FID, CurDir))
+    Diag(FilenameTok, diag::err_pp_error_opening_file)
+      << std::string(SourceMgr.getFileEntryForID(FID)->getName());
 }

 /// HandleIncludeNextDirective - Implements #include_next.

@@ -64,7 +64,7 @@ PreprocessorLexer *Preprocessor::getCurrentFileLexer() const {

 /// EnterSourceFile - Add a source file to the top of the include stack and
 /// start lexing tokens from it instead of the current buffer.
-void Preprocessor::EnterSourceFile(FileID FID, const DirectoryLookup *CurDir) {
+bool Preprocessor::EnterSourceFile(FileID FID, const DirectoryLookup *CurDir) {
   assert(CurTokenLexer == 0 && "Cannot #include a file inside a macro!");
   ++NumEnteredSourceFiles;

@@ -72,10 +72,19 @@ void Preprocessor::EnterSourceFile(FileID FID, const DirectoryLookup *CurDir) {
     MaxIncludeStackDepth = IncludeMacroStack.size();

   if (PTH) {
-    if (PTHLexer *PL = PTH->CreateLexer(FID))
-      return EnterSourceFileWithPTH(PL, CurDir);
+    if (PTHLexer *PL = PTH->CreateLexer(FID)) {
+      EnterSourceFileWithPTH(PL, CurDir);
+      return false;
+    }
   }
-  EnterSourceFileWithLexer(new Lexer(FID, *this), CurDir);
+
+  // Get the MemoryBuffer for this FID, if it fails, we fail.
+  const llvm::MemoryBuffer *InputFile = getSourceManager().getBuffer(FID);
+  if (InputFile == 0)
+    return true;
+
+  EnterSourceFileWithLexer(new Lexer(FID, InputFile, *this), CurDir);
+  return false;
 }

 /// EnterSourceFileWithLexer - Add a source file to the top of the include stack

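To summarize the new contract established by the hunk above (a condensed sketch, not additional code from the patch): EnterSourceFile now reports failure through its return value, and the failure it detects is a null buffer from the SourceManager.

    // false = a lexer (PTH or normal) was entered successfully;
    // true  = the file could be stat'd earlier, but its contents could not be
    //         read/mapped, so getBuffer(FID) returned a null MemoryBuffer.
    const llvm::MemoryBuffer *InputFile = getSourceManager().getBuffer(FID);
    if (InputFile == 0)
      return true;   // caller (HandleIncludeDirective) emits err_pp_error_opening_file
    EnterSourceFileWithLexer(new Lexer(FID, InputFile, *this), CurDir);
    return false;
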
@@ -353,7 +353,8 @@ void html::SyntaxHighlight(Rewriter &R, FileID FID, const Preprocessor &PP) {
   RewriteBuffer &RB = R.getEditBuffer(FID);

   const SourceManager &SM = PP.getSourceManager();
-  Lexer L(FID, SM, PP.getLangOptions());
+  const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
+  Lexer L(FID, FromFile, SM, PP.getLangOptions());
   const char *BufferStart = L.getBufferStart();

   // Inform the preprocessor that we want to retain comments as tokens, so we

@@ -444,7 +445,8 @@ void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
   const SourceManager &SM = PP.getSourceManager();
   std::vector<Token> TokenStream;

-  Lexer L(FID, SM, PP.getLangOptions());
+  const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
+  Lexer L(FID, FromFile, SM, PP.getLangOptions());

   // Lex all the tokens in raw mode, to avoid entering #includes or expanding
   // macros.

@@ -23,7 +23,8 @@ TokenRewriter::TokenRewriter(FileID FID, SourceManager &SM,
   ScratchBuf.reset(new ScratchBuffer(SM));

   // Create a lexer to lex all the tokens of the main file in raw mode.
-  Lexer RawLex(FID, SM, LangOpts);
+  const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
+  Lexer RawLex(FID, FromFile, SM, LangOpts);

   // Return all comments and whitespace as tokens.
   RawLex.SetKeepWhitespaceMode(true);