//===--- ASTReader.cpp - AST File Reader ------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ASTReader class, which reads AST files.
//
//===----------------------------------------------------------------------===//

#include "clang/Serialization/ASTReader.h"
|
|
|
|
#include "clang/Serialization/ASTDeserializationListener.h"
|
2010-08-20 20:03:52 +04:00
|
|
|
#include "ASTCommon.h"
|
2009-04-11 00:39:37 +04:00
|
|
|
#include "clang/Frontend/FrontendDiagnostic.h"
|
2009-11-12 02:58:53 +03:00
|
|
|
#include "clang/Frontend/Utils.h"
|
2010-08-13 00:07:10 +04:00
|
|
|
#include "clang/Sema/Sema.h"
|
2010-08-24 12:50:51 +04:00
|
|
|
#include "clang/Sema/Scope.h"
|
2009-04-14 04:24:19 +04:00
|
|
|
#include "clang/AST/ASTConsumer.h"
|
2009-04-10 02:27:44 +04:00
|
|
|
#include "clang/AST/ASTContext.h"
|
2010-08-25 09:32:35 +04:00
|
|
|
#include "clang/AST/DeclTemplate.h"
|
2009-04-15 01:18:50 +04:00
|
|
|
#include "clang/AST/Expr.h"
|
2010-08-24 11:32:53 +04:00
|
|
|
#include "clang/AST/ExprCXX.h"
|
2011-03-01 02:58:31 +03:00
|
|
|
#include "clang/AST/NestedNameSpecifier.h"
|
2009-04-10 02:27:44 +04:00
|
|
|
#include "clang/AST/Type.h"
|
2009-10-17 01:56:05 +04:00
|
|
|
#include "clang/AST/TypeLocVisitor.h"
|
2009-04-11 01:41:48 +04:00
|
|
|
#include "clang/Lex/MacroInfo.h"
|
Implement serialization and lazy deserialization of the preprocessing
record (which includes all macro instantiations and definitions). As
with all lay deserialization, this introduces a new external source
(here, an external preprocessing record source) that loads all of the
preprocessed entities prior to iterating over the entities.
The preprocessing record is an optional part of the precompiled header
that is disabled by default (enabled with
-detailed-preprocessing-record). When the preprocessor given to the
PCH writer has a preprocessing record, that record is written into the
PCH file. When the PCH reader is given a PCH file that contains a
preprocessing record, it will be lazily loaded (which, effectively,
implicitly adds -detailed-preprocessing-record). This is the first
case where we have sections of the precompiled header that are
added/removed based on a compilation flag, which is
unfortunate. However, this data consumes ~550k in the PCH file for
Cocoa.h (out of ~9.9MB), and there is a non-trivial cost to gathering
this detailed preprocessing information, so it's too expensive to turn
on by default. In the future, we should investigate a better encoding
of this information.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@99002 91177308-0d34-0410-b5e6-96231b3b80d8
2010-03-20 00:51:54 +03:00
|
|
|
#include "clang/Lex/PreprocessingRecord.h"
|
2009-04-10 07:52:48 +04:00
|
|
|
#include "clang/Lex/Preprocessor.h"
|
2009-04-25 00:03:17 +04:00
|
|
|
#include "clang/Lex/HeaderSearch.h"
|
Lazy deserialization of the declaration chains associated with
identifiers from a precompiled header.
This patch changes the primary name lookup method for entities within
a precompiled header. Previously, we would load all of the names of
declarations at translation unit scope into a large DenseMap (inside
the TranslationUnitDecl's DeclContext), and then perform a special
"last resort" lookup into this DeclContext when we knew there was a
PCH file (see Sema::LookupName). Now, when we see an identifier named
for the first time, we load all of the declarations with that name
that are visible from the translation unit into the IdentifierInfo's
chain of declarations. Thus, the explicit "look into the translation
unit's DeclContext" code is gone, and Sema effectively uses the same
IdentifierInfo-based name lookup mechanism whether we are using a PCH
file or not.
This approach should help PCH scale with the size of the input program
rather than the size of the PCH file. The "Hello, World!" application
with Carbon.h as a PCH file now loads 20% of the identifiers in the
PCH file rather than 85% of the identifiers.
90% of the 20% of identifiers loaded are actually loaded when we
deserialize the preprocessor state. The next step is to make the
preprocessor load macros lazily, which should drastically reduce the
number of types, declarations, and identifiers loaded for "Hello,
World".
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@69737 91177308-0d34-0410-b5e6-96231b3b80d8
2009-04-22 02:25:48 +04:00
|
|
|
#include "clang/Basic/OnDiskHashTable.h"
|
2009-04-10 07:52:48 +04:00
|
|
|
#include "clang/Basic/SourceManager.h"
|
2009-04-13 20:31:14 +04:00
|
|
|
#include "clang/Basic/SourceManagerInternals.h"
|
2009-04-10 07:52:48 +04:00
|
|
|
#include "clang/Basic/FileManager.h"
|
2010-11-23 22:19:34 +03:00
|
|
|
#include "clang/Basic/FileSystemStatCache.h"
|
2009-04-11 01:16:55 +04:00
|
|
|
#include "clang/Basic/TargetInfo.h"
|
2009-10-06 01:07:28 +04:00
|
|
|
#include "clang/Basic/Version.h"
|
Implement a new 'availability' attribute, that allows one to specify
which versions of an OS provide a certain facility. For example,
void foo()
__attribute__((availability(macosx,introduced=10.2,deprecated=10.4,obsoleted=10.6)));
says that the function "foo" was introduced in 10.2, deprecated in
10.4, and completely obsoleted in 10.6. This attribute ties in with
the deployment targets (e.g., -mmacosx-version-min=10.1 specifies that
we want to deploy back to Mac OS X 10.1). There are several concrete
behaviors that this attribute enables, as illustrated with the
function foo() above:
- If we choose a deployment target >= Mac OS X 10.4, uses of "foo"
will result in a deprecation warning, as if we had placed
attribute((deprecated)) on it (but with a better diagnostic)
- If we choose a deployment target >= Mac OS X 10.6, uses of "foo"
will result in an "unavailable" warning (in C)/error (in C++), as
if we had placed attribute((unavailable)) on it
- If we choose a deployment target prior to 10.2, foo() is
weak-imported (if it is a kind of entity that can be weak
imported), as if we had placed the weak_import attribute on it.
Naturally, there can be multiple availability attributes on a
declaration, for different platforms; only the current platform
matters when checking availability attributes.
The only platforms this attribute currently works for are "ios" and
"macosx", since we already have -mxxxx-version-min flags for them and we
have experience there with macro tricks translating down to the
deprecated/unavailable/weak_import attributes. The end goal is to open
this up to other platforms, and even extension to other "platforms"
that are really libraries (say, through a #pragma clang
define_system), but that hasn't yet been designed and we may want to
shake out more issues with this narrower problem first.
Addresses <rdar://problem/6690412>.
As a drive-by bug-fix, if an entity is both deprecated and
unavailable, we only emit the "unavailable" diagnostic.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@128127 91177308-0d34-0410-b5e6-96231b3b80d8
2011-03-23 03:50:03 +03:00
|
|
|
#include "clang/Basic/VersionTuple.h"
|
2009-10-18 03:52:28 +04:00
|
|
|
#include "llvm/ADT/StringExtras.h"
|
2009-04-10 02:27:44 +04:00
|
|
|
#include "llvm/Bitcode/BitstreamReader.h"
|
|
|
|
#include "llvm/Support/MemoryBuffer.h"
|
2009-10-29 11:12:44 +03:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
Implement two related optimizations that make de-serialization of
AST/PCH files more lazy:
- Don't preload all of the file source-location entries when reading
the AST file. Instead, load them lazily, when needed.
- Only look up header-search information (whether a header was already
#import'd, how many times it's been included, etc.) when it's needed
by the preprocessor, rather than pre-populating it.
Previously, we would pre-load all of the file source-location entries,
which also populated the header-search information structure. This was
a relatively minor performance issue, since we would end up stat()'ing
all of the headers stored within a AST/PCH file when the AST/PCH file
was loaded. In the normal PCH use case, the stat()s were cached, so
the cost--of preloading ~860 source-location entries in the Cocoa.h
case---was relatively low.
However, the recent optimization that replaced stat+open with
open+fstat turned this into a major problem, since the preloading of
source-location entries would now end up opening those files. Worse,
those files wouldn't be closed until the file manager was destroyed,
so just opening a Cocoa.h PCH file would hold on to ~860 file
descriptors, and it was easy to blow through the process's limit on
the number of open file descriptors.
By eliminating the preloading of these files, we neither open nor stat
the headers stored in the PCH/AST file until they're actually needed
for something. Concretely, we went from
*** HeaderSearch Stats:
835 files tracked.
364 #import/#pragma once files.
823 included exactly once.
6 max times a file is included.
3 #include/#include_next/#import.
0 #includes skipped due to the multi-include optimization.
1 framework lookups.
0 subframework lookups.
*** Source Manager Stats:
835 files mapped, 3 mem buffers mapped.
37460 SLocEntry's allocated, 11215575B of Sloc address space used.
62 bytes of files mapped, 0 files with line #'s computed.
with a trivial program that uses a chained PCH including a Cocoa PCH
to
*** HeaderSearch Stats:
4 files tracked.
1 #import/#pragma once files.
3 included exactly once.
2 max times a file is included.
3 #include/#include_next/#import.
0 #includes skipped due to the multi-include optimization.
1 framework lookups.
0 subframework lookups.
*** Source Manager Stats:
3 files mapped, 3 mem buffers mapped.
37460 SLocEntry's allocated, 11215575B of Sloc address space used.
62 bytes of files mapped, 0 files with line #'s computed.
for the same program.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@125286 91177308-0d34-0410-b5e6-96231b3b80d8
2011-02-10 20:09:37 +03:00
|
|
|
#include "llvm/Support/FileSystem.h"
|
2010-11-29 21:12:39 +03:00
|
|
|
#include "llvm/Support/Path.h"
|
2010-12-09 20:36:38 +03:00
|
|
|
#include "llvm/Support/system_error.h"
|
2009-04-10 02:27:44 +04:00
|
|
|
#include <algorithm>
|
2009-04-28 22:58:38 +04:00
|
|
|
#include <iterator>
|
2009-04-10 02:27:44 +04:00
|
|
|
#include <cstdio>
|
2009-04-27 22:38:38 +04:00
|
|
|
#include <sys/stat.h>
|
Revamp the SourceManager to separate the representation of parsed
source locations from source locations loaded from an AST/PCH file.
Previously, loading an AST/PCH file involved carefully pre-allocating
space at the beginning of the source manager for the source locations
and FileIDs that correspond to the prefix, and then appending the
source locations/FileIDs used for parsing the remaining translation
unit. This design forced us into loading PCH files early, as a prefix,
whic has become a rather significant limitation.
This patch splits the SourceManager space into two parts: for source
location "addresses", the lower values (growing upward) are used to
describe parsed code, while upper values (growing downward) are used
for source locations loaded from AST/PCH files. Similarly, positive
FileIDs are used to describe parsed code while negative FileIDs are
used to file/macro locations loaded from AST/PCH files. As a result,
we can load PCH/AST files even during parsing, making various
improvemnts in the future possible, e.g., teaching #include <foo.h> to
look for and load <foo.h.gch> if it happens to be already available.
This patch was originally written by Sebastian Redl, then brought
forward to the modern age by Jonathan Turner, and finally
polished/finished by me to be committed.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@135484 91177308-0d34-0410-b5e6-96231b3b80d8
2011-07-19 20:10:42 +04:00
|
|
|
#include <iostream>
|
Implement two related optimizations that make de-serialization of
AST/PCH files more lazy:
- Don't preload all of the file source-location entries when reading
the AST file. Instead, load them lazily, when needed.
- Only look up header-search information (whether a header was already
#import'd, how many times it's been included, etc.) when it's needed
by the preprocessor, rather than pre-populating it.
Previously, we would pre-load all of the file source-location entries,
which also populated the header-search information structure. This was
a relatively minor performance issue, since we would end up stat()'ing
all of the headers stored within a AST/PCH file when the AST/PCH file
was loaded. In the normal PCH use case, the stat()s were cached, so
the cost--of preloading ~860 source-location entries in the Cocoa.h
case---was relatively low.
However, the recent optimization that replaced stat+open with
open+fstat turned this into a major problem, since the preloading of
source-location entries would now end up opening those files. Worse,
those files wouldn't be closed until the file manager was destroyed,
so just opening a Cocoa.h PCH file would hold on to ~860 file
descriptors, and it was easy to blow through the process's limit on
the number of open file descriptors.
By eliminating the preloading of these files, we neither open nor stat
the headers stored in the PCH/AST file until they're actually needed
for something. Concretely, we went from
*** HeaderSearch Stats:
835 files tracked.
364 #import/#pragma once files.
823 included exactly once.
6 max times a file is included.
3 #include/#include_next/#import.
0 #includes skipped due to the multi-include optimization.
1 framework lookups.
0 subframework lookups.
*** Source Manager Stats:
835 files mapped, 3 mem buffers mapped.
37460 SLocEntry's allocated, 11215575B of Sloc address space used.
62 bytes of files mapped, 0 files with line #'s computed.
with a trivial program that uses a chained PCH including a Cocoa PCH
to
*** HeaderSearch Stats:
4 files tracked.
1 #import/#pragma once files.
3 included exactly once.
2 max times a file is included.
3 #include/#include_next/#import.
0 #includes skipped due to the multi-include optimization.
1 framework lookups.
0 subframework lookups.
*** Source Manager Stats:
3 files mapped, 3 mem buffers mapped.
37460 SLocEntry's allocated, 11215575B of Sloc address space used.
62 bytes of files mapped, 0 files with line #'s computed.
for the same program.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@125286 91177308-0d34-0410-b5e6-96231b3b80d8
2011-02-10 20:09:37 +03:00
|
|
|
|
2009-04-10 02:27:44 +04:00
|
|
|
using namespace clang;
|
2010-08-19 03:57:32 +04:00
|
|
|
using namespace clang::serialization;
|
2009-04-10 02:27:44 +04:00
|
|
|
|
2009-06-19 04:03:23 +04:00
|
|
|
//===----------------------------------------------------------------------===//
// PCH validator implementation
//===----------------------------------------------------------------------===//

ASTReaderListener::~ASTReaderListener() {}

bool
PCHValidator::ReadLanguageOptions(const LangOptions &LangOpts) {
  const LangOptions &PPLangOpts = PP.getLangOptions();

#define PARSE_LANGOPT_BENIGN(Option)
#define PARSE_LANGOPT_IMPORTANT(Option, DiagID)                   \
  if (PPLangOpts.Option != LangOpts.Option) {                     \
    Reader.Diag(DiagID) << LangOpts.Option << PPLangOpts.Option;  \
    return true;                                                  \
  }
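
  // Options marked "benign" below are ignored for PCH compatibility purposes;
  // a mismatch in an "important" option emits the given diagnostic and causes
  // the PCH file to be rejected.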
  PARSE_LANGOPT_BENIGN(Trigraphs);
  PARSE_LANGOPT_BENIGN(BCPLComment);
  PARSE_LANGOPT_BENIGN(DollarIdents);
  PARSE_LANGOPT_BENIGN(AsmPreprocessor);
  PARSE_LANGOPT_IMPORTANT(GNUMode, diag::warn_pch_gnu_extensions);
  PARSE_LANGOPT_IMPORTANT(GNUKeywords, diag::warn_pch_gnu_keywords);
  PARSE_LANGOPT_BENIGN(ImplicitInt);
  PARSE_LANGOPT_BENIGN(Digraphs);
  PARSE_LANGOPT_BENIGN(HexFloats);
  PARSE_LANGOPT_IMPORTANT(C99, diag::warn_pch_c99);
  PARSE_LANGOPT_IMPORTANT(C1X, diag::warn_pch_c1x);
  PARSE_LANGOPT_IMPORTANT(Microsoft, diag::warn_pch_microsoft_extensions);
  PARSE_LANGOPT_BENIGN(MSCVersion);
  PARSE_LANGOPT_IMPORTANT(CPlusPlus, diag::warn_pch_cplusplus);
  PARSE_LANGOPT_IMPORTANT(CPlusPlus0x, diag::warn_pch_cplusplus0x);
  PARSE_LANGOPT_BENIGN(CXXOperatorName);
  PARSE_LANGOPT_IMPORTANT(ObjC1, diag::warn_pch_objective_c);
  PARSE_LANGOPT_IMPORTANT(ObjC2, diag::warn_pch_objective_c2);
  PARSE_LANGOPT_IMPORTANT(ObjCNonFragileABI, diag::warn_pch_nonfragile_abi);
  PARSE_LANGOPT_IMPORTANT(ObjCNonFragileABI2, diag::warn_pch_nonfragile_abi2);
  PARSE_LANGOPT_IMPORTANT(AppleKext, diag::warn_pch_apple_kext);
  PARSE_LANGOPT_IMPORTANT(ObjCDefaultSynthProperties,
                          diag::warn_pch_objc_auto_properties);
  PARSE_LANGOPT_BENIGN(ObjCInferRelatedResultType)
  PARSE_LANGOPT_IMPORTANT(NoConstantCFStrings,
                          diag::warn_pch_no_constant_cfstrings);
  PARSE_LANGOPT_BENIGN(PascalStrings);
  PARSE_LANGOPT_BENIGN(WritableStrings);
  PARSE_LANGOPT_IMPORTANT(LaxVectorConversions,
                          diag::warn_pch_lax_vector_conversions);
  PARSE_LANGOPT_IMPORTANT(AltiVec, diag::warn_pch_altivec);
  PARSE_LANGOPT_IMPORTANT(Exceptions, diag::warn_pch_exceptions);
  PARSE_LANGOPT_IMPORTANT(ObjCExceptions, diag::warn_pch_objc_exceptions);
  PARSE_LANGOPT_IMPORTANT(CXXExceptions, diag::warn_pch_cxx_exceptions);
  PARSE_LANGOPT_IMPORTANT(SjLjExceptions, diag::warn_pch_sjlj_exceptions);
  PARSE_LANGOPT_IMPORTANT(MSBitfields, diag::warn_pch_ms_bitfields);
  PARSE_LANGOPT_IMPORTANT(NeXTRuntime, diag::warn_pch_objc_runtime);
  PARSE_LANGOPT_IMPORTANT(Freestanding, diag::warn_pch_freestanding);
  PARSE_LANGOPT_IMPORTANT(NoBuiltin, diag::warn_pch_builtins);
  PARSE_LANGOPT_IMPORTANT(ThreadsafeStatics,
                          diag::warn_pch_thread_safe_statics);
  PARSE_LANGOPT_IMPORTANT(POSIXThreads, diag::warn_pch_posix_threads);
  PARSE_LANGOPT_IMPORTANT(Blocks, diag::warn_pch_blocks);
  PARSE_LANGOPT_BENIGN(EmitAllDecls);
  PARSE_LANGOPT_IMPORTANT(MathErrno, diag::warn_pch_math_errno);
  PARSE_LANGOPT_BENIGN(getSignedOverflowBehavior());
  PARSE_LANGOPT_IMPORTANT(HeinousExtensions,
                          diag::warn_pch_heinous_extensions);

  // FIXME: Most of the options below are benign if the macro wasn't
  // used. Unfortunately, this means that a PCH compiled without
  // optimization can't be used with optimization turned on, even
  // though the only thing that changes is whether __OPTIMIZE__ was
  // defined... but if __OPTIMIZE__ never showed up in the header, it
  // doesn't matter. We could consider making this some special kind
  // of check.
  PARSE_LANGOPT_IMPORTANT(Optimize, diag::warn_pch_optimize);
  PARSE_LANGOPT_IMPORTANT(OptimizeSize, diag::warn_pch_optimize_size);
  PARSE_LANGOPT_IMPORTANT(Static, diag::warn_pch_static);
  PARSE_LANGOPT_IMPORTANT(PICLevel, diag::warn_pch_pic_level);
  PARSE_LANGOPT_IMPORTANT(GNUInline, diag::warn_pch_gnu_inline);
  PARSE_LANGOPT_IMPORTANT(NoInline, diag::warn_pch_no_inline);
  PARSE_LANGOPT_IMPORTANT(Deprecated, diag::warn_pch_deprecated);
  PARSE_LANGOPT_IMPORTANT(AccessControl, diag::warn_pch_access_control);
  PARSE_LANGOPT_IMPORTANT(CharIsSigned, diag::warn_pch_char_signed);
  PARSE_LANGOPT_IMPORTANT(ShortWChar, diag::warn_pch_short_wchar);
  PARSE_LANGOPT_IMPORTANT(ShortEnums, diag::warn_pch_short_enums);
  if ((PPLangOpts.getGCMode() != 0) != (LangOpts.getGCMode() != 0)) {
    Reader.Diag(diag::warn_pch_gc_mode)
      << LangOpts.getGCMode() << PPLangOpts.getGCMode();
    return true;
  }
  PARSE_LANGOPT_BENIGN(getVisibilityMode());
  PARSE_LANGOPT_IMPORTANT(getStackProtectorMode(),
                          diag::warn_pch_stack_protector);
  PARSE_LANGOPT_BENIGN(InstantiationDepth);
  PARSE_LANGOPT_IMPORTANT(OpenCL, diag::warn_pch_opencl);
  PARSE_LANGOPT_IMPORTANT(CUDA, diag::warn_pch_cuda);
  PARSE_LANGOPT_BENIGN(CatchUndefined);
  PARSE_LANGOPT_BENIGN(DefaultFPContract);
  PARSE_LANGOPT_IMPORTANT(ElideConstructors, diag::warn_pch_elide_constructors);
  PARSE_LANGOPT_BENIGN(SpellChecking);
  PARSE_LANGOPT_IMPORTANT(ObjCAutoRefCount, diag::warn_pch_auto_ref_count);
  PARSE_LANGOPT_BENIGN(ObjCInferRelatedReturnType);
#undef PARSE_LANGOPT_IMPORTANT
#undef PARSE_LANGOPT_BENIGN

  return false;
}

bool PCHValidator::ReadTargetTriple(StringRef Triple) {
  if (Triple == PP.getTargetInfo().getTriple().str())
    return false;

  Reader.Diag(diag::warn_pch_target_triple)
    << Triple << PP.getTargetInfo().getTriple().str();
  return true;
}

namespace {
  struct EmptyStringRef {
    bool operator ()(StringRef r) const { return r.empty(); }
  };
  struct EmptyBlock {
    bool operator ()(const PCHPredefinesBlock &r) const {return r.Data.empty();}
  };
}

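/// \brief Determine whether the concatenation of the string pieces in \p L is
/// byte-for-byte identical to the concatenation of the predefines blocks in
/// \p R, without materializing either concatenated string.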
static bool EqualConcatenations(SmallVector<StringRef, 2> L,
                                PCHPredefinesBlocks R) {
  // First, sum up the lengths.
  unsigned LL = 0, RL = 0;
  for (unsigned I = 0, N = L.size(); I != N; ++I) {
    LL += L[I].size();
  }
  for (unsigned I = 0, N = R.size(); I != N; ++I) {
    RL += R[I].Data.size();
  }
  if (LL != RL)
    return false;
  if (LL == 0 && RL == 0)
    return true;

  // Kick out empty parts, they confuse the algorithm below.
  L.erase(std::remove_if(L.begin(), L.end(), EmptyStringRef()), L.end());
  R.erase(std::remove_if(R.begin(), R.end(), EmptyBlock()), R.end());

  // Do it the hard way. At this point, both vectors must be non-empty.
  StringRef LR = L[0], RR = R[0].Data;
  unsigned LI = 0, RI = 0, LN = L.size(), RN = R.size();
  (void) RN;
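  // Walk the two sequences of pieces in lockstep: whenever one piece is a
  // prefix of the other, consume the common prefix and continue with the
  // remainder, so equality is decided without ever building the concatenated
  // strings. (RN is only used by the asserts below; the cast above silences
  // the unused-variable warning when asserts are disabled.)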
  for (;;) {
    // Compare the current pieces.
    if (LR.size() == RR.size()) {
      // If they're the same length, it's pretty easy.
      if (LR != RR)
        return false;
      // Both pieces are done, advance.
      ++LI;
      ++RI;
      // If either string is done, they're both done, since they're the same
      // length.
      if (LI == LN) {
        assert(RI == RN && "Strings not the same length after all?");
        return true;
      }
      LR = L[LI];
      RR = R[RI].Data;
    } else if (LR.size() < RR.size()) {
      // Right piece is longer.
      if (!RR.startswith(LR))
        return false;
      ++LI;
      assert(LI != LN && "Strings not the same length after all?");
      RR = RR.substr(LR.size());
      LR = L[LI];
    } else {
      // Left piece is longer.
      if (!LR.startswith(RR))
        return false;
      ++RI;
      assert(RI != RN && "Strings not the same length after all?");
      LR = LR.substr(RR.size());
      RR = R[RI].Data;
    }
  }
}

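/// \brief Search the given predefines buffers for the first occurrence of
/// \p MacroDef, returning the FileID of the containing buffer and the offset
/// at which the definition was found (StringRef::npos if it is absent).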
static std::pair<FileID, StringRef::size_type>
FindMacro(const PCHPredefinesBlocks &Buffers, StringRef MacroDef) {
  std::pair<FileID, StringRef::size_type> Res;
  for (unsigned I = 0, N = Buffers.size(); I != N; ++I) {
    Res.second = Buffers[I].Data.find(MacroDef);
    if (Res.second != StringRef::npos) {
      Res.first = Buffers[I].BufferID;
      break;
    }
  }
  return Res;
}

bool PCHValidator::ReadPredefinesBuffer(const PCHPredefinesBlocks &Buffers,
                                        StringRef OriginalFileName,
                                        std::string &SuggestedPredefines,
                                        FileManager &FileMgr) {
  // We are in the context of an implicit include, so the predefines buffer
  // will have a #include entry for the PCH file itself (as normalized by the
  // preprocessor initialization). Find it and skip over it in the checking
  // below.
  llvm::SmallString<256> PCHInclude;
  PCHInclude += "#include \"";
  PCHInclude += NormalizeDashIncludePath(OriginalFileName, FileMgr);
  PCHInclude += "\"\n";
  std::pair<StringRef,StringRef> Split =
    StringRef(PP.getPredefines()).split(PCHInclude.str());
  StringRef Left = Split.first, Right = Split.second;
  if (Left == PP.getPredefines()) {
    Error("Missing PCH include entry!");
    return true;
  }

  // If the concatenation of all the PCH buffers is equal to the adjusted
  // command line, we're done.
  SmallVector<StringRef, 2> CommandLine;
  CommandLine.push_back(Left);
  CommandLine.push_back(Right);
  if (EqualConcatenations(CommandLine, Buffers))
    return false;

  SourceManager &SourceMgr = PP.getSourceManager();

  // The predefines buffers are different. Determine what the differences are,
  // and whether they require us to reject the PCH file.
  SmallVector<StringRef, 8> PCHLines;
  for (unsigned I = 0, N = Buffers.size(); I != N; ++I)
    Buffers[I].Data.split(PCHLines, "\n", /*MaxSplit=*/-1, /*KeepEmpty=*/false);

  SmallVector<StringRef, 8> CmdLineLines;
  Left.split(CmdLineLines, "\n", /*MaxSplit=*/-1, /*KeepEmpty=*/false);

  // Pick out implicit #includes after the PCH and don't consider them for
  // validation; we will insert them into SuggestedPredefines so that the
  // preprocessor includes them.
  std::string IncludesAfterPCH;
  SmallVector<StringRef, 8> AfterPCHLines;
  Right.split(AfterPCHLines, "\n", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
  for (unsigned i = 0, e = AfterPCHLines.size(); i != e; ++i) {
    if (AfterPCHLines[i].startswith("#include ")) {
      IncludesAfterPCH += AfterPCHLines[i];
      IncludesAfterPCH += '\n';
    } else {
      CmdLineLines.push_back(AfterPCHLines[i]);
    }
  }

  // Make sure we add the includes last into SuggestedPredefines before we
  // exit this function.
  struct AddIncludesRAII {
    std::string &SuggestedPredefines;
    std::string &IncludesAfterPCH;

    AddIncludesRAII(std::string &SuggestedPredefines,
                    std::string &IncludesAfterPCH)
      : SuggestedPredefines(SuggestedPredefines),
        IncludesAfterPCH(IncludesAfterPCH) { }
    ~AddIncludesRAII() {
      SuggestedPredefines += IncludesAfterPCH;
    }
  } AddIncludes(SuggestedPredefines, IncludesAfterPCH);
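  // Note: the AddIncludesRAII destructor runs on every return path below, so
  // the implicit includes are appended to SuggestedPredefines even when the
  // PCH file ends up being rejected.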

  // Sort both sets of predefined buffer lines, since we allow some extra
  // definitions and they may appear at any point in the output.
  std::sort(CmdLineLines.begin(), CmdLineLines.end());
  std::sort(PCHLines.begin(), PCHLines.end());

  // Determine which predefines that were used to build the PCH file are
  // missing from the command line.
  std::vector<StringRef> MissingPredefines;
  std::set_difference(PCHLines.begin(), PCHLines.end(),
                      CmdLineLines.begin(), CmdLineLines.end(),
                      std::back_inserter(MissingPredefines));

  bool MissingDefines = false;
  bool ConflictingDefines = false;
  for (unsigned I = 0, N = MissingPredefines.size(); I != N; ++I) {
    StringRef Missing = MissingPredefines[I];
    if (Missing.startswith("#include ")) {
      // An -include was specified when generating the PCH; it is included in
      // the PCH, just ignore it.
      continue;
    }
    if (!Missing.startswith("#define ")) {
      Reader.Diag(diag::warn_pch_compiler_options_mismatch);
      return true;
    }

    // This is a macro definition. Determine the name of the macro we're
    // defining.
    std::string::size_type StartOfMacroName = strlen("#define ");
    std::string::size_type EndOfMacroName
      = Missing.find_first_of("( \n\r", StartOfMacroName);
    assert(EndOfMacroName != std::string::npos &&
           "Couldn't find the end of the macro name");
    StringRef MacroName = Missing.slice(StartOfMacroName, EndOfMacroName);

    // Determine whether this macro was given a different definition on the
    // command line.
    std::string MacroDefStart = "#define " + MacroName.str();
    std::string::size_type MacroDefLen = MacroDefStart.size();
    SmallVector<StringRef, 8>::iterator ConflictPos
      = std::lower_bound(CmdLineLines.begin(), CmdLineLines.end(),
                         MacroDefStart);
    for (; ConflictPos != CmdLineLines.end(); ++ConflictPos) {
      if (!ConflictPos->startswith(MacroDefStart)) {
        // Different macro; we're done.
        ConflictPos = CmdLineLines.end();
        break;
      }

      assert(ConflictPos->size() > MacroDefLen &&
             "Invalid #define in predefines buffer?");
      if ((*ConflictPos)[MacroDefLen] != ' ' &&
          (*ConflictPos)[MacroDefLen] != '(')
        continue; // Longer macro name; keep trying.

      // We found a conflicting macro definition.
      break;
    }

    if (ConflictPos != CmdLineLines.end()) {
      Reader.Diag(diag::warn_cmdline_conflicting_macro_def)
        << MacroName;

      // Show the definition of this macro within the PCH file.
      std::pair<FileID, StringRef::size_type> MacroLoc =
          FindMacro(Buffers, Missing);
      assert(MacroLoc.second != StringRef::npos && "Unable to find macro!");
      SourceLocation PCHMissingLoc =
          SourceMgr.getLocForStartOfFile(MacroLoc.first)
            .getFileLocWithOffset(MacroLoc.second);
      Reader.Diag(PCHMissingLoc, diag::note_pch_macro_defined_as) << MacroName;

      ConflictingDefines = true;
      continue;
    }

    // If the macro doesn't conflict, then we'll just pick up the macro
    // definition from the PCH file. Warn the user that they made a mistake.
    if (ConflictingDefines)
      continue; // Don't complain if there are already conflicting defs

    if (!MissingDefines) {
      Reader.Diag(diag::warn_cmdline_missing_macro_defs);
      MissingDefines = true;
    }

    // Show the definition of this macro within the PCH file.
    std::pair<FileID, StringRef::size_type> MacroLoc =
        FindMacro(Buffers, Missing);
    assert(MacroLoc.second != StringRef::npos && "Unable to find macro!");
    SourceLocation PCHMissingLoc =
        SourceMgr.getLocForStartOfFile(MacroLoc.first)
          .getFileLocWithOffset(MacroLoc.second);
    Reader.Diag(PCHMissingLoc, diag::note_using_macro_def_from_pch);
  }

  if (ConflictingDefines)
    return true;

  // Determine what predefines were introduced based on command-line
  // parameters that were not present when building the PCH
  // file. Extra #defines are okay, so long as the identifiers being
  // defined were not used within the precompiled header.
  std::vector<StringRef> ExtraPredefines;
  std::set_difference(CmdLineLines.begin(), CmdLineLines.end(),
                      PCHLines.begin(), PCHLines.end(),
                      std::back_inserter(ExtraPredefines));
  for (unsigned I = 0, N = ExtraPredefines.size(); I != N; ++I) {
    StringRef &Extra = ExtraPredefines[I];
    if (!Extra.startswith("#define ")) {
      Reader.Diag(diag::warn_pch_compiler_options_mismatch);
      return true;
    }

    // This is an extra macro definition. Determine the name of the
    // macro we're defining.
    std::string::size_type StartOfMacroName = strlen("#define ");
    std::string::size_type EndOfMacroName
      = Extra.find_first_of("( \n\r", StartOfMacroName);
    assert(EndOfMacroName != std::string::npos &&
           "Couldn't find the end of the macro name");
    StringRef MacroName = Extra.slice(StartOfMacroName, EndOfMacroName);

    // Check whether this name was used somewhere in the PCH file. If
    // so, defining it as a macro could change behavior, so we reject
    // the PCH file.
    if (IdentifierInfo *II = Reader.get(MacroName)) {
      Reader.Diag(diag::warn_macro_name_used_in_pch) << II;
      return true;
    }

    // Add this definition to the suggested predefines buffer.
    SuggestedPredefines += Extra;
    SuggestedPredefines += '\n';
  }

  // If we get here, it's because the predefines buffer had compatible
  // contents. Accept the PCH file.
  return false;
}

void PCHValidator::ReadHeaderFileInfo(const HeaderFileInfo &HFI,
                                      unsigned ID) {
  PP.getHeaderSearchInfo().setHeaderFileInfoForUID(HFI, ID);
  ++NumHeaderInfos;
}

void PCHValidator::ReadCounter(unsigned Value) {
  PP.setCounterValue(Value);
}

//===----------------------------------------------------------------------===//
// AST reader implementation
//===----------------------------------------------------------------------===//

void
ASTReader::setDeserializationListener(ASTDeserializationListener *Listener) {
  DeserializationListener = Listener;
}

namespace {
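/// \brief Trait used when reading the on-disk hash table of Objective-C
/// selectors: each selector maps to its global selector ID and to the
/// instance and factory methods known for it in the method pool.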
class ASTSelectorLookupTrait {
  ASTReader &Reader;
  Module &F;

public:
  struct data_type {
    SelectorID ID;
    ObjCMethodList Instance, Factory;
  };

  typedef Selector external_key_type;
  typedef external_key_type internal_key_type;

  ASTSelectorLookupTrait(ASTReader &Reader, Module &F)
    : Reader(Reader), F(F) { }

  static bool EqualKey(const internal_key_type& a,
                       const internal_key_type& b) {
    return a == b;
  }

  static unsigned ComputeHash(Selector Sel) {
    return serialization::ComputeHash(Sel);
  }

  // This hopefully will just get inlined and removed by the optimizer.
  static const internal_key_type&
  GetInternalKey(const external_key_type& x) { return x; }

  static std::pair<unsigned, unsigned>
  ReadKeyDataLength(const unsigned char*& d) {
    using namespace clang::io;
    unsigned KeyLen = ReadUnalignedLE16(d);
    unsigned DataLen = ReadUnalignedLE16(d);
    return std::make_pair(KeyLen, DataLen);
  }

  internal_key_type ReadKey(const unsigned char* d, unsigned) {
    using namespace clang::io;
    SelectorTable &SelTable = Reader.getContext()->Selectors;
    unsigned N = ReadUnalignedLE16(d);
    IdentifierInfo *FirstII
      = Reader.getLocalIdentifier(F, ReadUnalignedLE32(d));
    if (N == 0)
      return SelTable.getNullarySelector(FirstII);
    else if (N == 1)
      return SelTable.getUnarySelector(FirstII);

    SmallVector<IdentifierInfo *, 16> Args;
    Args.push_back(FirstII);
    for (unsigned I = 1; I != N; ++I)
      Args.push_back(Reader.getLocalIdentifier(F, ReadUnalignedLE32(d)));

    return SelTable.getSelector(N, Args.data());
  }

  data_type ReadData(Selector, const unsigned char* d, unsigned DataLen) {
    using namespace clang::io;

    data_type Result;

    Result.ID = Reader.getGlobalSelectorID(F, ReadUnalignedLE32(d));
    unsigned NumInstanceMethods = ReadUnalignedLE16(d);
    unsigned NumFactoryMethods = ReadUnalignedLE16(d);

    // Load instance methods
    ObjCMethodList *Prev = 0;
    for (unsigned I = 0; I != NumInstanceMethods; ++I) {
      ObjCMethodDecl *Method
        = Reader.GetLocalDeclAs<ObjCMethodDecl>(F, ReadUnalignedLE32(d));
      if (!Result.Instance.Method) {
        // This is the first method, which is the easy case.
        Result.Instance.Method = Method;
        Prev = &Result.Instance;
        continue;
      }

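      // Chain subsequent methods onto the list, allocating the new node from
      // Sema's bump allocator.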
      ObjCMethodList *Mem =
        Reader.getSema()->BumpAlloc.Allocate<ObjCMethodList>();
      Prev->Next = new (Mem) ObjCMethodList(Method, 0);
      Prev = Prev->Next;
    }

    // Load factory methods
    Prev = 0;
    for (unsigned I = 0; I != NumFactoryMethods; ++I) {
      ObjCMethodDecl *Method
        = Reader.GetLocalDeclAs<ObjCMethodDecl>(F, ReadUnalignedLE32(d));
      if (!Result.Factory.Method) {
        // This is the first method, which is the easy case.
        Result.Factory.Method = Method;
        Prev = &Result.Factory;
        continue;
      }

      ObjCMethodList *Mem =
        Reader.getSema()->BumpAlloc.Allocate<ObjCMethodList>();
      Prev->Next = new (Mem) ObjCMethodList(Method, 0);
      Prev = Prev->Next;
    }

    return Result;
  }
};

} // end anonymous namespace

/// \brief The on-disk hash table used for the global method pool.
typedef OnDiskChainedHashTable<ASTSelectorLookupTrait>
  ASTSelectorLookupTable;

namespace clang {
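/// \brief Trait used when reading the on-disk hash table of identifiers from
/// an AST file; keys are the identifier text and length, and the associated
/// data is the corresponding IdentifierInfo.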
class ASTIdentifierLookupTrait {
  ASTReader &Reader;
  Module &F;

  // If we know the IdentifierInfo in advance, it is here and we will
  // not build a new one. Used when deserializing information about an
  // identifier that was constructed before the AST file was read.
  IdentifierInfo *KnownII;

public:
  typedef IdentifierInfo * data_type;

  typedef const std::pair<const char*, unsigned> external_key_type;

  typedef external_key_type internal_key_type;

  ASTIdentifierLookupTrait(ASTReader &Reader, Module &F,
                           IdentifierInfo *II = 0)
    : Reader(Reader), F(F), KnownII(II) { }

  static bool EqualKey(const internal_key_type& a,
                       const internal_key_type& b) {
    return (a.second == b.second) ? memcmp(a.first, b.first, a.second) == 0
                                  : false;
  }

  static unsigned ComputeHash(const internal_key_type& a) {
    return llvm::HashString(StringRef(a.first, a.second));
  }

  // This hopefully will just get inlined and removed by the optimizer.
  static const internal_key_type&
  GetInternalKey(const external_key_type& x) { return x; }

  // This hopefully will just get inlined and removed by the optimizer.
  static const external_key_type&
  GetExternalKey(const internal_key_type& x) { return x; }

  static std::pair<unsigned, unsigned>
  ReadKeyDataLength(const unsigned char*& d) {
    using namespace clang::io;
unsigned DataLen = ReadUnalignedLE16(d);
|
2009-04-26 01:04:17 +04:00
|
|
|
unsigned KeyLen = ReadUnalignedLE16(d);
|
2009-04-22 02:25:48 +04:00
|
|
|
return std::make_pair(KeyLen, DataLen);
|
|
|
|
}
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2009-04-22 02:25:48 +04:00
|
|
|
static std::pair<const char*, unsigned>
|
|
|
|
ReadKey(const unsigned char* d, unsigned n) {
|
|
|
|
assert(n >= 2 && d[n-1] == '\0');
|
|
|
|
return std::make_pair((const char*) d, n-1);
|
|
|
|
}
|
2009-09-09 19:08:12 +04:00
|
|
|
|
|
|
|
IdentifierInfo *ReadData(const internal_key_type& k,
|
2009-04-22 02:25:48 +04:00
|
|
|
const unsigned char* d,
|
|
|
|
unsigned DataLen) {
|
|
|
|
using namespace clang::io;
|
2011-08-04 01:49:18 +04:00
|
|
|
unsigned RawID = ReadUnalignedLE32(d);
|
|
|
|
bool IsInteresting = RawID & 0x01;
|
2009-04-29 01:18:29 +04:00
|
|
|
|
|
|
|
// Wipe out the "is interesting" bit.
|
2011-08-04 01:49:18 +04:00
|
|
|
RawID = RawID >> 1;
|
2009-04-29 01:18:29 +04:00
|
|
|
|
2011-08-04 01:49:18 +04:00
|
|
|
IdentID ID = Reader.getGlobalIdentifierID(F, RawID);
|
2009-04-29 01:18:29 +04:00
|
|
|
if (!IsInteresting) {
|
2010-07-28 03:01:28 +04:00
|
|
|
// For uninteresting identifiers, just build the IdentifierInfo
|
2009-04-29 01:18:29 +04:00
|
|
|
// and associate it with the persistent ID.
|
|
|
|
IdentifierInfo *II = KnownII;
|
|
|
|
if (!II)
|
2011-08-04 01:49:18 +04:00
|
|
|
II = &Reader.getIdentifierTable().getOwn(StringRef(k.first, k.second));
|
2009-04-29 01:18:29 +04:00
|
|
|
Reader.SetIdentifierInfo(ID, II);
|
2010-08-19 03:57:06 +04:00
|
|
|
II->setIsFromAST();
|
2009-04-29 01:18:29 +04:00
|
|
|
return II;
|
|
|
|
}
|
|
|
|
|
2009-04-29 01:32:13 +04:00
|
|
|
unsigned Bits = ReadUnalignedLE16(d);
|
2009-04-22 22:49:13 +04:00
|
|
|
bool CPlusPlusOperatorKeyword = Bits & 0x01;
|
|
|
|
Bits >>= 1;
|
2010-08-12 02:55:12 +04:00
|
|
|
bool HasRevertedTokenIDToIdentifier = Bits & 0x01;
|
|
|
|
Bits >>= 1;
|
2009-04-22 22:49:13 +04:00
|
|
|
bool Poisoned = Bits & 0x01;
|
|
|
|
Bits >>= 1;
|
|
|
|
bool ExtensionToken = Bits & 0x01;
|
|
|
|
Bits >>= 1;
|
|
|
|
bool hasMacroDefinition = Bits & 0x01;
|
|
|
|
Bits >>= 1;
|
|
|
|
unsigned ObjCOrBuiltinID = Bits & 0x3FF;
|
|
|
|
Bits >>= 10;
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2009-04-22 22:49:13 +04:00
|
|
|
assert(Bits == 0 && "Extra bits in the identifier?");
|
2009-04-29 01:32:13 +04:00
|
|
|
DataLen -= 6;
|
2009-04-22 02:25:48 +04:00
|
|
|
|
|
|
|
// Build the IdentifierInfo itself and link the identifier ID with
|
|
|
|
// the new IdentifierInfo.
|
|
|
|
IdentifierInfo *II = KnownII;
|
|
|
|
if (!II)
|
2011-08-04 01:49:18 +04:00
|
|
|
II = &Reader.getIdentifierTable().getOwn(StringRef(k.first, k.second));
|
2009-04-22 02:25:48 +04:00
|
|
|
Reader.SetIdentifierInfo(ID, II);
|
|
|
|
|
2009-04-22 22:49:13 +04:00
|
|
|
// Set or check the various bits in the IdentifierInfo structure.
|
2010-08-12 02:55:12 +04:00
|
|
|
// Token IDs are read-only.
|
|
|
|
if (HasRevertedTokenIDToIdentifier)
|
|
|
|
II->RevertTokenIDToIdentifier();
|
2009-04-22 22:49:13 +04:00
|
|
|
II->setObjCOrBuiltinID(ObjCOrBuiltinID);
|
2009-09-09 19:08:12 +04:00
|
|
|
assert(II->isExtensionToken() == ExtensionToken &&
|
2009-04-22 22:49:13 +04:00
|
|
|
"Incorrect extension token flag");
|
|
|
|
(void)ExtensionToken;
|
|
|
|
II->setIsPoisoned(Poisoned);
|
|
|
|
assert(II->isCPlusPlusOperatorKeyword() == CPlusPlusOperatorKeyword &&
|
|
|
|
"Incorrect C++ operator keyword flag");
|
|
|
|
(void)CPlusPlusOperatorKeyword;
|
|
|
|
|
2009-04-22 03:56:24 +04:00
|
|
|
// If this identifier is a macro, deserialize the macro
|
|
|
|
// definition.
|
|
|
|
if (hasMacroDefinition) {
|
2009-04-29 01:32:13 +04:00
|
|
|
uint32_t Offset = ReadUnalignedLE32(d);
|
2010-10-30 04:23:06 +04:00
|
|
|
Reader.SetIdentifierIsMacro(II, F, Offset);
|
2009-04-29 01:32:13 +04:00
|
|
|
DataLen -= 4;
|
2009-04-22 03:56:24 +04:00
|
|
|
}
|
2009-04-22 02:25:48 +04:00
|
|
|
|
|
|
|
// Read all of the declarations visible at global scope with this
|
|
|
|
// name.
|
2009-04-28 02:17:41 +04:00
|
|
|
if (Reader.getContext() == 0) return II;
|
2009-07-06 22:54:52 +04:00
|
|
|
if (DataLen > 0) {
|
2011-07-23 14:55:15 +04:00
|
|
|
SmallVector<uint32_t, 4> DeclIDs;
|
2009-07-06 22:54:52 +04:00
|
|
|
for (; DataLen > 0; DataLen -= 4)
|
2011-07-22 03:29:11 +04:00
|
|
|
DeclIDs.push_back(Reader.getGlobalDeclID(F, ReadUnalignedLE32(d)));
|
2009-07-06 22:54:52 +04:00
|
|
|
Reader.SetGloballyVisibleDecls(II, DeclIDs);
|
2009-06-19 04:03:23 +04:00
|
|
|
}
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2010-08-19 03:57:06 +04:00
|
|
|
II->setIsFromAST();
|
2009-06-19 04:03:23 +04:00
|
|
|
return II;
|
2009-04-28 22:58:38 +04:00
|
|
|
}
|
2009-06-19 04:03:23 +04:00
|
|
|
};
|
2009-09-09 19:08:12 +04:00
|
|
|
|
|
|
|
} // end anonymous namespace
|
2009-04-28 22:58:38 +04:00
|
|
|
|
2009-06-19 04:03:23 +04:00
|
|
|
/// \brief The on-disk hash table used to contain information about
|
|
|
|
/// all of the identifiers in the program.
|
2010-08-19 03:57:06 +04:00
|
|
|
typedef OnDiskChainedHashTable<ASTIdentifierLookupTrait>
|
|
|
|
ASTIdentifierLookupTable;
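// Editor's sketch (illustrative only, not from ASTReader.cpp): the 16-bit
// flag word that ASTIdentifierLookupTrait::ReadData decodes above, re-packed
// here purely to make the layout visible. The order simply mirrors the
// reader's LSB-first unpacking; the authoritative encoder lives in ASTWriter
// and may pack differently.
static unsigned PackIdentifierBits(bool CPlusPlusOperatorKeyword,
                                   bool HasRevertedTokenIDToIdentifier,
                                   bool Poisoned, bool ExtensionToken,
                                   bool HasMacroDefinition,
                                   unsigned ObjCOrBuiltinID) {
  unsigned Bits = ObjCOrBuiltinID & 0x3FF;               // bits 5..14
  Bits = (Bits << 1) | unsigned(HasMacroDefinition);     // bit 4
  Bits = (Bits << 1) | unsigned(ExtensionToken);         // bit 3
  Bits = (Bits << 1) | unsigned(Poisoned);               // bit 2
  Bits = (Bits << 1) | unsigned(HasRevertedTokenIDToIdentifier); // bit 1
  Bits = (Bits << 1) | unsigned(CPlusPlusOperatorKeyword);       // bit 0
  return Bits; // ReadData above peels these off again from the LSB upward
}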
|
2009-04-28 22:58:38 +04:00
|
|
|
|
2010-08-20 20:04:27 +04:00
|
|
|
namespace {
|
|
|
|
class ASTDeclContextNameLookupTrait {
|
|
|
|
ASTReader &Reader;
|
2011-07-22 20:00:58 +04:00
|
|
|
Module &F;
|
2011-07-22 04:38:23 +04:00
|
|
|
|
2010-08-20 20:04:27 +04:00
|
|
|
public:
|
|
|
|
/// \brief Pair of begin/end iterators for DeclIDs.
|
2011-07-29 02:16:57 +04:00
|
|
|
///
|
|
|
|
/// Note that these declaration IDs are local to the module that contains this
|
|
|
|
/// particular lookup table.
|
2010-08-20 20:04:27 +04:00
|
|
|
typedef std::pair<DeclID *, DeclID *> data_type;
|
|
|
|
|
|
|
|
/// \brief Special internal key for declaration names.
|
|
|
|
/// The hash table creates keys for comparison; we do not create
|
|
|
|
/// a DeclarationName for the internal key to avoid deserializing types.
|
|
|
|
struct DeclNameKey {
|
|
|
|
DeclarationName::NameKind Kind;
|
|
|
|
uint64_t Data;
|
|
|
|
DeclNameKey() : Kind((DeclarationName::NameKind)0), Data(0) { }
|
|
|
|
};
|
|
|
|
|
|
|
|
typedef DeclarationName external_key_type;
|
|
|
|
typedef DeclNameKey internal_key_type;
|
|
|
|
|
2011-07-22 04:38:23 +04:00
|
|
|
explicit ASTDeclContextNameLookupTrait(ASTReader &Reader,
|
2011-07-22 20:00:58 +04:00
|
|
|
Module &F)
|
2011-07-22 04:38:23 +04:00
|
|
|
: Reader(Reader), F(F) { }
|
2010-08-20 20:04:27 +04:00
|
|
|
|
|
|
|
static bool EqualKey(const internal_key_type& a,
|
|
|
|
const internal_key_type& b) {
|
|
|
|
return a.Kind == b.Kind && a.Data == b.Data;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned ComputeHash(const DeclNameKey &Key) const {
|
|
|
|
llvm::FoldingSetNodeID ID;
|
|
|
|
ID.AddInteger(Key.Kind);
|
|
|
|
|
|
|
|
switch (Key.Kind) {
|
|
|
|
case DeclarationName::Identifier:
|
|
|
|
case DeclarationName::CXXLiteralOperatorName:
|
|
|
|
ID.AddString(((IdentifierInfo*)Key.Data)->getName());
|
|
|
|
break;
|
|
|
|
case DeclarationName::ObjCZeroArgSelector:
|
|
|
|
case DeclarationName::ObjCOneArgSelector:
|
|
|
|
case DeclarationName::ObjCMultiArgSelector:
|
|
|
|
ID.AddInteger(serialization::ComputeHash(Selector(Key.Data)));
|
|
|
|
break;
|
|
|
|
case DeclarationName::CXXOperatorName:
|
|
|
|
ID.AddInteger((OverloadedOperatorKind)Key.Data);
|
|
|
|
break;
|
Change the hashing function for DeclContext lookup within an AST file
by eliminating the type ID from constructor, destructor, and
conversion function names. There are several reasons for this change:
- A given type (say, int*) isn't guaranteed to have a single, unique
type ID within a chain of PCH files. Hence, we could end up hashing
based on the wrong type ID, causing name lookup to fail.
- The mapping from types back to type IDs required one DenseMap
entry for every type that was ever deserialized, which was an
unacceptable cost to support just the name lookup of constructors,
destructors, and conversion functions. Plus, this mapping could
never actually work with chained or multiple PCH, based on the first
bullet.
Once we have eliminated the type from the hash function, these
problems go away, as does my horrible "reverse type remap" hack, which
was doomed from the start (see bullet #1 above) and far too
complicated.
However, note that removing the type from the hash function means that
all constructors, destructors, and conversion functions have the same
hash key, so I've updated the caller to double-check that the
declarations found have the appropriate name.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@136708 91177308-0d34-0410-b5e6-96231b3b80d8
2011-08-02 22:32:54 +04:00
|
|
|
case DeclarationName::CXXConstructorName:
|
|
|
|
case DeclarationName::CXXDestructorName:
|
|
|
|
case DeclarationName::CXXConversionFunctionName:
|
2010-08-20 20:04:27 +04:00
|
|
|
case DeclarationName::CXXUsingDirective:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ID.ComputeHash();
|
|
|
|
}
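// Editor's sketch (illustrative only, not from ASTReader.cpp): per the
// commit message above, constructor, destructor and conversion-function
// names no longer fold a type ID into the hash, so they all land in the
// same bucket. Any caller therefore has to re-check the actual name on each
// candidate it gets back. The helper below is hypothetical; in clang the
// filtering happens at the DeclContext lookup call sites.
#include <algorithm>
#include <vector>

template <typename DeclT, typename NameT>
static void FilterByName(std::vector<DeclT *> &Candidates, const NameT &Name) {
  Candidates.erase(
      std::remove_if(Candidates.begin(), Candidates.end(),
                     [&](DeclT *D) { return D->getDeclName() != Name; }),
      Candidates.end());
}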
|
|
|
|
|
|
|
|
internal_key_type GetInternalKey(const external_key_type& Name) const {
|
|
|
|
DeclNameKey Key;
|
|
|
|
Key.Kind = Name.getNameKind();
|
|
|
|
switch (Name.getNameKind()) {
|
|
|
|
case DeclarationName::Identifier:
|
|
|
|
Key.Data = (uint64_t)Name.getAsIdentifierInfo();
|
|
|
|
break;
|
|
|
|
case DeclarationName::ObjCZeroArgSelector:
|
|
|
|
case DeclarationName::ObjCOneArgSelector:
|
|
|
|
case DeclarationName::ObjCMultiArgSelector:
|
|
|
|
Key.Data = (uint64_t)Name.getObjCSelector().getAsOpaquePtr();
|
|
|
|
break;
|
|
|
|
case DeclarationName::CXXOperatorName:
|
|
|
|
Key.Data = Name.getCXXOverloadedOperator();
|
|
|
|
break;
|
|
|
|
case DeclarationName::CXXLiteralOperatorName:
|
|
|
|
Key.Data = (uint64_t)Name.getCXXLiteralIdentifier();
|
|
|
|
break;
|
2011-08-02 22:32:54 +04:00
|
|
|
case DeclarationName::CXXConstructorName:
|
|
|
|
case DeclarationName::CXXDestructorName:
|
|
|
|
case DeclarationName::CXXConversionFunctionName:
|
2010-08-20 20:04:27 +04:00
|
|
|
case DeclarationName::CXXUsingDirective:
|
2011-08-02 22:32:54 +04:00
|
|
|
Key.Data = 0;
|
2010-08-20 20:04:27 +04:00
|
|
|
break;
|
|
|
|
}
|
2010-10-21 07:16:25 +04:00
|
|
|
|
2010-08-20 20:04:27 +04:00
|
|
|
return Key;
|
|
|
|
}
|
|
|
|
|
2010-08-21 03:35:55 +04:00
|
|
|
external_key_type GetExternalKey(const internal_key_type& Key) const {
|
|
|
|
ASTContext *Context = Reader.getContext();
|
|
|
|
switch (Key.Kind) {
|
|
|
|
case DeclarationName::Identifier:
|
|
|
|
return DeclarationName((IdentifierInfo*)Key.Data);
|
|
|
|
|
|
|
|
case DeclarationName::ObjCZeroArgSelector:
|
|
|
|
case DeclarationName::ObjCOneArgSelector:
|
|
|
|
case DeclarationName::ObjCMultiArgSelector:
|
|
|
|
return DeclarationName(Selector(Key.Data));
|
|
|
|
|
|
|
|
case DeclarationName::CXXConstructorName:
|
|
|
|
return Context->DeclarationNames.getCXXConstructorName(
|
2011-07-22 04:38:23 +04:00
|
|
|
Context->getCanonicalType(Reader.getLocalType(F, Key.Data)));
|
2010-08-21 03:35:55 +04:00
|
|
|
|
|
|
|
case DeclarationName::CXXDestructorName:
|
|
|
|
return Context->DeclarationNames.getCXXDestructorName(
|
2011-07-22 04:38:23 +04:00
|
|
|
Context->getCanonicalType(Reader.getLocalType(F, Key.Data)));
|
2010-08-21 03:35:55 +04:00
|
|
|
|
|
|
|
case DeclarationName::CXXConversionFunctionName:
|
|
|
|
return Context->DeclarationNames.getCXXConversionFunctionName(
|
2011-07-22 04:38:23 +04:00
|
|
|
Context->getCanonicalType(Reader.getLocalType(F, Key.Data)));
|
2010-08-21 03:35:55 +04:00
|
|
|
|
|
|
|
case DeclarationName::CXXOperatorName:
|
|
|
|
return Context->DeclarationNames.getCXXOperatorName(
|
|
|
|
(OverloadedOperatorKind)Key.Data);
|
|
|
|
|
|
|
|
case DeclarationName::CXXLiteralOperatorName:
|
|
|
|
return Context->DeclarationNames.getCXXLiteralOperatorName(
|
|
|
|
(IdentifierInfo*)Key.Data);
|
|
|
|
|
|
|
|
case DeclarationName::CXXUsingDirective:
|
|
|
|
return DeclarationName::getUsingDirectiveName();
|
|
|
|
}
|
|
|
|
|
|
|
|
llvm_unreachable("Invalid Name Kind ?");
|
|
|
|
}
|
|
|
|
|
2010-08-20 20:04:27 +04:00
|
|
|
static std::pair<unsigned, unsigned>
|
|
|
|
ReadKeyDataLength(const unsigned char*& d) {
|
|
|
|
using namespace clang::io;
|
|
|
|
unsigned KeyLen = ReadUnalignedLE16(d);
|
|
|
|
unsigned DataLen = ReadUnalignedLE16(d);
|
|
|
|
return std::make_pair(KeyLen, DataLen);
|
|
|
|
}
|
|
|
|
|
|
|
|
internal_key_type ReadKey(const unsigned char* d, unsigned) {
|
|
|
|
using namespace clang::io;
|
|
|
|
|
|
|
|
DeclNameKey Key;
|
|
|
|
Key.Kind = (DeclarationName::NameKind)*d++;
|
|
|
|
switch (Key.Kind) {
|
|
|
|
case DeclarationName::Identifier:
|
2011-07-29 00:55:49 +04:00
|
|
|
Key.Data = (uint64_t)Reader.getLocalIdentifier(F, ReadUnalignedLE32(d));
|
2010-08-20 20:04:27 +04:00
|
|
|
break;
|
|
|
|
case DeclarationName::ObjCZeroArgSelector:
|
|
|
|
case DeclarationName::ObjCOneArgSelector:
|
|
|
|
case DeclarationName::ObjCMultiArgSelector:
|
2010-10-21 07:16:25 +04:00
|
|
|
Key.Data =
|
2011-07-29 01:16:51 +04:00
|
|
|
(uint64_t)Reader.getLocalSelector(F, ReadUnalignedLE32(d))
|
|
|
|
.getAsOpaquePtr();
|
2010-08-20 20:04:27 +04:00
|
|
|
break;
|
|
|
|
case DeclarationName::CXXOperatorName:
|
|
|
|
Key.Data = *d++; // OverloadedOperatorKind
|
|
|
|
break;
|
|
|
|
case DeclarationName::CXXLiteralOperatorName:
|
2011-07-29 00:55:49 +04:00
|
|
|
Key.Data = (uint64_t)Reader.getLocalIdentifier(F, ReadUnalignedLE32(d));
|
2010-08-20 20:04:27 +04:00
|
|
|
break;
|
2011-08-02 22:32:54 +04:00
|
|
|
case DeclarationName::CXXConstructorName:
|
|
|
|
case DeclarationName::CXXDestructorName:
|
|
|
|
case DeclarationName::CXXConversionFunctionName:
|
2010-08-20 20:04:27 +04:00
|
|
|
case DeclarationName::CXXUsingDirective:
|
2011-08-02 22:32:54 +04:00
|
|
|
Key.Data = 0;
|
2010-08-20 20:04:27 +04:00
|
|
|
break;
|
|
|
|
}
|
2010-10-21 07:16:25 +04:00
|
|
|
|
2010-08-20 20:04:27 +04:00
|
|
|
return Key;
|
|
|
|
}
|
|
|
|
|
|
|
|
data_type ReadData(internal_key_type, const unsigned char* d,
|
|
|
|
unsigned DataLen) {
|
|
|
|
using namespace clang::io;
|
|
|
|
unsigned NumDecls = ReadUnalignedLE16(d);
|
|
|
|
DeclID *Start = (DeclID *)d;
|
|
|
|
return std::make_pair(Start, Start + NumDecls);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
} // end anonymous namespace
|
|
|
|
|
|
|
|
/// \brief The on-disk hash table used for the DeclContext's Name lookup table.
|
|
|
|
typedef OnDiskChainedHashTable<ASTDeclContextNameLookupTrait>
|
|
|
|
ASTDeclContextNameLookupTable;
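// Editor's sketch (illustrative only, not from ASTReader.cpp): the shape
// every lookup trait in this file follows. OnDiskChainedHashTable only needs
// these hooks, and the two traits above (identifier table and DeclContext
// name lookup) are concrete instances. The types and the int data_type here
// are placeholders, not the real on-disk format.
#include <utility>

struct ExampleLookupTrait {
  typedef const char *external_key_type; // what callers pass to find()
  typedef const char *internal_key_type; // what is stored on disk
  typedef int data_type;                 // placeholder payload type

  static unsigned ComputeHash(internal_key_type Key);
  static internal_key_type GetInternalKey(external_key_type Key);
  static bool EqualKey(internal_key_type A, internal_key_type B);

  // Per-entry decoding: first the key/data sizes, then the key bytes, then
  // the payload, all read from the raw on-disk buffer.
  static std::pair<unsigned, unsigned>
  ReadKeyDataLength(const unsigned char *&Data);
  static internal_key_type ReadKey(const unsigned char *Data, unsigned KeyLen);
  static data_type ReadData(internal_key_type Key, const unsigned char *Data,
                            unsigned DataLen);
};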
|
|
|
|
|
2010-08-20 20:04:35 +04:00
|
|
|
bool ASTReader::ReadDeclContextStorage(llvm::BitstreamCursor &Cursor,
|
|
|
|
const std::pair<uint64_t, uint64_t> &Offsets,
|
|
|
|
DeclContextInfo &Info) {
|
|
|
|
SavedStreamPosition SavedPosition(Cursor);
|
|
|
|
// First the lexical decls.
|
|
|
|
if (Offsets.first != 0) {
|
|
|
|
Cursor.JumpToBit(Offsets.first);
|
|
|
|
|
|
|
|
RecordData Record;
|
|
|
|
const char *Blob;
|
|
|
|
unsigned BlobLen;
|
|
|
|
unsigned Code = Cursor.ReadCode();
|
|
|
|
unsigned RecCode = Cursor.ReadRecord(Code, Record, &Blob, &BlobLen);
|
|
|
|
if (RecCode != DECL_CONTEXT_LEXICAL) {
|
|
|
|
Error("Expected lexical block");
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2010-10-15 00:14:34 +04:00
|
|
|
Info.LexicalDecls = reinterpret_cast<const KindDeclIDPair*>(Blob);
|
|
|
|
Info.NumLexicalDecls = BlobLen / sizeof(KindDeclIDPair);
|
2010-08-20 20:04:35 +04:00
|
|
|
} else {
|
|
|
|
Info.LexicalDecls = 0;
|
|
|
|
Info.NumLexicalDecls = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now the lookup table.
|
|
|
|
if (Offsets.second != 0) {
|
|
|
|
Cursor.JumpToBit(Offsets.second);
|
|
|
|
|
|
|
|
RecordData Record;
|
|
|
|
const char *Blob;
|
|
|
|
unsigned BlobLen;
|
|
|
|
unsigned Code = Cursor.ReadCode();
|
|
|
|
unsigned RecCode = Cursor.ReadRecord(Code, Record, &Blob, &BlobLen);
|
|
|
|
if (RecCode != DECL_CONTEXT_VISIBLE) {
|
|
|
|
Error("Expected visible lookup table block");
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
Info.NameLookupTableData
|
|
|
|
= ASTDeclContextNameLookupTable::Create(
|
|
|
|
(const unsigned char *)Blob + Record[0],
|
|
|
|
(const unsigned char *)Blob,
|
2011-07-22 04:38:23 +04:00
|
|
|
ASTDeclContextNameLookupTrait(*this, *Info.F));
|
2010-08-24 04:50:00 +04:00
|
|
|
} else {
|
|
|
|
Info.NameLookupTableData = 0;
|
2010-08-20 20:04:35 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
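// Editor's sketch (illustrative only, not from ASTReader.cpp): what
// ReadDeclContextStorage above effectively hands back for one DeclContext,
// shown with placeholder types. The lexical blob is viewed in place as an
// array of (decl kind, decl ID) pairs; the visible blob is wrapped in an
// on-disk hash table keyed by DeclNameKey.
#include <utility>

struct ExampleDeclContextInfo {
  const std::pair<unsigned, unsigned> *LexicalDecls; // (kind, ID) pairs
  unsigned NumLexicalDecls;                          // BlobLen / pair size
  const void *NameLookupTableData; // ASTDeclContextNameLookupTable in clang
};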
|
|
|
|
|
2011-07-23 14:55:15 +04:00
|
|
|
void ASTReader::Error(StringRef Msg) {
|
2011-04-26 02:23:56 +04:00
|
|
|
Error(diag::err_fe_pch_malformed, Msg);
|
|
|
|
}
|
|
|
|
|
|
|
|
void ASTReader::Error(unsigned DiagID,
|
2011-07-23 14:55:15 +04:00
|
|
|
StringRef Arg1, StringRef Arg2) {
|
2011-04-26 02:23:56 +04:00
|
|
|
if (Diags.isDiagnosticInFlight())
|
|
|
|
Diags.SetDelayedDiagnostic(DiagID, Arg1, Arg2);
|
|
|
|
else
|
|
|
|
Diag(DiagID) << Arg1 << Arg2;
|
2009-04-28 22:58:38 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:06 +04:00
|
|
|
/// \brief Tell the AST listener about the predefines buffers in the chain.
|
2010-08-19 03:56:43 +04:00
|
|
|
bool ASTReader::CheckPredefinesBuffers() {
|
2009-06-19 04:03:23 +04:00
|
|
|
if (Listener)
|
2010-07-14 21:49:11 +04:00
|
|
|
return Listener->ReadPredefinesBuffer(PCHPredefinesBuffers,
|
2009-11-11 08:29:04 +03:00
|
|
|
ActualOriginalFileName,
|
2011-02-24 00:16:44 +03:00
|
|
|
SuggestedPredefines,
|
|
|
|
FileMgr);
|
2009-04-28 22:58:38 +04:00
|
|
|
return false;
|
2009-04-11 03:10:45 +04:00
|
|
|
}
|
|
|
|
|
2009-04-27 22:38:38 +04:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Source Manager Deserialization
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2009-04-13 20:31:14 +04:00
|
|
|
/// \brief Read the line table in the source manager block.
|
2010-10-05 19:59:54 +04:00
|
|
|
/// \returns true if there was an error.
|
2011-07-22 20:00:58 +04:00
|
|
|
bool ASTReader::ParseLineTable(Module &F,
|
2011-07-23 14:55:15 +04:00
|
|
|
SmallVectorImpl<uint64_t> &Record) {
|
2009-04-13 20:31:14 +04:00
|
|
|
unsigned Idx = 0;
|
|
|
|
LineTableInfo &LineTable = SourceMgr.getLineTable();
|
|
|
|
|
|
|
|
// Parse the file names
|
2009-04-13 21:12:42 +04:00
|
|
|
std::map<int, int> FileIDs;
|
|
|
|
for (int I = 0, N = Record[Idx++]; I != N; ++I) {
|
2009-04-13 20:31:14 +04:00
|
|
|
// Extract the file name
|
|
|
|
unsigned FilenameLen = Record[Idx++];
|
|
|
|
std::string Filename(&Record[Idx], &Record[Idx] + FilenameLen);
|
|
|
|
Idx += FilenameLen;
|
2009-07-07 04:12:59 +04:00
|
|
|
MaybeAddSystemRootToFilename(Filename);
|
2011-06-21 19:13:30 +04:00
|
|
|
FileIDs[I] = LineTable.getLineTableFilenameID(Filename);
|
2009-04-13 20:31:14 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
// Parse the line entries
|
|
|
|
std::vector<LineEntry> Entries;
|
|
|
|
while (Idx < Record.size()) {
|
2010-07-02 15:55:05 +04:00
|
|
|
int FID = Record[Idx++];
|
Revamp the SourceManager to separate the representation of parsed
source locations from source locations loaded from an AST/PCH file.
Previously, loading an AST/PCH file involved carefully pre-allocating
space at the beginning of the source manager for the source locations
and FileIDs that correspond to the prefix, and then appending the
source locations/FileIDs used for parsing the remaining translation
unit. This design forced us into loading PCH files early, as a prefix,
which has become a rather significant limitation.
This patch splits the SourceManager space into two parts: for source
location "addresses", the lower values (growing upward) are used to
describe parsed code, while upper values (growing downward) are used
for source locations loaded from AST/PCH files. Similarly, positive
FileIDs are used to describe parsed code while negative FileIDs are
used to file/macro locations loaded from AST/PCH files. As a result,
we can load PCH/AST files even during parsing, making various
improvements in the future possible, e.g., teaching #include <foo.h> to
look for and load <foo.h.gch> if it happens to be already available.
This patch was originally written by Sebastian Redl, then brought
forward to the modern age by Jonathan Turner, and finally
polished/finished by me to be committed.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@135484 91177308-0d34-0410-b5e6-96231b3b80d8
2011-07-19 20:10:42 +04:00
|
|
|
assert(FID >= 0 && "Serialized line entries for non-local file.");
|
|
|
|
// Remap FileID from 1-based old view.
|
|
|
|
FID += F.SLocEntryBaseID - 1;
|
2009-04-13 20:31:14 +04:00
|
|
|
|
|
|
|
// Extract the line entries
|
|
|
|
unsigned NumEntries = Record[Idx++];
|
2010-07-02 15:55:05 +04:00
|
|
|
assert(NumEntries && "Numentries is 00000");
|
2009-04-13 20:31:14 +04:00
|
|
|
Entries.clear();
|
|
|
|
Entries.reserve(NumEntries);
|
|
|
|
for (unsigned I = 0; I != NumEntries; ++I) {
|
|
|
|
unsigned FileOffset = Record[Idx++];
|
|
|
|
unsigned LineNo = Record[Idx++];
|
2010-07-02 15:55:05 +04:00
|
|
|
int FilenameID = FileIDs[Record[Idx++]];
|
2009-09-09 19:08:12 +04:00
|
|
|
SrcMgr::CharacteristicKind FileKind
|
2009-04-13 20:31:14 +04:00
|
|
|
= (SrcMgr::CharacteristicKind)Record[Idx++];
|
|
|
|
unsigned IncludeOffset = Record[Idx++];
|
|
|
|
Entries.push_back(LineEntry::get(FileOffset, LineNo, FilenameID,
|
|
|
|
FileKind, IncludeOffset));
|
|
|
|
}
|
|
|
|
LineTable.AddEntry(FID, Entries);
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
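// Editor's sketch (illustrative only, not from ASTReader.cpp): the layout of
// the line-table record as ParseLineTable above walks it, plus the FileID
// rebasing it applies. Offsets are indices into the flat Record array:
//
//   [ NumFiles,
//     { FilenameLen, Filename chars... } * NumFiles,
//     { FID, NumEntries,
//       { FileOffset, LineNo, FilenameID, FileKind, IncludeOffset }
//         * NumEntries } ... until the record is exhausted ]
//
// Each FID stored in the record is local to the AST file (1-based), so the
// reader shifts it into this translation unit's global FileID space:
static int RebaseLineTableFileID(int LocalFID, int SLocEntryBaseID) {
  return LocalFID + SLocEntryBaseID - 1; // mirrors "FID += F.SLocEntryBaseID - 1"
}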
|
|
|
|
|
2009-04-27 22:38:38 +04:00
|
|
|
namespace {
|
|
|
|
|
2010-08-19 03:57:06 +04:00
|
|
|
class ASTStatData {
|
2009-04-27 22:38:38 +04:00
|
|
|
public:
|
|
|
|
const ino_t ino;
|
|
|
|
const dev_t dev;
|
|
|
|
const mode_t mode;
|
|
|
|
const time_t mtime;
|
|
|
|
const off_t size;
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2010-08-19 03:57:06 +04:00
|
|
|
ASTStatData(ino_t i, dev_t d, mode_t mo, time_t m, off_t s)
|
2010-11-23 22:28:12 +03:00
|
|
|
: ino(i), dev(d), mode(mo), mtime(m), size(s) {}
|
2009-04-27 22:38:38 +04:00
|
|
|
};
|
|
|
|
|
2010-08-19 03:57:06 +04:00
|
|
|
class ASTStatLookupTrait {
|
2009-04-27 22:38:38 +04:00
|
|
|
public:
|
|
|
|
typedef const char *external_key_type;
|
|
|
|
typedef const char *internal_key_type;
|
|
|
|
|
2010-08-19 03:57:06 +04:00
|
|
|
typedef ASTStatData data_type;
|
2009-04-27 22:38:38 +04:00
|
|
|
|
|
|
|
static unsigned ComputeHash(const char *path) {
|
2009-10-18 03:52:28 +04:00
|
|
|
return llvm::HashString(path);
|
2009-04-27 22:38:38 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static internal_key_type GetInternalKey(const char *path) { return path; }
|
|
|
|
|
|
|
|
static bool EqualKey(internal_key_type a, internal_key_type b) {
|
|
|
|
return strcmp(a, b) == 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static std::pair<unsigned, unsigned>
|
|
|
|
ReadKeyDataLength(const unsigned char*& d) {
|
|
|
|
unsigned KeyLen = (unsigned) clang::io::ReadUnalignedLE16(d);
|
|
|
|
unsigned DataLen = (unsigned) *d++;
|
|
|
|
return std::make_pair(KeyLen + 1, DataLen);
|
|
|
|
}
|
|
|
|
|
|
|
|
static internal_key_type ReadKey(const unsigned char *d, unsigned) {
|
|
|
|
return (const char *)d;
|
|
|
|
}
|
|
|
|
|
|
|
|
static data_type ReadData(const internal_key_type, const unsigned char *d,
|
|
|
|
unsigned /*DataLen*/) {
|
|
|
|
using namespace clang::io;
|
|
|
|
|
|
|
|
ino_t ino = (ino_t) ReadUnalignedLE32(d);
|
|
|
|
dev_t dev = (dev_t) ReadUnalignedLE32(d);
|
|
|
|
mode_t mode = (mode_t) ReadUnalignedLE16(d);
|
2009-09-09 19:08:12 +04:00
|
|
|
time_t mtime = (time_t) ReadUnalignedLE64(d);
|
2009-04-27 22:38:38 +04:00
|
|
|
off_t size = (off_t) ReadUnalignedLE64(d);
|
|
|
|
return data_type(ino, dev, mode, mtime, size);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
/// \brief stat() cache for precompiled headers.
|
|
|
|
///
|
|
|
|
/// This cache is very similar to the stat cache used by pretokenized
|
|
|
|
/// headers.
|
2010-11-23 22:19:34 +03:00
|
|
|
class ASTStatCache : public FileSystemStatCache {
|
2010-08-19 03:57:06 +04:00
|
|
|
typedef OnDiskChainedHashTable<ASTStatLookupTrait> CacheTy;
|
2009-04-27 22:38:38 +04:00
|
|
|
CacheTy *Cache;
|
|
|
|
|
|
|
|
unsigned &NumStatHits, &NumStatMisses;
|
2009-09-09 19:08:12 +04:00
|
|
|
public:
|
2010-11-23 22:28:12 +03:00
|
|
|
ASTStatCache(const unsigned char *Buckets, const unsigned char *Base,
|
|
|
|
unsigned &NumStatHits, unsigned &NumStatMisses)
|
2009-04-27 22:38:38 +04:00
|
|
|
: Cache(0), NumStatHits(NumStatHits), NumStatMisses(NumStatMisses) {
|
|
|
|
Cache = CacheTy::Create(Buckets, Base);
|
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:06 +04:00
|
|
|
~ASTStatCache() { delete Cache; }
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2010-11-24 00:17:56 +03:00
|
|
|
LookupResult getStat(const char *Path, struct stat &StatBuf,
|
|
|
|
int *FileDescriptor) {
|
2010-08-19 03:57:06 +04:00
|
|
|
// Do the lookup for the file's data in the AST file.
|
2010-11-23 22:19:34 +03:00
|
|
|
CacheTy::iterator I = Cache->find(Path);
|
2009-04-27 22:38:38 +04:00
|
|
|
|
2010-08-19 03:57:06 +04:00
|
|
|
// If we don't get a hit in the AST file just forward to 'stat'.
|
2009-04-27 22:38:38 +04:00
|
|
|
if (I == Cache->end()) {
|
|
|
|
++NumStatMisses;
|
2010-11-24 00:17:56 +03:00
|
|
|
return statChained(Path, StatBuf, FileDescriptor);
|
2009-04-27 22:38:38 +04:00
|
|
|
}
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2009-04-27 22:38:38 +04:00
|
|
|
++NumStatHits;
|
2010-08-19 03:57:06 +04:00
|
|
|
ASTStatData Data = *I;
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2010-11-23 22:19:34 +03:00
|
|
|
StatBuf.st_ino = Data.ino;
|
|
|
|
StatBuf.st_dev = Data.dev;
|
|
|
|
StatBuf.st_mtime = Data.mtime;
|
|
|
|
StatBuf.st_mode = Data.mode;
|
|
|
|
StatBuf.st_size = Data.size;
|
2010-11-23 23:05:15 +03:00
|
|
|
return CacheExists;
|
2009-04-27 22:38:38 +04:00
|
|
|
}
|
|
|
|
};
|
|
|
|
} // end anonymous namespace
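// Editor's sketch (illustrative only, not from ASTReader.cpp): the lookup
// pattern ASTStatCache implements above, reduced to its essentials with
// hypothetical types. A hit is answered entirely from data recorded in the
// AST file; a miss falls through to the next cache in the chain, which
// ultimately performs the real stat().
#include <map>
#include <string>

struct ExampleStatCache {
  struct StatData { long Size; long MTime; };
  std::map<std::string, StatData> FromASTFile; // table loaded from the PCH
  ExampleStatCache *Next = nullptr;            // next cache / real stat

  bool getStat(const std::string &Path, StatData &Out) {
    auto It = FromASTFile.find(Path);
    if (It == FromASTFile.end())
      return Next && Next->getStat(Path, Out);  // miss: delegate down chain
    Out = It->second;                           // hit: no syscall needed
    return true;
  }
};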
|
|
|
|
|
|
|
|
|
2010-07-20 00:52:06 +04:00
|
|
|
/// \brief Read a source manager block
|
2011-07-22 20:00:58 +04:00
|
|
|
ASTReader::ASTReadResult ASTReader::ReadSourceManagerBlock(Module &F) {
|
2009-04-10 07:52:48 +04:00
|
|
|
using namespace SrcMgr;
|
2009-04-27 10:38:32 +04:00
|
|
|
|
2010-07-20 00:52:06 +04:00
|
|
|
llvm::BitstreamCursor &SLocEntryCursor = F.SLocEntryCursor;
|
2010-07-16 21:50:48 +04:00
|
|
|
|
2009-04-27 10:38:32 +04:00
|
|
|
// Set the source-location entry cursor to the current position in
|
|
|
|
// the stream. This cursor will be used to read the contents of the
|
|
|
|
// source manager block initially, and then lazily read
|
|
|
|
// source-location entries as needed.
|
2010-07-20 00:52:06 +04:00
|
|
|
SLocEntryCursor = F.Stream;
|
2009-04-27 10:38:32 +04:00
|
|
|
|
|
|
|
// The stream itself is going to skip over the source manager block.
|
2010-07-20 00:52:06 +04:00
|
|
|
if (F.Stream.SkipBlock()) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("malformed block record in AST file");
|
2009-04-27 10:38:32 +04:00
|
|
|
return Failure;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Enter the source manager block.
|
2010-08-19 03:57:32 +04:00
|
|
|
if (SLocEntryCursor.EnterSubBlock(SOURCE_MANAGER_BLOCK_ID)) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("malformed source manager block record in AST file");
|
2009-04-11 03:10:45 +04:00
|
|
|
return Failure;
|
|
|
|
}
|
2009-04-10 07:52:48 +04:00
|
|
|
|
|
|
|
RecordData Record;
|
|
|
|
while (true) {
|
2009-04-27 10:38:32 +04:00
|
|
|
unsigned Code = SLocEntryCursor.ReadCode();
|
2009-04-10 07:52:48 +04:00
|
|
|
if (Code == llvm::bitc::END_BLOCK) {
|
2009-04-27 10:38:32 +04:00
|
|
|
if (SLocEntryCursor.ReadBlockEnd()) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("error at end of Source Manager block in AST file");
|
2009-04-11 03:10:45 +04:00
|
|
|
return Failure;
|
|
|
|
}
|
|
|
|
return Success;
|
2009-04-10 07:52:48 +04:00
|
|
|
}
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2009-04-10 07:52:48 +04:00
|
|
|
if (Code == llvm::bitc::ENTER_SUBBLOCK) {
|
|
|
|
// No known subblocks, always skip them.
|
2009-04-27 10:38:32 +04:00
|
|
|
SLocEntryCursor.ReadSubBlockID();
|
|
|
|
if (SLocEntryCursor.SkipBlock()) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("malformed block record in AST file");
|
2009-04-11 03:10:45 +04:00
|
|
|
return Failure;
|
|
|
|
}
|
2009-04-10 07:52:48 +04:00
|
|
|
continue;
|
|
|
|
}
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2009-04-10 07:52:48 +04:00
|
|
|
if (Code == llvm::bitc::DEFINE_ABBREV) {
|
2009-04-27 10:38:32 +04:00
|
|
|
SLocEntryCursor.ReadAbbrevRecord();
|
2009-04-10 07:52:48 +04:00
|
|
|
continue;
|
|
|
|
}
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2009-04-10 07:52:48 +04:00
|
|
|
// Read a record.
|
|
|
|
const char *BlobStart;
|
|
|
|
unsigned BlobLen;
|
|
|
|
Record.clear();
|
2009-04-27 10:38:32 +04:00
|
|
|
switch (SLocEntryCursor.ReadRecord(Code, Record, &BlobStart, &BlobLen)) {
|
2009-04-10 07:52:48 +04:00
|
|
|
default: // Default behavior: ignore.
|
|
|
|
break;
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case SM_SLOC_FILE_ENTRY:
|
|
|
|
case SM_SLOC_BUFFER_ENTRY:
|
2011-07-15 11:25:21 +04:00
|
|
|
case SM_SLOC_EXPANSION_ENTRY:
|
2009-04-27 10:38:32 +04:00
|
|
|
// Once we hit one of the source location entries, we're done.
|
|
|
|
return Success;
|
2009-04-10 07:52:48 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-02-15 20:54:22 +03:00
|
|
|
/// \brief If a header file is not found at the path that we expect it to be
|
|
|
|
/// and the PCH file was moved from its original location, try to resolve the
|
|
|
|
/// file by assuming that header+PCH were moved together and the header is in
|
|
|
|
/// the same place relative to the PCH.
|
|
|
|
static std::string
|
|
|
|
resolveFileRelativeToOriginalDir(const std::string &Filename,
|
|
|
|
const std::string &OriginalDir,
|
|
|
|
const std::string &CurrDir) {
|
|
|
|
assert(OriginalDir != CurrDir &&
|
|
|
|
"No point trying to resolve the file if the PCH dir didn't change");
|
|
|
|
using namespace llvm::sys;
|
|
|
|
llvm::SmallString<128> filePath(Filename);
|
|
|
|
fs::make_absolute(filePath);
|
|
|
|
assert(path::is_absolute(OriginalDir));
|
|
|
|
llvm::SmallString<128> currPCHPath(CurrDir);
|
|
|
|
|
|
|
|
path::const_iterator fileDirI = path::begin(path::parent_path(filePath)),
|
|
|
|
fileDirE = path::end(path::parent_path(filePath));
|
|
|
|
path::const_iterator origDirI = path::begin(OriginalDir),
|
|
|
|
origDirE = path::end(OriginalDir);
|
|
|
|
// Skip the common path components from filePath and OriginalDir.
|
|
|
|
while (fileDirI != fileDirE && origDirI != origDirE &&
|
|
|
|
*fileDirI == *origDirI) {
|
|
|
|
++fileDirI;
|
|
|
|
++origDirI;
|
|
|
|
}
|
|
|
|
for (; origDirI != origDirE; ++origDirI)
|
|
|
|
path::append(currPCHPath, "..");
|
|
|
|
path::append(currPCHPath, fileDirI, fileDirE);
|
|
|
|
path::append(currPCHPath, path::filename(Filename));
|
|
|
|
return currPCHPath.str();
|
|
|
|
}
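// Editor's note: a worked example of the resolution above, assuming the
// header moved together with the PCH. All paths are hypothetical.
//
//   std::string Resolved = resolveFileRelativeToOriginalDir(
//       "/old/proj/include/sub/foo.h",   // Filename recorded in the PCH
//       "/old/proj/include",             // OriginalDir (where PCH was built)
//       "/new/proj/include");            // CurrDir (where the PCH lives now)
//
//   // Resolved == "/new/proj/include/sub/foo.h": the shared prefix
//   // "/old/proj/include" is skipped, no ".." components are needed, and
//   // the remainder "sub/foo.h" is re-rooted under the new directory.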
|
|
|
|
|
2009-04-27 10:38:32 +04:00
|
|
|
/// \brief Read in the source location entry with the given ID.
|
2011-07-19 20:10:42 +04:00
|
|
|
ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) {
|
2009-04-27 10:38:32 +04:00
|
|
|
if (ID == 0)
|
|
|
|
return Success;
|
|
|
|
|
2011-07-21 22:46:38 +04:00
|
|
|
if (unsigned(-ID) - 2 >= getTotalNumSLocs() || ID > 0) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("source location entry ID out-of-range for AST file");
|
2009-04-27 10:38:32 +04:00
|
|
|
return Failure;
|
|
|
|
}
|
|
|
|
|
2011-07-22 20:00:58 +04:00
|
|
|
Module *F = GlobalSLocEntryMap.find(-ID)->second;
|
2011-07-19 20:10:42 +04:00
|
|
|
F->SLocEntryCursor.JumpToBit(F->SLocEntryOffsets[ID - F->SLocEntryBaseID]);
|
2010-10-05 19:59:54 +04:00
|
|
|
llvm::BitstreamCursor &SLocEntryCursor = F->SLocEntryCursor;
|
2011-07-19 20:10:42 +04:00
|
|
|
unsigned BaseOffset = F->SLocEntryBaseOffset;
|
2010-07-16 21:50:48 +04:00
|
|
|
|
2009-04-27 10:38:32 +04:00
|
|
|
++NumSLocEntriesRead;
|
|
|
|
unsigned Code = SLocEntryCursor.ReadCode();
|
|
|
|
if (Code == llvm::bitc::END_BLOCK ||
|
|
|
|
Code == llvm::bitc::ENTER_SUBBLOCK ||
|
|
|
|
Code == llvm::bitc::DEFINE_ABBREV) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("incorrectly-formatted source location entry in AST file");
|
2009-04-27 10:38:32 +04:00
|
|
|
return Failure;
|
|
|
|
}
|
|
|
|
|
|
|
|
RecordData Record;
|
|
|
|
const char *BlobStart;
|
|
|
|
unsigned BlobLen;
|
|
|
|
switch (SLocEntryCursor.ReadRecord(Code, Record, &BlobStart, &BlobLen)) {
|
|
|
|
default:
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("incorrectly-formatted source location entry in AST file");
|
2009-04-27 10:38:32 +04:00
|
|
|
return Failure;
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case SM_SLOC_FILE_ENTRY: {
|
2009-07-07 04:12:59 +04:00
|
|
|
std::string Filename(BlobStart, BlobStart + BlobLen);
|
|
|
|
MaybeAddSystemRootToFilename(Filename);
|
2010-11-23 11:35:12 +03:00
|
|
|
const FileEntry *File = FileMgr.getFile(Filename);
|
2011-02-15 20:54:22 +03:00
|
|
|
if (File == 0 && !OriginalDir.empty() && !CurrentDir.empty() &&
|
|
|
|
OriginalDir != CurrentDir) {
|
|
|
|
std::string resolved = resolveFileRelativeToOriginalDir(Filename,
|
|
|
|
OriginalDir,
|
|
|
|
CurrentDir);
|
|
|
|
if (!resolved.empty())
|
|
|
|
File = FileMgr.getFile(resolved);
|
|
|
|
}
|
2011-01-27 13:55:51 +03:00
|
|
|
if (File == 0)
|
|
|
|
File = FileMgr.getVirtualFile(Filename, (off_t)Record[4],
|
|
|
|
(time_t)Record[5]);
|
2009-06-15 08:35:16 +04:00
|
|
|
if (File == 0) {
|
|
|
|
std::string ErrorStr = "could not find file '";
|
2009-07-07 04:12:59 +04:00
|
|
|
ErrorStr += Filename;
|
2010-08-19 03:57:06 +04:00
|
|
|
ErrorStr += "' referenced by AST file";
|
2009-06-15 08:35:16 +04:00
|
|
|
Error(ErrorStr.c_str());
|
|
|
|
return Failure;
|
|
|
|
}
|
2009-09-09 19:08:12 +04:00
|
|
|
|
Implement two related optimizations that make de-serialization of
AST/PCH files more lazy:
- Don't preload all of the file source-location entries when reading
the AST file. Instead, load them lazily, when needed.
- Only look up header-search information (whether a header was already
#import'd, how many times it's been included, etc.) when it's needed
by the preprocessor, rather than pre-populating it.
Previously, we would pre-load all of the file source-location entries,
which also populated the header-search information structure. This was
a relatively minor performance issue, since we would end up stat()'ing
all of the headers stored within a AST/PCH file when the AST/PCH file
was loaded. In the normal PCH use case, the stat()s were cached, so
the cost--of preloading ~860 source-location entries in the Cocoa.h
case---was relatively low.
However, the recent optimization that replaced stat+open with
open+fstat turned this into a major problem, since the preloading of
source-location entries would now end up opening those files. Worse,
those files wouldn't be closed until the file manager was destroyed,
so just opening a Cocoa.h PCH file would hold on to ~860 file
descriptors, and it was easy to blow through the process's limit on
the number of open file descriptors.
By eliminating the preloading of these files, we neither open nor stat
the headers stored in the PCH/AST file until they're actually needed
for something. Concretely, we went from
*** HeaderSearch Stats:
835 files tracked.
364 #import/#pragma once files.
823 included exactly once.
6 max times a file is included.
3 #include/#include_next/#import.
0 #includes skipped due to the multi-include optimization.
1 framework lookups.
0 subframework lookups.
*** Source Manager Stats:
835 files mapped, 3 mem buffers mapped.
37460 SLocEntry's allocated, 11215575B of Sloc address space used.
62 bytes of files mapped, 0 files with line #'s computed.
with a trivial program that uses a chained PCH including a Cocoa PCH
to
*** HeaderSearch Stats:
4 files tracked.
1 #import/#pragma once files.
3 included exactly once.
2 max times a file is included.
3 #include/#include_next/#import.
0 #includes skipped due to the multi-include optimization.
1 framework lookups.
0 subframework lookups.
*** Source Manager Stats:
3 files mapped, 3 mem buffers mapped.
37460 SLocEntry's allocated, 11215575B of Sloc address space used.
62 bytes of files mapped, 0 files with line #'s computed.
for the same program.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@125286 91177308-0d34-0410-b5e6-96231b3b80d8
2011-02-10 20:09:37 +03:00
|
|
|
if (Record.size() < 6) {
|
2010-03-19 00:23:05 +03:00
|
|
|
Error("source location entry is incorrect");
|
|
|
|
return Failure;
|
|
|
|
}
|
|
|
|
|
2010-07-27 04:27:13 +04:00
|
|
|
if (!DisableValidation &&
|
|
|
|
((off_t)Record[4] != File->getSize()
|
2010-04-09 19:54:22 +04:00
|
|
|
#if !defined(LLVM_ON_WIN32)
|
|
|
|
// In our regression testing, the Windows file system seems to
|
|
|
|
// have inconsistent modification times that sometimes
|
|
|
|
// erroneously trigger this error-handling path.
|
2010-07-27 04:27:13 +04:00
|
|
|
|| (time_t)Record[5] != File->getModificationTime()
|
2010-04-09 19:54:22 +04:00
|
|
|
#endif
|
2010-07-27 04:27:13 +04:00
|
|
|
)) {
|
2011-04-26 02:23:56 +04:00
|
|
|
Error(diag::err_fe_pch_file_modified, Filename);
|
2010-03-22 01:49:54 +03:00
|
|
|
return Failure;
|
|
|
|
}
|
|
|
|
|
2011-07-19 20:10:42 +04:00
|
|
|
SourceLocation IncludeLoc = ReadSourceLocation(*F, Record[1]);
|
2011-07-22 20:00:58 +04:00
|
|
|
if (IncludeLoc.isInvalid() && F->Kind != MK_MainFile) {
|
Revamp the SourceManager to separate the representation of parsed
source locations from source locations loaded from an AST/PCH file.
Previously, loading an AST/PCH file involved carefully pre-allocating
space at the beginning of the source manager for the source locations
and FileIDs that correspond to the prefix, and then appending the
source locations/FileIDs used for parsing the remaining translation
unit. This design forced us into loading PCH files early, as a prefix,
whic has become a rather significant limitation.
This patch splits the SourceManager space into two parts: for source
location "addresses", the lower values (growing upward) are used to
describe parsed code, while upper values (growing downward) are used
for source locations loaded from AST/PCH files. Similarly, positive
FileIDs are used to describe parsed code while negative FileIDs are
used to file/macro locations loaded from AST/PCH files. As a result,
we can load PCH/AST files even during parsing, making various
improvemnts in the future possible, e.g., teaching #include <foo.h> to
look for and load <foo.h.gch> if it happens to be already available.
This patch was originally written by Sebastian Redl, then brought
forward to the modern age by Jonathan Turner, and finally
polished/finished by me to be committed.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@135484 91177308-0d34-0410-b5e6-96231b3b80d8
2011-07-19 20:10:42 +04:00
|
|
|
// This is the module's main file.
|
|
|
|
IncludeLoc = getImportLocation(F);
|
|
|
|
}
|
|
|
|
FileID FID = SourceMgr.createFileID(File, IncludeLoc,
|
2010-11-23 12:19:42 +03:00
|
|
|
(SrcMgr::CharacteristicKind)Record[2],
|
Revamp the SourceManager to separate the representation of parsed
source locations from source locations loaded from an AST/PCH file.
Previously, loading an AST/PCH file involved carefully pre-allocating
space at the beginning of the source manager for the source locations
and FileIDs that correspond to the prefix, and then appending the
source locations/FileIDs used for parsing the remaining translation
unit. This design forced us into loading PCH files early, as a prefix,
whic has become a rather significant limitation.
This patch splits the SourceManager space into two parts: for source
location "addresses", the lower values (growing upward) are used to
describe parsed code, while upper values (growing downward) are used
for source locations loaded from AST/PCH files. Similarly, positive
FileIDs are used to describe parsed code while negative FileIDs are
used to file/macro locations loaded from AST/PCH files. As a result,
we can load PCH/AST files even during parsing, making various
improvemnts in the future possible, e.g., teaching #include <foo.h> to
look for and load <foo.h.gch> if it happens to be already available.
This patch was originally written by Sebastian Redl, then brought
forward to the modern age by Jonathan Turner, and finally
polished/finished by me to be committed.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@135484 91177308-0d34-0410-b5e6-96231b3b80d8
2011-07-19 20:10:42 +04:00
|
|
|
ID, BaseOffset + Record[0]);
|
2009-04-27 10:38:32 +04:00
|
|
|
if (Record[3])
|
|
|
|
const_cast<SrcMgr::FileInfo&>(SourceMgr.getSLocEntry(FID).getFile())
|
|
|
|
.setHasLineDirectives();
|
Implement two related optimizations that make de-serialization of
AST/PCH files more lazy:
- Don't preload all of the file source-location entries when reading
the AST file. Instead, load them lazily, when needed.
- Only look up header-search information (whether a header was already
#import'd, how many times it's been included, etc.) when it's needed
by the preprocessor, rather than pre-populating it.
Previously, we would pre-load all of the file source-location entries,
which also populated the header-search information structure. This was
a relatively minor performance issue, since we would end up stat()'ing
all of the headers stored within a AST/PCH file when the AST/PCH file
was loaded. In the normal PCH use case, the stat()s were cached, so
the cost--of preloading ~860 source-location entries in the Cocoa.h
case---was relatively low.
However, the recent optimization that replaced stat+open with
open+fstat turned this into a major problem, since the preloading of
source-location entries would now end up opening those files. Worse,
those files wouldn't be closed until the file manager was destroyed,
so just opening a Cocoa.h PCH file would hold on to ~860 file
descriptors, and it was easy to blow through the process's limit on
the number of open file descriptors.
By eliminating the preloading of these files, we neither open nor stat
the headers stored in the PCH/AST file until they're actually needed
for something. Concretely, we went from
*** HeaderSearch Stats:
835 files tracked.
364 #import/#pragma once files.
823 included exactly once.
6 max times a file is included.
3 #include/#include_next/#import.
0 #includes skipped due to the multi-include optimization.
1 framework lookups.
0 subframework lookups.
*** Source Manager Stats:
835 files mapped, 3 mem buffers mapped.
37460 SLocEntry's allocated, 11215575B of Sloc address space used.
62 bytes of files mapped, 0 files with line #'s computed.
with a trivial program that uses a chained PCH including a Cocoa PCH
to
*** HeaderSearch Stats:
4 files tracked.
1 #import/#pragma once files.
3 included exactly once.
2 max times a file is included.
3 #include/#include_next/#import.
0 #includes skipped due to the multi-include optimization.
1 framework lookups.
0 subframework lookups.
*** Source Manager Stats:
3 files mapped, 3 mem buffers mapped.
37460 SLocEntry's allocated, 11215575B of Sloc address space used.
62 bytes of files mapped, 0 files with line #'s computed.
for the same program.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@125286 91177308-0d34-0410-b5e6-96231b3b80d8
2011-02-10 20:09:37 +03:00
|
|
|
|
2009-04-27 10:38:32 +04:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case SM_SLOC_BUFFER_ENTRY: {
|
2009-04-27 10:38:32 +04:00
|
|
|
const char *Name = BlobStart;
|
|
|
|
unsigned Offset = Record[0];
|
|
|
|
unsigned Code = SLocEntryCursor.ReadCode();
|
|
|
|
Record.clear();
|
2009-09-09 19:08:12 +04:00
|
|
|
unsigned RecCode
|
2009-04-27 10:38:32 +04:00
|
|
|
= SLocEntryCursor.ReadRecord(Code, Record, &BlobStart, &BlobLen);
|
2010-03-18 03:56:54 +03:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
if (RecCode != SM_SLOC_BUFFER_BLOB) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("AST record has invalid code");
|
2010-03-18 03:56:54 +03:00
|
|
|
return Failure;
|
|
|
|
}
|
|
|
|
|
2009-04-27 10:38:32 +04:00
|
|
|
llvm::MemoryBuffer *Buffer
|
2011-07-23 14:55:15 +04:00
|
|
|
= llvm::MemoryBuffer::getMemBuffer(StringRef(BlobStart, BlobLen - 1),
|
2010-04-06 02:42:27 +04:00
|
|
|
Name);
|
Revamp the SourceManager to separate the representation of parsed
source locations from source locations loaded from an AST/PCH file.
Previously, loading an AST/PCH file involved carefully pre-allocating
space at the beginning of the source manager for the source locations
and FileIDs that correspond to the prefix, and then appending the
source locations/FileIDs used for parsing the remaining translation
unit. This design forced us into loading PCH files early, as a prefix,
whic has become a rather significant limitation.
This patch splits the SourceManager space into two parts: for source
location "addresses", the lower values (growing upward) are used to
describe parsed code, while upper values (growing downward) are used
for source locations loaded from AST/PCH files. Similarly, positive
FileIDs are used to describe parsed code while negative FileIDs are
used to file/macro locations loaded from AST/PCH files. As a result,
we can load PCH/AST files even during parsing, making various
improvemnts in the future possible, e.g., teaching #include <foo.h> to
look for and load <foo.h.gch> if it happens to be already available.
This patch was originally written by Sebastian Redl, then brought
forward to the modern age by Jonathan Turner, and finally
polished/finished by me to be committed.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@135484 91177308-0d34-0410-b5e6-96231b3b80d8
2011-07-19 20:10:42 +04:00
|
|
|
FileID BufferID = SourceMgr.createFileIDForMemBuffer(Buffer, ID,
|
|
|
|
BaseOffset + Offset);
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2009-04-29 00:33:11 +04:00
|
|
|
if (strcmp(Name, "<built-in>") == 0) {
|
2010-07-14 21:49:11 +04:00
|
|
|
PCHPredefinesBlock Block = {
|
|
|
|
BufferID,
|
2011-07-23 14:55:15 +04:00
|
|
|
StringRef(BlobStart, BlobLen - 1)
|
2010-07-14 21:49:11 +04:00
|
|
|
};
|
|
|
|
PCHPredefinesBuffers.push_back(Block);
|
2009-04-29 00:33:11 +04:00
|
|
|
}
|
2009-04-27 10:38:32 +04:00
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2011-07-15 11:25:21 +04:00
|
|
|
case SM_SLOC_EXPANSION_ENTRY: {
|
2010-10-05 19:59:54 +04:00
|
|
|
SourceLocation SpellingLoc = ReadSourceLocation(*F, Record[1]);
|
2011-07-26 07:03:05 +04:00
|
|
|
SourceMgr.createExpansionLoc(SpellingLoc,
|
2010-10-05 19:59:54 +04:00
|
|
|
ReadSourceLocation(*F, Record[2]),
|
|
|
|
ReadSourceLocation(*F, Record[3]),
|
2009-04-27 10:38:32 +04:00
|
|
|
Record[4],
|
|
|
|
ID,
|
Revamp the SourceManager to separate the representation of parsed
source locations from source locations loaded from an AST/PCH file.
Previously, loading an AST/PCH file involved carefully pre-allocating
space at the beginning of the source manager for the source locations
and FileIDs that correspond to the prefix, and then appending the
source locations/FileIDs used for parsing the remaining translation
unit. This design forced us into loading PCH files early, as a prefix,
whic has become a rather significant limitation.
This patch splits the SourceManager space into two parts: for source
location "addresses", the lower values (growing upward) are used to
describe parsed code, while upper values (growing downward) are used
for source locations loaded from AST/PCH files. Similarly, positive
FileIDs are used to describe parsed code while negative FileIDs are
used to file/macro locations loaded from AST/PCH files. As a result,
we can load PCH/AST files even during parsing, making various
improvemnts in the future possible, e.g., teaching #include <foo.h> to
look for and load <foo.h.gch> if it happens to be already available.
This patch was originally written by Sebastian Redl, then brought
forward to the modern age by Jonathan Turner, and finally
polished/finished by me to be committed.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@135484 91177308-0d34-0410-b5e6-96231b3b80d8
2011-07-19 20:10:42 +04:00
|
|
|
BaseOffset + Record[0]);
|
2009-04-27 10:38:32 +04:00
|
|
|
break;
|
2009-09-09 19:08:12 +04:00
|
|
|
}
|
2009-04-27 10:38:32 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
return Success;
|
|
|
|
}
|
|
|
|
|
Revamp the SourceManager to separate the representation of parsed
source locations from source locations loaded from an AST/PCH file.
Previously, loading an AST/PCH file involved carefully pre-allocating
space at the beginning of the source manager for the source locations
and FileIDs that correspond to the prefix, and then appending the
source locations/FileIDs used for parsing the remaining translation
unit. This design forced us into loading PCH files early, as a prefix,
whic has become a rather significant limitation.
This patch splits the SourceManager space into two parts: for source
location "addresses", the lower values (growing upward) are used to
describe parsed code, while upper values (growing downward) are used
for source locations loaded from AST/PCH files. Similarly, positive
FileIDs are used to describe parsed code while negative FileIDs are
used to file/macro locations loaded from AST/PCH files. As a result,
we can load PCH/AST files even during parsing, making various
improvemnts in the future possible, e.g., teaching #include <foo.h> to
look for and load <foo.h.gch> if it happens to be already available.
This patch was originally written by Sebastian Redl, then brought
forward to the modern age by Jonathan Turner, and finally
polished/finished by me to be committed.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@135484 91177308-0d34-0410-b5e6-96231b3b80d8
2011-07-19 20:10:42 +04:00
|
|
|
/// \brief Find the location where the module F is imported.
SourceLocation ASTReader::getImportLocation(Module *F) {
  if (F->ImportLoc.isValid())
    return F->ImportLoc;

  // Otherwise we have a PCH. It's considered to be "imported" at the first
  // location of its includer.
  if (F->ImportedBy.empty() || !F->ImportedBy[0]) {
    // Main file is the importer. We assume that it is the first entry in the
    // entry table. We can't ask the manager, because at the time of PCH
    // loading the main file entry doesn't exist yet.
    // The very first entry is the invalid instantiation loc, which takes up
    // offsets 0 and 1.
    return SourceLocation::getFromRawEncoding(2U);
  }
  //return F->Loaders[0]->FirstLoc;
  return F->ImportedBy[0]->FirstLoc;
}

/// ReadBlockAbbrevs - Enter a subblock of the specified BlockID with the
/// specified cursor. Read the abbreviations that are at the top of the block
/// and then leave the cursor pointing into the block.
bool ASTReader::ReadBlockAbbrevs(llvm::BitstreamCursor &Cursor,
                                 unsigned BlockID) {
  if (Cursor.EnterSubBlock(BlockID)) {
    Error("malformed block record in AST file");
    return Failure;
  }

  while (true) {
    uint64_t Offset = Cursor.GetCurrentBitNo();
    unsigned Code = Cursor.ReadCode();

    // We expect all abbrevs to be at the start of the block.
    if (Code != llvm::bitc::DEFINE_ABBREV) {
      Cursor.JumpToBit(Offset);
      return false;
    }
    Cursor.ReadAbbrevRecord();
  }
}

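/// \brief Read the macro record at the given offset within the module's macro
/// block, recreate the macro in the preprocessor, and append the tokens that
/// form its body.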
void ASTReader::ReadMacroRecord(Module &F, uint64_t Offset) {
  assert(PP && "Forgot to set Preprocessor ?");
  llvm::BitstreamCursor &Stream = F.MacroCursor;

  // Keep track of where we are in the stream, then jump back there
  // after reading this macro.
  SavedStreamPosition SavedPosition(Stream);

  Stream.JumpToBit(Offset);
  RecordData Record;
  SmallVector<IdentifierInfo*, 16> MacroArgs;
  MacroInfo *Macro = 0;

  while (true) {
    unsigned Code = Stream.ReadCode();
    switch (Code) {
    case llvm::bitc::END_BLOCK:
      return;

    case llvm::bitc::ENTER_SUBBLOCK:
      // No known subblocks, always skip them.
      Stream.ReadSubBlockID();
      if (Stream.SkipBlock()) {
        Error("malformed block record in AST file");
        return;
      }
      continue;

    case llvm::bitc::DEFINE_ABBREV:
      Stream.ReadAbbrevRecord();
      continue;
    default: break;
    }

    // Read a record.
    const char *BlobStart = 0;
    unsigned BlobLen = 0;
    Record.clear();
    PreprocessorRecordTypes RecType =
      (PreprocessorRecordTypes)Stream.ReadRecord(Code, Record, BlobStart,
                                                 BlobLen);
    switch (RecType) {
    case PP_MACRO_OBJECT_LIKE:
    case PP_MACRO_FUNCTION_LIKE: {
      // If we already have a macro, that means that we've hit the end
      // of the definition of the macro we were looking for. We're
      // done.
      if (Macro)
        return;

      IdentifierInfo *II = getLocalIdentifier(F, Record[0]);
      if (II == 0) {
        Error("macro must have a name in AST file");
        return;
      }
      SourceLocation Loc = ReadSourceLocation(F, Record[1]);
      bool isUsed = Record[2];

      MacroInfo *MI = PP->AllocateMacroInfo(Loc);
      MI->setIsUsed(isUsed);
      MI->setIsFromAST();

      unsigned NextIndex = 3;
      if (RecType == PP_MACRO_FUNCTION_LIKE) {
        // Decode function-like macro info.
        bool isC99VarArgs = Record[3];
        bool isGNUVarArgs = Record[4];
        MacroArgs.clear();
        unsigned NumArgs = Record[5];
        NextIndex = 6 + NumArgs;
        for (unsigned i = 0; i != NumArgs; ++i)
          MacroArgs.push_back(getLocalIdentifier(F, Record[6+i]));

        // Install function-like macro info.
        MI->setIsFunctionLike();
        if (isC99VarArgs) MI->setIsC99Varargs();
        if (isGNUVarArgs) MI->setIsGNUVarargs();
        MI->setArgumentList(MacroArgs.data(), MacroArgs.size(),
                            PP->getPreprocessorAllocator());
      }

      // Finally, install the macro.
      PP->setMacroInfo(II, MI);

      // Remember that we saw this macro last so that we add the tokens that
      // form its body to it.
      Macro = MI;

      if (NextIndex + 1 == Record.size() && PP->getPreprocessingRecord()) {
        // We have a macro definition. Load it now.
        PP->getPreprocessingRecord()->RegisterMacroDefinition(Macro,
              getLocalMacroDefinition(F, Record[NextIndex]));
      }

      ++NumMacrosRead;
      break;
    }

    case PP_TOKEN: {
      // If we see a TOKEN before a PP_MACRO_*, then the file is
      // erroneous, just pretend we didn't see this.
      if (Macro == 0) break;

      Token Tok;
      Tok.startToken();
      Tok.setLocation(ReadSourceLocation(F, Record[0]));
      Tok.setLength(Record[1]);
      if (IdentifierInfo *II = getLocalIdentifier(F, Record[2]))
        Tok.setIdentifierInfo(II);
      Tok.setKind((tok::TokenKind)Record[3]);
      Tok.setFlag((Token::TokenFlags)Record[4]);
      Macro->AddTokenToBody(Tok);
      break;
    }
    }
  }

  return;
}

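/// \brief Load a single preprocessed entity (a macro expansion, macro
/// definition, or inclusion directive) from the module's preprocessor detail
/// block.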
PreprocessedEntity *ASTReader::LoadPreprocessedEntity(Module &F) {
  assert(PP && "Forgot to set Preprocessor ?");
  unsigned Code = F.PreprocessorDetailCursor.ReadCode();
  switch (Code) {
  case llvm::bitc::END_BLOCK:
    return 0;

  case llvm::bitc::ENTER_SUBBLOCK:
    Error("unexpected subblock record in preprocessor detail block");
    return 0;

  case llvm::bitc::DEFINE_ABBREV:
    Error("unexpected abbreviation record in preprocessor detail block");
    return 0;

  default:
    break;
  }

  if (!PP->getPreprocessingRecord()) {
    Error("no preprocessing record");
    return 0;
  }

  // Read the record.
  PreprocessingRecord &PPRec = *PP->getPreprocessingRecord();
  const char *BlobStart = 0;
  unsigned BlobLen = 0;
  RecordData Record;
  PreprocessorDetailRecordTypes RecType =
    (PreprocessorDetailRecordTypes)F.PreprocessorDetailCursor.ReadRecord(
                                             Code, Record, BlobStart, BlobLen);
  switch (RecType) {
  case PPD_MACRO_EXPANSION: {
    PreprocessedEntityID GlobalID = getGlobalPreprocessedEntityID(F, Record[0]);
    if (PreprocessedEntity *PE = PPRec.getLoadedPreprocessedEntity(GlobalID-1))
      return PE;

    MacroExpansion *ME =
      new (PPRec) MacroExpansion(getLocalIdentifier(F, Record[3]),
                                 SourceRange(ReadSourceLocation(F, Record[1]),
                                             ReadSourceLocation(F, Record[2])),
                                 getLocalMacroDefinition(F, Record[4]));
    PPRec.setLoadedPreallocatedEntity(GlobalID - 1, ME);
    return ME;
  }

  case PPD_MACRO_DEFINITION: {
    PreprocessedEntityID GlobalID = getGlobalPreprocessedEntityID(F, Record[0]);
    if (PreprocessedEntity *PE = PPRec.getLoadedPreprocessedEntity(GlobalID-1))
      return PE;

    unsigned MacroDefID = getGlobalMacroDefinitionID(F, Record[1]);
    if (MacroDefID > MacroDefinitionsLoaded.size()) {
      Error("out-of-bounds macro definition record");
      return 0;
    }

    // Decode the identifier info and then check again; if the macro is
    // still defined and associated with the identifier,
    IdentifierInfo *II = getLocalIdentifier(F, Record[4]);
    if (!MacroDefinitionsLoaded[MacroDefID - 1]) {
      MacroDefinition *MD
        = new (PPRec) MacroDefinition(II,
                                      ReadSourceLocation(F, Record[5]),
                                      SourceRange(
                                            ReadSourceLocation(F, Record[2]),
                                            ReadSourceLocation(F, Record[3])));

      PPRec.setLoadedPreallocatedEntity(GlobalID - 1, MD);
      MacroDefinitionsLoaded[MacroDefID - 1] = MD;

      if (DeserializationListener)
        DeserializationListener->MacroDefinitionRead(MacroDefID, MD);
    }

    return MacroDefinitionsLoaded[MacroDefID - 1];
  }

  case PPD_INCLUSION_DIRECTIVE: {
    PreprocessedEntityID GlobalID = getGlobalPreprocessedEntityID(F, Record[0]);
    if (PreprocessedEntity *PE = PPRec.getLoadedPreprocessedEntity(GlobalID-1))
      return PE;

    const char *FullFileNameStart = BlobStart + Record[3];
    const FileEntry *File
      = PP->getFileManager().getFile(StringRef(FullFileNameStart,
                                               BlobLen - Record[3]));

    // FIXME: Stable encoding
    InclusionDirective::InclusionKind Kind
      = static_cast<InclusionDirective::InclusionKind>(Record[5]);
    InclusionDirective *ID
      = new (PPRec) InclusionDirective(PPRec, Kind,
                                       StringRef(BlobStart, Record[3]),
                                       Record[4],
                                       File,
                                       SourceRange(ReadSourceLocation(F, Record[1]),
                                                   ReadSourceLocation(F, Record[2])));
    PPRec.setLoadedPreallocatedEntity(GlobalID - 1, ID);
    return ID;
  }
  }

  Error("invalid offset in preprocessor detail block");
  return 0;
}

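/// \brief Map a preprocessed entity ID that is local to the given module onto
/// the corresponding global preprocessed entity ID.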
PreprocessedEntityID
ASTReader::getGlobalPreprocessedEntityID(Module &M, unsigned LocalID) {
  ContinuousRangeMap<uint32_t, int, 2>::iterator
    I = M.PreprocessedEntityRemap.find(LocalID - NUM_PREDEF_PP_ENTITY_IDS);
  assert(I != M.PreprocessedEntityRemap.end()
         && "Invalid index into preprocessed entity index remap");

  return LocalID + I->second;
}

namespace {
/// \brief Trait class used to search the on-disk hash table containing all of
/// the header search information.
///
/// The on-disk hash table contains a mapping from each header path to
/// information about that header (how many times it has been included, its
/// controlling macro, etc.). Note that we actually hash based on the
/// filename, and support "deep" comparisons of file names based on current
/// inode numbers, so that the search can cope with non-normalized path names
/// and symlinks.
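///
/// For example, a header reached as "Foo/Bar.h" and as "./Foo/Bar.h" hashes
/// into the same bucket, since only the filename "Bar.h" is hashed;
/// EqualKey() then confirms that the two paths name the same file by
/// comparing inode numbers.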
class HeaderFileInfoTrait {
  ASTReader &Reader;
  Module &M;
  HeaderSearch *HS;
  const char *FrameworkStrings;
  const char *SearchPath;
  struct stat SearchPathStatBuf;
  llvm::Optional<int> SearchPathStatResult;

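  // stat() the given path, memoizing the result for the current search path
  // so that repeated queries against it hit the file system only once.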
int StatSimpleCache(const char *Path, struct stat *StatBuf) {
|
|
|
|
if (Path == SearchPath) {
|
|
|
|
if (!SearchPathStatResult)
|
|
|
|
SearchPathStatResult = stat(Path, &SearchPathStatBuf);
|
|
|
|
|
|
|
|
*StatBuf = SearchPathStatBuf;
|
|
|
|
return *SearchPathStatResult;
|
|
|
|
}
|
|
|
|
|
|
|
|
return stat(Path, StatBuf);
|
|
|
|
}
|
|
|
|
|
|
|
|
public:
|
|
|
|
typedef const char *external_key_type;
|
|
|
|
typedef const char *internal_key_type;
|
|
|
|
|
|
|
|
typedef HeaderFileInfo data_type;
|
|
|
|
|
2011-07-29 00:55:49 +04:00
|
|
|
HeaderFileInfoTrait(ASTReader &Reader, Module &M, HeaderSearch *HS,
|
2011-07-28 08:50:02 +04:00
|
|
|
const char *FrameworkStrings,
|
|
|
|
const char *SearchPath = 0)
|
2011-07-29 00:55:49 +04:00
|
|
|
: Reader(Reader), M(M), HS(HS), FrameworkStrings(FrameworkStrings),
|
|
|
|
SearchPath(SearchPath) { }
|
Implement two related optimizations that make de-serialization of
AST/PCH files more lazy:
- Don't preload all of the file source-location entries when reading
the AST file. Instead, load them lazily, when needed.
- Only look up header-search information (whether a header was already
#import'd, how many times it's been included, etc.) when it's needed
by the preprocessor, rather than pre-populating it.
Previously, we would pre-load all of the file source-location entries,
which also populated the header-search information structure. This was
a relatively minor performance issue, since we would end up stat()'ing
all of the headers stored within a AST/PCH file when the AST/PCH file
was loaded. In the normal PCH use case, the stat()s were cached, so
the cost--of preloading ~860 source-location entries in the Cocoa.h
case---was relatively low.
However, the recent optimization that replaced stat+open with
open+fstat turned this into a major problem, since the preloading of
source-location entries would now end up opening those files. Worse,
those files wouldn't be closed until the file manager was destroyed,
so just opening a Cocoa.h PCH file would hold on to ~860 file
descriptors, and it was easy to blow through the process's limit on
the number of open file descriptors.
By eliminating the preloading of these files, we neither open nor stat
the headers stored in the PCH/AST file until they're actually needed
for something. Concretely, we went from
*** HeaderSearch Stats:
835 files tracked.
364 #import/#pragma once files.
823 included exactly once.
6 max times a file is included.
3 #include/#include_next/#import.
0 #includes skipped due to the multi-include optimization.
1 framework lookups.
0 subframework lookups.
*** Source Manager Stats:
835 files mapped, 3 mem buffers mapped.
37460 SLocEntry's allocated, 11215575B of Sloc address space used.
62 bytes of files mapped, 0 files with line #'s computed.
with a trivial program that uses a chained PCH including a Cocoa PCH
to
*** HeaderSearch Stats:
4 files tracked.
1 #import/#pragma once files.
3 included exactly once.
2 max times a file is included.
3 #include/#include_next/#import.
0 #includes skipped due to the multi-include optimization.
1 framework lookups.
0 subframework lookups.
*** Source Manager Stats:
3 files mapped, 3 mem buffers mapped.
37460 SLocEntry's allocated, 11215575B of Sloc address space used.
62 bytes of files mapped, 0 files with line #'s computed.
for the same program.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@125286 91177308-0d34-0410-b5e6-96231b3b80d8
2011-02-10 20:09:37 +03:00
|
|
|
|
|
|
|
static unsigned ComputeHash(const char *path) {
|
|
|
|
return llvm::HashString(llvm::sys::path::filename(path));
|
|
|
|
}
|
|
|
|
|
|
|
|
static internal_key_type GetInternalKey(const char *path) { return path; }
|
|
|
|
|
|
|
|
bool EqualKey(internal_key_type a, internal_key_type b) {
|
|
|
|
if (strcmp(a, b) == 0)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if (llvm::sys::path::filename(a) != llvm::sys::path::filename(b))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// The file names match, but the path names don't. stat() the files to
|
|
|
|
// see if they are the same.
|
|
|
|
struct stat StatBufA, StatBufB;
|
|
|
|
if (StatSimpleCache(a, &StatBufA) || StatSimpleCache(b, &StatBufB))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return StatBufA.st_ino == StatBufB.st_ino;
|
|
|
|
}
|
|
|
|
|
|
|
|
static std::pair<unsigned, unsigned>
|
|
|
|
ReadKeyDataLength(const unsigned char*& d) {
|
|
|
|
unsigned KeyLen = (unsigned) clang::io::ReadUnalignedLE16(d);
|
|
|
|
unsigned DataLen = (unsigned) *d++;
|
|
|
|
return std::make_pair(KeyLen + 1, DataLen);
|
|
|
|
}
|
|
|
|
|
|
|
|
static internal_key_type ReadKey(const unsigned char *d, unsigned) {
|
|
|
|
return (const char *)d;
|
|
|
|
}
|
|
|
|
|
2011-07-28 08:50:02 +04:00
|
|
|
data_type ReadData(const internal_key_type, const unsigned char *d,
|
|
|
|
unsigned DataLen) {
|
Implement two related optimizations that make de-serialization of
AST/PCH files more lazy:
- Don't preload all of the file source-location entries when reading
the AST file. Instead, load them lazily, when needed.
- Only look up header-search information (whether a header was already
#import'd, how many times it's been included, etc.) when it's needed
by the preprocessor, rather than pre-populating it.
Previously, we would pre-load all of the file source-location entries,
which also populated the header-search information structure. This was
a relatively minor performance issue, since we would end up stat()'ing
all of the headers stored within a AST/PCH file when the AST/PCH file
was loaded. In the normal PCH use case, the stat()s were cached, so
the cost--of preloading ~860 source-location entries in the Cocoa.h
case---was relatively low.
However, the recent optimization that replaced stat+open with
open+fstat turned this into a major problem, since the preloading of
source-location entries would now end up opening those files. Worse,
those files wouldn't be closed until the file manager was destroyed,
so just opening a Cocoa.h PCH file would hold on to ~860 file
descriptors, and it was easy to blow through the process's limit on
the number of open file descriptors.
By eliminating the preloading of these files, we neither open nor stat
the headers stored in the PCH/AST file until they're actually needed
for something. Concretely, we went from
*** HeaderSearch Stats:
835 files tracked.
364 #import/#pragma once files.
823 included exactly once.
6 max times a file is included.
3 #include/#include_next/#import.
0 #includes skipped due to the multi-include optimization.
1 framework lookups.
0 subframework lookups.
*** Source Manager Stats:
835 files mapped, 3 mem buffers mapped.
37460 SLocEntry's allocated, 11215575B of Sloc address space used.
62 bytes of files mapped, 0 files with line #'s computed.
with a trivial program that uses a chained PCH including a Cocoa PCH
to
*** HeaderSearch Stats:
4 files tracked.
1 #import/#pragma once files.
3 included exactly once.
2 max times a file is included.
3 #include/#include_next/#import.
0 #includes skipped due to the multi-include optimization.
1 framework lookups.
0 subframework lookups.
*** Source Manager Stats:
3 files mapped, 3 mem buffers mapped.
37460 SLocEntry's allocated, 11215575B of Sloc address space used.
62 bytes of files mapped, 0 files with line #'s computed.
for the same program.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@125286 91177308-0d34-0410-b5e6-96231b3b80d8
2011-02-10 20:09:37 +03:00
|
|
|
const unsigned char *End = d + DataLen;
|
|
|
|
using namespace clang::io;
|
|
|
|
HeaderFileInfo HFI;
|
|
|
|
unsigned Flags = *d++;
|
2011-07-28 08:50:02 +04:00
|
|
|
HFI.isImport = (Flags >> 5) & 0x01;
|
|
|
|
HFI.isPragmaOnce = (Flags >> 4) & 0x01;
|
|
|
|
HFI.DirInfo = (Flags >> 2) & 0x03;
|
|
|
|
HFI.Resolved = (Flags >> 1) & 0x01;
|
|
|
|
HFI.IndexHeaderMapHeader = Flags & 0x01;
|
2011-02-10 20:09:37 +03:00
|
|
|
HFI.NumIncludes = ReadUnalignedLE16(d);
|
2011-07-29 00:55:49 +04:00
|
|
|
HFI.ControllingMacroID = Reader.getGlobalDeclID(M, ReadUnalignedLE32(d));
|
2011-07-28 08:50:02 +04:00
|
|
|
if (unsigned FrameworkOffset = ReadUnalignedLE32(d)) {
|
|
|
|
// The framework offset is 1 greater than the actual offset,
|
|
|
|
// since 0 is used as an indicator for "no framework name".
|
|
|
|
StringRef FrameworkName(FrameworkStrings + FrameworkOffset - 1);
|
|
|
|
HFI.Framework = HS->getUniqueFrameworkName(FrameworkName);
|
|
|
|
}
|
|
|
|
|
2011-02-10 20:09:37 +03:00
|
|
|
assert(End == d && "Wrong data length in HeaderFileInfo deserialization");
|
|
|
|
(void)End;
|
2011-07-28 08:50:02 +04:00
|
|
|
|
2011-02-10 20:09:37 +03:00
|
|
|
// This HeaderFileInfo was externally loaded.
|
|
|
|
HFI.External = true;
|
|
|
|
return HFI;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
/// \brief The on-disk hash table used for header file information.
|
|
|
|
typedef OnDiskChainedHashTable<HeaderFileInfoTrait>
|
|
|
|
HeaderFileInfoLookupTable;
|
|
|
|
|
2011-07-22 20:00:58 +04:00
|
|
|
void ASTReader::SetIdentifierIsMacro(IdentifierInfo *II, Module &F,
|
2011-07-29 01:16:51 +04:00
|
|
|
uint64_t LocalOffset) {
|
2010-10-30 04:23:06 +04:00
|
|
|
// Note that this identifier has a macro definition.
|
|
|
|
II->setHasMacroDefinition(true);
|
|
|
|
|
Introduce a global bit-offset continuous range map into the ASTReader,
so that we have one, simple way to map from global bit offsets to
local bit offsets. Eliminates a number of loops over the chain, and
generalizes for more interesting bit remappings.
Also, as an amusing oddity, we were computing global bit offsets
*backwards* for preprocessed entities (e.g., the directly included PCH
file in the chain would start at offset zero, rather than the original
PCH that occurs first in the translation unit). Even more amusingly, it
made precompiled preambles work, because we were forgetting to adjust
the local bit offset to a global bit offset when storing preprocessed
entity offsets in the ASTUnit. Two wrongs made a right, and now
they're both right.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@135750 91177308-0d34-0410-b5e6-96231b3b80d8
2011-07-22 10:10:01 +04:00
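A minimal sketch of the lookup such a continuous range map enables (hypothetical types and names, not the reader's actual data structures): each loaded module file owns a contiguous range of global bit offsets beginning at its base, so resolving a global offset means finding the last base at or below it and subtracting.

#include <cassert>
#include <map>
#include <utility>

struct ModuleFile { unsigned long long GlobalBitOffset; /* cursors, etc. */ };

// Keyed by the global bit offset at which each module's data begins.
typedef std::map<unsigned long long, ModuleFile *> GlobalBitOffsetMap;

// Returns the module containing GlobalOffset and the offset local to it.
std::pair<ModuleFile *, unsigned long long>
resolveGlobalBitOffset(const GlobalBitOffsetMap &Map,
                       unsigned long long GlobalOffset) {
  GlobalBitOffsetMap::const_iterator I = Map.upper_bound(GlobalOffset);
  assert(I != Map.begin() && "offset precedes the first loaded module");
  --I; // last entry whose base is <= GlobalOffset
  return std::make_pair(I->second, GlobalOffset - I->first);
}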
|
|
|
// Adjust the offset to a global offset.
|
2011-07-29 01:16:51 +04:00
|
|
|
UnreadMacroRecordOffsets[II] = F.GlobalBitOffset + LocalOffset;
|
2010-10-30 04:23:06 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:56:43 +04:00
|
|
|
void ASTReader::ReadDefinedMacros() {
|
2011-07-26 00:32:21 +04:00
|
|
|
for (ModuleReverseIterator I = ModuleMgr.rbegin(),
|
|
|
|
E = ModuleMgr.rend(); I != E; ++I) {
|
|
|
|
llvm::BitstreamCursor &MacroCursor = (*I)->MacroCursor;
|
2010-03-07 22:10:13 +03:00
|
|
|
|
2010-07-22 02:31:37 +04:00
|
|
|
// If there was no preprocessor block, skip this file.
|
|
|
|
if (!MacroCursor.getBitStreamReader())
|
|
|
|
continue;
|
2010-03-07 22:10:13 +03:00
|
|
|
|
2010-07-22 02:31:37 +04:00
|
|
|
llvm::BitstreamCursor Cursor = MacroCursor;
|
2011-07-26 00:32:21 +04:00
|
|
|
Cursor.JumpToBit((*I)->MacroStartOffset);
|
2010-10-21 07:16:25 +04:00
|
|
|
|
2010-07-22 02:31:37 +04:00
|
|
|
RecordData Record;
|
|
|
|
while (true) {
|
|
|
|
unsigned Code = Cursor.ReadCode();
|
2010-10-21 02:00:55 +04:00
|
|
|
if (Code == llvm::bitc::END_BLOCK)
|
2010-07-22 02:31:37 +04:00
|
|
|
break;
|
2010-03-07 22:10:13 +03:00
|
|
|
|
2010-07-22 02:31:37 +04:00
|
|
|
if (Code == llvm::bitc::ENTER_SUBBLOCK) {
|
|
|
|
// No known subblocks, always skip them.
|
|
|
|
Cursor.ReadSubBlockID();
|
|
|
|
if (Cursor.SkipBlock()) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("malformed block record in AST file");
|
2010-07-22 02:31:37 +04:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
2010-03-07 22:10:13 +03:00
|
|
|
|
2010-07-22 02:31:37 +04:00
|
|
|
if (Code == llvm::bitc::DEFINE_ABBREV) {
|
|
|
|
Cursor.ReadAbbrevRecord();
|
|
|
|
continue;
|
|
|
|
}
|
2010-03-07 22:10:13 +03:00
|
|
|
|
2010-07-22 02:31:37 +04:00
|
|
|
// Read a record.
|
|
|
|
const char *BlobStart;
|
|
|
|
unsigned BlobLen;
|
|
|
|
Record.clear();
|
|
|
|
switch (Cursor.ReadRecord(Code, Record, &BlobStart, &BlobLen)) {
|
|
|
|
default: // Default behavior: ignore.
|
|
|
|
break;
|
2010-01-04 22:18:44 +03:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case PP_MACRO_OBJECT_LIKE:
|
|
|
|
case PP_MACRO_FUNCTION_LIKE:
|
2011-07-29 00:55:49 +04:00
|
|
|
getLocalIdentifier(**I, Record[0]);
|
2010-07-22 02:31:37 +04:00
|
|
|
break;
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case PP_TOKEN:
|
2010-07-22 02:31:37 +04:00
|
|
|
// Ignore tokens.
|
|
|
|
break;
|
|
|
|
}
|
2010-01-04 22:18:44 +03:00
|
|
|
}
|
|
|
|
}
|
2010-10-30 04:23:06 +04:00
|
|
|
|
|
|
|
// Drain the unread macro-record offsets map.
|
|
|
|
while (!UnreadMacroRecordOffsets.empty())
|
|
|
|
LoadMacroDefinition(UnreadMacroRecordOffsets.begin());
|
|
|
|
}
|
|
|
|
|
|
|
|
void ASTReader::LoadMacroDefinition(
|
|
|
|
llvm::DenseMap<IdentifierInfo *, uint64_t>::iterator Pos) {
|
|
|
|
assert(Pos != UnreadMacroRecordOffsets.end() && "Unknown macro definition");
|
|
|
|
uint64_t Offset = Pos->second;
|
|
|
|
UnreadMacroRecordOffsets.erase(Pos);
|
|
|
|
|
2011-07-22 10:10:01 +04:00
|
|
|
RecordLocation Loc = getLocalBitOffset(Offset);
|
|
|
|
ReadMacroRecord(*Loc.F, Loc.Offset);
|
2010-10-30 04:23:06 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
void ASTReader::LoadMacroDefinition(IdentifierInfo *II) {
|
|
|
|
llvm::DenseMap<IdentifierInfo *, uint64_t>::iterator Pos
|
|
|
|
= UnreadMacroRecordOffsets.find(II);
|
|
|
|
LoadMacroDefinition(Pos);
|
2010-01-04 22:18:44 +03:00
|
|
|
}
|
|
|
|
|
2010-09-15 23:54:06 +04:00
|
|
|
MacroDefinition *ASTReader::getMacroDefinition(MacroID ID) {
|
2010-10-02 23:29:26 +04:00
|
|
|
if (ID == 0 || ID > MacroDefinitionsLoaded.size())
|
2010-03-20 00:51:54 +03:00
|
|
|
return 0;
|
2010-07-22 02:31:37 +04:00
|
|
|
|
2010-10-02 23:29:26 +04:00
|
|
|
if (!MacroDefinitionsLoaded[ID - 1]) {
|
2011-07-20 05:29:15 +04:00
|
|
|
GlobalMacroDefinitionMapType::iterator I = GlobalMacroDefinitionMap.find(ID);
|
|
|
|
assert(I != GlobalMacroDefinitionMap.end() &&
|
|
|
|
"Corrupted global macro definition map");
|
2011-07-29 04:56:45 +04:00
|
|
|
Module &M = *I->second;
|
|
|
|
unsigned Index = ID - 1 - M.BaseMacroDefinitionID;
|
|
|
|
SavedStreamPosition SavedPosition(M.PreprocessorDetailCursor);
|
|
|
|
M.PreprocessorDetailCursor.JumpToBit(M.MacroDefinitionOffsets[Index]);
|
|
|
|
LoadPreprocessedEntity(M);
|
2010-07-22 02:31:37 +04:00
|
|
|
}
|
|
|
|
|
2010-10-02 23:29:26 +04:00
|
|
|
return MacroDefinitionsLoaded[ID - 1];
|
2010-03-20 00:51:54 +03:00
|
|
|
}
|
|
|
|
|
2011-07-23 14:55:15 +04:00
|
|
|
const FileEntry *ASTReader::getFileEntry(StringRef filenameStrRef) {
|
2011-06-01 09:43:53 +04:00
|
|
|
std::string Filename = filenameStrRef;
|
|
|
|
MaybeAddSystemRootToFilename(Filename);
|
|
|
|
const FileEntry *File = FileMgr.getFile(Filename);
|
|
|
|
if (File == 0 && !OriginalDir.empty() && !CurrentDir.empty() &&
|
|
|
|
OriginalDir != CurrentDir) {
|
|
|
|
std::string resolved = resolveFileRelativeToOriginalDir(Filename,
|
|
|
|
OriginalDir,
|
|
|
|
CurrentDir);
|
|
|
|
if (!resolved.empty())
|
|
|
|
File = FileMgr.getFile(resolved);
|
|
|
|
}
|
|
|
|
|
|
|
|
return File;
|
|
|
|
}
|
|
|
|
|
2011-07-29 02:16:57 +04:00
|
|
|
MacroID ASTReader::getGlobalMacroDefinitionID(Module &M, unsigned LocalID) {
|
2011-08-04 20:36:56 +04:00
|
|
|
if (LocalID < NUM_PREDEF_MACRO_IDS)
|
|
|
|
return LocalID;
|
|
|
|
|
|
|
|
ContinuousRangeMap<uint32_t, int, 2>::iterator I
|
|
|
|
= M.MacroDefinitionRemap.find(LocalID - NUM_PREDEF_MACRO_IDS);
|
|
|
|
assert(I != M.MacroDefinitionRemap.end() &&
|
|
|
|
"Invalid index into macro definition ID remap");
|
|
|
|
|
|
|
|
return LocalID + I->second;
|
2011-07-29 02:16:57 +04:00
|
|
|
}
|
|
|
|
|
2009-07-07 04:12:59 +04:00
|
|
|
/// \brief If we are loading a relocatable PCH file, and the filename is
|
|
|
|
/// not an absolute path, add the system root to the beginning of the file
|
|
|
|
/// name.
|
2010-08-19 03:56:43 +04:00
|
|
|
void ASTReader::MaybeAddSystemRootToFilename(std::string &Filename) {
|
2009-07-07 04:12:59 +04:00
|
|
|
// If this is not a relocatable PCH file, there's nothing to do.
|
|
|
|
if (!RelocatablePCH)
|
|
|
|
return;
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2010-12-18 00:22:22 +03:00
|
|
|
if (Filename.empty() || llvm::sys::path::is_absolute(Filename))
|
2009-07-07 04:12:59 +04:00
|
|
|
return;
|
|
|
|
|
2011-07-22 20:35:34 +04:00
|
|
|
if (isysroot.empty()) {
|
2009-07-07 04:12:59 +04:00
|
|
|
// If no system root was given, default to '/'
|
|
|
|
Filename.insert(Filename.begin(), '/');
|
|
|
|
return;
|
|
|
|
}
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2011-07-22 20:35:34 +04:00
|
|
|
unsigned Length = isysroot.size();
|
2009-07-07 04:12:59 +04:00
|
|
|
if (isysroot[Length - 1] != '/')
|
|
|
|
Filename.insert(Filename.begin(), '/');
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2011-07-22 20:35:34 +04:00
|
|
|
Filename.insert(Filename.begin(), isysroot.begin(), isysroot.end());
|
2009-07-07 04:12:59 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:56:43 +04:00
|
|
|
ASTReader::ASTReadResult
|
2011-07-22 20:00:58 +04:00
|
|
|
ASTReader::ReadASTBlock(Module &F) {
|
2010-07-16 21:50:48 +04:00
|
|
|
llvm::BitstreamCursor &Stream = F.Stream;
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
if (Stream.EnterSubBlock(AST_BLOCK_ID)) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("malformed block record in AST file");
|
2009-04-11 00:39:37 +04:00
|
|
|
return Failure;
|
|
|
|
}
|
2009-04-10 02:27:44 +04:00
|
|
|
|
2010-08-19 03:57:06 +04:00
|
|
|
// Read all of the records and blocks for the AST file.
|
2009-04-10 02:27:44 +04:00
|
|
|
RecordData Record;
|
2010-07-20 00:52:06 +04:00
|
|
|
bool First = true;
|
2009-04-10 21:25:41 +04:00
|
|
|
while (!Stream.AtEndOfStream()) {
|
2009-04-10 02:27:44 +04:00
|
|
|
unsigned Code = Stream.ReadCode();
|
|
|
|
if (Code == llvm::bitc::END_BLOCK) {
|
2009-04-11 00:39:37 +04:00
|
|
|
if (Stream.ReadBlockEnd()) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("error at end of module block in AST file");
|
2009-04-11 00:39:37 +04:00
|
|
|
return Failure;
|
|
|
|
}
|
2009-04-12 01:15:38 +04:00
|
|
|
|
2009-04-11 00:39:37 +04:00
|
|
|
return Success;
|
2009-04-10 02:27:44 +04:00
|
|
|
}
|
2009-04-10 21:25:41 +04:00
|
|
|
|
2009-04-10 02:27:44 +04:00
|
|
|
if (Code == llvm::bitc::ENTER_SUBBLOCK) {
|
2009-04-10 21:25:41 +04:00
|
|
|
switch (Stream.ReadSubBlockID()) {
|
2010-08-19 03:57:32 +04:00
|
|
|
case DECLTYPES_BLOCK_ID:
|
2009-04-27 05:05:14 +04:00
|
|
|
// We lazily load the decls block, but we want to set up the
|
|
|
|
// DeclsCursor cursor to point into it. Clone our current bitcode
|
|
|
|
// cursor to it, enter the block and read the abbrevs in that block.
|
|
|
|
// With the main cursor, we just skip over it.
|
2010-07-16 21:50:48 +04:00
|
|
|
F.DeclsCursor = Stream;
|
2009-04-27 05:05:14 +04:00
|
|
|
if (Stream.SkipBlock() || // Skip with the main cursor.
|
|
|
|
// Read the abbrevs.
|
2010-08-19 03:57:32 +04:00
|
|
|
ReadBlockAbbrevs(F.DeclsCursor, DECLTYPES_BLOCK_ID)) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("malformed block record in AST file");
|
2009-04-27 05:05:14 +04:00
|
|
|
return Failure;
|
|
|
|
}
|
|
|
|
break;
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2010-10-24 21:26:36 +04:00
|
|
|
case DECL_UPDATES_BLOCK_ID:
|
|
|
|
if (Stream.SkipBlock()) {
|
|
|
|
Error("malformed block record in AST file");
|
|
|
|
return Failure;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case PREPROCESSOR_BLOCK_ID:
|
2010-07-16 21:50:48 +04:00
|
|
|
F.MacroCursor = Stream;
|
2010-01-04 22:18:44 +03:00
|
|
|
if (PP)
|
|
|
|
PP->setExternalSource(this);
|
|
|
|
|
2010-10-21 02:00:55 +04:00
|
|
|
if (Stream.SkipBlock() ||
|
|
|
|
ReadBlockAbbrevs(F.MacroCursor, PREPROCESSOR_BLOCK_ID)) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("malformed block record in AST file");
|
2009-04-12 01:15:38 +04:00
|
|
|
return Failure;
|
|
|
|
}
|
2010-10-21 02:00:55 +04:00
|
|
|
F.MacroStartOffset = F.MacroCursor.GetCurrentBitNo();
|
2009-04-12 01:15:38 +04:00
|
|
|
break;
|
2009-04-23 14:39:46 +04:00
|
|
|
|
2011-02-09 00:58:10 +03:00
|
|
|
case PREPROCESSOR_DETAIL_BLOCK_ID:
|
|
|
|
F.PreprocessorDetailCursor = Stream;
|
|
|
|
if (Stream.SkipBlock() ||
|
|
|
|
ReadBlockAbbrevs(F.PreprocessorDetailCursor,
|
|
|
|
PREPROCESSOR_DETAIL_BLOCK_ID)) {
|
|
|
|
Error("malformed preprocessor detail record in AST file");
|
|
|
|
return Failure;
|
|
|
|
}
|
|
|
|
F.PreprocessorDetailStartOffset
|
|
|
|
= F.PreprocessorDetailCursor.GetCurrentBitNo();
|
|
|
|
break;
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case SOURCE_MANAGER_BLOCK_ID:
|
2010-07-20 00:52:06 +04:00
|
|
|
switch (ReadSourceManagerBlock(F)) {
|
2009-04-11 03:10:45 +04:00
|
|
|
case Success:
|
|
|
|
break;
|
|
|
|
|
|
|
|
case Failure:
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("malformed source manager block in AST file");
|
2009-04-11 00:39:37 +04:00
|
|
|
return Failure;
|
2009-04-11 03:10:45 +04:00
|
|
|
|
|
|
|
case IgnorePCH:
|
|
|
|
return IgnorePCH;
|
2009-04-11 00:39:37 +04:00
|
|
|
}
|
2009-04-10 21:25:41 +04:00
|
|
|
break;
|
|
|
|
}
|
2010-07-20 00:52:06 +04:00
|
|
|
First = false;
|
2009-04-10 02:27:44 +04:00
|
|
|
continue;
|
|
|
|
}
|
2009-04-10 21:25:41 +04:00
|
|
|
|
2009-04-10 02:27:44 +04:00
|
|
|
if (Code == llvm::bitc::DEFINE_ABBREV) {
|
|
|
|
Stream.ReadAbbrevRecord();
|
|
|
|
continue;
|
|
|
|
}
|
2009-04-10 21:25:41 +04:00
|
|
|
|
|
|
|
// Read and process a record.
|
2009-04-10 02:27:44 +04:00
|
|
|
Record.clear();
|
2009-04-11 01:16:55 +04:00
|
|
|
const char *BlobStart = 0;
|
|
|
|
unsigned BlobLen = 0;
|
2010-08-19 03:57:32 +04:00
|
|
|
switch ((ASTRecordTypes)Stream.ReadRecord(Code, Record,
|
2010-10-05 19:59:54 +04:00
|
|
|
&BlobStart, &BlobLen)) {
|
2009-04-10 02:27:44 +04:00
|
|
|
default: // Default behavior: ignore.
|
|
|
|
break;
|
2009-04-10 21:25:41 +04:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case METADATA: {
|
|
|
|
if (Record[0] != VERSION_MAJOR && !DisableValidation) {
|
|
|
|
Diag(Record[0] < VERSION_MAJOR ? diag::warn_pch_version_too_old
|
2010-07-17 04:12:06 +04:00
|
|
|
: diag::warn_pch_version_too_new);
|
|
|
|
return IgnorePCH;
|
|
|
|
}
|
|
|
|
|
|
|
|
RelocatablePCH = Record[4];
|
|
|
|
if (Listener) {
|
|
|
|
std::string TargetTriple(BlobStart, BlobLen);
|
|
|
|
if (Listener->ReadTargetTriple(TargetTriple))
|
|
|
|
return IgnorePCH;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case CHAINED_METADATA: {
|
2010-07-20 00:52:06 +04:00
|
|
|
if (!First) {
|
|
|
|
Error("CHAINED_METADATA is not first record in block");
|
|
|
|
return Failure;
|
|
|
|
}
|
2010-08-19 03:57:32 +04:00
|
|
|
if (Record[0] != VERSION_MAJOR && !DisableValidation) {
|
|
|
|
Diag(Record[0] < VERSION_MAJOR ? diag::warn_pch_version_too_old
|
2010-07-17 04:12:06 +04:00
|
|
|
: diag::warn_pch_version_too_new);
|
|
|
|
return IgnorePCH;
|
|
|
|
}
|
|
|
|
|
2010-10-05 20:15:19 +04:00
|
|
|
// Load the chained file, which is always a PCH file.
|
2011-07-22 20:00:58 +04:00
|
|
|
// FIXME: This could end up being a module.
|
2011-07-23 14:55:15 +04:00
|
|
|
switch (ReadASTCore(StringRef(BlobStart, BlobLen), MK_PCH)) {
|
2010-07-17 04:12:06 +04:00
|
|
|
case Failure: return Failure;
|
|
|
|
// If we have to ignore the dependency, we'll have to ignore this too.
|
|
|
|
case IgnorePCH: return IgnorePCH;
|
|
|
|
case Success: break;
|
2011-07-22 10:10:01 +04:00
|
|
|
}
|
2010-07-17 04:12:06 +04:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2011-08-02 20:26:37 +04:00
|
|
|
case TYPE_OFFSET: {
|
2010-07-20 02:06:55 +04:00
|
|
|
if (F.LocalNumTypes != 0) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("duplicate TYPE_OFFSET record in AST file");
|
2009-04-11 00:39:37 +04:00
|
|
|
return Failure;
|
|
|
|
}
|
2010-07-20 02:06:55 +04:00
|
|
|
F.TypeOffsets = (const uint32_t *)BlobStart;
|
|
|
|
F.LocalNumTypes = Record[0];
|
Change the hashing function for DeclContext lookup within an AST file
by eliminating the type ID from constructor, destructor, and
conversion function names. There are several reasons for this change:
- A given type (say, int*) isn't guaranteed to have a single, unique
type ID within a chain of PCH files. Hence, we could end up hashing
based on the wrong type ID, causing name lookup to fail.
- The mapping from types back to type IDs required one DenseMap
entry for every type that was ever deserialized, which was an
unacceptable cost to support just the name lookup of constructors,
destructors, and conversion functions. Plus, this mapping could
never actually work with chained or multiple PCH, based on the first
bullet.
Once we have eliminated the type from the hash function, these
problems go away, as does my horrible "reverse type remap" hack, which
was doomed from the start (see bullet #1 above) and far too
complicated.
However, note that removing the type from the hash function means that
all constructors, destructors, and conversion functions have the same
hash key, so I've updated the caller to double-check that the
declarations found have the appropriate name.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@136708 91177308-0d34-0410-b5e6-96231b3b80d8
2011-08-02 22:32:54 +04:00
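A rough sketch of the hashing scheme described above (hypothetical types; not the reader's actual lookup trait): constructors, destructors, and conversion functions hash only on their name kind, never on a type ID, so the key stays stable across AST files; the price is that all such names share one bucket, which is why callers must re-check the names of the declarations they get back.

#include <string>

enum NameKind { NK_Identifier, NK_Constructor, NK_Destructor, NK_Conversion };

struct LookupKey {
  NameKind Kind;
  std::string Identifier; // only meaningful for NK_Identifier
};

unsigned computeHash(const LookupKey &Key) {
  // Special names: one bucket per kind, independent of any type ID.
  if (Key.Kind != NK_Identifier)
    return static_cast<unsigned>(Key.Kind);

  // Ordinary identifiers: hash the spelling (djb2-style).
  unsigned Hash = 5381;
  for (std::string::size_type I = 0, N = Key.Identifier.size(); I != N; ++I)
    Hash = Hash * 33 + static_cast<unsigned char>(Key.Identifier[I]);
  return Hash;
}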
|
|
|
unsigned LocalBaseTypeIndex = Record[1];
|
|
|
|
F.BaseTypeIndex = getTotalNumTypes();
|
2011-07-29 04:21:44 +04:00
|
|
|
|
2011-08-02 20:26:37 +04:00
|
|
|
if (F.LocalNumTypes > 0) {
|
|
|
|
// Introduce the global -> local mapping for types within this module.
|
|
|
|
GlobalTypeMap.insert(std::make_pair(getTotalNumTypes(), &F));
|
|
|
|
|
|
|
|
// Introduce the local -> global mapping for types within this module.
|
2011-08-02 22:32:54 +04:00
|
|
|
F.TypeRemap.insert(std::make_pair(LocalBaseTypeIndex,
|
|
|
|
F.BaseTypeIndex - LocalBaseTypeIndex));
|
2011-08-02 20:26:37 +04:00
|
|
|
|
|
|
|
TypesLoaded.resize(TypesLoaded.size() + F.LocalNumTypes);
|
|
|
|
}
|
2009-04-10 02:27:44 +04:00
|
|
|
break;
|
2011-08-02 20:26:37 +04:00
|
|
|
}
|
|
|
|
|
2011-08-03 19:48:04 +04:00
|
|
|
case DECL_OFFSET: {
|
2010-07-20 02:06:55 +04:00
|
|
|
if (F.LocalNumDecls != 0) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("duplicate DECL_OFFSET record in AST file");
|
2009-04-11 00:39:37 +04:00
|
|
|
return Failure;
|
|
|
|
}
|
2010-07-20 02:06:55 +04:00
|
|
|
F.DeclOffsets = (const uint32_t *)BlobStart;
|
|
|
|
F.LocalNumDecls = Record[0];
|
2011-08-03 19:48:04 +04:00
|
|
|
unsigned LocalBaseDeclID = Record[1];
|
2011-07-29 04:56:45 +04:00
|
|
|
F.BaseDeclID = getTotalNumDecls();
|
2011-07-20 04:27:43 +04:00
|
|
|
|
2011-08-03 19:48:04 +04:00
|
|
|
if (F.LocalNumDecls > 0) {
|
|
|
|
// Introduce the global -> local mapping for declarations within this
|
|
|
|
// module.
|
2011-08-12 04:15:20 +04:00
|
|
|
GlobalDeclMap.insert(
|
|
|
|
std::make_pair(getTotalNumDecls() + NUM_PREDEF_DECL_IDS, &F));
|
2011-08-03 19:48:04 +04:00
|
|
|
|
|
|
|
// Introduce the local -> global mapping for declarations within this
|
|
|
|
// module.
|
|
|
|
F.DeclRemap.insert(std::make_pair(LocalBaseDeclID,
|
|
|
|
F.BaseDeclID - LocalBaseDeclID));
|
|
|
|
|
|
|
|
DeclsLoaded.resize(DeclsLoaded.size() + F.LocalNumDecls);
|
|
|
|
}
|
2009-04-10 02:27:44 +04:00
|
|
|
break;
|
2011-08-03 19:48:04 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TU_UPDATE_LEXICAL: {
|
2010-07-27 22:24:41 +04:00
|
|
|
DeclContextInfo Info = {
|
2011-07-22 03:29:11 +04:00
|
|
|
&F,
|
2010-08-20 20:04:35 +04:00
|
|
|
/* No visible information */ 0,
|
2010-10-15 00:14:34 +04:00
|
|
|
reinterpret_cast<const KindDeclIDPair *>(BlobStart),
|
2011-07-28 18:20:37 +04:00
|
|
|
static_cast<unsigned int>(BlobLen / sizeof(KindDeclIDPair))
|
2010-07-27 22:24:41 +04:00
|
|
|
};
|
2011-08-12 04:15:20 +04:00
|
|
|
|
|
|
|
DeclContext *TU = Context ? Context->getTranslationUnitDecl() : 0;
|
|
|
|
DeclContextOffsets[TU].push_back(Info);
|
|
|
|
if (TU)
|
|
|
|
TU->setHasExternalLexicalStorage(true);
|
|
|
|
|
2010-07-27 22:24:41 +04:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2010-08-24 04:50:04 +04:00
|
|
|
case UPDATE_VISIBLE: {
|
2011-08-03 19:48:04 +04:00
|
|
|
unsigned Idx = 0;
|
|
|
|
serialization::DeclID ID = ReadDeclID(F, Record, Idx);
|
2010-08-24 04:50:04 +04:00
|
|
|
void *Table = ASTDeclContextNameLookupTable::Create(
|
2011-08-03 19:48:04 +04:00
|
|
|
(const unsigned char *)BlobStart + Record[Idx++],
|
2010-08-24 04:50:04 +04:00
|
|
|
(const unsigned char *)BlobStart,
|
2011-07-22 04:38:23 +04:00
|
|
|
ASTDeclContextNameLookupTrait(*this, F));
|
2011-08-03 19:48:04 +04:00
|
|
|
// FIXME: Complete hack to check for the TU
|
2011-08-12 04:15:20 +04:00
|
|
|
if (ID == PREDEF_DECL_TRANSLATION_UNIT_ID && Context) { // Is it the TU?
|
2010-08-24 04:50:04 +04:00
|
|
|
DeclContextInfo Info = {
|
2011-08-03 19:48:04 +04:00
|
|
|
&F, Table, /* No lexical information */ 0, 0
|
2010-08-24 04:50:04 +04:00
|
|
|
};
|
2011-08-12 04:15:20 +04:00
|
|
|
|
|
|
|
DeclContext *TU = Context->getTranslationUnitDecl();
|
|
|
|
DeclContextOffsets[TU].push_back(Info);
|
|
|
|
TU->setHasExternalVisibleStorage(true);
|
2010-08-24 04:50:04 +04:00
|
|
|
} else
|
2011-08-03 19:48:04 +04:00
|
|
|
PendingVisibleUpdates[ID].push_back(std::make_pair(Table, &F));
|
2010-08-24 04:50:04 +04:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case REDECLS_UPDATE_LATEST: {
|
2010-08-03 21:30:10 +04:00
|
|
|
assert(Record.size() % 2 == 0 && "Expected pairs of DeclIDs");
|
2011-08-03 19:48:04 +04:00
|
|
|
for (unsigned i = 0, e = Record.size(); i < e; /* in loop */) {
|
|
|
|
DeclID First = ReadDeclID(F, Record, i);
|
|
|
|
DeclID Latest = ReadDeclID(F, Record, i);
|
2010-08-03 21:30:10 +04:00
|
|
|
FirstLatestDeclIDs[First] = Latest;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case LANGUAGE_OPTIONS:
|
2010-07-27 04:27:13 +04:00
|
|
|
if (ParseLanguageOptions(Record) && !DisableValidation)
|
2009-04-11 00:39:37 +04:00
|
|
|
return IgnorePCH;
|
|
|
|
break;
|
2009-04-11 01:16:55 +04:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case IDENTIFIER_TABLE:
|
2010-07-20 00:52:06 +04:00
|
|
|
F.IdentifierTableData = BlobStart;
|
2009-04-25 23:10:14 +04:00
|
|
|
if (Record[0]) {
|
2010-07-20 00:52:06 +04:00
|
|
|
F.IdentifierLookupTable
|
2010-08-19 03:57:06 +04:00
|
|
|
= ASTIdentifierLookupTable::Create(
|
2010-07-20 00:52:06 +04:00
|
|
|
(const unsigned char *)F.IdentifierTableData + Record[0],
|
|
|
|
(const unsigned char *)F.IdentifierTableData,
|
2010-10-05 19:59:54 +04:00
|
|
|
ASTIdentifierLookupTrait(*this, F));
|
2011-07-21 22:46:38 +04:00
|
|
|
if (PP) {
|
2009-06-19 04:03:23 +04:00
|
|
|
PP->getIdentifierTable().setExternalIdentifierLookup(this);
|
2011-07-21 22:46:38 +04:00
|
|
|
PP->getHeaderSearchInfo().SetExternalLookup(this);
|
|
|
|
}
|
2009-04-25 23:10:14 +04:00
|
|
|
}
|
2009-04-11 04:14:32 +04:00
|
|
|
break;
|
|
|
|
|
2011-08-04 01:49:18 +04:00
|
|
|
case IDENTIFIER_OFFSET: {
|
2010-07-20 02:28:42 +04:00
|
|
|
if (F.LocalNumIdentifiers != 0) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("duplicate IDENTIFIER_OFFSET record in AST file");
|
2009-04-11 04:14:32 +04:00
|
|
|
return Failure;
|
|
|
|
}
|
2010-07-20 02:28:42 +04:00
|
|
|
F.IdentifierOffsets = (const uint32_t *)BlobStart;
|
|
|
|
F.LocalNumIdentifiers = Record[0];
|
2011-08-04 01:49:18 +04:00
|
|
|
unsigned LocalBaseIdentifierID = Record[1];
|
2011-07-29 04:56:45 +04:00
|
|
|
F.BaseIdentifierID = getTotalNumIdentifiers();
|
2011-07-20 04:59:32 +04:00
|
|
|
|
2011-08-04 01:49:18 +04:00
|
|
|
if (F.LocalNumIdentifiers > 0) {
|
|
|
|
// Introduce the global -> local mapping for identifiers within this
|
|
|
|
// module.
|
|
|
|
GlobalIdentifierMap.insert(std::make_pair(getTotalNumIdentifiers() + 1,
|
|
|
|
&F));
|
|
|
|
|
|
|
|
// Introduce the local -> global mapping for identifiers within this
|
|
|
|
// module.
|
|
|
|
F.IdentifierRemap.insert(
|
|
|
|
std::make_pair(LocalBaseIdentifierID,
|
|
|
|
F.BaseIdentifierID - LocalBaseIdentifierID));
|
|
|
|
|
|
|
|
IdentifiersLoaded.resize(IdentifiersLoaded.size()
|
|
|
|
+ F.LocalNumIdentifiers);
|
|
|
|
}
|
2009-04-11 04:14:32 +04:00
|
|
|
break;
|
2011-08-04 01:49:18 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case EXTERNAL_DEFINITIONS:
|
2011-07-22 02:35:25 +04:00
|
|
|
for (unsigned I = 0, N = Record.size(); I != N; ++I)
|
|
|
|
ExternalDefinitions.push_back(getGlobalDeclID(F, Record[I]));
|
2009-04-14 04:24:19 +04:00
|
|
|
break;
|
2009-04-18 02:13:46 +04:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case SPECIAL_TYPES:
|
2011-07-22 04:38:23 +04:00
|
|
|
for (unsigned I = 0, N = Record.size(); I != N; ++I)
|
|
|
|
SpecialTypes.push_back(getGlobalTypeID(F, Record[I]));
|
2009-04-18 09:55:16 +04:00
|
|
|
break;
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case STATISTICS:
|
2010-07-21 01:20:32 +04:00
|
|
|
TotalNumStatements += Record[0];
|
|
|
|
TotalNumMacros += Record[1];
|
|
|
|
TotalLexicalDeclContexts += Record[2];
|
|
|
|
TotalVisibleDeclContexts += Record[3];
|
2009-04-18 02:13:46 +04:00
|
|
|
break;
|
2009-04-27 10:38:32 +04:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case UNUSED_FILESCOPED_DECLS:
|
2011-07-22 02:35:25 +04:00
|
|
|
for (unsigned I = 0, N = Record.size(); I != N; ++I)
|
|
|
|
UnusedFileScopedDecls.push_back(getGlobalDeclID(F, Record[I]));
|
2010-02-12 03:07:30 +03:00
|
|
|
break;
|
2010-03-07 22:10:13 +03:00
|
|
|
|
2011-05-05 03:29:54 +04:00
|
|
|
case DELEGATING_CTORS:
|
2011-07-22 02:35:25 +04:00
|
|
|
for (unsigned I = 0, N = Record.size(); I != N; ++I)
|
|
|
|
DelegatingCtorDecls.push_back(getGlobalDeclID(F, Record[I]));
|
2011-05-05 03:29:54 +04:00
|
|
|
break;
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case WEAK_UNDECLARED_IDENTIFIERS:
|
2011-07-28 22:09:57 +04:00
|
|
|
if (Record.size() % 4 != 0) {
|
|
|
|
Error("invalid weak identifiers record");
|
|
|
|
return Failure;
|
|
|
|
}
|
|
|
|
|
|
|
|
// FIXME: Ignore weak undeclared identifiers from non-original PCH
|
|
|
|
// files. This isn't the way to do it :)
|
|
|
|
WeakUndeclaredIdentifiers.clear();
|
|
|
|
|
|
|
|
// Translate the weak, undeclared identifiers into global IDs.
|
|
|
|
for (unsigned I = 0, N = Record.size(); I < N; /* in loop */) {
|
|
|
|
WeakUndeclaredIdentifiers.push_back(
|
|
|
|
getGlobalIdentifierID(F, Record[I++]));
|
|
|
|
WeakUndeclaredIdentifiers.push_back(
|
|
|
|
getGlobalIdentifierID(F, Record[I++]));
|
|
|
|
WeakUndeclaredIdentifiers.push_back(
|
|
|
|
ReadSourceLocation(F, Record, I).getRawEncoding());
|
|
|
|
WeakUndeclaredIdentifiers.push_back(Record[I++]);
|
|
|
|
}
|
2010-08-05 13:48:08 +04:00
|
|
|
break;
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case LOCALLY_SCOPED_EXTERNAL_DECLS:
|
2011-07-22 02:35:25 +04:00
|
|
|
for (unsigned I = 0, N = Record.size(); I != N; ++I)
|
|
|
|
LocallyScopedExternalDecls.push_back(getGlobalDeclID(F, Record[I]));
|
2009-04-23 02:18:58 +04:00
|
|
|
break;
|
2009-04-25 01:10:55 +04:00
|
|
|
|
2011-08-04 03:28:44 +04:00
|
|
|
case SELECTOR_OFFSETS: {
|
2010-08-04 01:58:15 +04:00
|
|
|
F.SelectorOffsets = (const uint32_t *)BlobStart;
|
2010-08-05 00:40:17 +04:00
|
|
|
F.LocalNumSelectors = Record[0];
|
2011-08-04 03:28:44 +04:00
|
|
|
unsigned LocalBaseSelectorID = Record[1];
|
2011-07-29 04:56:45 +04:00
|
|
|
F.BaseSelectorID = getTotalNumSelectors();
|
2011-07-20 05:10:58 +04:00
|
|
|
|
2011-08-04 03:28:44 +04:00
|
|
|
if (F.LocalNumSelectors > 0) {
|
|
|
|
// Introduce the global -> local mapping for selectors within this
|
|
|
|
// module.
|
|
|
|
GlobalSelectorMap.insert(std::make_pair(getTotalNumSelectors()+1, &F));
|
|
|
|
|
|
|
|
// Introduce the local -> global mapping for selectors within this
|
|
|
|
// module.
|
|
|
|
F.SelectorRemap.insert(std::make_pair(LocalBaseSelectorID,
|
|
|
|
F.BaseSelectorID - LocalBaseSelectorID));
|
2009-04-25 21:48:32 +04:00
|
|
|
|
2011-08-04 03:28:44 +04:00
|
|
|
SelectorsLoaded.resize(SelectorsLoaded.size() + F.LocalNumSelectors);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case METHOD_POOL:
|
2010-08-05 00:40:17 +04:00
|
|
|
F.SelectorLookupTableData = (const unsigned char *)BlobStart;
|
2009-04-25 21:48:32 +04:00
|
|
|
if (Record[0])
|
2010-08-05 00:40:17 +04:00
|
|
|
F.SelectorLookupTable
|
2010-08-19 03:57:06 +04:00
|
|
|
= ASTSelectorLookupTable::Create(
|
2010-08-05 00:40:17 +04:00
|
|
|
F.SelectorLookupTableData + Record[0],
|
|
|
|
F.SelectorLookupTableData,
|
2011-07-22 02:35:25 +04:00
|
|
|
ASTSelectorLookupTrait(*this, F));
|
2010-08-05 01:22:45 +04:00
|
|
|
TotalNumMethodPoolEntries += Record[1];
|
2009-04-25 01:10:55 +04:00
|
|
|
break;
|
2009-04-26 04:07:37 +04:00
|
|
|
|
2010-09-22 04:42:30 +04:00
|
|
|
case REFERENCED_SELECTOR_POOL:
|
2011-07-28 18:41:43 +04:00
|
|
|
if (!Record.empty()) {
|
|
|
|
for (unsigned Idx = 0, N = Record.size() - 1; Idx < N; /* in loop */) {
|
|
|
|
ReferencedSelectorsData.push_back(getGlobalSelectorID(F,
|
|
|
|
Record[Idx++]));
|
|
|
|
ReferencedSelectorsData.push_back(ReadSourceLocation(F, Record, Idx).
|
|
|
|
getRawEncoding());
|
|
|
|
}
|
|
|
|
}
|
2010-07-23 23:11:11 +04:00
|
|
|
break;
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case PP_COUNTER_VALUE:
|
2009-06-19 04:03:23 +04:00
|
|
|
if (!Record.empty() && Listener)
|
|
|
|
Listener->ReadCounter(Record[0]);
|
2009-04-26 04:07:37 +04:00
|
|
|
break;
|
2009-04-27 10:38:32 +04:00
|
|
|
|
Revamp the SourceManager to separate the representation of parsed
source locations from source locations loaded from an AST/PCH file.
Previously, loading an AST/PCH file involved carefully pre-allocating
space at the beginning of the source manager for the source locations
and FileIDs that correspond to the prefix, and then appending the
source locations/FileIDs used for parsing the remaining translation
unit. This design forced us into loading PCH files early, as a prefix,
which has become a rather significant limitation.
This patch splits the SourceManager space into two parts: for source
location "addresses", the lower values (growing upward) are used to
describe parsed code, while upper values (growing downward) are used
for source locations loaded from AST/PCH files. Similarly, positive
FileIDs are used to describe parsed code while negative FileIDs are
used for file/macro locations loaded from AST/PCH files. As a result,
we can load PCH/AST files even during parsing, making various
improvements in the future possible, e.g., teaching #include <foo.h> to
look for and load <foo.h.gch> if it happens to be already available.
This patch was originally written by Sebastian Redl, then brought
forward to the modern age by Jonathan Turner, and finally
polished/finished by me to be committed.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@135484 91177308-0d34-0410-b5e6-96231b3b80d8
2011-07-19 20:10:42 +04:00
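A minimal sketch of the two-sided offset space the message describes (hypothetical class; the real logic lives in SourceManager::AllocateLoadedSLocEntries and its callers): parsed entries take offsets from the bottom of the space, loaded entries take offsets from the top growing downward, so neither side has to be reserved up front.

class SLocAddressSpace {
  unsigned NextParsedOffset; // bottom of the space, grows upward
  unsigned NextLoadedOffset; // top of the space, grows downward

public:
  SLocAddressSpace()
    : NextParsedOffset(0), NextLoadedOffset(0xFFFFFFFFu) {}

  // Reserve Size units of offset space for locally parsed code.
  unsigned allocateParsed(unsigned Size) {
    unsigned Base = NextParsedOffset;
    NextParsedOffset += Size;
    return Base;
  }

  // Reserve Size units for entries loaded from an AST/PCH file; the
  // returned base plays the role of a module's SLocEntryBaseOffset.
  unsigned allocateLoaded(unsigned Size) {
    NextLoadedOffset -= Size;
    return NextLoadedOffset;
  }
};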
|
|
|
case SOURCE_LOCATION_OFFSETS: {
|
|
|
|
F.SLocEntryOffsets = (const uint32_t *)BlobStart;
|
2010-07-21 01:20:32 +04:00
|
|
|
F.LocalNumSLocEntries = Record[0];
|
2011-07-19 20:10:42 +04:00
|
|
|
llvm::tie(F.SLocEntryBaseID, F.SLocEntryBaseOffset) =
|
|
|
|
SourceMgr.AllocateLoadedSLocEntries(F.LocalNumSLocEntries, Record[1]);
|
|
|
|
// Make our entry in the range map. BaseID is negative and growing, so
|
|
|
|
// we invert it. Because we invert it, though, we need the other end of
|
|
|
|
// the range.
|
|
|
|
unsigned RangeStart =
|
|
|
|
unsigned(-F.SLocEntryBaseID) - F.LocalNumSLocEntries + 1;
|
|
|
|
GlobalSLocEntryMap.insert(std::make_pair(RangeStart, &F));
|
|
|
|
F.FirstLoc = SourceLocation::getFromRawEncoding(F.SLocEntryBaseOffset);
|
|
|
|
|
|
|
|
// Initialize the remapping table.
|
|
|
|
// Invalid stays invalid.
|
|
|
|
F.SLocRemap.insert(std::make_pair(0U, 0));
|
|
|
|
// This module. Base was 2 when being compiled.
|
|
|
|
F.SLocRemap.insert(std::make_pair(2U,
|
|
|
|
static_cast<int>(F.SLocEntryBaseOffset - 2)));
|
2011-07-21 22:46:38 +04:00
|
|
|
|
|
|
|
TotalNumSLocEntries += F.LocalNumSLocEntries;
|
2011-07-19 20:10:42 +04:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2011-08-01 20:01:55 +04:00
|
|
|
case MODULE_OFFSET_MAP: {
|
2011-07-19 20:10:42 +04:00
|
|
|
// Additional remapping information.
|
|
|
|
const unsigned char *Data = (const unsigned char*)BlobStart;
|
|
|
|
const unsigned char *DataEnd = Data + BlobLen;
|
2011-08-02 14:56:51 +04:00
|
|
|
|
|
|
|
// Continuous range maps we may be updating in our module.
|
|
|
|
ContinuousRangeMap<uint32_t, int, 2>::Builder SLocRemap(F.SLocRemap);
|
2011-08-04 01:49:18 +04:00
|
|
|
ContinuousRangeMap<uint32_t, int, 2>::Builder
|
|
|
|
IdentifierRemap(F.IdentifierRemap);
|
2011-08-04 22:56:47 +04:00
|
|
|
ContinuousRangeMap<uint32_t, int, 2>::Builder
|
|
|
|
PreprocessedEntityRemap(F.PreprocessedEntityRemap);
|
2011-08-04 20:36:56 +04:00
|
|
|
ContinuousRangeMap<uint32_t, int, 2>::Builder
|
|
|
|
MacroDefinitionRemap(F.MacroDefinitionRemap);
|
2011-08-04 03:28:44 +04:00
|
|
|
ContinuousRangeMap<uint32_t, int, 2>::Builder
|
|
|
|
SelectorRemap(F.SelectorRemap);
|
2011-08-03 19:48:04 +04:00
|
|
|
ContinuousRangeMap<uint32_t, int, 2>::Builder DeclRemap(F.DeclRemap);
|
2011-08-02 20:26:37 +04:00
|
|
|
ContinuousRangeMap<uint32_t, int, 2>::Builder TypeRemap(F.TypeRemap);
|
|
|
|
|
2011-07-19 20:10:42 +04:00
|
|
|
while (Data < DataEnd) {
|
|
|
|
uint16_t Len = io::ReadUnalignedLE16(Data);
|
2011-07-23 14:55:15 +04:00
|
|
|
StringRef Name = StringRef((const char*)Data, Len);
|
2011-08-02 14:56:51 +04:00
|
|
|
Data += Len;
|
2011-07-26 22:21:30 +04:00
|
|
|
Module *OM = ModuleMgr.lookup(Name);
|
2011-07-19 20:10:42 +04:00
|
|
|
if (!OM) {
|
|
|
|
Error("SourceLocation remap refers to unknown module");
|
|
|
|
return Failure;
|
|
|
|
}
|
2011-08-02 14:56:51 +04:00
|
|
|
|
|
|
|
uint32_t SLocOffset = io::ReadUnalignedLE32(Data);
|
|
|
|
uint32_t IdentifierIDOffset = io::ReadUnalignedLE32(Data);
|
|
|
|
uint32_t PreprocessedEntityIDOffset = io::ReadUnalignedLE32(Data);
|
|
|
|
uint32_t MacroDefinitionIDOffset = io::ReadUnalignedLE32(Data);
|
|
|
|
uint32_t SelectorIDOffset = io::ReadUnalignedLE32(Data);
|
|
|
|
uint32_t DeclIDOffset = io::ReadUnalignedLE32(Data);
|
2011-08-02 20:26:37 +04:00
|
|
|
uint32_t TypeIndexOffset = io::ReadUnalignedLE32(Data);
|
2011-08-02 14:56:51 +04:00
|
|
|
|
|
|
|
// Source location offset is mapped to OM->SLocEntryBaseOffset.
|
|
|
|
SLocRemap.insert(std::make_pair(SLocOffset,
|
|
|
|
static_cast<int>(OM->SLocEntryBaseOffset - SLocOffset)));
|
2011-08-04 01:49:18 +04:00
|
|
|
IdentifierRemap.insert(
|
|
|
|
std::make_pair(IdentifierIDOffset,
|
|
|
|
OM->BaseIdentifierID - IdentifierIDOffset));
|
2011-08-04 22:56:47 +04:00
|
|
|
PreprocessedEntityRemap.insert(
|
|
|
|
std::make_pair(PreprocessedEntityIDOffset,
|
|
|
|
OM->BasePreprocessedEntityID - PreprocessedEntityIDOffset));
|
2011-08-04 20:36:56 +04:00
|
|
|
MacroDefinitionRemap.insert(
|
|
|
|
std::make_pair(MacroDefinitionIDOffset,
|
|
|
|
OM->BaseMacroDefinitionID - MacroDefinitionIDOffset));
|
2011-08-04 03:28:44 +04:00
|
|
|
SelectorRemap.insert(std::make_pair(SelectorIDOffset,
|
|
|
|
OM->BaseSelectorID - SelectorIDOffset));
|
2011-08-03 19:48:04 +04:00
|
|
|
DeclRemap.insert(std::make_pair(DeclIDOffset,
|
|
|
|
OM->BaseDeclID - DeclIDOffset));
|
|
|
|
|
2011-08-02 20:26:37 +04:00
|
|
|
TypeRemap.insert(std::make_pair(TypeIndexOffset,
|
2011-08-02 22:32:54 +04:00
|
|
|
OM->BaseTypeIndex - TypeIndexOffset));
|
Revamp the SourceManager to separate the representation of parsed
source locations from source locations loaded from an AST/PCH file.
Previously, loading an AST/PCH file involved carefully pre-allocating
space at the beginning of the source manager for the source locations
and FileIDs that correspond to the prefix, and then appending the
source locations/FileIDs used for parsing the remaining translation
unit. This design forced us into loading PCH files early, as a prefix,
whic has become a rather significant limitation.
This patch splits the SourceManager space into two parts: for source
location "addresses", the lower values (growing upward) are used to
describe parsed code, while upper values (growing downward) are used
for source locations loaded from AST/PCH files. Similarly, positive
FileIDs are used to describe parsed code while negative FileIDs are
used to file/macro locations loaded from AST/PCH files. As a result,
we can load PCH/AST files even during parsing, making various
improvements in the future possible, e.g., teaching #include <foo.h> to
look for and load <foo.h.gch> if it happens to be already available.
This patch was originally written by Sebastian Redl, then brought
forward to the modern age by Jonathan Turner, and finally
polished/finished by me to be committed.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@135484 91177308-0d34-0410-b5e6-96231b3b80d8
2011-07-19 20:10:42 +04:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
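// Illustrative sketch (not part of the reader): each remap built above
// stores, keyed by a local base offset/ID from the file being loaded, the
// signed delta that turns a local value into its global counterpart
// (delta = global base - local base). Resolving a local value then means
// finding the nearest preceding base and adding its delta. A minimal
// stand-in using std::map -- the reader itself uses ContinuousRangeMap --
// would look roughly like this:
//
//   #include <map>
//
//   unsigned resolveLocalToGlobal(const std::map<unsigned, int> &Remap,
//                                 unsigned Local) {
//     std::map<unsigned, int>::const_iterator I = Remap.upper_bound(Local);
//     if (I == Remap.begin())
//       return Local;        // no remapping recorded below this value
//     --I;                   // entry whose base is the largest one <= Local
//     return Local + I->second;
//   }
//
// For example, a module whose declarations start at local ID 100 and were
// assigned global IDs starting at 5100 stores {100, +5000}; local ID 103
// then resolves to global ID 5103.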
|
|
|
|
|
|
|
|
case SOURCE_MANAGER_LINE_TABLE:
|
|
|
|
if (ParseLineTable(F, Record))
|
|
|
|
return Failure;
|
2009-04-27 10:38:32 +04:00
|
|
|
break;
|
|
|
|
|
2011-06-03 00:01:46 +04:00
|
|
|
case FILE_SOURCE_LOCATION_OFFSETS:
|
|
|
|
F.SLocFileOffsets = (const uint32_t *)BlobStart;
|
|
|
|
F.LocalNumSLocFileEntries = Record[0];
|
|
|
|
break;
|
|
|
|
|
2011-07-19 20:10:42 +04:00
|
|
|
case SOURCE_LOCATION_PRELOADS: {
|
|
|
|
// Need to transform from the local view (1-based IDs) to the global view,
|
|
|
|
// which is based off F.SLocEntryBaseID.
|
|
|
|
PreloadSLocEntries.reserve(PreloadSLocEntries.size() + Record.size());
|
|
|
|
for (unsigned I = 0, N = Record.size(); I != N; ++I)
|
|
|
|
PreloadSLocEntries.push_back(int(Record[I] - 1) + F.SLocEntryBaseID);
|
2009-04-27 10:38:32 +04:00
|
|
|
break;
|
2011-07-19 20:10:42 +04:00
|
|
|
}
|
2009-04-27 22:38:38 +04:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case STAT_CACHE: {
|
2011-02-05 22:42:43 +03:00
|
|
|
if (!DisableStatCache) {
|
|
|
|
ASTStatCache *MyStatCache =
|
|
|
|
new ASTStatCache((const unsigned char *)BlobStart + Record[0],
|
|
|
|
(const unsigned char *)BlobStart,
|
|
|
|
NumStatHits, NumStatMisses);
|
|
|
|
FileMgr.addStatCache(MyStatCache);
|
|
|
|
F.StatCache = MyStatCache;
|
|
|
|
}
|
2009-04-27 22:38:38 +04:00
|
|
|
break;
|
2009-10-16 22:18:30 +04:00
|
|
|
}
|
2010-03-07 22:10:13 +03:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case EXT_VECTOR_DECLS:
|
2011-07-22 02:35:25 +04:00
|
|
|
for (unsigned I = 0, N = Record.size(); I != N; ++I)
|
|
|
|
ExtVectorDecls.push_back(getGlobalDeclID(F, Record[I]));
|
2009-04-28 00:06:05 +04:00
|
|
|
break;
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case VTABLE_USES:
|
2011-07-28 23:11:31 +04:00
|
|
|
if (Record.size() % 3 != 0) {
|
|
|
|
Error("Invalid VTABLE_USES record");
|
|
|
|
return Failure;
|
|
|
|
}
|
|
|
|
|
2010-08-05 22:21:25 +04:00
|
|
|
// Later tables overwrite earlier ones.
|
2011-07-28 23:11:31 +04:00
|
|
|
// FIXME: Modules will have some trouble with this. This is clearly not
|
|
|
|
// the right way to do this.
|
2011-07-22 02:35:25 +04:00
|
|
|
VTableUses.clear();
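// Each vtable use takes three entries in the record: a declaration ID, a
// source location in its raw encoding, and a third value that is copied
// through unchanged below (hence the size % 3 check above).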
|
2011-07-28 23:11:31 +04:00
|
|
|
|
|
|
|
for (unsigned Idx = 0, N = Record.size(); Idx != N; /* In loop */) {
|
|
|
|
VTableUses.push_back(getGlobalDeclID(F, Record[Idx++]));
|
|
|
|
VTableUses.push_back(
|
|
|
|
ReadSourceLocation(F, Record, Idx).getRawEncoding());
|
|
|
|
VTableUses.push_back(Record[Idx++]);
|
|
|
|
}
|
2010-07-06 19:37:04 +04:00
|
|
|
break;
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case DYNAMIC_CLASSES:
|
2011-07-22 02:35:25 +04:00
|
|
|
for (unsigned I = 0, N = Record.size(); I != N; ++I)
|
|
|
|
DynamicClasses.push_back(getGlobalDeclID(F, Record[I]));
|
2010-07-06 19:37:04 +04:00
|
|
|
break;
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case PENDING_IMPLICIT_INSTANTIATIONS:
|
2011-07-28 23:26:52 +04:00
|
|
|
if (Record.size() % 2 != 0) {
|
|
|
|
Error("Invalid PENDING_IMPLICIT_INSTANTIATIONS block");
|
|
|
|
return Failure;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Later lists of pending instantiations overwrite earlier ones.
|
|
|
|
// FIXME: This is most certainly wrong for modules.
|
|
|
|
PendingInstantiations.clear();
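// Each pending instantiation takes two entries in the record: a
// declaration ID followed by a source location in its raw encoding.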
|
|
|
|
for (unsigned I = 0, N = Record.size(); I != N; /* in loop */) {
|
|
|
|
PendingInstantiations.push_back(getGlobalDeclID(F, Record[I++]));
|
|
|
|
PendingInstantiations.push_back(
|
|
|
|
ReadSourceLocation(F, Record, I).getRawEncoding());
|
|
|
|
}
|
2010-08-05 13:48:16 +04:00
|
|
|
break;
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case SEMA_DECL_REFS:
|
2010-08-05 22:21:25 +04:00
|
|
|
// Later tables overwrite earlier ones.
|
2011-07-22 02:35:25 +04:00
|
|
|
// FIXME: Modules will have some trouble with this.
|
|
|
|
SemaDeclRefs.clear();
|
|
|
|
for (unsigned I = 0, N = Record.size(); I != N; ++I)
|
|
|
|
SemaDeclRefs.push_back(getGlobalDeclID(F, Record[I]));
|
2010-08-02 11:14:54 +04:00
|
|
|
break;
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case ORIGINAL_FILE_NAME:
|
2010-08-19 03:57:06 +04:00
|
|
|
// The primary AST will be the last to get here, so it will be the one
|
2010-07-21 01:20:32 +04:00
|
|
|
// that's used.
|
2009-11-11 08:29:04 +03:00
|
|
|
ActualOriginalFileName.assign(BlobStart, BlobLen);
|
|
|
|
OriginalFileName = ActualOriginalFileName;
|
2009-07-07 04:12:59 +04:00
|
|
|
MaybeAddSystemRootToFilename(OriginalFileName);
|
2009-05-12 05:31:05 +04:00
|
|
|
break;
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2011-05-07 01:43:30 +04:00
|
|
|
case ORIGINAL_FILE_ID:
|
|
|
|
OriginalFileID = FileID::get(Record[0]);
|
|
|
|
break;
|
|
|
|
|
2011-02-15 20:54:22 +03:00
|
|
|
case ORIGINAL_PCH_DIR:
|
|
|
|
// The primary AST will be the last to get here, so it will be the one
|
|
|
|
// that's used.
|
|
|
|
OriginalDir.assign(BlobStart, BlobLen);
|
|
|
|
break;
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case VERSION_CONTROL_BRANCH_REVISION: {
|
2010-02-13 02:31:14 +03:00
|
|
|
const std::string &CurBranch = getClangFullRepositoryVersion();
|
2011-07-23 14:55:15 +04:00
|
|
|
StringRef ASTBranch(BlobStart, BlobLen);
|
|
|
|
if (StringRef(CurBranch) != ASTBranch && !DisableValidation) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Diag(diag::warn_pch_different_branch) << ASTBranch << CurBranch;
|
2009-10-06 01:07:28 +04:00
|
|
|
return IgnorePCH;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2010-07-22 00:07:32 +04:00
|
|
|
|
2011-07-21 04:47:40 +04:00
|
|
|
case MACRO_DEFINITION_OFFSETS: {
|
2010-07-22 00:07:32 +04:00
|
|
|
F.MacroDefinitionOffsets = (const uint32_t *)BlobStart;
|
|
|
|
F.NumPreallocatedPreprocessingEntities = Record[0];
|
2011-08-04 22:56:47 +04:00
|
|
|
unsigned LocalBasePreprocessedEntityID = Record[1];
|
|
|
|
F.LocalNumMacroDefinitions = Record[2];
|
|
|
|
unsigned LocalBaseMacroID = Record[3];
|
2011-08-04 20:36:56 +04:00
|
|
|
|
2011-07-21 04:47:40 +04:00
|
|
|
unsigned StartingID;
|
|
|
|
if (PP) {
|
|
|
|
if (!PP->getPreprocessingRecord())
|
|
|
|
PP->createPreprocessingRecord(true);
|
|
|
|
if (!PP->getPreprocessingRecord()->getExternalSource())
|
|
|
|
PP->getPreprocessingRecord()->SetExternalSource(*this);
|
|
|
|
StartingID
|
|
|
|
= PP->getPreprocessingRecord()
|
|
|
|
->allocateLoadedEntities(F.NumPreallocatedPreprocessingEntities);
|
|
|
|
} else {
|
|
|
|
// FIXME: We'll eventually want to kill this path, since it assumes
|
|
|
|
// a particular allocation strategy in the preprocessing record.
|
2011-08-04 22:56:47 +04:00
|
|
|
StartingID = getTotalNumPreprocessedEntities()
|
|
|
|
- F.NumPreallocatedPreprocessingEntities;
|
2011-07-21 04:47:40 +04:00
|
|
|
}
|
2011-07-29 04:56:45 +04:00
|
|
|
F.BaseMacroDefinitionID = getTotalNumMacroDefinitions();
|
|
|
|
F.BasePreprocessedEntityID = StartingID;
|
2011-07-21 04:47:40 +04:00
|
|
|
|
2011-08-04 22:56:47 +04:00
|
|
|
if (F.NumPreallocatedPreprocessingEntities > 0) {
|
|
|
|
// Introduce the global -> local mapping for preprocessed entities in
|
|
|
|
// this module.
|
|
|
|
GlobalPreprocessedEntityMap.insert(std::make_pair(StartingID, &F));
|
|
|
|
|
|
|
|
// Introduce the local -> global mapping for preprocessed entities in
|
|
|
|
// this module.
|
|
|
|
F.PreprocessedEntityRemap.insert(
|
|
|
|
std::make_pair(LocalBasePreprocessedEntityID,
|
|
|
|
F.BasePreprocessedEntityID - LocalBasePreprocessedEntityID));
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-08-04 20:36:56 +04:00
|
|
|
if (F.LocalNumMacroDefinitions > 0) {
|
|
|
|
// Introduce the global -> local mapping for macro definitions within
|
|
|
|
// this module.
|
|
|
|
GlobalMacroDefinitionMap.insert(
|
|
|
|
std::make_pair(getTotalNumMacroDefinitions() + 1, &F));
|
|
|
|
|
|
|
|
// Introduce the local -> global mapping for macro definitions within
|
|
|
|
// this module.
|
|
|
|
F.MacroDefinitionRemap.insert(
|
|
|
|
std::make_pair(LocalBaseMacroID,
|
|
|
|
F.BaseMacroDefinitionID - LocalBaseMacroID));
|
|
|
|
|
|
|
|
MacroDefinitionsLoaded.resize(
|
2011-07-20 05:29:15 +04:00
|
|
|
MacroDefinitionsLoaded.size() + F.LocalNumMacroDefinitions);
|
2011-08-04 20:36:56 +04:00
|
|
|
}
|
|
|
|
|
2010-03-20 00:51:54 +03:00
|
|
|
break;
|
2011-07-21 04:47:40 +04:00
|
|
|
}
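// Illustrative sketch (not the reader's actual code): the Global*Map tables
// populated above answer the reverse question of the per-module remaps --
// given a *global* ID, which module owns it? Keyed by each module's starting
// global ID, the lookup finds the entry with the largest start that is <=
// the ID and subtracts the module's base to recover the local index. Using
// std::map as a stand-in, and assuming the ID falls inside some loaded
// module's range:
//
//   #include <map>
//
//   // 'Module' here is the serialization module record used in this file.
//   unsigned findLocalIndex(const std::map<unsigned, Module *> &GlobalMap,
//                           unsigned GlobalID, Module *&M) {
//     std::map<unsigned, Module *>::const_iterator I
//       = GlobalMap.upper_bound(GlobalID);
//     --I;                  // module whose range starts at or before GlobalID
//     M = I->second;
//     return GlobalID - M->BasePreprocessedEntityID;
//   }
//
// (GlobalMacroDefinitionMap above is keyed one past the base, presumably
// because macro definition IDs are 1-based, so the corresponding local index
// would be computed against BaseMacroDefinitionID accordingly.)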
|
|
|
|
|
2010-10-24 21:26:36 +04:00
|
|
|
case DECL_UPDATE_OFFSETS: {
|
|
|
|
if (Record.size() % 2 != 0) {
|
|
|
|
Error("invalid DECL_UPDATE_OFFSETS block in AST file");
|
|
|
|
return Failure;
|
|
|
|
}
|
|
|
|
for (unsigned I = 0, N = Record.size(); I != N; I += 2)
|
2011-08-03 19:48:04 +04:00
|
|
|
DeclUpdateOffsets[getGlobalDeclID(F, Record[I])]
|
|
|
|
.push_back(std::make_pair(&F, Record[I+1]));
|
2010-10-24 21:26:36 +04:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case DECL_REPLACEMENTS: {
|
2010-08-13 04:28:03 +04:00
|
|
|
if (Record.size() % 2 != 0) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("invalid DECL_REPLACEMENTS block in AST file");
|
2010-08-13 04:28:03 +04:00
|
|
|
return Failure;
|
|
|
|
}
|
|
|
|
for (unsigned I = 0, N = Record.size(); I != N; I += 2)
|
2011-08-03 19:48:04 +04:00
|
|
|
ReplacedDecls[getGlobalDeclID(F, Record[I])]
|
|
|
|
= std::make_pair(&F, Record[I+1]);
|
2010-08-13 04:28:03 +04:00
|
|
|
break;
|
|
|
|
}
|
2010-10-30 02:39:52 +04:00
|
|
|
|
|
|
|
case CXX_BASE_SPECIFIER_OFFSETS: {
|
|
|
|
if (F.LocalNumCXXBaseSpecifiers != 0) {
|
|
|
|
Error("duplicate CXX_BASE_SPECIFIER_OFFSETS record in AST file");
|
|
|
|
return Failure;
|
|
|
|
}
|
|
|
|
|
|
|
|
F.LocalNumCXXBaseSpecifiers = Record[0];
|
|
|
|
F.CXXBaseSpecifiersOffsets = (const uint32_t *)BlobStart;
|
2011-07-22 01:15:19 +04:00
|
|
|
NumCXXBaseSpecifiersLoaded += F.LocalNumCXXBaseSpecifiers;
|
2010-10-30 02:39:52 +04:00
|
|
|
break;
|
|
|
|
}
|
2010-11-06 01:10:18 +03:00
|
|
|
|
2011-01-14 23:54:07 +03:00
|
|
|
case DIAG_PRAGMA_MAPPINGS:
|
2010-11-06 01:10:18 +03:00
|
|
|
if (Record.size() % 2 != 0) {
|
|
|
|
Error("invalid DIAG_USER_MAPPINGS block in AST file");
|
|
|
|
return Failure;
|
|
|
|
}
|
2011-07-19 20:10:42 +04:00
|
|
|
|
|
|
|
if (F.PragmaDiagMappings.empty())
|
|
|
|
F.PragmaDiagMappings.swap(Record);
|
2010-11-06 01:10:18 +03:00
|
|
|
else
|
2011-07-19 20:10:42 +04:00
|
|
|
F.PragmaDiagMappings.insert(F.PragmaDiagMappings.end(),
|
|
|
|
Record.begin(), Record.end());
|
2010-11-06 01:10:18 +03:00
|
|
|
break;
|
Implement two related optimizations that make de-serialization of
AST/PCH files more lazy:
- Don't preload all of the file source-location entries when reading
the AST file. Instead, load them lazily, when needed.
- Only look up header-search information (whether a header was already
#import'd, how many times it's been included, etc.) when it's needed
by the preprocessor, rather than pre-populating it.
Previously, we would pre-load all of the file source-location entries,
which also populated the header-search information structure. This was
a relatively minor performance issue, since we would end up stat()'ing
all of the headers stored within an AST/PCH file when the AST/PCH file
was loaded. In the normal PCH use case, the stat()s were cached, so
the cost -- of preloading ~860 source-location entries in the Cocoa.h
case -- was relatively low.
However, the recent optimization that replaced stat+open with
open+fstat turned this into a major problem, since the preloading of
source-location entries would now end up opening those files. Worse,
those files wouldn't be closed until the file manager was destroyed,
so just opening a Cocoa.h PCH file would hold on to ~860 file
descriptors, and it was easy to blow through the process's limit on
the number of open file descriptors.
By eliminating the preloading of these files, we neither open nor stat
the headers stored in the PCH/AST file until they're actually needed
for something. Concretely, we went from
*** HeaderSearch Stats:
835 files tracked.
364 #import/#pragma once files.
823 included exactly once.
6 max times a file is included.
3 #include/#include_next/#import.
0 #includes skipped due to the multi-include optimization.
1 framework lookups.
0 subframework lookups.
*** Source Manager Stats:
835 files mapped, 3 mem buffers mapped.
37460 SLocEntry's allocated, 11215575B of Sloc address space used.
62 bytes of files mapped, 0 files with line #'s computed.
with a trivial program that uses a chained PCH including a Cocoa PCH
to
*** HeaderSearch Stats:
4 files tracked.
1 #import/#pragma once files.
3 included exactly once.
2 max times a file is included.
3 #include/#include_next/#import.
0 #includes skipped due to the multi-include optimization.
1 framework lookups.
0 subframework lookups.
*** Source Manager Stats:
3 files mapped, 3 mem buffers mapped.
37460 SLocEntry's allocated, 11215575B of Sloc address space used.
62 bytes of files mapped, 0 files with line #'s computed.
for the same program.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@125286 91177308-0d34-0410-b5e6-96231b3b80d8
2011-02-10 20:09:37 +03:00
|
|
|
|
2011-02-10 00:04:32 +03:00
|
|
|
case CUDA_SPECIAL_DECL_REFS:
|
|
|
|
// Later tables overwrite earlier ones.
|
2011-07-22 02:35:25 +04:00
|
|
|
// FIXME: Modules will have trouble with this.
|
|
|
|
CUDASpecialDeclRefs.clear();
|
|
|
|
for (unsigned I = 0, N = Record.size(); I != N; ++I)
|
|
|
|
CUDASpecialDeclRefs.push_back(getGlobalDeclID(F, Record[I]));
|
2011-02-10 00:04:32 +03:00
|
|
|
break;
|
2011-02-10 20:09:37 +03:00
|
|
|
|
2011-07-28 08:50:02 +04:00
|
|
|
case HEADER_SEARCH_TABLE: {
|
2011-02-10 20:09:37 +03:00
|
|
|
F.HeaderFileInfoTableData = BlobStart;
|
|
|
|
F.LocalNumHeaderFileInfos = Record[1];
|
2011-07-28 08:50:02 +04:00
|
|
|
F.HeaderFileFrameworkStrings = BlobStart + Record[2];
|
2011-02-10 20:09:37 +03:00
|
|
|
if (Record[0]) {
|
|
|
|
F.HeaderFileInfoTable
|
|
|
|
= HeaderFileInfoLookupTable::Create(
|
|
|
|
(const unsigned char *)F.HeaderFileInfoTableData + Record[0],
|
2011-07-28 08:50:02 +04:00
|
|
|
(const unsigned char *)F.HeaderFileInfoTableData,
|
2011-07-29 00:55:49 +04:00
|
|
|
HeaderFileInfoTrait(*this, F,
|
|
|
|
PP? &PP->getHeaderSearchInfo() : 0,
|
2011-07-28 08:50:02 +04:00
|
|
|
BlobStart + Record[2]));
|
2011-02-10 20:09:37 +03:00
|
|
|
if (PP)
|
|
|
|
PP->getHeaderSearchInfo().SetExternalSource(this);
|
|
|
|
}
|
|
|
|
break;
|
2011-07-28 08:50:02 +04:00
|
|
|
}
|
|
|
|
|
2011-02-15 22:46:30 +03:00
|
|
|
case FP_PRAGMA_OPTIONS:
|
|
|
|
// Later tables overwrite earlier ones.
|
|
|
|
FPPragmaOptions.swap(Record);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case OPENCL_EXTENSIONS:
|
|
|
|
// Later tables overwrite earlier ones.
|
|
|
|
OpenCLExtensions.swap(Record);
|
|
|
|
break;
|
2011-05-05 03:29:54 +04:00
|
|
|
|
|
|
|
case TENTATIVE_DEFINITIONS:
|
2011-07-22 02:35:25 +04:00
|
|
|
for (unsigned I = 0, N = Record.size(); I != N; ++I)
|
|
|
|
TentativeDefinitions.push_back(getGlobalDeclID(F, Record[I]));
|
2011-05-05 03:29:54 +04:00
|
|
|
break;
|
2011-06-28 20:20:02 +04:00
|
|
|
|
|
|
|
case KNOWN_NAMESPACES:
|
2011-07-22 02:35:25 +04:00
|
|
|
for (unsigned I = 0, N = Record.size(); I != N; ++I)
|
|
|
|
KnownNamespaces.push_back(getGlobalDeclID(F, Record[I]));
|
2011-06-28 20:20:02 +04:00
|
|
|
break;
|
2009-04-11 04:14:32 +04:00
|
|
|
}
|
2010-07-20 00:52:06 +04:00
|
|
|
First = false;
|
2009-04-10 02:27:44 +04:00
|
|
|
}
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("premature end of bitstream in AST file");
|
2009-04-11 00:39:37 +04:00
|
|
|
return Failure;
|
2009-04-10 02:27:44 +04:00
|
|
|
}
|
|
|
|
|
2011-06-01 09:43:53 +04:00
|
|
|
ASTReader::ASTReadResult ASTReader::validateFileEntries() {
|
2011-07-26 00:32:21 +04:00
|
|
|
for (ModuleIterator I = ModuleMgr.begin(),
|
|
|
|
E = ModuleMgr.end(); I != E; ++I) {
|
|
|
|
Module *F = *I;
|
2011-06-01 09:43:53 +04:00
|
|
|
llvm::BitstreamCursor &SLocEntryCursor = F->SLocEntryCursor;
|
|
|
|
|
2011-06-03 00:01:46 +04:00
|
|
|
for (unsigned i = 0, e = F->LocalNumSLocFileEntries; i != e; ++i) {
|
|
|
|
SLocEntryCursor.JumpToBit(F->SLocFileOffsets[i]);
|
2011-06-01 09:43:53 +04:00
|
|
|
unsigned Code = SLocEntryCursor.ReadCode();
|
|
|
|
if (Code == llvm::bitc::END_BLOCK ||
|
|
|
|
Code == llvm::bitc::ENTER_SUBBLOCK ||
|
|
|
|
Code == llvm::bitc::DEFINE_ABBREV) {
|
|
|
|
Error("incorrectly-formatted source location entry in AST file");
|
|
|
|
return Failure;
|
|
|
|
}
|
|
|
|
|
|
|
|
RecordData Record;
|
|
|
|
const char *BlobStart;
|
|
|
|
unsigned BlobLen;
|
|
|
|
switch (SLocEntryCursor.ReadRecord(Code, Record, &BlobStart, &BlobLen)) {
|
|
|
|
default:
|
|
|
|
Error("incorrectly-formatted source location entry in AST file");
|
|
|
|
return Failure;
|
|
|
|
|
|
|
|
case SM_SLOC_FILE_ENTRY: {
|
2011-07-23 14:55:15 +04:00
|
|
|
StringRef Filename(BlobStart, BlobLen);
|
2011-06-01 09:43:53 +04:00
|
|
|
const FileEntry *File = getFileEntry(Filename);
|
|
|
|
|
|
|
|
if (File == 0) {
|
|
|
|
std::string ErrorStr = "could not find file '";
|
|
|
|
ErrorStr += Filename;
|
|
|
|
ErrorStr += "' referenced by AST file";
|
|
|
|
Error(ErrorStr.c_str());
|
|
|
|
return IgnorePCH;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (Record.size() < 6) {
|
|
|
|
Error("source location entry is incorrect");
|
|
|
|
return Failure;
|
|
|
|
}
|
|
|
|
|
|
|
|
// The stat info from the FileEntry came from the cached stat
|
|
|
|
// info of the PCH, so we cannot trust it.
|
|
|
|
struct stat StatBuf;
|
|
|
|
if (::stat(File->getName(), &StatBuf) != 0) {
|
|
|
|
StatBuf.st_size = File->getSize();
|
|
|
|
StatBuf.st_mtime = File->getModificationTime();
|
|
|
|
}
|
|
|
|
|
|
|
|
if (((off_t)Record[4] != StatBuf.st_size
|
|
|
|
#if !defined(LLVM_ON_WIN32)
|
|
|
|
// In our regression testing, the Windows file system seems to
|
|
|
|
// have inconsistent modification times that sometimes
|
|
|
|
// erroneously trigger this error-handling path.
|
|
|
|
|| (time_t)Record[5] != StatBuf.st_mtime
|
|
|
|
#endif
|
|
|
|
)) {
|
|
|
|
Error(diag::err_fe_pch_file_modified, Filename);
|
|
|
|
return IgnorePCH;
|
|
|
|
}
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return Success;
|
|
|
|
}
|
|
|
|
|
2010-10-05 20:15:19 +04:00
|
|
|
ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName,
|
2011-07-22 20:00:58 +04:00
|
|
|
ModuleKind Type) {
|
2010-10-05 20:15:19 +04:00
|
|
|
switch(ReadASTCore(FileName, Type)) {
|
2010-07-17 00:41:52 +04:00
|
|
|
case Failure: return Failure;
|
|
|
|
case IgnorePCH: return IgnorePCH;
|
|
|
|
case Success: break;
|
2009-04-11 03:10:45 +04:00
|
|
|
}
|
2009-04-10 02:27:44 +04:00
|
|
|
|
2010-07-17 04:12:06 +04:00
|
|
|
// Here comes stuff that we only do once the entire chain is loaded.
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2011-06-01 09:43:53 +04:00
|
|
|
if (!DisableValidation) {
|
|
|
|
switch(validateFileEntries()) {
|
|
|
|
case Failure: return Failure;
|
|
|
|
case IgnorePCH: return IgnorePCH;
|
|
|
|
case Success: break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-09-22 04:42:30 +04:00
|
|
|
// Preload SLocEntries.
|
|
|
|
for (unsigned I = 0, N = PreloadSLocEntries.size(); I != N; ++I) {
|
|
|
|
ASTReadResult Result = ReadSLocEntryRecord(PreloadSLocEntries[I]);
|
|
|
|
if (Result != Success)
|
2011-07-19 20:10:42 +04:00
|
|
|
return Failure;
|
2010-09-22 04:42:30 +04:00
|
|
|
}
|
2011-07-21 22:46:38 +04:00
|
|
|
PreloadSLocEntries.clear();
|
|
|
|
|
2010-07-17 00:41:52 +04:00
|
|
|
// Check the predefines buffers.
|
2011-07-27 20:30:06 +04:00
|
|
|
if (!DisableValidation && Type != MK_Module && CheckPredefinesBuffers())
|
2009-04-29 00:33:11 +04:00
|
|
|
return IgnorePCH;
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2009-06-19 04:03:23 +04:00
|
|
|
if (PP) {
|
2009-07-18 13:26:51 +04:00
|
|
|
// Initialization of keywords and pragmas occurs before the
|
2010-08-19 03:57:06 +04:00
|
|
|
// AST file is read, so there may be some identifiers that were
|
2009-06-19 04:03:23 +04:00
|
|
|
// loaded into the IdentifierTable before we intercepted the
|
|
|
|
// creation of identifiers. Iterate through the list of known
|
|
|
|
// identifiers and determine whether we have to establish
|
|
|
|
// preprocessor definitions or top-level identifier declaration
|
|
|
|
// chains for those identifiers.
|
|
|
|
//
|
|
|
|
// We copy the IdentifierInfo pointers to a small vector first,
|
|
|
|
// since de-serializing declarations or macro definitions can add
|
|
|
|
// new entries into the identifier table, invalidating the
|
|
|
|
// iterators.
|
2011-07-27 20:30:06 +04:00
|
|
|
//
|
|
|
|
// FIXME: We need a lazier way to load this information, e.g., by marking
|
|
|
|
// the identifier data as 'dirty', so that it will be looked up in the
|
|
|
|
// AST file(s) if it is uttered in the source. This could save us some
|
|
|
|
// module load time.
|
2011-07-23 14:55:15 +04:00
|
|
|
SmallVector<IdentifierInfo *, 128> Identifiers;
|
2009-06-19 04:03:23 +04:00
|
|
|
for (IdentifierTable::iterator Id = PP->getIdentifierTable().begin(),
|
|
|
|
IdEnd = PP->getIdentifierTable().end();
|
|
|
|
Id != IdEnd; ++Id)
|
|
|
|
Identifiers.push_back(Id->second);
|
2010-07-22 00:07:32 +04:00
|
|
|
// We need to search the tables in all files.
|
2011-07-26 00:32:21 +04:00
|
|
|
for (ModuleIterator J = ModuleMgr.begin(),
|
|
|
|
M = ModuleMgr.end(); J != M; ++J) {
|
2010-08-19 03:57:06 +04:00
|
|
|
ASTIdentifierLookupTable *IdTable
|
2011-07-26 00:32:21 +04:00
|
|
|
= (ASTIdentifierLookupTable *)(*J)->IdentifierLookupTable;
|
2010-08-19 03:57:06 +04:00
|
|
|
// Not all AST files necessarily have identifier tables, only the useful
|
2010-07-22 21:01:13 +04:00
|
|
|
// ones.
|
|
|
|
if (!IdTable)
|
|
|
|
continue;
|
2010-07-22 00:07:32 +04:00
|
|
|
for (unsigned I = 0, N = Identifiers.size(); I != N; ++I) {
|
|
|
|
IdentifierInfo *II = Identifiers[I];
|
|
|
|
// Look in the on-disk hash tables for an entry for this identifier
|
2011-07-26 00:32:21 +04:00
|
|
|
ASTIdentifierLookupTrait Info(*this, *(*J), II);
|
2010-07-22 00:07:32 +04:00
|
|
|
std::pair<const char*,unsigned> Key(II->getNameStart(),II->getLength());
|
2010-08-19 03:57:06 +04:00
|
|
|
ASTIdentifierLookupTable::iterator Pos = IdTable->find(Key, &Info);
|
2010-07-21 01:20:32 +04:00
|
|
|
if (Pos == IdTable->end())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// Dereferencing the iterator has the effect of populating the
|
|
|
|
// IdentifierInfo node with the various declarations it needs.
|
|
|
|
(void)*Pos;
|
|
|
|
}
|
2009-06-19 04:03:23 +04:00
|
|
|
}
|
Lazy deserialization of the declaration chains associated with
identifiers from a precompiled header.
This patch changes the primary name lookup method for entities within
a precompiled header. Previously, we would load all of the names of
declarations at translation unit scope into a large DenseMap (inside
the TranslationUnitDecl's DeclContext), and then perform a special
"last resort" lookup into this DeclContext when we knew there was a
PCH file (see Sema::LookupName). Now, when we see an identifier named
for the first time, we load all of the declarations with that name
that are visible from the translation unit into the IdentifierInfo's
chain of declarations. Thus, the explicit "look into the translation
unit's DeclContext" code is gone, and Sema effectively uses the same
IdentifierInfo-based name lookup mechanism whether we are using a PCH
file or not.
This approach should help PCH scale with the size of the input program
rather than the size of the PCH file. The "Hello, World!" application
with Carbon.h as a PCH file now loads 20% of the identifiers in the
PCH file rather than 85% of the identifiers.
90% of the 20% of identifiers loaded are actually loaded when we
deserialize the preprocessor state. The next step is to make the
preprocessor load macros lazily, which should drastically reduce the
number of types, declarations, and identifiers loaded for "Hello,
World".
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@69737 91177308-0d34-0410-b5e6-96231b3b80d8
2009-04-22 02:25:48 +04:00
|
|
|
}
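// The pattern used above, shown in isolation (sketch, not reader code): when
// visiting a std::map-style table whose contents can grow as a side effect
// of each visit, copy the elements of interest into a plain vector first and
// iterate over the copy, so later insertions cannot disturb the traversal.
//
//   #include <vector>
//
//   template <typename TableT, typename VisitorT>
//   void visitSnapshot(TableT &Table, VisitorT Visit) {
//     std::vector<typename TableT::mapped_type> Snapshot;
//     for (typename TableT::iterator I = Table.begin(), E = Table.end();
//          I != E; ++I)
//       Snapshot.push_back(I->second);
//     for (unsigned I = 0, N = Snapshot.size(); I != N; ++I)
//       Visit(Snapshot[I]);  // may insert into Table; Snapshot is unaffected
//   }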
|
|
|
|
|
2009-06-19 04:03:23 +04:00
|
|
|
if (Context)
|
|
|
|
InitializeContext(*Context);
|
2009-04-15 01:18:50 +04:00
|
|
|
|
2010-10-24 21:26:36 +04:00
|
|
|
if (DeserializationListener)
|
|
|
|
DeserializationListener->ReaderInitialized(this);
|
|
|
|
|
2010-11-30 08:23:00 +03:00
|
|
|
// If this AST file is a precompiled preamble, then set the main file ID of
|
|
|
|
// the source manager to the source file from which the preamble was
|
|
|
|
// built. This is the only valid way to use a precompiled preamble.
|
2011-07-22 20:00:58 +04:00
|
|
|
if (Type == MK_Preamble) {
|
2011-05-07 01:43:30 +04:00
|
|
|
if (OriginalFileID.isInvalid()) {
|
|
|
|
SourceLocation Loc
|
|
|
|
= SourceMgr.getLocation(FileMgr.getFile(getOriginalSourceFile()), 1, 1);
|
|
|
|
if (Loc.isValid())
|
|
|
|
OriginalFileID = SourceMgr.getDecomposedLoc(Loc).first;
|
2010-11-30 08:23:00 +03:00
|
|
|
}
|
2011-07-19 20:10:42 +04:00
|
|
|
else {
|
2011-07-26 00:32:21 +04:00
|
|
|
OriginalFileID = FileID::get(ModuleMgr.getPrimaryModule().SLocEntryBaseID
|
2011-07-19 20:10:42 +04:00
|
|
|
+ OriginalFileID.getOpaqueValue() - 1);
|
|
|
|
}
|
|
|
|
|
2011-05-07 01:43:30 +04:00
|
|
|
if (!OriginalFileID.isInvalid())
|
|
|
|
SourceMgr.SetPreambleFileID(OriginalFileID);
|
2010-11-30 08:23:00 +03:00
|
|
|
}
|
|
|
|
|
2009-04-22 02:25:48 +04:00
|
|
|
return Success;
|
2009-04-15 01:18:50 +04:00
|
|
|
}
|
|
|
|
|
2011-07-23 14:55:15 +04:00
|
|
|
ASTReader::ASTReadResult ASTReader::ReadASTCore(StringRef FileName,
|
2011-07-22 20:00:58 +04:00
|
|
|
ModuleKind Type) {
|
2011-07-26 22:21:30 +04:00
|
|
|
Module &F = ModuleMgr.addModule(FileName, Type);
|
2010-07-17 00:41:52 +04:00
|
|
|
|
2011-02-15 20:54:22 +03:00
|
|
|
if (FileName != "-") {
|
|
|
|
CurrentDir = llvm::sys::path::parent_path(FileName);
|
|
|
|
if (CurrentDir.empty()) CurrentDir = ".";
|
|
|
|
}
|
|
|
|
|
2011-08-02 21:40:32 +04:00
|
|
|
if (llvm::MemoryBuffer *Buffer = ModuleMgr.lookupBuffer(FileName)) {
|
|
|
|
F.Buffer.reset(Buffer);
|
2011-03-09 20:21:42 +03:00
|
|
|
assert(F.Buffer && "Passed null buffer");
|
|
|
|
} else {
|
|
|
|
// Open the AST file.
|
|
|
|
//
|
|
|
|
// FIXME: This shouldn't be here, we should just take a raw_ostream.
|
|
|
|
std::string ErrStr;
|
|
|
|
llvm::error_code ec;
|
|
|
|
if (FileName == "-") {
|
|
|
|
ec = llvm::MemoryBuffer::getSTDIN(F.Buffer);
|
|
|
|
if (ec)
|
|
|
|
ErrStr = ec.message();
|
|
|
|
} else
|
|
|
|
F.Buffer.reset(FileMgr.getBufferForFile(FileName, &ErrStr));
|
|
|
|
if (!F.Buffer) {
|
|
|
|
Error(ErrStr.c_str());
|
|
|
|
return IgnorePCH;
|
|
|
|
}
|
2010-07-17 00:41:52 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
// Initialize the stream
|
|
|
|
F.StreamFile.init((const unsigned char *)F.Buffer->getBufferStart(),
|
|
|
|
(const unsigned char *)F.Buffer->getBufferEnd());
|
|
|
|
llvm::BitstreamCursor &Stream = F.Stream;
|
|
|
|
Stream.init(F.StreamFile);
|
2010-07-22 00:07:32 +04:00
|
|
|
F.SizeInBits = F.Buffer->getBufferSize() * 8;
|
Introduce a global bit-offset continuous range map into the ASTReader,
so that we have one, simple way to map from global bit offsets to
local bit offsets. Eliminates a number of loops over the chain, and
generalizes for more interesting bit remappings.
Also, as an amusing oddity, we were computing global bit offsets
*backwards* for preprocessed entities (e.g., the directly included PCH
file in the chain would start at offset zero, rather than the original
PCH that occurs first in the translation unit). Even more amusingly, it
made precompiled preambles work, because we were forgetting to adjust
the local bit offset to a global bit offset when storing preprocessed
entity offsets in the ASTUnit. Two wrongs made a right, and now
they're both right.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@135750 91177308-0d34-0410-b5e6-96231b3b80d8
2011-07-22 10:10:01 +04:00
|
|
|
|
2010-07-17 00:41:52 +04:00
|
|
|
// Sniff for the signature.
|
|
|
|
if (Stream.Read(8) != 'C' ||
|
|
|
|
Stream.Read(8) != 'P' ||
|
|
|
|
Stream.Read(8) != 'C' ||
|
|
|
|
Stream.Read(8) != 'H') {
|
|
|
|
Diag(diag::err_not_a_pch_file) << FileName;
|
|
|
|
return Failure;
|
|
|
|
}
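// A stand-alone way to pre-check the same four-byte signature, e.g. when
// deciding whether a file is worth handing to the reader at all (sketch; the
// reader itself consumes the bytes through the bitstream cursor as above):
//
//   #include <cstring>
//
//   static bool looksLikeASTFile(const llvm::MemoryBuffer &Buffer) {
//     return Buffer.getBufferSize() >= 4 &&
//            std::memcmp(Buffer.getBufferStart(), "CPCH", 4) == 0;
//   }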
|
|
|
|
|
2010-07-17 04:12:06 +04:00
|
|
|
while (!Stream.AtEndOfStream()) {
|
|
|
|
unsigned Code = Stream.ReadCode();
|
|
|
|
|
|
|
|
if (Code != llvm::bitc::ENTER_SUBBLOCK) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("invalid record at top-level of AST file");
|
2010-07-17 04:12:06 +04:00
|
|
|
return Failure;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned BlockID = Stream.ReadSubBlockID();
|
|
|
|
|
2010-08-19 03:57:06 +04:00
|
|
|
// We only know the AST subblock ID.
|
2010-07-17 04:12:06 +04:00
|
|
|
switch (BlockID) {
|
|
|
|
case llvm::bitc::BLOCKINFO_BLOCK_ID:
|
|
|
|
if (Stream.ReadBlockInfoBlock()) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("malformed BlockInfoBlock in AST file");
|
2010-07-17 04:12:06 +04:00
|
|
|
return Failure;
|
|
|
|
}
|
|
|
|
break;
|
2010-08-19 03:57:32 +04:00
|
|
|
case AST_BLOCK_ID:
|
2010-08-19 03:56:56 +04:00
|
|
|
switch (ReadASTBlock(F)) {
|
2010-07-17 04:12:06 +04:00
|
|
|
case Success:
|
|
|
|
break;
|
|
|
|
|
|
|
|
case Failure:
|
|
|
|
return Failure;
|
|
|
|
|
|
|
|
case IgnorePCH:
|
|
|
|
// FIXME: We could consider reading through to the end of this
|
2010-08-19 03:57:06 +04:00
|
|
|
// AST block, skipping subblocks, to see if there are other
|
|
|
|
// AST blocks elsewhere.
|
2010-07-17 04:12:06 +04:00
|
|
|
|
2011-07-19 20:10:42 +04:00
|
|
|
// FIXME: We can't clear loaded slocentries anymore.
|
|
|
|
//SourceMgr.ClearPreallocatedSLocEntries();
|
2010-07-17 04:12:06 +04:00
|
|
|
|
|
|
|
// Remove the stat cache.
|
|
|
|
if (F.StatCache)
|
2010-08-19 03:57:06 +04:00
|
|
|
FileMgr.removeStatCache((ASTStatCache*)F.StatCache);
|
2010-07-17 04:12:06 +04:00
|
|
|
|
|
|
|
return IgnorePCH;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
if (Stream.SkipBlock()) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("malformed block record in AST file");
|
2010-07-17 04:12:06 +04:00
|
|
|
return Failure;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2010-07-17 00:41:52 +04:00
|
|
|
}
|
2011-07-22 10:10:01 +04:00
|
|
|
|
2011-07-22 20:00:58 +04:00
|
|
|
// Once read, set the Module bit base offset and update the size in
|
2011-07-22 10:10:01 +04:00
|
|
|
// bits of all files we've seen.
|
|
|
|
F.GlobalBitOffset = TotalModulesSizeInBits;
|
|
|
|
TotalModulesSizeInBits += F.SizeInBits;
|
|
|
|
GlobalBitOffsetsMap.insert(std::make_pair(F.GlobalBitOffset, &F));
|
2010-07-17 00:41:52 +04:00
|
|
|
return Success;
|
|
|
|
}
|
|
|
|
|
2010-08-19 03:56:43 +04:00
|
|
|
void ASTReader::setPreprocessor(Preprocessor &pp) {
|
Implement serialization and lazy deserialization of the preprocessing
record (which includes all macro instantiations and definitions). As
with all lazy deserialization, this introduces a new external source
(here, an external preprocessing record source) that loads all of the
preprocessed entities prior to iterating over the entities.
The preprocessing record is an optional part of the precompiled header
that is disabled by default (enabled with
-detailed-preprocessing-record). When the preprocessor given to the
PCH writer has a preprocessing record, that record is written into the
PCH file. When the PCH reader is given a PCH file that contains a
preprocessing record, it will be lazily loaded (which, effectively,
implicitly adds -detailed-preprocessing-record). This is the first
case where we have sections of the precompiled header that are
added/removed based on a compilation flag, which is
unfortunate. However, this data consumes ~550k in the PCH file for
Cocoa.h (out of ~9.9MB), and there is a non-trivial cost to gathering
this detailed preprocessing information, so it's too expensive to turn
on by default. In the future, we should investigate a better encoding
of this information.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@99002 91177308-0d34-0410-b5e6-96231b3b80d8
2010-03-20 00:51:54 +03:00
|
|
|
PP = &pp;
|
2011-07-21 04:47:40 +04:00
|
|
|
|
|
|
|
if (unsigned N = getTotalNumPreprocessedEntities()) {
|
2010-03-20 00:51:54 +03:00
|
|
|
if (!PP->getPreprocessingRecord())
|
2011-05-06 20:33:08 +04:00
|
|
|
PP->createPreprocessingRecord(true);
|
2011-07-21 04:47:40 +04:00
|
|
|
PP->getPreprocessingRecord()->SetExternalSource(*this);
|
|
|
|
PP->getPreprocessingRecord()->allocateLoadedEntities(N);
|
2010-03-20 00:51:54 +03:00
|
|
|
}
|
2011-07-21 04:47:40 +04:00
|
|
|
|
|
|
|
PP->getHeaderSearchInfo().SetExternalLookup(this);
|
2011-07-21 22:46:38 +04:00
|
|
|
PP->getHeaderSearchInfo().SetExternalSource(this);
|
2010-03-20 00:51:54 +03:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:56:43 +04:00
|
|
|
void ASTReader::InitializeContext(ASTContext &Ctx) {
|
2009-06-19 04:03:23 +04:00
|
|
|
Context = &Ctx;
|
|
|
|
assert(Context && "Passed null context!");
|
|
|
|
|
|
|
|
assert(PP && "Forgot to set Preprocessor ?");
|
|
|
|
PP->getIdentifierTable().setExternalIdentifierLookup(this);
|
2010-01-04 22:18:44 +03:00
|
|
|
PP->setExternalSource(this);
|
Implement two related optimizations that make de-serialization of
AST/PCH files more lazy:
- Don't preload all of the file source-location entries when reading
the AST file. Instead, load them lazily, when needed.
- Only look up header-search information (whether a header was already
#import'd, how many times it's been included, etc.) when it's needed
by the preprocessor, rather than pre-populating it.
Previously, we would pre-load all of the file source-location entries,
which also populated the header-search information structure. This was
a relatively minor performance issue, since we would end up stat()'ing
all of the headers stored within an AST/PCH file when the AST/PCH file
was loaded. In the normal PCH use case, the stat()s were cached, so
the cost--of preloading ~860 source-location entries in the Cocoa.h
case---was relatively low.
However, the recent optimization that replaced stat+open with
open+fstat turned this into a major problem, since the preloading of
source-location entries would now end up opening those files. Worse,
those files wouldn't be closed until the file manager was destroyed,
so just opening a Cocoa.h PCH file would hold on to ~860 file
descriptors, and it was easy to blow through the process's limit on
the number of open file descriptors.
By eliminating the preloading of these files, we neither open nor stat
the headers stored in the PCH/AST file until they're actually needed
for something. Concretely, we went from
*** HeaderSearch Stats:
835 files tracked.
364 #import/#pragma once files.
823 included exactly once.
6 max times a file is included.
3 #include/#include_next/#import.
0 #includes skipped due to the multi-include optimization.
1 framework lookups.
0 subframework lookups.
*** Source Manager Stats:
835 files mapped, 3 mem buffers mapped.
37460 SLocEntry's allocated, 11215575B of Sloc address space used.
62 bytes of files mapped, 0 files with line #'s computed.
with a trivial program that uses a chained PCH including a Cocoa PCH
to
*** HeaderSearch Stats:
4 files tracked.
1 #import/#pragma once files.
3 included exactly once.
2 max times a file is included.
3 #include/#include_next/#import.
0 #includes skipped due to the multi-include optimization.
1 framework lookups.
0 subframework lookups.
*** Source Manager Stats:
3 files mapped, 3 mem buffers mapped.
37460 SLocEntry's allocated, 11215575B of Sloc address space used.
62 bytes of files mapped, 0 files with line #'s computed.
for the same program.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@125286 91177308-0d34-0410-b5e6-96231b3b80d8
2011-02-10 20:09:37 +03:00
|
|
|
|
2010-10-01 05:18:02 +04:00
|
|
|
// If we have an update block for the TU waiting, we have to add it before
|
|
|
|
// deserializing the decl.
|
2011-08-12 04:15:20 +04:00
|
|
|
TranslationUnitDecl *TU = Ctx.getTranslationUnitDecl();
|
2010-10-01 05:18:02 +04:00
|
|
|
DeclContextOffsetsMap::iterator DCU = DeclContextOffsets.find(0);
|
|
|
|
if (DCU != DeclContextOffsets.end()) {
|
|
|
|
// Insertion could invalidate map, so grab vector.
|
|
|
|
DeclContextInfos T;
|
|
|
|
T.swap(DCU->second);
|
|
|
|
DeclContextOffsets.erase(DCU);
|
2011-08-12 04:15:20 +04:00
|
|
|
DeclContextOffsets[TU].swap(T);
|
2010-10-01 05:18:02 +04:00
|
|
|
}
|
2011-08-12 04:15:20 +04:00
|
|
|
|
|
|
|
// If there's a listener, notify them that we "read" the translation unit.
|
|
|
|
if (DeserializationListener)
|
|
|
|
DeserializationListener->DeclRead(PREDEF_DECL_TRANSLATION_UNIT_ID, TU);
|
2010-10-01 05:18:02 +04:00
|
|
|
|
2011-08-12 04:15:20 +04:00
|
|
|
// Make sure we load the declaration update records for the translation unit,
|
|
|
|
// if there are any.
|
|
|
|
loadDeclUpdateRecords(PREDEF_DECL_TRANSLATION_UNIT_ID, TU);
|
|
|
|
|
|
|
|
// Note that the translation unit has external lexical and visible storage.
|
|
|
|
TU->setHasExternalLexicalStorage(true);
|
|
|
|
TU->setHasExternalVisibleStorage(true);
|
2009-06-19 04:03:23 +04:00
|
|
|
|
2011-08-12 02:18:49 +04:00
|
|
|
// FIXME: Find a better way to deal with collisions between these
|
|
|
|
// built-in types. Right now, we just ignore the problem.
|
|
|
|
|
|
|
|
// Load the special types.
|
2011-08-06 03:07:10 +04:00
|
|
|
if (Context->getBuiltinVaListType().isNull()) {
|
|
|
|
Context->setBuiltinVaListType(
|
|
|
|
GetType(SpecialTypes[SPECIAL_TYPE_BUILTIN_VA_LIST]));
|
2011-08-12 02:18:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
if (unsigned Sel = SpecialTypes[SPECIAL_TYPE_OBJC_SELECTOR]) {
|
|
|
|
if (Context->ObjCSelTypedefType.isNull())
|
2011-08-12 02:04:35 +04:00
|
|
|
Context->ObjCSelTypedefType = GetType(Sel);
|
2011-08-12 02:18:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
if (unsigned Proto = SpecialTypes[SPECIAL_TYPE_OBJC_PROTOCOL]) {
|
|
|
|
if (Context->ObjCProtoType.isNull())
|
2011-08-12 02:04:35 +04:00
|
|
|
Context->ObjCProtoType = GetType(Proto);
|
2011-08-12 02:18:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
if (unsigned String = SpecialTypes[SPECIAL_TYPE_CF_CONSTANT_STRING]) {
|
|
|
|
if (!Context->CFConstantStringTypeDecl)
|
2011-08-06 03:07:10 +04:00
|
|
|
Context->setCFConstantStringType(GetType(String));
|
2011-08-12 02:18:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
if (unsigned File = SpecialTypes[SPECIAL_TYPE_FILE]) {
|
|
|
|
QualType FileType = GetType(File);
|
|
|
|
if (FileType.isNull()) {
|
|
|
|
Error("FILE type is NULL");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!Context->FILEDecl) {
|
2011-08-06 03:07:10 +04:00
|
|
|
if (const TypedefType *Typedef = FileType->getAs<TypedefType>())
|
|
|
|
Context->setFILEDecl(Typedef->getDecl());
|
|
|
|
else {
|
|
|
|
const TagType *Tag = FileType->getAs<TagType>();
|
|
|
|
if (!Tag) {
|
|
|
|
Error("Invalid FILE type in AST file");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
Context->setFILEDecl(Tag->getDecl());
|
|
|
|
}
|
2009-07-07 20:35:42 +04:00
|
|
|
}
|
2011-08-12 02:18:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
if (unsigned Jmp_buf = SpecialTypes[SPECIAL_TYPE_jmp_buf]) {
|
|
|
|
QualType Jmp_bufType = GetType(Jmp_buf);
|
|
|
|
if (Jmp_bufType.isNull()) {
|
|
|
|
Error("jmp_buf type is NULL");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!Context->jmp_bufDecl) {
|
2011-08-06 03:07:10 +04:00
|
|
|
if (const TypedefType *Typedef = Jmp_bufType->getAs<TypedefType>())
|
|
|
|
Context->setjmp_bufDecl(Typedef->getDecl());
|
|
|
|
else {
|
|
|
|
const TagType *Tag = Jmp_bufType->getAs<TagType>();
|
|
|
|
if (!Tag) {
|
|
|
|
Error("Invalid jmp_buf type in AST file");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
Context->setjmp_bufDecl(Tag->getDecl());
|
|
|
|
}
|
2010-03-18 03:56:54 +03:00
|
|
|
}
|
2011-08-12 02:18:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
if (unsigned Sigjmp_buf = SpecialTypes[SPECIAL_TYPE_sigjmp_buf]) {
|
|
|
|
QualType Sigjmp_bufType = GetType(Sigjmp_buf);
|
|
|
|
if (Sigjmp_bufType.isNull()) {
|
|
|
|
Error("sigjmp_buf type is NULL");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!Context->sigjmp_bufDecl) {
|
2011-08-06 03:07:10 +04:00
|
|
|
if (const TypedefType *Typedef = Sigjmp_bufType->getAs<TypedefType>())
|
|
|
|
Context->setsigjmp_bufDecl(Typedef->getDecl());
|
|
|
|
else {
|
|
|
|
const TagType *Tag = Sigjmp_bufType->getAs<TagType>();
|
|
|
|
assert(Tag && "Invalid sigjmp_buf type in AST file");
|
|
|
|
Context->setsigjmp_bufDecl(Tag->getDecl());
|
|
|
|
}
|
2009-07-28 06:25:19 +04:00
|
|
|
}
|
2011-08-12 02:18:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
if (unsigned ObjCIdRedef
|
|
|
|
= SpecialTypes[SPECIAL_TYPE_OBJC_ID_REDEFINITION]) {
|
|
|
|
if (Context->ObjCIdRedefinitionType.isNull())
|
2011-08-06 03:07:10 +04:00
|
|
|
Context->ObjCIdRedefinitionType = GetType(ObjCIdRedef);
|
2011-08-12 02:18:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
if (unsigned ObjCClassRedef
|
|
|
|
= SpecialTypes[SPECIAL_TYPE_OBJC_CLASS_REDEFINITION]) {
|
|
|
|
if (Context->ObjCClassRedefinitionType.isNull())
|
2011-08-06 03:07:10 +04:00
|
|
|
Context->ObjCClassRedefinitionType = GetType(ObjCClassRedef);
|
2011-08-12 02:18:49 +04:00
|
|
|
}
|
2011-08-06 03:07:10 +04:00
|
|
|
|
2011-08-12 02:18:49 +04:00
|
|
|
if (unsigned ObjCSelRedef
|
|
|
|
= SpecialTypes[SPECIAL_TYPE_OBJC_SEL_REDEFINITION]) {
|
|
|
|
if (Context->ObjCSelRedefinitionType.isNull())
|
|
|
|
Context->ObjCSelRedefinitionType = GetType(ObjCSelRedef);
|
2009-07-28 06:25:19 +04:00
|
|
|
}
|
2011-04-15 02:09:26 +04:00
|
|
|
|
2011-08-12 02:18:49 +04:00
|
|
|
if (SpecialTypes[SPECIAL_TYPE_INT128_INSTALLED])
|
|
|
|
Context->setInt128Installed();
|
|
|
|
|
2011-01-14 23:54:07 +03:00
|
|
|
ReadPragmaDiagnosticMappings(Context->getDiagnostics());
|
2011-02-10 00:04:32 +03:00
|
|
|
|
|
|
|
// If there were any CUDA special declarations, deserialize them.
|
|
|
|
if (!CUDASpecialDeclRefs.empty()) {
|
|
|
|
assert(CUDASpecialDeclRefs.size() == 1 && "More decl refs than expected!");
|
|
|
|
Context->setcudaConfigureCallDecl(
|
|
|
|
cast<FunctionDecl>(GetDecl(CUDASpecialDeclRefs[0])));
|
|
|
|
}
|
2009-06-19 04:03:23 +04:00
|
|
|
}
|
|
|
|
|
2009-05-12 05:31:05 +04:00
|
|
|
/// \brief Retrieve the name of the original source file
|
2010-08-19 03:57:06 +04:00
|
|
|
/// directly from the AST file, without actually loading the AST
|
2009-05-12 05:31:05 +04:00
|
|
|
/// file.
|
2010-08-19 03:57:06 +04:00
|
|
|
std::string ASTReader::getOriginalSourceFile(const std::string &ASTFileName,
|
2010-11-04 01:45:23 +03:00
|
|
|
FileManager &FileMgr,
|
2009-12-03 12:13:06 +03:00
|
|
|
Diagnostic &Diags) {
|
2010-08-19 03:57:06 +04:00
|
|
|
// Open the AST file.
|
2009-05-12 05:31:05 +04:00
|
|
|
std::string ErrStr;
|
|
|
|
llvm::OwningPtr<llvm::MemoryBuffer> Buffer;
|
2010-11-23 11:35:12 +03:00
|
|
|
Buffer.reset(FileMgr.getBufferForFile(ASTFileName, &ErrStr));
|
2009-05-12 05:31:05 +04:00
|
|
|
if (!Buffer) {
|
2009-12-03 12:13:06 +03:00
|
|
|
Diags.Report(diag::err_fe_unable_to_read_pch_file) << ErrStr;
|
2009-05-12 05:31:05 +04:00
|
|
|
return std::string();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Initialize the stream
|
|
|
|
llvm::BitstreamReader StreamFile;
|
|
|
|
llvm::BitstreamCursor Stream;
|
2009-09-09 19:08:12 +04:00
|
|
|
StreamFile.init((const unsigned char *)Buffer->getBufferStart(),
|
2009-05-12 05:31:05 +04:00
|
|
|
(const unsigned char *)Buffer->getBufferEnd());
|
|
|
|
Stream.init(StreamFile);
|
|
|
|
|
|
|
|
// Sniff for the signature.
|
|
|
|
if (Stream.Read(8) != 'C' ||
|
|
|
|
Stream.Read(8) != 'P' ||
|
|
|
|
Stream.Read(8) != 'C' ||
|
|
|
|
Stream.Read(8) != 'H') {
|
2010-08-19 03:57:06 +04:00
|
|
|
Diags.Report(diag::err_fe_not_a_pch_file) << ASTFileName;
|
2009-05-12 05:31:05 +04:00
|
|
|
return std::string();
|
|
|
|
}
|
|
|
|
|
|
|
|
RecordData Record;
|
|
|
|
while (!Stream.AtEndOfStream()) {
|
|
|
|
unsigned Code = Stream.ReadCode();
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2009-05-12 05:31:05 +04:00
|
|
|
if (Code == llvm::bitc::ENTER_SUBBLOCK) {
|
|
|
|
unsigned BlockID = Stream.ReadSubBlockID();
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2010-08-19 03:57:06 +04:00
|
|
|
// We only know the AST subblock ID.
|
2009-05-12 05:31:05 +04:00
|
|
|
switch (BlockID) {
|
2010-08-19 03:57:32 +04:00
|
|
|
case AST_BLOCK_ID:
|
|
|
|
if (Stream.EnterSubBlock(AST_BLOCK_ID)) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Diags.Report(diag::err_fe_pch_malformed_block) << ASTFileName;
|
2009-05-12 05:31:05 +04:00
|
|
|
return std::string();
|
|
|
|
}
|
|
|
|
break;
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2009-05-12 05:31:05 +04:00
|
|
|
default:
|
|
|
|
if (Stream.SkipBlock()) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Diags.Report(diag::err_fe_pch_malformed_block) << ASTFileName;
|
2009-05-12 05:31:05 +04:00
|
|
|
return std::string();
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (Code == llvm::bitc::END_BLOCK) {
|
|
|
|
if (Stream.ReadBlockEnd()) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Diags.Report(diag::err_fe_pch_error_at_end_block) << ASTFileName;
|
2009-05-12 05:31:05 +04:00
|
|
|
return std::string();
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (Code == llvm::bitc::DEFINE_ABBREV) {
|
|
|
|
Stream.ReadAbbrevRecord();
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
Record.clear();
|
|
|
|
const char *BlobStart = 0;
|
|
|
|
unsigned BlobLen = 0;
|
2009-09-09 19:08:12 +04:00
|
|
|
if (Stream.ReadRecord(Code, Record, &BlobStart, &BlobLen)
|
2010-08-19 03:57:32 +04:00
|
|
|
== ORIGINAL_FILE_NAME)
|
2009-05-12 05:31:05 +04:00
|
|
|
return std::string(BlobStart, BlobLen);
|
2009-09-09 19:08:12 +04:00
|
|
|
}
|
2009-05-12 05:31:05 +04:00
|
|
|
|
|
|
|
return std::string();
|
|
|
|
}
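Editor's note: as a usage-level illustration of the signature check performed by getOriginalSourceFile above, the following standalone sketch (not part of Clang; looksLikePCHFile is an invented helper) tests a file's first four bytes against the same 'C' 'P' 'C' 'H' magic:

// Illustrative sketch only -- not part of ASTReader. Reads the first four
// bytes of a file and compares them with the 'CPCH' signature sniffed above.
#include <fstream>
#include <string>

static bool looksLikePCHFile(const std::string &Path) {
  std::ifstream In(Path.c_str(), std::ios::in | std::ios::binary);
  char Magic[4] = {0, 0, 0, 0};
  if (!In.read(Magic, 4))
    return false;  // unreadable or shorter than the signature
  return Magic[0] == 'C' && Magic[1] == 'P' &&
         Magic[2] == 'C' && Magic[3] == 'H';
}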
|
|
|
|
|
2009-04-11 00:39:37 +04:00
|
|
|
/// \brief Parse the record that corresponds to a LangOptions data
|
|
|
|
/// structure.
|
|
|
|
///
|
2010-08-19 03:57:06 +04:00
|
|
|
/// This routine parses the language options from the AST file and then gives
|
|
|
|
/// them to the AST listener if one is set.
|
2009-04-11 00:39:37 +04:00
|
|
|
///
|
2010-08-19 03:57:06 +04:00
|
|
|
/// \returns true if the listener deems the file unacceptable, false otherwise.
|
2010-08-19 03:56:43 +04:00
|
|
|
bool ASTReader::ParseLanguageOptions(
|
2011-07-23 14:55:15 +04:00
|
|
|
const SmallVectorImpl<uint64_t> &Record) {
|
2009-06-19 04:03:23 +04:00
|
|
|
if (Listener) {
|
|
|
|
LangOptions LangOpts;
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2009-06-19 04:03:23 +04:00
|
|
|
#define PARSE_LANGOPT(Option) \
|
|
|
|
LangOpts.Option = Record[Idx]; \
|
|
|
|
++Idx
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2009-06-19 04:03:23 +04:00
|
|
|
unsigned Idx = 0;
|
|
|
|
PARSE_LANGOPT(Trigraphs);
|
|
|
|
PARSE_LANGOPT(BCPLComment);
|
|
|
|
PARSE_LANGOPT(DollarIdents);
|
|
|
|
PARSE_LANGOPT(AsmPreprocessor);
|
|
|
|
PARSE_LANGOPT(GNUMode);
|
2010-04-18 00:17:31 +04:00
|
|
|
PARSE_LANGOPT(GNUKeywords);
|
2009-06-19 04:03:23 +04:00
|
|
|
PARSE_LANGOPT(ImplicitInt);
|
|
|
|
PARSE_LANGOPT(Digraphs);
|
|
|
|
PARSE_LANGOPT(HexFloats);
|
|
|
|
PARSE_LANGOPT(C99);
|
2011-04-15 04:35:23 +04:00
|
|
|
PARSE_LANGOPT(C1X);
|
2009-06-19 04:03:23 +04:00
|
|
|
PARSE_LANGOPT(Microsoft);
|
|
|
|
PARSE_LANGOPT(CPlusPlus);
|
|
|
|
PARSE_LANGOPT(CPlusPlus0x);
|
|
|
|
PARSE_LANGOPT(CXXOperatorNames);
|
|
|
|
PARSE_LANGOPT(ObjC1);
|
|
|
|
PARSE_LANGOPT(ObjC2);
|
|
|
|
PARSE_LANGOPT(ObjCNonFragileABI);
|
2010-02-09 22:31:38 +03:00
|
|
|
PARSE_LANGOPT(ObjCNonFragileABI2);
|
2011-01-07 21:59:25 +03:00
|
|
|
PARSE_LANGOPT(AppleKext);
|
2010-12-24 00:35:43 +03:00
|
|
|
PARSE_LANGOPT(ObjCDefaultSynthProperties);
|
2011-06-15 03:20:43 +04:00
|
|
|
PARSE_LANGOPT(ObjCInferRelatedResultType);
|
2010-04-23 01:01:59 +04:00
|
|
|
PARSE_LANGOPT(NoConstantCFStrings);
|
2009-06-19 04:03:23 +04:00
|
|
|
PARSE_LANGOPT(PascalStrings);
|
|
|
|
PARSE_LANGOPT(WritableStrings);
|
|
|
|
PARSE_LANGOPT(LaxVectorConversions);
|
2009-06-26 03:01:11 +04:00
|
|
|
PARSE_LANGOPT(AltiVec);
|
2009-06-19 04:03:23 +04:00
|
|
|
PARSE_LANGOPT(Exceptions);
|
2011-02-20 02:53:54 +03:00
|
|
|
PARSE_LANGOPT(ObjCExceptions);
|
2011-02-23 06:04:54 +03:00
|
|
|
PARSE_LANGOPT(CXXExceptions);
|
|
|
|
PARSE_LANGOPT(SjLjExceptions);
|
2011-02-01 18:15:22 +03:00
|
|
|
PARSE_LANGOPT(MSBitfields);
|
2009-06-19 04:03:23 +04:00
|
|
|
PARSE_LANGOPT(NeXTRuntime);
|
|
|
|
PARSE_LANGOPT(Freestanding);
|
|
|
|
PARSE_LANGOPT(NoBuiltin);
|
|
|
|
PARSE_LANGOPT(ThreadsafeStatics);
|
2009-09-03 18:36:33 +04:00
|
|
|
PARSE_LANGOPT(POSIXThreads);
|
2009-06-19 04:03:23 +04:00
|
|
|
PARSE_LANGOPT(Blocks);
|
|
|
|
PARSE_LANGOPT(EmitAllDecls);
|
|
|
|
PARSE_LANGOPT(MathErrno);
|
2010-06-27 01:25:03 +04:00
|
|
|
LangOpts.setSignedOverflowBehavior((LangOptions::SignedOverflowBehaviorTy)
|
|
|
|
Record[Idx++]);
|
2009-06-19 04:03:23 +04:00
|
|
|
PARSE_LANGOPT(HeinousExtensions);
|
|
|
|
PARSE_LANGOPT(Optimize);
|
|
|
|
PARSE_LANGOPT(OptimizeSize);
|
|
|
|
PARSE_LANGOPT(Static);
|
|
|
|
PARSE_LANGOPT(PICLevel);
|
|
|
|
PARSE_LANGOPT(GNUInline);
|
|
|
|
PARSE_LANGOPT(NoInline);
|
2011-04-24 00:05:38 +04:00
|
|
|
PARSE_LANGOPT(Deprecated);
|
2009-06-19 04:03:23 +04:00
|
|
|
PARSE_LANGOPT(AccessControl);
|
|
|
|
PARSE_LANGOPT(CharIsSigned);
|
2009-11-05 23:14:16 +03:00
|
|
|
PARSE_LANGOPT(ShortWChar);
|
2011-01-15 05:56:16 +03:00
|
|
|
PARSE_LANGOPT(ShortEnums);
|
2010-06-27 01:25:03 +04:00
|
|
|
LangOpts.setGCMode((LangOptions::GCMode)Record[Idx++]);
|
2010-10-23 01:05:15 +04:00
|
|
|
LangOpts.setVisibilityMode((Visibility)Record[Idx++]);
|
2009-09-21 08:16:19 +04:00
|
|
|
LangOpts.setStackProtectorMode((LangOptions::StackProtectorMode)
|
2010-06-27 01:25:03 +04:00
|
|
|
Record[Idx++]);
|
2009-06-19 04:03:23 +04:00
|
|
|
PARSE_LANGOPT(InstantiationDepth);
|
2009-06-26 03:01:11 +04:00
|
|
|
PARSE_LANGOPT(OpenCL);
|
2010-12-01 22:14:57 +03:00
|
|
|
PARSE_LANGOPT(CUDA);
|
2009-12-12 04:27:46 +03:00
|
|
|
PARSE_LANGOPT(CatchUndefined);
|
2011-02-15 22:46:30 +03:00
|
|
|
PARSE_LANGOPT(DefaultFPContract);
|
2011-03-01 20:36:40 +03:00
|
|
|
PARSE_LANGOPT(ElideConstructors);
|
|
|
|
PARSE_LANGOPT(SpellChecking);
|
2011-03-01 20:40:53 +03:00
|
|
|
PARSE_LANGOPT(MRTD);
|
2011-06-16 03:02:42 +04:00
|
|
|
PARSE_LANGOPT(ObjCAutoRefCount);
|
2011-08-04 19:46:00 +04:00
|
|
|
PARSE_LANGOPT(ObjCInferRelatedReturnType);
|
2009-06-19 04:03:23 +04:00
|
|
|
#undef PARSE_LANGOPT
|
|
|
|
|
|
|
|
return Listener->ReadLanguageOptions(LangOpts);
|
2009-04-11 00:39:37 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
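Editor's note: to make the "returns true if the listener deems the file unacceptable" contract above concrete, here is a hedged sketch of a listener-style check. The class name LangOptsChecker is hypothetical, only a few representative LangOptions fields are compared, and this is not the real listener interface.

// Illustrative sketch only -- a hypothetical checker, not ASTReader's actual
// listener. Rejects an AST file whose language options disagree with the
// current invocation, following the contract documented above.
#include "clang/Basic/LangOptions.h"

class LangOptsChecker {
  const clang::LangOptions &Current;

public:
  explicit LangOptsChecker(const clang::LangOptions &Opts) : Current(Opts) {}

  // Return true to reject the AST file (mirrors ParseLanguageOptions' result).
  bool ReadLanguageOptions(const clang::LangOptions &FromASTFile) const {
    return FromASTFile.CPlusPlus != Current.CPlusPlus ||
           FromASTFile.ObjC1 != Current.ObjC1 ||
           FromASTFile.Exceptions != Current.Exceptions;
  }
};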
|
|
|
|
|
2010-08-19 03:56:43 +04:00
|
|
|
void ASTReader::ReadPreprocessedEntities() {
|
2011-07-26 00:32:21 +04:00
|
|
|
for (ModuleIterator I = ModuleMgr.begin(), E = ModuleMgr.end(); I != E; ++I) {
|
|
|
|
Module &F = *(*I);
|
2011-02-09 00:58:10 +03:00
|
|
|
if (!F.PreprocessorDetailCursor.getBitStreamReader())
|
|
|
|
continue;
|
|
|
|
|
2011-07-26 00:32:21 +04:00
|
|
|
SavedStreamPosition SavedPosition(F.PreprocessorDetailCursor);
|
2011-02-09 00:58:10 +03:00
|
|
|
F.PreprocessorDetailCursor.JumpToBit(F.PreprocessorDetailStartOffset);
|
|
|
|
while (LoadPreprocessedEntity(F)) { }
|
|
|
|
}
|
2010-03-20 00:51:54 +03:00
|
|
|
}
|
|
|
|
|
2011-02-11 22:46:30 +03:00
|
|
|
PreprocessedEntity *ASTReader::ReadPreprocessedEntityAtOffset(uint64_t Offset) {
|
2011-07-22 10:10:01 +04:00
|
|
|
RecordLocation Loc = getLocalBitOffset(Offset);
|
2010-11-30 09:16:57 +03:00
|
|
|
|
2011-02-09 00:58:10 +03:00
|
|
|
// Keep track of where we are in the stream, then jump back there
|
|
|
|
// after reading this entity.
|
2011-07-22 10:10:01 +04:00
|
|
|
SavedStreamPosition SavedPosition(Loc.F->PreprocessorDetailCursor);
|
|
|
|
Loc.F->PreprocessorDetailCursor.JumpToBit(Loc.Offset);
|
|
|
|
return LoadPreprocessedEntity(*Loc.F);
|
2010-11-30 09:16:57 +03:00
|
|
|
}
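Editor's note: the read-at-offset pattern above (save the cursor position, jump, read, let the saved position restore itself) is what SavedStreamPosition provides. The sketch below is an editor's illustration of that RAII idiom using a stand-in FakeCursor type, not llvm::BitstreamCursor.

// Illustrative sketch only -- the SavedStreamPosition idiom in isolation:
// remember a cursor's bit position on construction and restore it on
// destruction, so random-access reads leave the caller's position untouched.
#include <stdint.h>

struct FakeCursor {
  uint64_t Bit;
  FakeCursor() : Bit(0) {}
  uint64_t GetCurrentBitNo() const { return Bit; }
  void JumpToBit(uint64_t NewBit) { Bit = NewBit; }
};

class ScopedCursorPosition {
  FakeCursor &Cursor;
  uint64_t SavedBit;

public:
  explicit ScopedCursorPosition(FakeCursor &C)
      : Cursor(C), SavedBit(C.GetCurrentBitNo()) {}
  ~ScopedCursorPosition() { Cursor.JumpToBit(SavedBit); }
};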
|
|
|
|
|
2011-02-10 20:09:37 +03:00
|
|
|
HeaderFileInfo ASTReader::GetHeaderFileInfo(const FileEntry *FE) {
|
2011-07-26 00:32:21 +04:00
|
|
|
for (ModuleIterator I = ModuleMgr.begin(), E = ModuleMgr.end(); I != E; ++I) {
|
|
|
|
Module &F = *(*I);
|
2011-07-28 08:50:02 +04:00
|
|
|
|
2011-07-29 00:55:49 +04:00
|
|
|
HeaderFileInfoTrait Trait(*this, F, &PP->getHeaderSearchInfo(),
|
2011-07-28 08:50:02 +04:00
|
|
|
F.HeaderFileFrameworkStrings,
|
|
|
|
FE->getName());
|
|
|
|
|
2011-02-10 20:09:37 +03:00
|
|
|
HeaderFileInfoLookupTable *Table
|
|
|
|
= static_cast<HeaderFileInfoLookupTable *>(F.HeaderFileInfoTable);
|
|
|
|
if (!Table)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// Look in the on-disk hash table for an entry for this file name.
|
|
|
|
HeaderFileInfoLookupTable::iterator Pos = Table->find(FE->getName(),
|
|
|
|
&Trait);
|
|
|
|
if (Pos == Table->end())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
HeaderFileInfo HFI = *Pos;
|
|
|
|
if (Listener)
|
|
|
|
Listener->ReadHeaderFileInfo(HFI, FE->getUID());
|
|
|
|
|
|
|
|
return HFI;
|
|
|
|
}
|
|
|
|
|
|
|
|
return HeaderFileInfo();
|
|
|
|
}
|
|
|
|
|
2011-01-14 23:54:07 +03:00
|
|
|
void ASTReader::ReadPragmaDiagnosticMappings(Diagnostic &Diag) {
|
2011-07-26 00:32:21 +04:00
|
|
|
for (ModuleIterator I = ModuleMgr.begin(), E = ModuleMgr.end(); I != E; ++I) {
|
|
|
|
Module &F = *(*I);
|
Revamp the SourceManager to separate the representation of parsed
source locations from source locations loaded from an AST/PCH file.
Previously, loading an AST/PCH file involved carefully pre-allocating
space at the beginning of the source manager for the source locations
and FileIDs that correspond to the prefix, and then appending the
source locations/FileIDs used for parsing the remaining translation
unit. This design forced us into loading PCH files early, as a prefix,
which has become a rather significant limitation.
This patch splits the SourceManager space into two parts: for source
location "addresses", the lower values (growing upward) are used to
describe parsed code, while upper values (growing downward) are used
for source locations loaded from AST/PCH files. Similarly, positive
FileIDs are used to describe parsed code while negative FileIDs are
used for file/macro locations loaded from AST/PCH files. As a result,
we can load PCH/AST files even during parsing, making various
improvements in the future possible, e.g., teaching #include <foo.h> to
look for and load <foo.h.gch> if it happens to be already available.
This patch was originally written by Sebastian Redl, then brought
forward to the modern age by Jonathan Turner, and finally
polished/finished by me to be committed.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@135484 91177308-0d34-0410-b5e6-96231b3b80d8
2011-07-19 20:10:42 +04:00
|
|
|
unsigned Idx = 0;
|
|
|
|
while (Idx < F.PragmaDiagMappings.size()) {
|
|
|
|
SourceLocation Loc = ReadSourceLocation(F, F.PragmaDiagMappings[Idx++]);
|
|
|
|
while (1) {
|
|
|
|
assert(Idx < F.PragmaDiagMappings.size() &&
|
|
|
|
"Invalid data, didn't find '-1' marking end of diag/map pairs");
|
|
|
|
if (Idx >= F.PragmaDiagMappings.size()) {
|
|
|
|
break; // Something is messed up but at least avoid infinite loop in
|
|
|
|
// release build.
|
|
|
|
}
|
|
|
|
unsigned DiagID = F.PragmaDiagMappings[Idx++];
|
|
|
|
if (DiagID == (unsigned)-1) {
|
|
|
|
break; // no more diag/map pairs for this location.
|
|
|
|
}
|
|
|
|
diag::Mapping Map = (diag::Mapping)F.PragmaDiagMappings[Idx++];
|
|
|
|
Diag.setDiagnosticMapping(DiagID, Map, Loc);
|
|
|
|
}
|
2011-01-14 23:54:07 +03:00
|
|
|
}
|
2010-11-06 01:10:18 +03:00
|
|
|
}
|
|
|
|
}
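Editor's note: the loops above imply a record layout of a raw source location followed by (diagnostic ID, mapping) pairs ending in a (unsigned)-1 sentinel, repeated for each pragma location. The sketch below (an editor's illustration with invented names, not ASTReader code) decodes that layout in isolation.

// Illustrative sketch only -- decodes the layout walked above: per pragma
// location, a raw location value, then (DiagID, Mapping) pairs, then a
// (unsigned)-1 sentinel ending the pairs for that location.
#include <stdint.h>
#include <vector>

struct DecodedMapping {
  uint64_t RawLoc;   // raw source-location value
  unsigned DiagID;   // diagnostic being remapped
  unsigned Mapping;  // new mapping (ignore/warning/error/...)
};

static std::vector<DecodedMapping>
decodePragmaDiagMappings(const std::vector<uint64_t> &Record) {
  std::vector<DecodedMapping> Result;
  unsigned Idx = 0;
  while (Idx < Record.size()) {
    uint64_t RawLoc = Record[Idx++];
    while (Idx < Record.size()) {
      unsigned DiagID = (unsigned)Record[Idx++];
      if (DiagID == (unsigned)-1)
        break;  // no more pairs for this location
      if (Idx >= Record.size())
        break;  // malformed record; avoid reading past the end
      DecodedMapping M;
      M.RawLoc = RawLoc;
      M.DiagID = DiagID;
      M.Mapping = (unsigned)Record[Idx++];
      Result.push_back(M);
    }
  }
  return Result;
}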
|
|
|
|
|
2010-07-21 02:37:49 +04:00
|
|
|
/// \brief Get the correct cursor and offset for loading a type.
|
2010-08-19 03:56:43 +04:00
|
|
|
ASTReader::RecordLocation ASTReader::TypeCursorForIndex(unsigned Index) {
|
2011-08-02 20:26:37 +04:00
|
|
|
GlobalTypeMapType::iterator I = GlobalTypeMap.find(Index);
|
2011-07-21 01:31:32 +04:00
|
|
|
assert(I != GlobalTypeMap.end() && "Corrupted global type map");
|
2011-07-29 04:21:44 +04:00
|
|
|
Module *M = I->second;
|
Change the hashing function for DeclContext lookup within an AST file
by eliminating the type ID from constructor, destructor, and
conversion function names. There are several reasons for this change:
- A given type (say, int*) isn't guaranteed to have a single, unique
type ID within a chain of PCH files. Hence, we could end up hashing
based on the wrong type ID, causing name lookup to fail.
- The mapping from types back to type IDs required one DenseMap
entry for every type that was ever deserialized, which was an
unacceptable cost to support just the name lookup of constructors,
destructors, and conversion functions. Plus, this mapping could
never actually work with chained or multiple PCH, based on the first
bullet.
Once we have eliminated the type from the hash function, these
problems go away, as does my horrible "reverse type remap" hack, which
was doomed from the start (see bullet #1 above) and far too
complicated.
However, note that removing the type from the hash function means that
all constructors, destructors, and conversion functions have the same
hash key, so I've updated the caller to double-check that the
declarations found have the appropriate name.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@136708 91177308-0d34-0410-b5e6-96231b3b80d8
2011-08-02 22:32:54 +04:00
|
|
|
return RecordLocation(M, M->TypeOffsets[Index - M->BaseTypeIndex]);
|
2010-07-21 02:37:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/// \brief Read and return the type with the given index.
|
2009-04-10 02:27:44 +04:00
|
|
|
///
|
2010-07-21 02:37:49 +04:00
|
|
|
/// The index is the type ID, shifted and minus the number of predefs. This
|
|
|
|
/// routine actually reads the record corresponding to the type at the given
|
|
|
|
/// location. It is a helper routine for GetType, which deals with reading type
|
|
|
|
/// IDs.
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType ASTReader::readTypeRecord(unsigned Index) {
|
2010-07-21 02:37:49 +04:00
|
|
|
RecordLocation Loc = TypeCursorForIndex(Index);
|
2010-10-05 19:59:54 +04:00
|
|
|
llvm::BitstreamCursor &DeclsCursor = Loc.F->DeclsCursor;
|
2010-07-16 21:50:48 +04:00
|
|
|
|
2009-04-15 01:18:50 +04:00
|
|
|
// Keep track of where we are in the stream, then jump back there
|
|
|
|
// after reading this type.
|
2009-10-17 04:13:19 +04:00
|
|
|
SavedStreamPosition SavedPosition(DeclsCursor);
|
2009-04-15 01:18:50 +04:00
|
|
|
|
2010-06-29 02:28:35 +04:00
|
|
|
ReadingKindTracker ReadingKind(Read_Type, *this);
|
2010-08-11 22:52:41 +04:00
|
|
|
|
2009-07-06 22:54:52 +04:00
|
|
|
// Note that we are loading a type record.
|
2010-07-30 14:03:16 +04:00
|
|
|
Deserializing AType(this);
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2011-07-22 04:38:23 +04:00
|
|
|
unsigned Idx = 0;
|
2010-10-05 19:59:54 +04:00
|
|
|
DeclsCursor.JumpToBit(Loc.Offset);
|
2009-04-10 02:27:44 +04:00
|
|
|
RecordData Record;
|
2009-10-17 04:13:19 +04:00
|
|
|
unsigned Code = DeclsCursor.ReadCode();
|
2010-08-19 03:57:32 +04:00
|
|
|
switch ((TypeCode)DeclsCursor.ReadRecord(Code, Record)) {
|
|
|
|
case TYPE_EXT_QUAL: {
|
2010-03-18 03:56:54 +03:00
|
|
|
if (Record.size() != 2) {
|
|
|
|
Error("Incorrect encoding of extended qualifier type");
|
|
|
|
return QualType();
|
|
|
|
}
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType Base = readType(*Loc.F, Record, Idx);
|
|
|
|
Qualifiers Quals = Qualifiers::fromOpaqueValue(Record[Idx++]);
|
2009-09-24 23:53:00 +04:00
|
|
|
return Context->getQualifiedType(Base, Quals);
|
2009-04-16 02:00:08 +04:00
|
|
|
}
|
2009-04-14 00:46:52 +04:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_COMPLEX: {
|
2010-03-18 03:56:54 +03:00
|
|
|
if (Record.size() != 1) {
|
|
|
|
Error("Incorrect encoding of complex type");
|
|
|
|
return QualType();
|
|
|
|
}
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType ElemType = readType(*Loc.F, Record, Idx);
|
2009-04-28 01:45:14 +04:00
|
|
|
return Context->getComplexType(ElemType);
|
2009-04-10 02:27:44 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_POINTER: {
|
2010-03-18 03:56:54 +03:00
|
|
|
if (Record.size() != 1) {
|
|
|
|
Error("Incorrect encoding of pointer type");
|
|
|
|
return QualType();
|
|
|
|
}
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType PointeeType = readType(*Loc.F, Record, Idx);
|
2009-04-28 01:45:14 +04:00
|
|
|
return Context->getPointerType(PointeeType);
|
2009-04-10 02:27:44 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_BLOCK_POINTER: {
|
2010-03-18 03:56:54 +03:00
|
|
|
if (Record.size() != 1) {
|
|
|
|
Error("Incorrect encoding of block pointer type");
|
|
|
|
return QualType();
|
|
|
|
}
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType PointeeType = readType(*Loc.F, Record, Idx);
|
2009-04-28 01:45:14 +04:00
|
|
|
return Context->getBlockPointerType(PointeeType);
|
2009-04-10 02:27:44 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_LVALUE_REFERENCE: {
|
2011-04-12 14:38:03 +04:00
|
|
|
if (Record.size() != 2) {
|
2010-03-18 03:56:54 +03:00
|
|
|
Error("Incorrect encoding of lvalue reference type");
|
|
|
|
return QualType();
|
|
|
|
}
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType PointeeType = readType(*Loc.F, Record, Idx);
|
2011-04-12 14:38:03 +04:00
|
|
|
return Context->getLValueReferenceType(PointeeType, Record[1]);
|
2009-04-10 02:27:44 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_RVALUE_REFERENCE: {
|
2010-03-18 03:56:54 +03:00
|
|
|
if (Record.size() != 1) {
|
|
|
|
Error("Incorrect encoding of rvalue reference type");
|
|
|
|
return QualType();
|
|
|
|
}
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType PointeeType = readType(*Loc.F, Record, Idx);
|
2009-04-28 01:45:14 +04:00
|
|
|
return Context->getRValueReferenceType(PointeeType);
|
2009-04-10 02:27:44 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_MEMBER_POINTER: {
|
2010-07-02 15:55:15 +04:00
|
|
|
if (Record.size() != 2) {
|
2010-03-18 03:56:54 +03:00
|
|
|
Error("Incorrect encoding of member pointer type");
|
|
|
|
return QualType();
|
|
|
|
}
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType PointeeType = readType(*Loc.F, Record, Idx);
|
|
|
|
QualType ClassType = readType(*Loc.F, Record, Idx);
|
2010-12-10 20:03:06 +03:00
|
|
|
if (PointeeType.isNull() || ClassType.isNull())
|
|
|
|
return QualType();
|
|
|
|
|
2009-04-28 01:45:14 +04:00
|
|
|
return Context->getMemberPointerType(PointeeType, ClassType.getTypePtr());
|
2009-04-10 02:27:44 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_CONSTANT_ARRAY: {
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType ElementType = readType(*Loc.F, Record, Idx);
|
2009-04-14 00:46:52 +04:00
|
|
|
ArrayType::ArraySizeModifier ASM = (ArrayType::ArraySizeModifier)Record[1];
|
|
|
|
unsigned IndexTypeQuals = Record[2];
|
|
|
|
unsigned Idx = 3;
|
|
|
|
llvm::APInt Size = ReadAPInt(Record, Idx);
|
2009-07-06 19:59:29 +04:00
|
|
|
return Context->getConstantArrayType(ElementType, Size,
|
|
|
|
ASM, IndexTypeQuals);
|
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_INCOMPLETE_ARRAY: {
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType ElementType = readType(*Loc.F, Record, Idx);
|
2009-04-14 00:46:52 +04:00
|
|
|
ArrayType::ArraySizeModifier ASM = (ArrayType::ArraySizeModifier)Record[1];
|
|
|
|
unsigned IndexTypeQuals = Record[2];
|
2009-04-28 01:45:14 +04:00
|
|
|
return Context->getIncompleteArrayType(ElementType, ASM, IndexTypeQuals);
|
2009-04-14 00:46:52 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_VARIABLE_ARRAY: {
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType ElementType = readType(*Loc.F, Record, Idx);
|
2009-04-15 01:18:50 +04:00
|
|
|
ArrayType::ArraySizeModifier ASM = (ArrayType::ArraySizeModifier)Record[1];
|
|
|
|
unsigned IndexTypeQuals = Record[2];
|
2010-10-05 19:59:54 +04:00
|
|
|
SourceLocation LBLoc = ReadSourceLocation(*Loc.F, Record[3]);
|
|
|
|
SourceLocation RBLoc = ReadSourceLocation(*Loc.F, Record[4]);
|
|
|
|
return Context->getVariableArrayType(ElementType, ReadExpr(*Loc.F),
|
2009-07-06 19:59:29 +04:00
|
|
|
ASM, IndexTypeQuals,
|
|
|
|
SourceRange(LBLoc, RBLoc));
|
2009-04-14 00:46:52 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_VECTOR: {
|
2010-06-23 10:00:24 +04:00
|
|
|
if (Record.size() != 3) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("incorrect encoding of vector type in AST file");
|
2009-04-14 00:46:52 +04:00
|
|
|
return QualType();
|
|
|
|
}
|
|
|
|
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType ElementType = readType(*Loc.F, Record, Idx);
|
2009-04-14 00:46:52 +04:00
|
|
|
unsigned NumElements = Record[1];
|
2010-11-11 00:56:12 +03:00
|
|
|
unsigned VecKind = Record[2];
|
2010-06-23 10:00:24 +04:00
|
|
|
return Context->getVectorType(ElementType, NumElements,
|
2010-11-11 00:56:12 +03:00
|
|
|
(VectorType::VectorKind)VecKind);
|
2009-04-14 00:46:52 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_EXT_VECTOR: {
|
2010-06-23 10:00:24 +04:00
|
|
|
if (Record.size() != 3) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("incorrect encoding of extended vector type in AST file");
|
2009-04-14 00:46:52 +04:00
|
|
|
return QualType();
|
|
|
|
}
|
|
|
|
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType ElementType = readType(*Loc.F, Record, Idx);
|
2009-04-14 00:46:52 +04:00
|
|
|
unsigned NumElements = Record[1];
|
2009-04-28 01:45:14 +04:00
|
|
|
return Context->getExtVectorType(ElementType, NumElements);
|
2009-04-14 00:46:52 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_FUNCTION_NO_PROTO: {
|
2011-06-16 03:02:42 +04:00
|
|
|
if (Record.size() != 6) {
|
2009-04-29 01:53:25 +04:00
|
|
|
Error("incorrect encoding of no-proto function type");
|
2009-04-14 00:46:52 +04:00
|
|
|
return QualType();
|
|
|
|
}
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType ResultType = readType(*Loc.F, Record, Idx);
|
2011-06-16 03:02:42 +04:00
|
|
|
FunctionType::ExtInfo Info(Record[1], Record[2], Record[3],
|
|
|
|
(CallingConv)Record[4], Record[5]);
|
2010-03-31 00:24:48 +04:00
|
|
|
return Context->getFunctionNoProtoType(ResultType, Info);
|
2009-04-14 00:46:52 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_FUNCTION_PROTO: {
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType ResultType = readType(*Loc.F, Record, Idx);
|
2010-12-14 11:05:40 +03:00
|
|
|
|
|
|
|
FunctionProtoType::ExtProtoInfo EPI;
|
|
|
|
EPI.ExtInfo = FunctionType::ExtInfo(/*noreturn*/ Record[1],
|
2011-04-09 12:18:08 +04:00
|
|
|
/*hasregparm*/ Record[2],
|
|
|
|
/*regparm*/ Record[3],
|
2011-06-16 03:02:42 +04:00
|
|
|
static_cast<CallingConv>(Record[4]),
|
|
|
|
/*produces*/ Record[5]);
|
2010-12-14 11:05:40 +03:00
|
|
|
|
2011-06-16 03:02:42 +04:00
|
|
|
unsigned Idx = 6;
|
2009-04-14 00:46:52 +04:00
|
|
|
unsigned NumParams = Record[Idx++];
|
2011-07-23 14:55:15 +04:00
|
|
|
SmallVector<QualType, 16> ParamTypes;
|
2009-04-14 00:46:52 +04:00
|
|
|
for (unsigned I = 0; I != NumParams; ++I)
|
2011-07-22 04:38:23 +04:00
|
|
|
ParamTypes.push_back(readType(*Loc.F, Record, Idx));
|
2010-12-14 11:05:40 +03:00
|
|
|
|
|
|
|
EPI.Variadic = Record[Idx++];
|
|
|
|
EPI.TypeQuals = Record[Idx++];
|
2011-01-26 08:01:58 +03:00
|
|
|
EPI.RefQualifier = static_cast<RefQualifierKind>(Record[Idx++]);
|
2011-03-12 14:50:43 +03:00
|
|
|
ExceptionSpecificationType EST =
|
|
|
|
static_cast<ExceptionSpecificationType>(Record[Idx++]);
|
|
|
|
EPI.ExceptionSpecType = EST;
|
|
|
|
if (EST == EST_Dynamic) {
|
|
|
|
EPI.NumExceptions = Record[Idx++];
|
2011-07-23 14:55:15 +04:00
|
|
|
SmallVector<QualType, 2> Exceptions;
|
2011-03-12 14:50:43 +03:00
|
|
|
for (unsigned I = 0; I != EPI.NumExceptions; ++I)
|
2011-07-22 04:38:23 +04:00
|
|
|
Exceptions.push_back(readType(*Loc.F, Record, Idx));
|
2011-03-12 14:50:43 +03:00
|
|
|
EPI.Exceptions = Exceptions.data();
|
|
|
|
} else if (EST == EST_ComputedNoexcept) {
|
|
|
|
EPI.NoexceptExpr = ReadExpr(*Loc.F);
|
|
|
|
}
|
2009-05-21 13:52:38 +04:00
|
|
|
return Context->getFunctionType(ResultType, ParamTypes.data(), NumParams,
|
2010-12-14 11:05:40 +03:00
|
|
|
EPI);
|
2009-04-14 00:46:52 +04:00
|
|
|
}
|
|
|
|
|
2011-07-22 02:35:25 +04:00
|
|
|
case TYPE_UNRESOLVED_USING: {
|
|
|
|
unsigned Idx = 0;
|
2009-12-05 01:46:56 +03:00
|
|
|
return Context->getTypeDeclType(
|
2011-07-22 02:35:25 +04:00
|
|
|
ReadDeclAs<UnresolvedUsingTypenameDecl>(*Loc.F, Record, Idx));
|
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_TYPEDEF: {
|
2010-07-02 15:55:11 +04:00
|
|
|
if (Record.size() != 2) {
|
2010-03-18 03:56:54 +03:00
|
|
|
Error("incorrect encoding of typedef type");
|
|
|
|
return QualType();
|
|
|
|
}
|
2011-07-22 02:35:25 +04:00
|
|
|
unsigned Idx = 0;
|
|
|
|
TypedefNameDecl *Decl = ReadDeclAs<TypedefNameDecl>(*Loc.F, Record, Idx);
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType Canonical = readType(*Loc.F, Record, Idx);
|
2010-10-26 04:51:02 +04:00
|
|
|
if (!Canonical.isNull())
|
|
|
|
Canonical = Context->getCanonicalType(Canonical);
|
2010-07-02 15:55:11 +04:00
|
|
|
return Context->getTypedefType(Decl, Canonical);
|
|
|
|
}
|
2009-04-14 00:46:52 +04:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_TYPEOF_EXPR:
|
2010-10-05 19:59:54 +04:00
|
|
|
return Context->getTypeOfExprType(ReadExpr(*Loc.F));
|
2009-04-14 00:46:52 +04:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_TYPEOF: {
|
2009-04-14 00:46:52 +04:00
|
|
|
if (Record.size() != 1) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("incorrect encoding of typeof(type) in AST file");
|
2009-04-14 00:46:52 +04:00
|
|
|
return QualType();
|
|
|
|
}
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType UnderlyingType = readType(*Loc.F, Record, Idx);
|
2009-04-28 01:45:14 +04:00
|
|
|
return Context->getTypeOfType(UnderlyingType);
|
2009-04-14 00:46:52 +04:00
|
|
|
}
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_DECLTYPE:
|
2010-10-05 19:59:54 +04:00
|
|
|
return Context->getDecltypeType(ReadExpr(*Loc.F));
|
2009-06-24 23:06:50 +04:00
|
|
|
|
2011-05-25 02:41:36 +04:00
|
|
|
case TYPE_UNARY_TRANSFORM: {
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType BaseType = readType(*Loc.F, Record, Idx);
|
|
|
|
QualType UnderlyingType = readType(*Loc.F, Record, Idx);
|
2011-05-25 02:41:36 +04:00
|
|
|
UnaryTransformType::UTTKind UKind = (UnaryTransformType::UTTKind)Record[2];
|
|
|
|
return Context->getUnaryTransformType(BaseType, UnderlyingType, UKind);
|
|
|
|
}
|
|
|
|
|
2011-02-20 06:19:35 +03:00
|
|
|
case TYPE_AUTO:
|
2011-07-22 04:38:23 +04:00
|
|
|
return Context->getAutoType(readType(*Loc.F, Record, Idx));
|
2011-02-20 06:19:35 +03:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_RECORD: {
|
2010-07-08 17:09:53 +04:00
|
|
|
if (Record.size() != 2) {
|
2010-03-18 03:56:54 +03:00
|
|
|
Error("incorrect encoding of record type");
|
|
|
|
return QualType();
|
|
|
|
}
|
2011-07-22 02:35:25 +04:00
|
|
|
unsigned Idx = 0;
|
|
|
|
bool IsDependent = Record[Idx++];
|
|
|
|
QualType T
|
|
|
|
= Context->getRecordType(ReadDeclAs<RecordDecl>(*Loc.F, Record, Idx));
|
2011-01-19 09:33:43 +03:00
|
|
|
const_cast<Type*>(T.getTypePtr())->setDependent(IsDependent);
|
2010-07-08 17:09:53 +04:00
|
|
|
return T;
|
|
|
|
}
|
2009-04-14 00:46:52 +04:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_ENUM: {
|
2010-07-08 17:09:53 +04:00
|
|
|
if (Record.size() != 2) {
|
2010-03-18 03:56:54 +03:00
|
|
|
Error("incorrect encoding of enum type");
|
|
|
|
return QualType();
|
|
|
|
}
|
2011-07-22 02:35:25 +04:00
|
|
|
unsigned Idx = 0;
|
|
|
|
bool IsDependent = Record[Idx++];
|
|
|
|
QualType T
|
|
|
|
= Context->getEnumType(ReadDeclAs<EnumDecl>(*Loc.F, Record, Idx));
|
2011-01-19 09:33:43 +03:00
|
|
|
const_cast<Type*>(T.getTypePtr())->setDependent(IsDependent);
|
2010-07-08 17:09:53 +04:00
|
|
|
return T;
|
|
|
|
}
|
2009-04-13 22:14:40 +04:00
|
|
|
|
2011-01-06 04:58:22 +03:00
|
|
|
case TYPE_ATTRIBUTED: {
|
|
|
|
if (Record.size() != 3) {
|
|
|
|
Error("incorrect encoding of attributed type");
|
|
|
|
return QualType();
|
|
|
|
}
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType modifiedType = readType(*Loc.F, Record, Idx);
|
|
|
|
QualType equivalentType = readType(*Loc.F, Record, Idx);
|
2011-01-06 04:58:22 +03:00
|
|
|
AttributedType::Kind kind = static_cast<AttributedType::Kind>(Record[2]);
|
|
|
|
return Context->getAttributedType(kind, modifiedType, equivalentType);
|
|
|
|
}
|
|
|
|
|
2010-12-10 19:29:40 +03:00
|
|
|
case TYPE_PAREN: {
|
|
|
|
if (Record.size() != 1) {
|
|
|
|
Error("incorrect encoding of paren type");
|
|
|
|
return QualType();
|
|
|
|
}
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType InnerType = readType(*Loc.F, Record, Idx);
|
2010-12-10 19:29:40 +03:00
|
|
|
return Context->getParenType(InnerType);
|
|
|
|
}
|
|
|
|
|
2010-12-20 05:24:11 +03:00
|
|
|
case TYPE_PACK_EXPANSION: {
|
2011-02-01 18:24:58 +03:00
|
|
|
if (Record.size() != 2) {
|
2010-12-20 05:24:11 +03:00
|
|
|
Error("incorrect encoding of pack expansion type");
|
|
|
|
return QualType();
|
|
|
|
}
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType Pattern = readType(*Loc.F, Record, Idx);
|
2010-12-20 05:24:11 +03:00
|
|
|
if (Pattern.isNull())
|
|
|
|
return QualType();
|
2011-01-14 20:04:44 +03:00
|
|
|
llvm::Optional<unsigned> NumExpansions;
|
|
|
|
if (Record[1])
|
|
|
|
NumExpansions = Record[1] - 1;
|
|
|
|
return Context->getPackExpansionType(Pattern, NumExpansions);
|
2010-12-20 05:24:11 +03:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_ELABORATED: {
|
2010-06-25 20:24:58 +04:00
|
|
|
unsigned Idx = 0;
|
|
|
|
ElaboratedTypeKeyword Keyword = (ElaboratedTypeKeyword)Record[Idx++];
|
2011-07-22 02:35:25 +04:00
|
|
|
NestedNameSpecifier *NNS = ReadNestedNameSpecifier(*Loc.F, Record, Idx);
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType NamedType = readType(*Loc.F, Record, Idx);
|
2010-06-25 20:24:58 +04:00
|
|
|
return Context->getElaboratedType(Keyword, NNS, NamedType);
|
2009-09-05 04:15:47 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_OBJC_INTERFACE: {
|
2009-04-22 10:45:28 +04:00
|
|
|
unsigned Idx = 0;
|
2011-07-22 02:35:25 +04:00
|
|
|
ObjCInterfaceDecl *ItfD
|
|
|
|
= ReadDeclAs<ObjCInterfaceDecl>(*Loc.F, Record, Idx);
|
2010-05-15 15:32:37 +04:00
|
|
|
return Context->getObjCInterfaceType(ItfD);
|
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_OBJC_OBJECT: {
|
2010-05-15 15:32:37 +04:00
|
|
|
unsigned Idx = 0;
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType Base = readType(*Loc.F, Record, Idx);
|
2009-04-22 10:45:28 +04:00
|
|
|
unsigned NumProtos = Record[Idx++];
|
2011-07-23 14:55:15 +04:00
|
|
|
SmallVector<ObjCProtocolDecl*, 4> Protos;
|
2009-04-22 10:45:28 +04:00
|
|
|
for (unsigned I = 0; I != NumProtos; ++I)
|
2011-07-22 02:35:25 +04:00
|
|
|
Protos.push_back(ReadDeclAs<ObjCProtocolDecl>(*Loc.F, Record, Idx));
|
2010-10-21 07:16:25 +04:00
|
|
|
return Context->getObjCObjectType(Base, Protos.data(), NumProtos);
|
2009-04-22 10:45:28 +04:00
|
|
|
}
|
2009-04-14 00:46:52 +04:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_OBJC_OBJECT_POINTER: {
|
2009-04-22 10:40:03 +04:00
|
|
|
unsigned Idx = 0;
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType Pointee = readType(*Loc.F, Record, Idx);
|
2010-05-15 15:32:37 +04:00
|
|
|
return Context->getObjCObjectPointerType(Pointee);
|
2009-04-22 10:40:03 +04:00
|
|
|
}
|
2009-09-29 23:42:55 +04:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_SUBST_TEMPLATE_TYPE_PARM: {
|
2009-10-18 13:09:24 +04:00
|
|
|
unsigned Idx = 0;
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType Parm = readType(*Loc.F, Record, Idx);
|
|
|
|
QualType Replacement = readType(*Loc.F, Record, Idx);
|
2009-10-18 13:09:24 +04:00
|
|
|
return
|
|
|
|
Context->getSubstTemplateTypeParmType(cast<TemplateTypeParmType>(Parm),
|
|
|
|
Replacement);
|
|
|
|
}
|
2010-03-10 06:28:59 +03:00
|
|
|
|
2011-01-14 05:55:32 +03:00
|
|
|
case TYPE_SUBST_TEMPLATE_TYPE_PARM_PACK: {
|
|
|
|
unsigned Idx = 0;
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType Parm = readType(*Loc.F, Record, Idx);
|
2011-01-14 05:55:32 +03:00
|
|
|
TemplateArgument ArgPack = ReadTemplateArgument(*Loc.F, Record, Idx);
|
|
|
|
return Context->getSubstTemplateTypeParmPackType(
|
|
|
|
cast<TemplateTypeParmType>(Parm),
|
|
|
|
ArgPack);
|
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_INJECTED_CLASS_NAME: {
|
2011-07-22 02:35:25 +04:00
|
|
|
CXXRecordDecl *D = ReadDeclAs<CXXRecordDecl>(*Loc.F, Record, Idx);
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType TST = readType(*Loc.F, Record, Idx); // probably derivable
|
2010-07-02 15:55:20 +04:00
|
|
|
// FIXME: ASTContext::getInjectedClassNameType is not currently suitable
|
2010-08-19 03:57:06 +04:00
|
|
|
// for AST reading, too many interdependencies.
|
2010-07-02 15:55:20 +04:00
|
|
|
return
|
|
|
|
QualType(new (*Context, TypeAlignment) InjectedClassNameType(D, TST), 0);
|
2010-03-10 06:28:59 +03:00
|
|
|
}
|
2010-10-21 07:16:25 +04:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_TEMPLATE_TYPE_PARM: {
|
2010-06-19 23:29:09 +04:00
|
|
|
unsigned Idx = 0;
|
|
|
|
unsigned Depth = Record[Idx++];
|
|
|
|
unsigned Index = Record[Idx++];
|
|
|
|
bool Pack = Record[Idx++];
|
2011-07-22 02:35:25 +04:00
|
|
|
TemplateTypeParmDecl *D
|
|
|
|
= ReadDeclAs<TemplateTypeParmDecl>(*Loc.F, Record, Idx);
|
2011-05-01 04:51:33 +04:00
|
|
|
return Context->getTemplateTypeParmType(Depth, Index, Pack, D);
|
2010-06-19 23:29:09 +04:00
|
|
|
}
|
2010-10-21 07:16:25 +04:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_DEPENDENT_NAME: {
|
2010-06-24 12:57:31 +04:00
|
|
|
unsigned Idx = 0;
|
|
|
|
ElaboratedTypeKeyword Keyword = (ElaboratedTypeKeyword)Record[Idx++];
|
2011-07-22 02:35:25 +04:00
|
|
|
NestedNameSpecifier *NNS = ReadNestedNameSpecifier(*Loc.F, Record, Idx);
|
2011-07-29 00:55:49 +04:00
|
|
|
const IdentifierInfo *Name = this->GetIdentifierInfo(*Loc.F, Record, Idx);
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType Canon = readType(*Loc.F, Record, Idx);
|
2010-10-26 04:51:02 +04:00
|
|
|
if (!Canon.isNull())
|
|
|
|
Canon = Context->getCanonicalType(Canon);
|
2010-07-02 15:55:24 +04:00
|
|
|
return Context->getDependentNameType(Keyword, NNS, Name, Canon);
|
2010-06-24 12:57:31 +04:00
|
|
|
}
|
2010-10-21 07:16:25 +04:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_DEPENDENT_TEMPLATE_SPECIALIZATION: {
|
2010-06-25 20:24:58 +04:00
|
|
|
unsigned Idx = 0;
|
|
|
|
ElaboratedTypeKeyword Keyword = (ElaboratedTypeKeyword)Record[Idx++];
|
2011-07-22 02:35:25 +04:00
|
|
|
NestedNameSpecifier *NNS = ReadNestedNameSpecifier(*Loc.F, Record, Idx);
|
2011-07-29 00:55:49 +04:00
|
|
|
const IdentifierInfo *Name = this->GetIdentifierInfo(*Loc.F, Record, Idx);
|
2010-06-25 20:24:58 +04:00
|
|
|
unsigned NumArgs = Record[Idx++];
|
2011-07-23 14:55:15 +04:00
|
|
|
SmallVector<TemplateArgument, 8> Args;
|
2010-06-25 20:24:58 +04:00
|
|
|
Args.reserve(NumArgs);
|
|
|
|
while (NumArgs--)
|
2010-10-05 19:59:54 +04:00
|
|
|
Args.push_back(ReadTemplateArgument(*Loc.F, Record, Idx));
|
2010-06-25 20:24:58 +04:00
|
|
|
return Context->getDependentTemplateSpecializationType(Keyword, NNS, Name,
|
|
|
|
Args.size(), Args.data());
|
|
|
|
}
|
2010-10-21 07:16:25 +04:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_DEPENDENT_SIZED_ARRAY: {
|
2010-06-30 12:49:25 +04:00
|
|
|
unsigned Idx = 0;
|
|
|
|
|
|
|
|
// ArrayType
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType ElementType = readType(*Loc.F, Record, Idx);
|
2010-06-30 12:49:25 +04:00
|
|
|
ArrayType::ArraySizeModifier ASM
|
|
|
|
= (ArrayType::ArraySizeModifier)Record[Idx++];
|
|
|
|
unsigned IndexTypeQuals = Record[Idx++];
|
|
|
|
|
|
|
|
// DependentSizedArrayType
|
2010-10-05 19:59:54 +04:00
|
|
|
Expr *NumElts = ReadExpr(*Loc.F);
|
|
|
|
SourceRange Brackets = ReadSourceRange(*Loc.F, Record, Idx);
|
2010-06-30 12:49:25 +04:00
|
|
|
|
|
|
|
return Context->getDependentSizedArrayType(ElementType, NumElts, ASM,
|
|
|
|
IndexTypeQuals, Brackets);
|
|
|
|
}
|
2010-06-19 23:28:53 +04:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case TYPE_TEMPLATE_SPECIALIZATION: {
|
2010-06-19 23:29:09 +04:00
|
|
|
unsigned Idx = 0;
|
2010-07-08 17:09:53 +04:00
|
|
|
bool IsDependent = Record[Idx++];
|
2011-01-15 09:45:20 +03:00
|
|
|
TemplateName Name = ReadTemplateName(*Loc.F, Record, Idx);
|
2011-07-23 14:55:15 +04:00
|
|
|
SmallVector<TemplateArgument, 8> Args;
|
2010-10-05 19:59:54 +04:00
|
|
|
ReadTemplateArgumentList(Args, *Loc.F, Record, Idx);
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType Underlying = readType(*Loc.F, Record, Idx);
|
2010-07-08 17:09:53 +04:00
|
|
|
QualType T;
|
2011-05-06 01:57:07 +04:00
|
|
|
if (Underlying.isNull())
|
2010-07-08 17:09:53 +04:00
|
|
|
T = Context->getCanonicalTemplateSpecializationType(Name, Args.data(),
|
|
|
|
Args.size());
|
2010-07-02 15:55:11 +04:00
|
|
|
else
|
2010-07-08 17:09:53 +04:00
|
|
|
T = Context->getTemplateSpecializationType(Name, Args.data(),
|
2011-05-06 01:57:07 +04:00
|
|
|
Args.size(), Underlying);
|
2011-01-19 09:33:43 +03:00
|
|
|
const_cast<Type*>(T.getTypePtr())->setDependent(IsDependent);
|
2010-07-08 17:09:53 +04:00
|
|
|
return T;
|
2010-06-19 23:29:09 +04:00
|
|
|
}
|
2009-04-10 02:27:44 +04:00
|
|
|
}
|
|
|
|
// Suppress a GCC warning
|
|
|
|
return QualType();
|
|
|
|
}
|
|
|
|
|
2010-10-05 19:59:54 +04:00
|
|
|
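/// \brief Visitor that reads the source-location information for each
/// TypeLoc node in a type from the given record, using the owning module
/// to resolve locations, declarations, and nested type source info.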
class clang::TypeLocReader : public TypeLocVisitor<TypeLocReader> {
|
2010-08-19 03:56:43 +04:00
|
|
|
ASTReader &Reader;
|
2011-07-22 20:00:58 +04:00
|
|
|
Module &F;
|
2010-07-23 02:43:28 +04:00
|
|
|
llvm::BitstreamCursor &DeclsCursor;
|
2010-08-19 03:56:43 +04:00
|
|
|
const ASTReader::RecordData &Record;
|
2009-10-17 01:56:05 +04:00
|
|
|
unsigned &Idx;
|
|
|
|
|
2010-10-05 19:59:54 +04:00
|
|
|
SourceLocation ReadSourceLocation(const ASTReader::RecordData &R,
|
|
|
|
unsigned &I) {
|
|
|
|
return Reader.ReadSourceLocation(F, R, I);
|
|
|
|
}
|
|
|
|
|
2011-07-22 02:35:25 +04:00
|
|
|
template<typename T>
|
|
|
|
T *ReadDeclAs(const ASTReader::RecordData &Record, unsigned &Idx) {
|
|
|
|
return Reader.ReadDeclAs<T>(F, Record, Idx);
|
|
|
|
}
|
|
|
|
|
2009-10-17 01:56:05 +04:00
|
|
|
public:
|
2011-07-22 20:00:58 +04:00
|
|
|
TypeLocReader(ASTReader &Reader, Module &F,
|
2010-08-19 03:56:43 +04:00
|
|
|
const ASTReader::RecordData &Record, unsigned &Idx)
|
2010-10-05 19:59:54 +04:00
|
|
|
: Reader(Reader), F(F), DeclsCursor(F.DeclsCursor), Record(Record), Idx(Idx)
|
|
|
|
{ }
|
2009-10-17 01:56:05 +04:00
|
|
|
|
2009-10-18 05:05:36 +04:00
|
|
|
// We want compile-time assurance that we've enumerated all of
|
|
|
|
// these, so unfortunately we have to declare them first, then
|
|
|
|
// define them out-of-line.
|
|
|
|
#define ABSTRACT_TYPELOC(CLASS, PARENT)
|
2009-10-17 01:56:05 +04:00
|
|
|
#define TYPELOC(CLASS, PARENT) \
|
2009-10-18 05:05:36 +04:00
|
|
|
void Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc);
|
2009-10-17 01:56:05 +04:00
|
|
|
#include "clang/AST/TypeLocNodes.def"
|
|
|
|
|
2009-10-18 05:05:36 +04:00
|
|
|
void VisitFunctionTypeLoc(FunctionTypeLoc);
|
|
|
|
void VisitArrayTypeLoc(ArrayTypeLoc);
|
2009-10-17 01:56:05 +04:00
|
|
|
};
|
|
|
|
|
2009-10-18 05:05:36 +04:00
|
|
|
void TypeLocReader::VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
|
2009-10-17 01:56:05 +04:00
|
|
|
// nothing to do
|
|
|
|
}
|
2009-10-18 05:05:36 +04:00
|
|
|
void TypeLocReader::VisitBuiltinTypeLoc(BuiltinTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setBuiltinLoc(ReadSourceLocation(Record, Idx));
|
2010-01-18 21:04:31 +03:00
|
|
|
if (TL.needsExtraLocalData()) {
|
|
|
|
TL.setWrittenTypeSpec(static_cast<DeclSpec::TST>(Record[Idx++]));
|
|
|
|
TL.setWrittenSignSpec(static_cast<DeclSpec::TSS>(Record[Idx++]));
|
|
|
|
TL.setWrittenWidthSpec(static_cast<DeclSpec::TSW>(Record[Idx++]));
|
|
|
|
TL.setModeAttr(Record[Idx++]);
|
|
|
|
}
|
2009-10-17 01:56:05 +04:00
|
|
|
}
|
2009-10-18 05:05:36 +04:00
|
|
|
void TypeLocReader::VisitComplexTypeLoc(ComplexTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setNameLoc(ReadSourceLocation(Record, Idx));
|
2009-10-17 01:56:05 +04:00
|
|
|
}
|
2009-10-18 05:05:36 +04:00
|
|
|
void TypeLocReader::VisitPointerTypeLoc(PointerTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setStarLoc(ReadSourceLocation(Record, Idx));
|
2009-10-17 01:56:05 +04:00
|
|
|
}
|
2009-10-18 05:05:36 +04:00
|
|
|
void TypeLocReader::VisitBlockPointerTypeLoc(BlockPointerTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setCaretLoc(ReadSourceLocation(Record, Idx));
|
2009-10-17 01:56:05 +04:00
|
|
|
}
|
2009-10-18 05:05:36 +04:00
|
|
|
void TypeLocReader::VisitLValueReferenceTypeLoc(LValueReferenceTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setAmpLoc(ReadSourceLocation(Record, Idx));
|
2009-10-17 01:56:05 +04:00
|
|
|
}
|
2009-10-18 05:05:36 +04:00
|
|
|
void TypeLocReader::VisitRValueReferenceTypeLoc(RValueReferenceTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setAmpAmpLoc(ReadSourceLocation(Record, Idx));
|
2009-10-17 01:56:05 +04:00
|
|
|
}
|
2009-10-18 05:05:36 +04:00
|
|
|
void TypeLocReader::VisitMemberPointerTypeLoc(MemberPointerTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setStarLoc(ReadSourceLocation(Record, Idx));
|
2011-03-05 17:42:21 +03:00
|
|
|
TL.setClassTInfo(Reader.GetTypeSourceInfo(F, Record, Idx));
|
2009-10-17 01:56:05 +04:00
|
|
|
}
|
2009-10-18 05:05:36 +04:00
|
|
|
void TypeLocReader::VisitArrayTypeLoc(ArrayTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setLBracketLoc(ReadSourceLocation(Record, Idx));
|
|
|
|
TL.setRBracketLoc(ReadSourceLocation(Record, Idx));
|
2009-10-17 01:56:05 +04:00
|
|
|
if (Record[Idx++])
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setSizeExpr(Reader.ReadExpr(F));
|
2009-10-17 04:13:19 +04:00
|
|
|
else
|
2009-10-18 05:05:36 +04:00
|
|
|
TL.setSizeExpr(0);
|
|
|
|
}
|
|
|
|
void TypeLocReader::VisitConstantArrayTypeLoc(ConstantArrayTypeLoc TL) {
|
|
|
|
VisitArrayTypeLoc(TL);
|
|
|
|
}
|
|
|
|
void TypeLocReader::VisitIncompleteArrayTypeLoc(IncompleteArrayTypeLoc TL) {
|
|
|
|
VisitArrayTypeLoc(TL);
|
|
|
|
}
|
|
|
|
void TypeLocReader::VisitVariableArrayTypeLoc(VariableArrayTypeLoc TL) {
|
|
|
|
VisitArrayTypeLoc(TL);
|
|
|
|
}
|
|
|
|
void TypeLocReader::VisitDependentSizedArrayTypeLoc(
|
|
|
|
DependentSizedArrayTypeLoc TL) {
|
|
|
|
VisitArrayTypeLoc(TL);
|
|
|
|
}
|
|
|
|
void TypeLocReader::VisitDependentSizedExtVectorTypeLoc(
|
|
|
|
DependentSizedExtVectorTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setNameLoc(ReadSourceLocation(Record, Idx));
|
2009-10-18 05:05:36 +04:00
|
|
|
}
|
|
|
|
void TypeLocReader::VisitVectorTypeLoc(VectorTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setNameLoc(ReadSourceLocation(Record, Idx));
|
2009-10-18 05:05:36 +04:00
|
|
|
}
|
|
|
|
void TypeLocReader::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setNameLoc(ReadSourceLocation(Record, Idx));
|
2009-10-18 05:05:36 +04:00
|
|
|
}
|
|
|
|
void TypeLocReader::VisitFunctionTypeLoc(FunctionTypeLoc TL) {
|
2011-03-12 14:17:06 +03:00
|
|
|
TL.setLocalRangeBegin(ReadSourceLocation(Record, Idx));
|
|
|
|
TL.setLocalRangeEnd(ReadSourceLocation(Record, Idx));
|
2010-10-01 22:44:50 +04:00
|
|
|
TL.setTrailingReturn(Record[Idx++]);
|
2009-10-18 05:05:36 +04:00
|
|
|
for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i) {
|
2011-07-22 02:35:25 +04:00
|
|
|
TL.setArg(i, ReadDeclAs<ParmVarDecl>(Record, Idx));
|
2009-10-18 05:05:36 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
void TypeLocReader::VisitFunctionProtoTypeLoc(FunctionProtoTypeLoc TL) {
|
|
|
|
VisitFunctionTypeLoc(TL);
|
|
|
|
}
|
|
|
|
void TypeLocReader::VisitFunctionNoProtoTypeLoc(FunctionNoProtoTypeLoc TL) {
|
|
|
|
VisitFunctionTypeLoc(TL);
|
|
|
|
}
|
2009-12-05 01:46:56 +03:00
|
|
|
void TypeLocReader::VisitUnresolvedUsingTypeLoc(UnresolvedUsingTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setNameLoc(ReadSourceLocation(Record, Idx));
|
2009-12-05 01:46:56 +03:00
|
|
|
}
|
2009-10-18 05:05:36 +04:00
|
|
|
void TypeLocReader::VisitTypedefTypeLoc(TypedefTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setNameLoc(ReadSourceLocation(Record, Idx));
|
2009-10-18 05:05:36 +04:00
|
|
|
}
|
|
|
|
void TypeLocReader::VisitTypeOfExprTypeLoc(TypeOfExprTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setTypeofLoc(ReadSourceLocation(Record, Idx));
|
|
|
|
TL.setLParenLoc(ReadSourceLocation(Record, Idx));
|
|
|
|
TL.setRParenLoc(ReadSourceLocation(Record, Idx));
|
2009-10-18 05:05:36 +04:00
|
|
|
}
|
|
|
|
void TypeLocReader::VisitTypeOfTypeLoc(TypeOfTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setTypeofLoc(ReadSourceLocation(Record, Idx));
|
|
|
|
TL.setLParenLoc(ReadSourceLocation(Record, Idx));
|
|
|
|
TL.setRParenLoc(ReadSourceLocation(Record, Idx));
|
|
|
|
TL.setUnderlyingTInfo(Reader.GetTypeSourceInfo(F, Record, Idx));
|
2009-10-18 05:05:36 +04:00
|
|
|
}
|
|
|
|
void TypeLocReader::VisitDecltypeTypeLoc(DecltypeTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setNameLoc(ReadSourceLocation(Record, Idx));
|
2009-10-18 05:05:36 +04:00
|
|
|
}
|
2011-05-25 02:41:36 +04:00
|
|
|
void TypeLocReader::VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) {
|
|
|
|
TL.setKWLoc(ReadSourceLocation(Record, Idx));
|
|
|
|
TL.setLParenLoc(ReadSourceLocation(Record, Idx));
|
|
|
|
TL.setRParenLoc(ReadSourceLocation(Record, Idx));
|
|
|
|
TL.setUnderlyingTInfo(Reader.GetTypeSourceInfo(F, Record, Idx));
|
|
|
|
}
|
2011-02-20 06:19:35 +03:00
|
|
|
void TypeLocReader::VisitAutoTypeLoc(AutoTypeLoc TL) {
|
|
|
|
TL.setNameLoc(ReadSourceLocation(Record, Idx));
|
|
|
|
}
|
2009-10-18 05:05:36 +04:00
|
|
|
void TypeLocReader::VisitRecordTypeLoc(RecordTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setNameLoc(ReadSourceLocation(Record, Idx));
|
2009-10-18 05:05:36 +04:00
|
|
|
}
|
|
|
|
void TypeLocReader::VisitEnumTypeLoc(EnumTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setNameLoc(ReadSourceLocation(Record, Idx));
|
2009-10-18 05:05:36 +04:00
|
|
|
}
|
2011-01-06 04:58:22 +03:00
|
|
|
void TypeLocReader::VisitAttributedTypeLoc(AttributedTypeLoc TL) {
|
|
|
|
TL.setAttrNameLoc(ReadSourceLocation(Record, Idx));
|
|
|
|
if (TL.hasAttrOperand()) {
|
|
|
|
SourceRange range;
|
|
|
|
range.setBegin(ReadSourceLocation(Record, Idx));
|
|
|
|
range.setEnd(ReadSourceLocation(Record, Idx));
|
|
|
|
TL.setAttrOperandParensRange(range);
|
|
|
|
}
|
|
|
|
if (TL.hasAttrExprOperand()) {
|
|
|
|
if (Record[Idx++])
|
|
|
|
TL.setAttrExprOperand(Reader.ReadExpr(F));
|
|
|
|
else
|
|
|
|
TL.setAttrExprOperand(0);
|
|
|
|
} else if (TL.hasAttrEnumOperand())
|
|
|
|
TL.setAttrEnumOperandLoc(ReadSourceLocation(Record, Idx));
|
|
|
|
}
|
2009-10-18 05:05:36 +04:00
|
|
|
void TypeLocReader::VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setNameLoc(ReadSourceLocation(Record, Idx));
|
2009-10-18 05:05:36 +04:00
|
|
|
}
|
2009-10-18 13:09:24 +04:00
|
|
|
void TypeLocReader::VisitSubstTemplateTypeParmTypeLoc(
|
|
|
|
SubstTemplateTypeParmTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setNameLoc(ReadSourceLocation(Record, Idx));
|
2009-10-18 13:09:24 +04:00
|
|
|
}
|
2011-01-14 05:55:32 +03:00
|
|
|
void TypeLocReader::VisitSubstTemplateTypeParmPackTypeLoc(
|
|
|
|
SubstTemplateTypeParmPackTypeLoc TL) {
|
|
|
|
TL.setNameLoc(ReadSourceLocation(Record, Idx));
|
|
|
|
}
|
2009-10-18 05:05:36 +04:00
|
|
|
void TypeLocReader::VisitTemplateSpecializationTypeLoc(
|
|
|
|
TemplateSpecializationTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setTemplateNameLoc(ReadSourceLocation(Record, Idx));
|
|
|
|
TL.setLAngleLoc(ReadSourceLocation(Record, Idx));
|
|
|
|
TL.setRAngleLoc(ReadSourceLocation(Record, Idx));
|
2009-10-29 11:12:44 +03:00
|
|
|
for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
|
|
|
|
TL.setArgLocInfo(i,
|
2010-10-05 19:59:54 +04:00
|
|
|
Reader.GetTemplateArgumentLocInfo(F,
|
|
|
|
TL.getTypePtr()->getArg(i).getKind(),
|
|
|
|
Record, Idx));
|
2009-10-18 05:05:36 +04:00
|
|
|
}
|
2010-12-10 19:29:40 +03:00
|
|
|
void TypeLocReader::VisitParenTypeLoc(ParenTypeLoc TL) {
|
|
|
|
TL.setLParenLoc(ReadSourceLocation(Record, Idx));
|
|
|
|
TL.setRParenLoc(ReadSourceLocation(Record, Idx));
|
|
|
|
}
|
2010-05-12 01:36:43 +04:00
|
|
|
void TypeLocReader::VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setKeywordLoc(ReadSourceLocation(Record, Idx));
|
2011-03-01 21:12:44 +03:00
|
|
|
TL.setQualifierLoc(Reader.ReadNestedNameSpecifierLoc(F, Record, Idx));
|
2009-10-18 05:05:36 +04:00
|
|
|
}
|
2010-03-10 06:28:59 +03:00
|
|
|
void TypeLocReader::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setNameLoc(ReadSourceLocation(Record, Idx));
|
2010-03-10 06:28:59 +03:00
|
|
|
}
|
2010-03-31 21:34:00 +04:00
|
|
|
void TypeLocReader::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setKeywordLoc(ReadSourceLocation(Record, Idx));
|
2011-03-01 04:34:45 +03:00
|
|
|
TL.setQualifierLoc(Reader.ReadNestedNameSpecifierLoc(F, Record, Idx));
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setNameLoc(ReadSourceLocation(Record, Idx));
|
2009-10-18 05:05:36 +04:00
|
|
|
}
|
2010-06-11 04:33:02 +04:00
|
|
|
void TypeLocReader::VisitDependentTemplateSpecializationTypeLoc(
|
|
|
|
DependentTemplateSpecializationTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setKeywordLoc(ReadSourceLocation(Record, Idx));
|
2011-03-01 23:11:18 +03:00
|
|
|
TL.setQualifierLoc(Reader.ReadNestedNameSpecifierLoc(F, Record, Idx));
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setNameLoc(ReadSourceLocation(Record, Idx));
|
|
|
|
TL.setLAngleLoc(ReadSourceLocation(Record, Idx));
|
|
|
|
TL.setRAngleLoc(ReadSourceLocation(Record, Idx));
|
2010-06-11 04:33:02 +04:00
|
|
|
for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I)
|
|
|
|
TL.setArgLocInfo(I,
|
2010-10-05 19:59:54 +04:00
|
|
|
Reader.GetTemplateArgumentLocInfo(F,
|
|
|
|
TL.getTypePtr()->getArg(I).getKind(),
|
|
|
|
Record, Idx));
|
2010-06-11 04:33:02 +04:00
|
|
|
}
|
2010-12-20 05:24:11 +03:00
|
|
|
void TypeLocReader::VisitPackExpansionTypeLoc(PackExpansionTypeLoc TL) {
|
|
|
|
TL.setEllipsisLoc(ReadSourceLocation(Record, Idx));
|
|
|
|
}
|
2009-10-18 05:05:36 +04:00
|
|
|
void TypeLocReader::VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setNameLoc(ReadSourceLocation(Record, Idx));
|
2010-05-15 15:32:37 +04:00
|
|
|
}
|
|
|
|
void TypeLocReader::VisitObjCObjectTypeLoc(ObjCObjectTypeLoc TL) {
|
|
|
|
TL.setHasBaseTypeAsWritten(Record[Idx++]);
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setLAngleLoc(ReadSourceLocation(Record, Idx));
|
|
|
|
TL.setRAngleLoc(ReadSourceLocation(Record, Idx));
|
2009-10-23 02:37:11 +04:00
|
|
|
for (unsigned i = 0, e = TL.getNumProtocols(); i != e; ++i)
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setProtocolLoc(i, ReadSourceLocation(Record, Idx));
|
2009-10-18 05:05:36 +04:00
|
|
|
}
|
|
|
|
void TypeLocReader::VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc TL) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TL.setStarLoc(ReadSourceLocation(Record, Idx));
|
2009-10-17 01:56:05 +04:00
|
|
|
}
|
|
|
|
|
2011-07-22 20:00:58 +04:00
|
|
|
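/// \brief Read a TypeSourceInfo from the given record: the written type is
/// read first, then each TypeLoc node is filled in by a TypeLocReader.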
TypeSourceInfo *ASTReader::GetTypeSourceInfo(Module &F,
|
2010-07-23 02:43:28 +04:00
|
|
|
const RecordData &Record,
|
2009-10-17 01:56:05 +04:00
|
|
|
unsigned &Idx) {
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType InfoTy = readType(F, Record, Idx);
|
2009-10-17 01:56:05 +04:00
|
|
|
if (InfoTy.isNull())
|
|
|
|
return 0;
|
|
|
|
|
2009-12-07 05:54:59 +03:00
|
|
|
TypeSourceInfo *TInfo = getContext()->CreateTypeSourceInfo(InfoTy);
|
2010-10-05 19:59:54 +04:00
|
|
|
TypeLocReader TLR(*this, F, Record, Idx);
|
2009-12-07 05:54:59 +03:00
|
|
|
for (TypeLoc TL = TInfo->getTypeLoc(); !TL.isNull(); TL = TL.getNextTypeLoc())
|
2009-10-17 01:56:05 +04:00
|
|
|
TLR.Visit(TL);
|
2009-12-07 05:54:59 +03:00
|
|
|
return TInfo;
|
2009-10-17 01:56:05 +04:00
|
|
|
}
|
2009-04-10 02:27:44 +04:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
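/// \brief Resolve a global type ID into a type. The low bits of the ID hold
/// the fast qualifiers; the remaining bits index either a predefined type or
/// an entry in TypesLoaded, which is deserialized on first use.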
QualType ASTReader::GetType(TypeID ID) {
|
2009-09-24 23:53:00 +04:00
|
|
|
unsigned FastQuals = ID & Qualifiers::FastMask;
|
|
|
|
unsigned Index = ID >> Qualifiers::FastWidth;
|
2009-04-10 02:27:44 +04:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
if (Index < NUM_PREDEF_TYPE_IDS) {
|
2009-04-10 02:27:44 +04:00
|
|
|
QualType T;
|
2010-08-19 03:57:32 +04:00
|
|
|
switch ((PredefinedTypeIDs)Index) {
|
|
|
|
case PREDEF_TYPE_NULL_ID: return QualType();
|
|
|
|
case PREDEF_TYPE_VOID_ID: T = Context->VoidTy; break;
|
|
|
|
case PREDEF_TYPE_BOOL_ID: T = Context->BoolTy; break;
|
2009-04-10 02:27:44 +04:00
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case PREDEF_TYPE_CHAR_U_ID:
|
|
|
|
case PREDEF_TYPE_CHAR_S_ID:
|
2009-04-10 02:27:44 +04:00
|
|
|
// FIXME: Check that the signedness of CharTy is correct!
|
2009-04-28 01:45:14 +04:00
|
|
|
T = Context->CharTy;
|
2009-04-10 02:27:44 +04:00
|
|
|
break;
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
case PREDEF_TYPE_UCHAR_ID: T = Context->UnsignedCharTy; break;
|
|
|
|
case PREDEF_TYPE_USHORT_ID: T = Context->UnsignedShortTy; break;
|
|
|
|
case PREDEF_TYPE_UINT_ID: T = Context->UnsignedIntTy; break;
|
|
|
|
case PREDEF_TYPE_ULONG_ID: T = Context->UnsignedLongTy; break;
|
|
|
|
case PREDEF_TYPE_ULONGLONG_ID: T = Context->UnsignedLongLongTy; break;
|
|
|
|
case PREDEF_TYPE_UINT128_ID: T = Context->UnsignedInt128Ty; break;
|
|
|
|
case PREDEF_TYPE_SCHAR_ID: T = Context->SignedCharTy; break;
|
|
|
|
case PREDEF_TYPE_WCHAR_ID: T = Context->WCharTy; break;
|
|
|
|
case PREDEF_TYPE_SHORT_ID: T = Context->ShortTy; break;
|
|
|
|
case PREDEF_TYPE_INT_ID: T = Context->IntTy; break;
|
|
|
|
case PREDEF_TYPE_LONG_ID: T = Context->LongTy; break;
|
|
|
|
case PREDEF_TYPE_LONGLONG_ID: T = Context->LongLongTy; break;
|
|
|
|
case PREDEF_TYPE_INT128_ID: T = Context->Int128Ty; break;
|
|
|
|
case PREDEF_TYPE_FLOAT_ID: T = Context->FloatTy; break;
|
|
|
|
case PREDEF_TYPE_DOUBLE_ID: T = Context->DoubleTy; break;
|
|
|
|
case PREDEF_TYPE_LONGDOUBLE_ID: T = Context->LongDoubleTy; break;
|
|
|
|
case PREDEF_TYPE_OVERLOAD_ID: T = Context->OverloadTy; break;
|
2011-04-27 00:42:42 +04:00
|
|
|
case PREDEF_TYPE_BOUND_MEMBER: T = Context->BoundMemberTy; break;
|
2010-08-19 03:57:32 +04:00
|
|
|
case PREDEF_TYPE_DEPENDENT_ID: T = Context->DependentTy; break;
|
2011-04-07 12:22:57 +04:00
|
|
|
case PREDEF_TYPE_UNKNOWN_ANY: T = Context->UnknownAnyTy; break;
|
2010-08-19 03:57:32 +04:00
|
|
|
case PREDEF_TYPE_NULLPTR_ID: T = Context->NullPtrTy; break;
|
|
|
|
case PREDEF_TYPE_CHAR16_ID: T = Context->Char16Ty; break;
|
|
|
|
case PREDEF_TYPE_CHAR32_ID: T = Context->Char32Ty; break;
|
|
|
|
case PREDEF_TYPE_OBJC_ID: T = Context->ObjCBuiltinIdTy; break;
|
|
|
|
case PREDEF_TYPE_OBJC_CLASS: T = Context->ObjCBuiltinClassTy; break;
|
|
|
|
case PREDEF_TYPE_OBJC_SEL: T = Context->ObjCBuiltinSelTy; break;
|
2011-08-09 19:13:55 +04:00
|
|
|
case PREDEF_TYPE_AUTO_DEDUCT: T = Context->getAutoDeductType(); break;
|
|
|
|
|
|
|
|
case PREDEF_TYPE_AUTO_RREF_DEDUCT:
|
|
|
|
T = Context->getAutoRRefDeductType();
|
|
|
|
break;
|
2009-04-10 02:27:44 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
assert(!T.isNull() && "Unknown predefined type");
|
2009-09-24 23:53:00 +04:00
|
|
|
return T.withFastQualifiers(FastQuals);
|
2009-04-10 02:27:44 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
Index -= NUM_PREDEF_TYPE_IDS;
|
2010-07-21 02:37:49 +04:00
|
|
|
assert(Index < TypesLoaded.size() && "Type index out-of-range");
|
2010-07-15 00:26:45 +04:00
|
|
|
if (TypesLoaded[Index].isNull()) {
|
2011-07-22 04:38:23 +04:00
|
|
|
TypesLoaded[Index] = readTypeRecord(Index);
|
2010-10-05 22:37:06 +04:00
|
|
|
if (TypesLoaded[Index].isNull())
|
|
|
|
return QualType();
|
|
|
|
|
2010-08-19 03:57:06 +04:00
|
|
|
TypesLoaded[Index]->setFromAST();
|
2010-07-15 03:45:08 +04:00
|
|
|
if (DeserializationListener)
|
2010-08-20 20:03:59 +04:00
|
|
|
DeserializationListener->TypeRead(TypeIdx::fromTypeID(ID),
|
2010-07-16 20:36:56 +04:00
|
|
|
TypesLoaded[Index]);
|
2010-07-15 00:26:45 +04:00
|
|
|
}
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2009-09-24 23:53:00 +04:00
|
|
|
return TypesLoaded[Index].withFastQualifiers(FastQuals);
|
2009-04-10 02:27:44 +04:00
|
|
|
}
|
|
|
|
|
2011-07-22 20:00:58 +04:00
|
|
|
QualType ASTReader::getLocalType(Module &F, unsigned LocalID) {
|
2011-07-22 04:38:23 +04:00
|
|
|
return GetType(getGlobalTypeID(F, LocalID));
|
|
|
|
}
|
|
|
|
|
|
|
|
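/// \brief Map a module-local type ID to a global type ID, preserving the
/// fast-qualifier bits and remapping the index through the module's
/// TypeRemap table; predefined type IDs pass through unchanged.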
serialization::TypeID
|
2011-07-22 20:00:58 +04:00
|
|
|
ASTReader::getGlobalTypeID(Module &F, unsigned LocalID) const {
|
2011-08-02 20:26:37 +04:00
|
|
|
unsigned FastQuals = LocalID & Qualifiers::FastMask;
|
|
|
|
unsigned LocalIndex = LocalID >> Qualifiers::FastWidth;
|
|
|
|
|
|
|
|
if (LocalIndex < NUM_PREDEF_TYPE_IDS)
|
|
|
|
return LocalID;
|
|
|
|
|
|
|
|
ContinuousRangeMap<uint32_t, int, 2>::iterator I
|
|
|
|
= F.TypeRemap.find(LocalIndex - NUM_PREDEF_TYPE_IDS);
|
|
|
|
assert(I != F.TypeRemap.end() && "Invalid index into type index remap");
|
|
|
|
|
|
|
|
unsigned GlobalIndex = LocalIndex + I->second;
|
|
|
|
return (GlobalIndex << Qualifiers::FastWidth) | FastQuals;
|
|
|
|
}
|
|
|
|
|
2009-10-29 11:12:44 +03:00
|
|
|
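/// \brief Read the location information for a template argument of the given
/// kind; argument kinds with no written form yield an empty
/// TemplateArgumentLocInfo.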
TemplateArgumentLocInfo
|
2011-07-22 20:00:58 +04:00
|
|
|
ASTReader::GetTemplateArgumentLocInfo(Module &F,
|
2010-10-05 19:59:54 +04:00
|
|
|
TemplateArgument::ArgKind Kind,
|
2009-10-29 11:12:44 +03:00
|
|
|
const RecordData &Record,
|
2010-06-29 02:28:35 +04:00
|
|
|
unsigned &Index) {
|
2009-10-29 11:12:44 +03:00
|
|
|
switch (Kind) {
|
|
|
|
case TemplateArgument::Expression:
|
2010-10-05 19:59:54 +04:00
|
|
|
return ReadExpr(F);
|
2009-10-29 11:12:44 +03:00
|
|
|
case TemplateArgument::Type:
|
2010-10-05 19:59:54 +04:00
|
|
|
return GetTypeSourceInfo(F, Record, Index);
|
2009-11-11 04:00:40 +03:00
|
|
|
case TemplateArgument::Template: {
|
2011-03-02 20:09:35 +03:00
|
|
|
NestedNameSpecifierLoc QualifierLoc = ReadNestedNameSpecifierLoc(F, Record,
|
|
|
|
Index);
|
2011-01-05 21:58:31 +03:00
|
|
|
SourceLocation TemplateNameLoc = ReadSourceLocation(F, Record, Index);
|
2011-03-02 20:09:35 +03:00
|
|
|
return TemplateArgumentLocInfo(QualifierLoc, TemplateNameLoc,
|
2011-01-05 21:58:31 +03:00
|
|
|
SourceLocation());
|
|
|
|
}
|
|
|
|
case TemplateArgument::TemplateExpansion: {
|
2011-03-02 20:09:35 +03:00
|
|
|
NestedNameSpecifierLoc QualifierLoc = ReadNestedNameSpecifierLoc(F, Record,
|
|
|
|
Index);
|
2010-10-05 19:59:54 +04:00
|
|
|
SourceLocation TemplateNameLoc = ReadSourceLocation(F, Record, Index);
|
2011-01-05 20:40:24 +03:00
|
|
|
SourceLocation EllipsisLoc = ReadSourceLocation(F, Record, Index);
|
2011-03-02 20:09:35 +03:00
|
|
|
return TemplateArgumentLocInfo(QualifierLoc, TemplateNameLoc,
|
2011-01-05 20:40:24 +03:00
|
|
|
EllipsisLoc);
|
2009-11-11 04:00:40 +03:00
|
|
|
}
|
2009-10-29 11:12:44 +03:00
|
|
|
case TemplateArgument::Null:
|
|
|
|
case TemplateArgument::Integral:
|
|
|
|
case TemplateArgument::Declaration:
|
|
|
|
case TemplateArgument::Pack:
|
|
|
|
return TemplateArgumentLocInfo();
|
|
|
|
}
|
2009-12-12 08:05:38 +03:00
|
|
|
llvm_unreachable("unexpected template argument loc");
|
2009-10-29 11:12:44 +03:00
|
|
|
return TemplateArgumentLocInfo();
|
|
|
|
}
|
|
|
|
|
2010-06-22 13:54:59 +04:00
|
|
|
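/// \brief Read a template argument together with its location information.
/// For expression arguments, a flag in the record indicates that the written
/// expression is the argument's own expression and can be reused directly.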
TemplateArgumentLoc
|
2011-07-22 20:00:58 +04:00
|
|
|
ASTReader::ReadTemplateArgumentLoc(Module &F,
|
2010-07-23 02:43:28 +04:00
|
|
|
const RecordData &Record, unsigned &Index) {
|
2010-10-05 19:59:54 +04:00
|
|
|
TemplateArgument Arg = ReadTemplateArgument(F, Record, Index);
|
2010-06-28 13:31:42 +04:00
|
|
|
|
|
|
|
if (Arg.getKind() == TemplateArgument::Expression) {
|
|
|
|
if (Record[Index++]) // bool InfoHasSameExpr.
|
|
|
|
return TemplateArgumentLoc(Arg, TemplateArgumentLocInfo(Arg.getAsExpr()));
|
|
|
|
}
|
2010-10-05 19:59:54 +04:00
|
|
|
return TemplateArgumentLoc(Arg, GetTemplateArgumentLocInfo(F, Arg.getKind(),
|
2010-06-29 02:28:35 +04:00
|
|
|
Record, Index));
|
2010-06-22 13:54:59 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:56:43 +04:00
|
|
|
Decl *ASTReader::GetExternalDecl(uint32_t ID) {
|
2010-06-01 13:23:16 +04:00
|
|
|
return GetDecl(ID);
|
|
|
|
}
|
|
|
|
|
2011-08-04 04:01:48 +04:00
|
|
|
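/// \brief Read the offset of a C++ base-specifier set: the record stores a
/// 1-based index into the module's CXXBaseSpecifiersOffsets table, which is
/// then converted into a global bit offset.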
uint64_t ASTReader::readCXXBaseSpecifiers(Module &M, const RecordData &Record,
|
|
|
|
unsigned &Idx){
|
|
|
|
if (Idx >= Record.size())
|
2010-10-30 02:39:52 +04:00
|
|
|
return 0;
|
|
|
|
|
2011-08-04 04:01:48 +04:00
|
|
|
unsigned LocalID = Record[Idx++];
|
|
|
|
return getGlobalBitOffset(M, M.CXXBaseSpecifiersOffsets[LocalID - 1]);
|
2010-10-30 02:39:52 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
CXXBaseSpecifier *ASTReader::GetExternalCXXBaseSpecifiers(uint64_t Offset) {
|
2011-07-22 10:10:01 +04:00
|
|
|
RecordLocation Loc = getLocalBitOffset(Offset);
|
|
|
|
llvm::BitstreamCursor &Cursor = Loc.F->DeclsCursor;
|
2010-10-30 02:39:52 +04:00
|
|
|
SavedStreamPosition SavedPosition(Cursor);
|
2011-07-22 10:10:01 +04:00
|
|
|
Cursor.JumpToBit(Loc.Offset);
|
2010-10-30 02:39:52 +04:00
|
|
|
ReadingKindTracker ReadingKind(Read_Decl, *this);
|
|
|
|
RecordData Record;
|
|
|
|
unsigned Code = Cursor.ReadCode();
|
|
|
|
unsigned RecCode = Cursor.ReadRecord(Code, Record);
|
|
|
|
if (RecCode != DECL_CXX_BASE_SPECIFIERS) {
|
|
|
|
Error("Malformed AST file: missing C++ base specifiers");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned Idx = 0;
|
|
|
|
unsigned NumBases = Record[Idx++];
|
|
|
|
void *Mem = Context->Allocate(sizeof(CXXBaseSpecifier) * NumBases);
|
|
|
|
CXXBaseSpecifier *Bases = new (Mem) CXXBaseSpecifier [NumBases];
|
|
|
|
for (unsigned I = 0; I != NumBases; ++I)
|
2011-07-22 10:10:01 +04:00
|
|
|
Bases[I] = ReadCXXBaseSpecifier(*Loc.F, Record, Idx);
|
2010-10-30 02:39:52 +04:00
|
|
|
return Bases;
|
|
|
|
}
|
|
|
|
|
2011-07-22 02:35:25 +04:00
|
|
|
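/// \brief Map a module-local declaration ID to a global declaration ID;
/// predefined IDs pass through unchanged, all others are remapped through
/// the module's DeclRemap table.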
serialization::DeclID
|
2011-07-22 20:00:58 +04:00
|
|
|
ASTReader::getGlobalDeclID(Module &F, unsigned LocalID) const {
|
2011-08-03 20:05:40 +04:00
|
|
|
if (LocalID < NUM_PREDEF_DECL_IDS)
|
2011-08-03 19:48:04 +04:00
|
|
|
return LocalID;
|
|
|
|
|
|
|
|
ContinuousRangeMap<uint32_t, int, 2>::iterator I
|
2011-08-03 20:05:40 +04:00
|
|
|
= F.DeclRemap.find(LocalID - NUM_PREDEF_DECL_IDS);
|
2011-08-03 19:48:04 +04:00
|
|
|
assert(I != F.DeclRemap.end() && "Invalid index into decl index remap");
|
|
|
|
|
|
|
|
return LocalID + I->second;
|
2011-07-22 02:35:25 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:57:32 +04:00
|
|
|
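/// \brief Resolve a declaration ID into a declaration. Predefined IDs (the
/// null declaration, the translation unit, and the Objective-C 'id' and
/// 'Class' declarations) are handled eagerly; all other declarations are
/// deserialized on first use and cached in DeclsLoaded.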
Decl *ASTReader::GetDecl(DeclID ID) {
|
2011-08-03 20:05:40 +04:00
|
|
|
if (ID < NUM_PREDEF_DECL_IDS) {
|
|
|
|
switch ((PredefinedDeclIDs)ID) {
|
2011-08-12 04:15:20 +04:00
|
|
|
case PREDEF_DECL_NULL_ID:
|
2011-08-03 20:05:40 +04:00
|
|
|
return 0;
|
2011-08-12 04:15:20 +04:00
|
|
|
|
|
|
|
case PREDEF_DECL_TRANSLATION_UNIT_ID:
|
|
|
|
assert(Context && "No context available?");
|
|
|
|
return Context->getTranslationUnitDecl();
|
2011-08-12 09:46:01 +04:00
|
|
|
|
|
|
|
case PREDEF_DECL_OBJC_ID_ID:
|
|
|
|
assert(Context && "No context available?");
|
|
|
|
return Context->getObjCIdDecl();
|
2011-08-12 09:59:41 +04:00
|
|
|
|
|
|
|
case PREDEF_DECL_OBJC_CLASS_ID:
|
|
|
|
assert(Context && "No context available?");
|
|
|
|
return Context->getObjCClassDecl();
|
2011-08-03 20:05:40 +04:00
|
|
|
}
|
|
|
|
|
2009-04-10 02:27:44 +04:00
|
|
|
return 0;
|
2011-08-03 20:05:40 +04:00
|
|
|
}
|
|
|
|
|
2011-08-12 04:15:20 +04:00
|
|
|
unsigned Index = ID - NUM_PREDEF_DECL_IDS;
|
|
|
|
|
|
|
|
if (Index >= DeclsLoaded.size()) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("declaration ID out-of-range for AST file");
|
2009-04-25 22:35:21 +04:00
|
|
|
return 0;
|
|
|
|
}
|
2011-08-12 04:15:20 +04:00
|
|
|
|
|
|
|
if (!DeclsLoaded[Index]) {
|
2011-08-03 19:48:04 +04:00
|
|
|
ReadDeclRecord(ID);
|
2010-07-15 03:45:08 +04:00
|
|
|
if (DeserializationListener)
|
|
|
|
DeserializationListener->DeclRead(ID, DeclsLoaded[Index]);
|
|
|
|
}
|
2009-04-10 02:27:44 +04:00
|
|
|
|
2009-04-25 22:35:21 +04:00
|
|
|
return DeclsLoaded[Index];
|
2009-04-10 02:27:44 +04:00
|
|
|
}
|
|
|
|
|
2011-07-22 20:00:58 +04:00
|
|
|
serialization::DeclID ASTReader::ReadDeclID(Module &F,
|
2011-07-22 02:35:25 +04:00
|
|
|
const RecordData &Record,
|
|
|
|
unsigned &Idx) {
|
|
|
|
if (Idx >= Record.size()) {
|
|
|
|
Error("Corrupted AST file");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return getGlobalDeclID(F, Record[Idx++]);
|
|
|
|
}
|
|
|
|
|
2009-04-27 09:46:25 +04:00
|
|
|
/// \brief Resolve the offset of a statement into a statement.
|
|
|
|
///
|
|
|
|
/// This operation will read a new statement from the external
|
|
|
|
/// source each time it is called, and is meant to be used via a
|
|
|
|
/// LazyOffsetPtr (which is used by Decls for the body of functions, etc).
|
2010-08-19 03:56:43 +04:00
|
|
|
Stmt *ASTReader::GetExternalDeclStmt(uint64_t Offset) {
|
2010-10-28 13:29:32 +04:00
|
|
|
// Switch case IDs are per Decl.
|
|
|
|
ClearSwitchCaseIDs();
|
|
|
|
|
2010-07-22 21:01:13 +04:00
|
|
|
// Offset here is a global offset across the entire chain.
|
2011-07-22 10:10:01 +04:00
|
|
|
RecordLocation Loc = getLocalBitOffset(Offset);
|
|
|
|
Loc.F->DeclsCursor.JumpToBit(Loc.Offset);
|
|
|
|
return ReadStmtFromStream(*Loc.F);
|
2009-04-18 04:07:54 +04:00
|
|
|
}
|
|
|
|
|
2011-07-16 01:46:17 +04:00
|
|
|
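/// \brief Read all of the declarations lexically stored in a declaration
/// context, visiting each module that contributes to the context and
/// filtering by kind when a predicate is provided.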
ExternalLoadResult ASTReader::FindExternalLexicalDecls(const DeclContext *DC,
|
2010-10-15 00:14:34 +04:00
|
|
|
bool (*isKindWeWant)(Decl::Kind),
|
2011-07-23 14:55:15 +04:00
|
|
|
SmallVectorImpl<Decl*> &Decls) {
|
2010-07-22 21:01:13 +04:00
|
|
|
// There might be lexical decls in multiple parts of the chain, for the TU
|
|
|
|
// at least.
|
2010-09-28 06:24:44 +04:00
|
|
|
// DeclContextOffsets might reallocate as we load additional decls below,
|
|
|
|
// so make a copy of the vector.
|
|
|
|
DeclContextInfos Infos = DeclContextOffsets[DC];
|
2010-07-22 21:01:13 +04:00
|
|
|
for (DeclContextInfos::iterator I = Infos.begin(), E = Infos.end();
|
|
|
|
I != E; ++I) {
|
2010-07-27 04:17:23 +04:00
|
|
|
// IDs can be 0 if this context doesn't contain declarations.
|
|
|
|
if (!I->LexicalDecls)
|
2010-07-22 21:01:13 +04:00
|
|
|
continue;
|
2009-04-15 01:18:50 +04:00
|
|
|
|
2010-07-22 21:01:13 +04:00
|
|
|
// Load all of the declaration IDs
|
2010-10-15 00:14:34 +04:00
|
|
|
for (const KindDeclIDPair *ID = I->LexicalDecls,
|
|
|
|
*IDE = ID + I->NumLexicalDecls; ID != IDE; ++ID) {
|
|
|
|
if (isKindWeWant && !isKindWeWant((Decl::Kind)ID->first))
|
|
|
|
continue;
|
2011-07-22 02:35:25 +04:00
|
|
|
|
2011-07-22 03:29:11 +04:00
|
|
|
Decl *D = GetLocalDecl(*I->F, ID->second);
|
2010-09-28 06:24:44 +04:00
|
|
|
assert(D && "Null decl in lexical decls");
|
|
|
|
Decls.push_back(D);
|
|
|
|
}
|
2010-03-18 03:56:54 +03:00
|
|
|
}
|
2009-04-10 02:27:44 +04:00
|
|
|
|
2009-04-23 02:34:57 +04:00
|
|
|
++NumLexicalDeclContextsRead;
|
2011-07-16 01:46:17 +04:00
|
|
|
return ELR_Success;
|
2009-04-10 02:27:44 +04:00
|
|
|
}
|
|
|
|
|
2010-06-01 13:23:16 +04:00
|
|
|
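/// \brief Look up a name in a declaration context with external visible
/// storage, consulting each module's on-disk lookup table from most recent
/// to oldest and stopping at the first module that provides matching results.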
DeclContext::lookup_result
|
2010-08-19 03:56:43 +04:00
|
|
|
ASTReader::FindExternalVisibleDeclsByName(const DeclContext *DC,
|
2010-06-01 13:23:16 +04:00
|
|
|
DeclarationName Name) {
|
2009-09-09 19:08:12 +04:00
|
|
|
assert(DC->hasExternalVisibleStorage() &&
|
2009-04-10 02:27:44 +04:00
|
|
|
"DeclContext has no visible decls in storage");
|
2010-08-20 20:04:35 +04:00
|
|
|
if (!Name)
|
|
|
|
return DeclContext::lookup_result(DeclContext::lookup_iterator(0),
|
|
|
|
DeclContext::lookup_iterator(0));
|
2009-04-10 02:27:44 +04:00
|
|
|
|
2011-07-23 14:55:15 +04:00
|
|
|
SmallVector<NamedDecl *, 64> Decls;
|
2010-08-24 04:49:55 +04:00
|
|
|
// There might be visible decls in multiple parts of the chain, for the TU
|
2010-08-24 04:50:16 +04:00
|
|
|
// and namespaces. For any given name, the last available results replace
|
|
|
|
// all earlier ones. For this reason, we walk in reverse.
|
2011-08-12 03:26:42 +04:00
|
|
|
// Copy the DeclContextInfos vector instead of using a reference to the
|
|
|
|
// vector stored in the map, because DeclContextOffsets can change while
|
|
|
|
// we load declarations with GetLocalDeclAs.
|
|
|
|
DeclContextInfos Infos = DeclContextOffsets[DC];
|
2010-08-24 04:50:16 +04:00
|
|
|
for (DeclContextInfos::reverse_iterator I = Infos.rbegin(), E = Infos.rend();
|
2010-07-22 21:01:13 +04:00
|
|
|
I != E; ++I) {
|
2010-08-20 20:04:35 +04:00
|
|
|
if (!I->NameLookupTableData)
|
2010-07-22 21:01:13 +04:00
|
|
|
continue;
|
|
|
|
|
2010-08-20 20:04:35 +04:00
|
|
|
ASTDeclContextNameLookupTable *LookupTable =
|
|
|
|
(ASTDeclContextNameLookupTable*)I->NameLookupTableData;
|
|
|
|
ASTDeclContextNameLookupTable::iterator Pos = LookupTable->find(Name);
|
|
|
|
if (Pos == LookupTable->end())
|
2010-07-22 21:01:13 +04:00
|
|
|
continue;
|
2009-04-10 02:27:44 +04:00
|
|
|
|
2010-08-20 20:04:35 +04:00
|
|
|
ASTDeclContextNameLookupTrait::data_type Data = *Pos;
|
2011-08-02 22:32:54 +04:00
|
|
|
for (; Data.first != Data.second; ++Data.first) {
|
|
|
|
NamedDecl *ND = GetLocalDeclAs<NamedDecl>(*I->F, *Data.first);
|
|
|
|
if (!ND)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (ND->getDeclName() != Name) {
|
|
|
|
assert(!Name.getCXXNameType().isNull() &&
|
|
|
|
"Name mismatch without a type");
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
Decls.push_back(ND);
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we rejected all of the declarations we found, e.g., because the
|
|
|
|
// name didn't actually match, continue looking through DeclContexts.
|
|
|
|
if (Decls.empty())
|
|
|
|
continue;
|
|
|
|
|
2010-08-24 04:50:16 +04:00
|
|
|
break;
|
2009-04-10 02:27:44 +04:00
|
|
|
}
|
|
|
|
|
2009-04-23 02:34:57 +04:00
|
|
|
++NumVisibleDeclContextsRead;
|
2010-06-01 13:23:16 +04:00
|
|
|
|
2010-08-20 20:04:35 +04:00
|
|
|
SetExternalVisibleDeclsForName(DC, Name, Decls);
|
2010-06-01 13:23:16 +04:00
|
|
|
return const_cast<DeclContext*>(DC)->lookup(Name);
|
2009-04-10 02:27:44 +04:00
|
|
|
}
|
|
|
|
|
2010-08-21 03:35:55 +04:00
|
|
|
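/// \brief Materialize every visible declaration in the given context by
/// walking each module's name-lookup table and registering the declarations
/// found for each name.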
void ASTReader::MaterializeVisibleDecls(const DeclContext *DC) {
|
|
|
|
assert(DC->hasExternalVisibleStorage() &&
|
|
|
|
"DeclContext has no visible decls in storage");
|
|
|
|
|
2011-07-23 14:55:15 +04:00
|
|
|
SmallVector<NamedDecl *, 64> Decls;
|
2010-08-21 03:35:55 +04:00
|
|
|
// There might be visible decls in multiple parts of the chain, for the TU
|
|
|
|
// and namespaces.
|
|
|
|
DeclContextInfos &Infos = DeclContextOffsets[DC];
|
|
|
|
for (DeclContextInfos::iterator I = Infos.begin(), E = Infos.end();
|
|
|
|
I != E; ++I) {
|
|
|
|
if (!I->NameLookupTableData)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
ASTDeclContextNameLookupTable *LookupTable =
|
|
|
|
(ASTDeclContextNameLookupTable*)I->NameLookupTableData;
|
|
|
|
for (ASTDeclContextNameLookupTable::item_iterator
|
|
|
|
ItemI = LookupTable->item_begin(),
|
|
|
|
ItemEnd = LookupTable->item_end() ; ItemI != ItemEnd; ++ItemI) {
|
|
|
|
ASTDeclContextNameLookupTable::item_iterator::value_type Val
|
|
|
|
= *ItemI;
|
|
|
|
ASTDeclContextNameLookupTrait::data_type Data = Val.second;
|
|
|
|
Decls.clear();
|
|
|
|
for (; Data.first != Data.second; ++Data.first)
|
2011-07-22 03:29:11 +04:00
|
|
|
Decls.push_back(GetLocalDeclAs<NamedDecl>(*I->F, *Data.first));
|
2010-08-21 03:35:55 +04:00
|
|
|
MaterializeVisibleDeclsForName(DC, Val.first, Decls);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-08-19 03:56:43 +04:00
|
|
|
void ASTReader::PassInterestingDeclsToConsumer() {
|
2010-07-07 19:46:26 +04:00
|
|
|
assert(Consumer);
|
|
|
|
while (!InterestingDecls.empty()) {
|
|
|
|
DeclGroupRef DG(InterestingDecls.front());
|
|
|
|
InterestingDecls.pop_front();
|
2010-08-11 22:52:41 +04:00
|
|
|
Consumer->HandleInterestingDecl(DG);
|
2010-07-07 19:46:26 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-08-19 03:56:43 +04:00
|
|
|
void ASTReader::StartTranslationUnit(ASTConsumer *Consumer) {
|
2009-04-22 23:09:20 +04:00
|
|
|
this->Consumer = Consumer;
|
|
|
|
|
2009-04-14 04:24:19 +04:00
|
|
|
if (!Consumer)
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (unsigned I = 0, N = ExternalDefinitions.size(); I != N; ++I) {
|
2010-07-07 19:46:26 +04:00
|
|
|
// Force deserialization of this decl, which will cause it to be queued for
|
|
|
|
// passing to the consumer.
|
2009-09-17 07:06:44 +04:00
|
|
|
GetDecl(ExternalDefinitions[I]);
|
2009-04-14 04:24:19 +04:00
|
|
|
}
|
2009-04-25 04:41:30 +04:00
|
|
|
|
2010-07-07 19:46:26 +04:00
|
|
|
PassInterestingDeclsToConsumer();
|
2009-04-14 04:24:19 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:56:43 +04:00
|
|
|
void ASTReader::PrintStats() {
|
2010-08-19 03:57:06 +04:00
|
|
|
std::fprintf(stderr, "*** AST File Statistics:\n");
|
2009-04-10 02:27:44 +04:00
|
|
|
|
2009-09-09 19:08:12 +04:00
|
|
|
unsigned NumTypesLoaded
|
2009-04-25 23:10:14 +04:00
|
|
|
= TypesLoaded.size() - std::count(TypesLoaded.begin(), TypesLoaded.end(),
|
2009-09-24 23:53:00 +04:00
|
|
|
QualType());
|
2009-04-25 23:10:14 +04:00
|
|
|
unsigned NumDeclsLoaded
|
|
|
|
= DeclsLoaded.size() - std::count(DeclsLoaded.begin(), DeclsLoaded.end(),
|
|
|
|
(Decl *)0);
|
|
|
|
unsigned NumIdentifiersLoaded
|
|
|
|
= IdentifiersLoaded.size() - std::count(IdentifiersLoaded.begin(),
|
|
|
|
IdentifiersLoaded.end(),
|
|
|
|
(IdentifierInfo *)0);
|
2009-09-09 19:08:12 +04:00
|
|
|
unsigned NumSelectorsLoaded
|
2009-04-25 23:10:14 +04:00
|
|
|
= SelectorsLoaded.size() - std::count(SelectorsLoaded.begin(),
|
|
|
|
SelectorsLoaded.end(),
|
|
|
|
Selector());
|
2009-04-25 21:48:32 +04:00
|
|
|
|
2009-04-27 22:38:38 +04:00
|
|
|
std::fprintf(stderr, " %u stat cache hits\n", NumStatHits);
|
|
|
|
std::fprintf(stderr, " %u stat cache misses\n", NumStatMisses);
|
2011-07-21 22:46:38 +04:00
|
|
|
if (unsigned TotalNumSLocEntries = getTotalNumSLocs())
|
2009-04-27 10:38:32 +04:00
|
|
|
std::fprintf(stderr, " %u/%u source location entries read (%f%%)\n",
|
|
|
|
NumSLocEntriesRead, TotalNumSLocEntries,
|
|
|
|
((float)NumSLocEntriesRead/TotalNumSLocEntries * 100));
|
2009-04-25 22:35:21 +04:00
|
|
|
if (!TypesLoaded.empty())
|
2009-04-25 21:48:32 +04:00
|
|
|
std::fprintf(stderr, " %u/%u types read (%f%%)\n",
|
2009-04-25 22:35:21 +04:00
|
|
|
NumTypesLoaded, (unsigned)TypesLoaded.size(),
|
|
|
|
((float)NumTypesLoaded/TypesLoaded.size() * 100));
|
|
|
|
if (!DeclsLoaded.empty())
|
2009-04-25 21:48:32 +04:00
|
|
|
std::fprintf(stderr, " %u/%u declarations read (%f%%)\n",
|
2009-04-25 22:35:21 +04:00
|
|
|
NumDeclsLoaded, (unsigned)DeclsLoaded.size(),
|
|
|
|
((float)NumDeclsLoaded/DeclsLoaded.size() * 100));
|
2009-04-25 23:10:14 +04:00
|
|
|
if (!IdentifiersLoaded.empty())
|
2009-04-25 21:48:32 +04:00
|
|
|
std::fprintf(stderr, " %u/%u identifiers read (%f%%)\n",
|
2009-04-25 23:10:14 +04:00
|
|
|
NumIdentifiersLoaded, (unsigned)IdentifiersLoaded.size(),
|
|
|
|
((float)NumIdentifiersLoaded/IdentifiersLoaded.size() * 100));
|
2010-08-05 00:40:17 +04:00
|
|
|
if (!SelectorsLoaded.empty())
|
2009-04-25 21:48:32 +04:00
|
|
|
std::fprintf(stderr, " %u/%u selectors read (%f%%)\n",
|
2010-08-05 00:40:17 +04:00
|
|
|
NumSelectorsLoaded, (unsigned)SelectorsLoaded.size(),
|
|
|
|
((float)NumSelectorsLoaded/SelectorsLoaded.size() * 100));
|
2009-04-25 21:48:32 +04:00
|
|
|
if (TotalNumStatements)
|
|
|
|
std::fprintf(stderr, " %u/%u statements read (%f%%)\n",
|
|
|
|
NumStatementsRead, TotalNumStatements,
|
|
|
|
((float)NumStatementsRead/TotalNumStatements * 100));
|
|
|
|
if (TotalNumMacros)
|
|
|
|
std::fprintf(stderr, " %u/%u macros read (%f%%)\n",
|
|
|
|
NumMacrosRead, TotalNumMacros,
|
|
|
|
((float)NumMacrosRead/TotalNumMacros * 100));
|
|
|
|
if (TotalLexicalDeclContexts)
|
|
|
|
std::fprintf(stderr, " %u/%u lexical declcontexts read (%f%%)\n",
|
|
|
|
NumLexicalDeclContextsRead, TotalLexicalDeclContexts,
|
|
|
|
((float)NumLexicalDeclContextsRead/TotalLexicalDeclContexts
|
|
|
|
* 100));
|
|
|
|
if (TotalVisibleDeclContexts)
|
|
|
|
std::fprintf(stderr, " %u/%u visible declcontexts read (%f%%)\n",
|
|
|
|
NumVisibleDeclContextsRead, TotalVisibleDeclContexts,
|
|
|
|
((float)NumVisibleDeclContextsRead/TotalVisibleDeclContexts
|
|
|
|
* 100));
|
2010-08-05 01:22:45 +04:00
|
|
|
if (TotalNumMethodPoolEntries) {
|
2009-04-25 21:48:32 +04:00
|
|
|
std::fprintf(stderr, " %u/%u method pool entries read (%f%%)\n",
|
2010-08-05 01:22:45 +04:00
|
|
|
NumMethodPoolEntriesRead, TotalNumMethodPoolEntries,
|
|
|
|
((float)NumMethodPoolEntriesRead/TotalNumMethodPoolEntries
|
2009-04-25 21:48:32 +04:00
|
|
|
* 100));
|
2010-08-05 01:22:45 +04:00
|
|
|
std::fprintf(stderr, " %u method pool misses\n", NumMethodPoolMisses);
|
2009-04-25 21:48:32 +04:00
|
|
|
}
|
2009-04-10 02:27:44 +04:00
|
|
|
std::fprintf(stderr, "\n");
|
2011-07-21 23:50:14 +04:00
|
|
|
dump();
|
|
|
|
std::fprintf(stderr, "\n");
|
|
|
|
}
|
|
|
|
|
2011-07-22 20:00:58 +04:00
|
|
|
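/// \brief Dump a mapping from global indices to the modules that supply
/// them, as part of ASTReader::dump().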
template<typename Key, typename Module, unsigned InitialCapacity>
|
2011-07-21 23:50:14 +04:00
|
|
|
static void
|
2011-07-23 14:55:15 +04:00
|
|
|
dumpModuleIDMap(StringRef Name,
|
2011-07-22 20:00:58 +04:00
|
|
|
const ContinuousRangeMap<Key, Module *,
|
2011-07-21 23:50:14 +04:00
|
|
|
InitialCapacity> &Map) {
|
|
|
|
if (Map.begin() == Map.end())
|
|
|
|
return;
|
|
|
|
|
2011-07-22 20:00:58 +04:00
|
|
|
typedef ContinuousRangeMap<Key, Module *, InitialCapacity> MapType;
|
2011-07-21 23:50:14 +04:00
|
|
|
llvm::errs() << Name << ":\n";
|
|
|
|
for (typename MapType::const_iterator I = Map.begin(), IEnd = Map.end();
|
|
|
|
I != IEnd; ++I) {
|
|
|
|
llvm::errs() << " " << I->first << " -> " << I->second->FileName
|
|
|
|
<< "\n";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void ASTReader::dump() {
|
2011-08-02 15:12:41 +04:00
|
|
|
llvm::errs() << "*** PCH/Module Remappings:\n";
|
2011-07-22 10:10:01 +04:00
|
|
|
dumpModuleIDMap("Global bit offset map", GlobalBitOffsetsMap);
|
2011-07-21 23:50:14 +04:00
|
|
|
dumpModuleIDMap("Global source location entry map", GlobalSLocEntryMap);
|
2011-07-29 04:21:44 +04:00
|
|
|
dumpModuleIDMap("Global type map", GlobalTypeMap);
|
2011-07-29 04:56:45 +04:00
|
|
|
dumpModuleIDMap("Global declaration map", GlobalDeclMap);
|
|
|
|
dumpModuleIDMap("Global identifier map", GlobalIdentifierMap);
|
|
|
|
dumpModuleIDMap("Global selector map", GlobalSelectorMap);
|
|
|
|
dumpModuleIDMap("Global macro definition map", GlobalMacroDefinitionMap);
|
|
|
|
dumpModuleIDMap("Global preprocessed entity map",
|
|
|
|
GlobalPreprocessedEntityMap);
|
2011-08-02 15:12:41 +04:00
|
|
|
|
|
|
|
llvm::errs() << "\n*** PCH/Modules Loaded:";
|
|
|
|
for (ModuleManager::ModuleConstIterator M = ModuleMgr.begin(),
|
|
|
|
MEnd = ModuleMgr.end();
|
|
|
|
M != MEnd; ++M)
|
|
|
|
(*M)->dump();
|
2009-04-10 02:27:44 +04:00
|
|
|
}
|
|
|
|
|
2011-04-29 03:46:20 +04:00
|
|
|
/// Return the amount of memory used by memory buffers, breaking down
|
|
|
|
/// by heap-backed versus mmap'ed memory.
|
|
|
|
void ASTReader::getMemoryBufferSizes(MemoryBufferSizes &sizes) const {
|
2011-07-26 00:32:21 +04:00
|
|
|
for (ModuleConstIterator I = ModuleMgr.begin(),
|
|
|
|
E = ModuleMgr.end(); I != E; ++I) {
|
|
|
|
if (llvm::MemoryBuffer *buf = (*I)->Buffer.get()) {
|
2011-04-29 03:46:20 +04:00
|
|
|
size_t bytes = buf->getBufferSize();
|
|
|
|
switch (buf->getBufferKind()) {
|
|
|
|
case llvm::MemoryBuffer::MemoryBuffer_Malloc:
|
|
|
|
sizes.malloc_bytes += bytes;
|
|
|
|
break;
|
|
|
|
case llvm::MemoryBuffer::MemoryBuffer_MMap:
|
|
|
|
sizes.mmap_bytes += bytes;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2011-07-26 00:32:21 +04:00
|
|
|
}
|
2011-04-29 03:46:20 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:56:43 +04:00
|
|
|
void ASTReader::InitializeSema(Sema &S) {
|
2009-04-22 02:25:48 +04:00
|
|
|
SemaObj = &S;
|
2009-04-25 01:10:55 +04:00
|
|
|
S.ExternalSource = this;
|
|
|
|
|
2009-04-23 01:15:06 +04:00
|
|
|
// Make sure any declarations that were deserialized "too early"
|
|
|
|
// still get added to the identifier's declaration chains.
|
2010-09-25 03:29:12 +04:00
|
|
|
for (unsigned I = 0, N = PreloadedDecls.size(); I != N; ++I) {
|
|
|
|
if (SemaObj->TUScope)
|
2010-08-21 13:40:31 +04:00
|
|
|
SemaObj->TUScope->AddDecl(PreloadedDecls[I]);
|
2010-09-25 03:29:12 +04:00
|
|
|
|
|
|
|
SemaObj->IdResolver.AddDecl(PreloadedDecls[I]);
|
2009-04-23 01:15:06 +04:00
|
|
|
}
|
|
|
|
PreloadedDecls.clear();
|
2009-04-23 02:02:47 +04:00
|
|
|
|
2010-08-02 11:14:54 +04:00
|
|
|
// Load the offsets of the declarations that Sema references.
|
|
|
|
// They will be lazily deserialized when needed.
|
|
|
|
if (!SemaDeclRefs.empty()) {
|
|
|
|
assert(SemaDeclRefs.size() == 2 && "More decl refs than expected!");
|
2011-07-28 04:57:24 +04:00
|
|
|
if (!SemaObj->StdNamespace)
|
|
|
|
SemaObj->StdNamespace = SemaDeclRefs[0];
|
|
|
|
if (!SemaObj->StdBadAlloc)
|
|
|
|
SemaObj->StdBadAlloc = SemaDeclRefs[1];
|
2010-08-02 11:14:54 +04:00
|
|
|
}
|
|
|
|
|
2011-02-15 22:46:30 +03:00
|
|
|
if (!FPPragmaOptions.empty()) {
|
|
|
|
assert(FPPragmaOptions.size() == 1 && "Wrong number of FP_PRAGMA_OPTIONS");
|
|
|
|
SemaObj->FPFeatures.fp_contract = FPPragmaOptions[0];
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!OpenCLExtensions.empty()) {
|
|
|
|
unsigned I = 0;
|
|
|
|
#define OPENCLEXT(nm) SemaObj->OpenCLFeatures.nm = OpenCLExtensions[I++];
|
|
|
|
#include "clang/Basic/OpenCLExtensions.def"
|
|
|
|
|
|
|
|
assert(OpenCLExtensions.size() == I && "Wrong number of OPENCL_EXTENSIONS");
|
|
|
|
}
|
2009-04-22 02:25:48 +04:00
|
|
|
}
|
|
|
|
|
2010-08-19 03:56:43 +04:00
|
|
|
IdentifierInfo* ASTReader::get(const char *NameStart, const char *NameEnd) {
|
2010-08-02 22:30:12 +04:00
|
|
|
// Try to find this name within our on-disk hash tables. We start with the
|
|
|
|
// most recent one, since that one contains the most up-to-date info.
|
2011-07-26 00:32:21 +04:00
|
|
|
for (ModuleIterator I = ModuleMgr.begin(), E = ModuleMgr.end(); I != E; ++I) {
|
2010-08-19 03:57:06 +04:00
|
|
|
ASTIdentifierLookupTable *IdTable
|
2011-07-26 00:32:21 +04:00
|
|
|
= (ASTIdentifierLookupTable *)(*I)->IdentifierLookupTable;
|
2010-07-22 21:01:13 +04:00
|
|
|
if (!IdTable)
|
|
|
|
continue;
|
2010-07-22 02:31:37 +04:00
|
|
|
std::pair<const char*, unsigned> Key(NameStart, NameEnd - NameStart);
|
2010-08-19 03:57:06 +04:00
|
|
|
ASTIdentifierLookupTable::iterator Pos = IdTable->find(Key);
|
2010-07-22 02:31:37 +04:00
|
|
|
if (Pos == IdTable->end())
|
|
|
|
continue;
|
2009-04-22 02:25:48 +04:00
|
|
|
|
2010-07-22 02:31:37 +04:00
|
|
|
// Dereferencing the iterator has the effect of building the
|
|
|
|
// IdentifierInfo node and populating it with the various
|
|
|
|
// declarations it needs.
|
2010-08-02 22:30:12 +04:00
|
|
|
return *Pos;
|
2010-07-22 02:31:37 +04:00
|
|
|
}
|
2010-08-02 22:30:12 +04:00
|
|
|
return 0;
|
2009-04-22 02:25:48 +04:00
|
|
|
}
|
|
|
|
|
2010-10-15 02:11:03 +04:00
|
|
|
namespace clang {
|
|
|
|
/// \brief An identifier-lookup iterator that enumerates all of the
|
|
|
|
/// identifiers stored within a set of AST files.
|
|
|
|
class ASTIdentifierIterator : public IdentifierIterator {
|
|
|
|
/// \brief The AST reader whose identifiers are being enumerated.
|
|
|
|
const ASTReader &Reader;
|
|
|
|
|
|
|
|
/// \brief The current index into the chain of AST files stored in
|
|
|
|
/// the AST reader.
|
|
|
|
unsigned Index;
|
|
|
|
|
|
|
|
/// \brief The current position within the identifier lookup table
|
|
|
|
/// of the current AST file.
|
|
|
|
ASTIdentifierLookupTable::key_iterator Current;
|
|
|
|
|
|
|
|
/// \brief The end position within the identifier lookup table of
|
|
|
|
/// the current AST file.
|
|
|
|
ASTIdentifierLookupTable::key_iterator End;
|
|
|
|
|
|
|
|
public:
|
|
|
|
explicit ASTIdentifierIterator(const ASTReader &Reader);
|
|
|
|
|
2011-07-23 14:55:15 +04:00
|
|
|
virtual StringRef Next();
|
2010-10-15 02:11:03 +04:00
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
ASTIdentifierIterator::ASTIdentifierIterator(const ASTReader &Reader)
|
2011-07-26 00:32:21 +04:00
|
|
|
: Reader(Reader), Index(Reader.ModuleMgr.size() - 1) {
|
2010-10-15 02:11:03 +04:00
|
|
|
ASTIdentifierLookupTable *IdTable
|
2011-07-26 00:32:21 +04:00
|
|
|
= (ASTIdentifierLookupTable *)Reader.ModuleMgr[Index].IdentifierLookupTable;
|
2010-10-15 02:11:03 +04:00
|
|
|
Current = IdTable->key_begin();
|
|
|
|
End = IdTable->key_end();
|
|
|
|
}
|
|
|
|
|
2011-07-23 14:55:15 +04:00
|
|
|
StringRef ASTIdentifierIterator::Next() {
|
2010-10-15 02:11:03 +04:00
|
|
|
while (Current == End) {
|
|
|
|
// If we have exhausted all of our AST files, we're done.
|
|
|
|
if (Index == 0)
|
2011-07-23 14:55:15 +04:00
|
|
|
return StringRef();
|
2010-10-15 02:11:03 +04:00
|
|
|
|
|
|
|
--Index;
|
|
|
|
ASTIdentifierLookupTable *IdTable
|
2011-07-26 00:32:21 +04:00
|
|
|
= (ASTIdentifierLookupTable *)Reader.ModuleMgr[Index].
|
|
|
|
IdentifierLookupTable;
|
2010-10-15 02:11:03 +04:00
|
|
|
Current = IdTable->key_begin();
|
|
|
|
End = IdTable->key_end();
|
|
|
|
}
|
|
|
|
|
|
|
|
// We still have identifiers remaining in the current AST file; return
|
|
|
|
// the next one.
|
|
|
|
std::pair<const char*, unsigned> Key = *Current;
|
|
|
|
++Current;
|
2011-07-23 14:55:15 +04:00
|
|
|
return StringRef(Key.first, Key.second);
|
2010-10-15 02:11:03 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
IdentifierIterator *ASTReader::getIdentifiers() const {
|
|
|
|
return new ASTIdentifierIterator(*this);
|
|
|
|
}
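// Illustrative usage sketch (added for clarity; not part of the original
// source, and the variable names are assumed): the caller owns the returned
// iterator and walks it until Next() yields an empty StringRef.
//
//   llvm::OwningPtr<IdentifierIterator> Iter(Reader.getIdentifiers());
//   for (StringRef Name = Iter->Next(); !Name.empty(); Name = Iter->Next()) {
//     // visit each identifier stored in the loaded AST files
//   }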
|
|
|
|
|
2009-09-09 19:08:12 +04:00
|
|
|
std::pair<ObjCMethodList, ObjCMethodList>
|
2010-08-19 03:56:43 +04:00
|
|
|
ASTReader::ReadMethodPool(Selector Sel) {
|
2010-08-05 00:40:17 +04:00
|
|
|
// Find this selector in a hash table. We want to find the most recent entry.
|
2011-07-26 00:32:21 +04:00
|
|
|
for (ModuleIterator I = ModuleMgr.begin(), E = ModuleMgr.end(); I != E; ++I) {
|
|
|
|
Module &F = *(*I);
|
2010-08-05 00:40:17 +04:00
|
|
|
if (!F.SelectorLookupTable)
|
|
|
|
continue;
|
2009-04-25 01:10:55 +04:00
|
|
|
|
2010-08-19 03:57:06 +04:00
|
|
|
ASTSelectorLookupTable *PoolTable
|
|
|
|
= (ASTSelectorLookupTable*)F.SelectorLookupTable;
|
|
|
|
ASTSelectorLookupTable::iterator Pos = PoolTable->find(Sel);
|
2010-08-05 00:40:17 +04:00
|
|
|
if (Pos != PoolTable->end()) {
|
|
|
|
++NumSelectorsRead;
|
2010-08-05 01:22:45 +04:00
|
|
|
// FIXME: Not quite happy with the statistics here. We probably should
|
|
|
|
// disable this tracking when called via LoadSelector.
|
|
|
|
// Also, should entries without methods count as misses?
|
|
|
|
++NumMethodPoolEntriesRead;
|
2010-08-19 03:57:06 +04:00
|
|
|
ASTSelectorLookupTrait::data_type Data = *Pos;
|
2010-08-05 00:40:17 +04:00
|
|
|
if (DeserializationListener)
|
|
|
|
DeserializationListener->SelectorRead(Data.ID, Sel);
|
|
|
|
return std::make_pair(Data.Instance, Data.Factory);
|
|
|
|
}
|
2009-04-25 21:48:32 +04:00
|
|
|
}
|
2009-04-25 01:10:55 +04:00
|
|
|
|
2010-08-05 01:22:45 +04:00
|
|
|
++NumMethodPoolMisses;
|
2010-08-05 00:40:17 +04:00
|
|
|
return std::pair<ObjCMethodList, ObjCMethodList>();
|
2009-04-25 01:10:55 +04:00
|
|
|
}
|
|
|
|
|
2011-06-28 20:20:02 +04:00
|
|
|
void ASTReader::ReadKnownNamespaces(
|
2011-07-23 14:55:15 +04:00
|
|
|
SmallVectorImpl<NamespaceDecl *> &Namespaces) {
|
2011-06-28 20:20:02 +04:00
|
|
|
Namespaces.clear();
|
|
|
|
|
|
|
|
for (unsigned I = 0, N = KnownNamespaces.size(); I != N; ++I) {
|
|
|
|
if (NamespaceDecl *Namespace
|
|
|
|
= dyn_cast_or_null<NamespaceDecl>(GetDecl(KnownNamespaces[I])))
|
|
|
|
Namespaces.push_back(Namespace);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-07-28 00:58:46 +04:00
|
|
|
void ASTReader::ReadTentativeDefinitions(
|
|
|
|
SmallVectorImpl<VarDecl *> &TentativeDefs) {
|
|
|
|
for (unsigned I = 0, N = TentativeDefinitions.size(); I != N; ++I) {
|
|
|
|
VarDecl *Var = dyn_cast_or_null<VarDecl>(GetDecl(TentativeDefinitions[I]));
|
|
|
|
if (Var)
|
|
|
|
TentativeDefs.push_back(Var);
|
|
|
|
}
|
|
|
|
TentativeDefinitions.clear();
|
|
|
|
}
|
|
|
|
|
2011-07-28 01:45:57 +04:00
|
|
|
void ASTReader::ReadUnusedFileScopedDecls(
|
|
|
|
SmallVectorImpl<const DeclaratorDecl *> &Decls) {
|
|
|
|
for (unsigned I = 0, N = UnusedFileScopedDecls.size(); I != N; ++I) {
|
|
|
|
DeclaratorDecl *D
|
|
|
|
= dyn_cast_or_null<DeclaratorDecl>(GetDecl(UnusedFileScopedDecls[I]));
|
|
|
|
if (D)
|
|
|
|
Decls.push_back(D);
|
|
|
|
}
|
|
|
|
UnusedFileScopedDecls.clear();
|
|
|
|
}
|
|
|
|
|
2011-07-28 01:57:17 +04:00
|
|
|
void ASTReader::ReadDelegatingConstructors(
|
|
|
|
SmallVectorImpl<CXXConstructorDecl *> &Decls) {
|
|
|
|
for (unsigned I = 0, N = DelegatingCtorDecls.size(); I != N; ++I) {
|
|
|
|
CXXConstructorDecl *D
|
|
|
|
= dyn_cast_or_null<CXXConstructorDecl>(GetDecl(DelegatingCtorDecls[I]));
|
|
|
|
if (D)
|
|
|
|
Decls.push_back(D);
|
|
|
|
}
|
|
|
|
DelegatingCtorDecls.clear();
|
|
|
|
}
|
|
|
|
|
2011-07-28 04:39:29 +04:00
|
|
|
void ASTReader::ReadExtVectorDecls(SmallVectorImpl<TypedefNameDecl *> &Decls) {
|
|
|
|
for (unsigned I = 0, N = ExtVectorDecls.size(); I != N; ++I) {
|
|
|
|
TypedefNameDecl *D
|
|
|
|
= dyn_cast_or_null<TypedefNameDecl>(GetDecl(ExtVectorDecls[I]));
|
|
|
|
if (D)
|
|
|
|
Decls.push_back(D);
|
|
|
|
}
|
|
|
|
ExtVectorDecls.clear();
|
|
|
|
}
|
|
|
|
|
2011-07-28 04:53:40 +04:00
|
|
|
void ASTReader::ReadDynamicClasses(SmallVectorImpl<CXXRecordDecl *> &Decls) {
|
|
|
|
for (unsigned I = 0, N = DynamicClasses.size(); I != N; ++I) {
|
|
|
|
CXXRecordDecl *D
|
|
|
|
= dyn_cast_or_null<CXXRecordDecl>(GetDecl(DynamicClasses[I]));
|
|
|
|
if (D)
|
|
|
|
Decls.push_back(D);
|
|
|
|
}
|
|
|
|
DynamicClasses.clear();
|
|
|
|
}
|
|
|
|
|
2011-07-28 18:20:37 +04:00
|
|
|
void
|
|
|
|
ASTReader::ReadLocallyScopedExternalDecls(SmallVectorImpl<NamedDecl *> &Decls) {
|
|
|
|
for (unsigned I = 0, N = LocallyScopedExternalDecls.size(); I != N; ++I) {
|
|
|
|
NamedDecl *D
|
|
|
|
= dyn_cast_or_null<NamedDecl>(GetDecl(LocallyScopedExternalDecls[I]));
|
|
|
|
if (D)
|
|
|
|
Decls.push_back(D);
|
|
|
|
}
|
|
|
|
LocallyScopedExternalDecls.clear();
|
|
|
|
}
|
|
|
|
|
2011-07-28 18:54:22 +04:00
|
|
|
void ASTReader::ReadReferencedSelectors(
|
|
|
|
SmallVectorImpl<std::pair<Selector, SourceLocation> > &Sels) {
|
|
|
|
if (ReferencedSelectorsData.empty())
|
|
|
|
return;
|
|
|
|
|
|
|
|
// If there are any @selector references, pass them along; they are used to
|
|
|
|
// implement -Wselector.
|
|
|
|
unsigned int DataSize = ReferencedSelectorsData.size()-1;
|
|
|
|
unsigned I = 0;
|
|
|
|
while (I < DataSize) {
|
|
|
|
Selector Sel = DecodeSelector(ReferencedSelectorsData[I++]);
|
|
|
|
SourceLocation SelLoc
|
|
|
|
= SourceLocation::getFromRawEncoding(ReferencedSelectorsData[I++]);
|
|
|
|
Sels.push_back(std::make_pair(Sel, SelLoc));
|
|
|
|
}
|
|
|
|
ReferencedSelectorsData.clear();
|
|
|
|
}
|
|
|
|
|
2011-07-28 22:09:57 +04:00
|
|
|
void ASTReader::ReadWeakUndeclaredIdentifiers(
|
|
|
|
SmallVectorImpl<std::pair<IdentifierInfo *, WeakInfo> > &WeakIDs) {
|
|
|
|
if (WeakUndeclaredIdentifiers.empty())
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (unsigned I = 0, N = WeakUndeclaredIdentifiers.size(); I < N; /*none*/) {
|
|
|
|
IdentifierInfo *WeakId
|
|
|
|
= DecodeIdentifierInfo(WeakUndeclaredIdentifiers[I++]);
|
|
|
|
IdentifierInfo *AliasId
|
|
|
|
= DecodeIdentifierInfo(WeakUndeclaredIdentifiers[I++]);
|
|
|
|
SourceLocation Loc
|
|
|
|
= SourceLocation::getFromRawEncoding(WeakUndeclaredIdentifiers[I++]);
|
|
|
|
bool Used = WeakUndeclaredIdentifiers[I++];
|
|
|
|
WeakInfo WI(AliasId, Loc);
|
|
|
|
WI.setUsed(Used);
|
|
|
|
WeakIDs.push_back(std::make_pair(WeakId, WI));
|
|
|
|
}
|
|
|
|
WeakUndeclaredIdentifiers.clear();
|
|
|
|
}
|
|
|
|
|
2011-07-28 23:11:31 +04:00
|
|
|
void ASTReader::ReadUsedVTables(SmallVectorImpl<ExternalVTableUse> &VTables) {
|
|
|
|
for (unsigned Idx = 0, N = VTableUses.size(); Idx < N; /* In loop */) {
|
|
|
|
ExternalVTableUse VT;
|
|
|
|
VT.Record = dyn_cast_or_null<CXXRecordDecl>(GetDecl(VTableUses[Idx++]));
|
|
|
|
VT.Location = SourceLocation::getFromRawEncoding(VTableUses[Idx++]);
|
|
|
|
VT.DefinitionRequired = VTableUses[Idx++];
|
|
|
|
VTables.push_back(VT);
|
|
|
|
}
|
|
|
|
|
|
|
|
VTableUses.clear();
|
|
|
|
}
|
|
|
|
|
2011-07-28 23:49:54 +04:00
|
|
|
void ASTReader::ReadPendingInstantiations(
|
|
|
|
SmallVectorImpl<std::pair<ValueDecl *, SourceLocation> > &Pending) {
|
|
|
|
for (unsigned Idx = 0, N = PendingInstantiations.size(); Idx < N;) {
|
|
|
|
ValueDecl *D = cast<ValueDecl>(GetDecl(PendingInstantiations[Idx++]));
|
|
|
|
SourceLocation Loc
|
|
|
|
= SourceLocation::getFromRawEncoding(PendingInstantiations[Idx++]);
|
|
|
|
Pending.push_back(std::make_pair(D, Loc));
|
|
|
|
}
|
|
|
|
PendingInstantiations.clear();
|
|
|
|
}
|
|
|
|
|
2010-08-19 03:56:43 +04:00
|
|
|
void ASTReader::LoadSelector(Selector Sel) {
|
2010-08-04 22:21:41 +04:00
|
|
|
// It would be complicated to avoid reading the methods anyway. So don't.
|
|
|
|
ReadMethodPool(Sel);
|
|
|
|
}
|
|
|
|
|
2011-07-29 00:55:49 +04:00
|
|
|
void ASTReader::SetIdentifierInfo(IdentifierID ID, IdentifierInfo *II) {
|
2009-04-22 02:25:48 +04:00
|
|
|
assert(ID && "Non-zero identifier ID required");
|
2009-04-29 01:53:25 +04:00
|
|
|
assert(ID <= IdentifiersLoaded.size() && "identifier ID out of range");
|
2009-04-25 23:10:14 +04:00
|
|
|
IdentifiersLoaded[ID - 1] = II;
|
2010-07-24 03:49:55 +04:00
|
|
|
if (DeserializationListener)
|
|
|
|
DeserializationListener->IdentifierRead(ID, II);
|
2009-04-22 02:25:48 +04:00
|
|
|
}
|
|
|
|
|
2009-07-06 22:54:52 +04:00
|
|
|
/// \brief Set the globally-visible declarations associated with the given
|
|
|
|
/// identifier.
|
|
|
|
///
|
2010-08-19 03:57:06 +04:00
|
|
|
/// If the AST reader is currently in a state where the given declaration IDs
|
2009-09-09 19:08:12 +04:00
|
|
|
/// cannot safely be resolved, they are queued until it is safe to resolve
|
2009-07-06 22:54:52 +04:00
|
|
|
/// them.
|
|
|
|
///
|
|
|
|
/// \param II an IdentifierInfo that refers to one or more globally-visible
|
|
|
|
/// declarations.
|
|
|
|
///
|
|
|
|
/// \param DeclIDs the set of declaration IDs with the name @p II that are
|
|
|
|
/// visible at global scope.
|
|
|
|
///
|
|
|
|
/// \param Nonrecursive should be true to indicate that the caller knows that
|
|
|
|
/// this call is non-recursive, and therefore the globally-visible declarations
|
|
|
|
/// will not be placed onto the pending queue.
|
2009-09-09 19:08:12 +04:00
|
|
|
void
|
2010-08-19 03:56:43 +04:00
|
|
|
ASTReader::SetGloballyVisibleDecls(IdentifierInfo *II,
|
2011-07-23 14:55:15 +04:00
|
|
|
const SmallVectorImpl<uint32_t> &DeclIDs,
|
2009-07-06 22:54:52 +04:00
|
|
|
bool Nonrecursive) {
|
2010-07-30 14:03:16 +04:00
|
|
|
if (NumCurrentElementsDeserializing && !Nonrecursive) {
|
2009-07-06 22:54:52 +04:00
|
|
|
PendingIdentifierInfos.push_back(PendingIdentifierInfo());
|
|
|
|
PendingIdentifierInfo &PII = PendingIdentifierInfos.back();
|
|
|
|
PII.II = II;
|
2010-09-07 03:43:28 +04:00
|
|
|
PII.DeclIDs.append(DeclIDs.begin(), DeclIDs.end());
|
2009-07-06 22:54:52 +04:00
|
|
|
return;
|
|
|
|
}
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2009-07-06 22:54:52 +04:00
|
|
|
for (unsigned I = 0, N = DeclIDs.size(); I != N; ++I) {
|
|
|
|
NamedDecl *D = cast<NamedDecl>(GetDecl(DeclIDs[I]));
|
|
|
|
if (SemaObj) {
|
2010-08-13 07:15:25 +04:00
|
|
|
if (SemaObj->TUScope) {
|
|
|
|
// Introduce this declaration into the translation-unit scope
|
|
|
|
// and add it to the declaration chain for this identifier, so
|
|
|
|
// that (unqualified) name lookup will find it.
|
2010-08-21 13:40:31 +04:00
|
|
|
SemaObj->TUScope->AddDecl(D);
|
2010-08-13 07:15:25 +04:00
|
|
|
}
|
2010-09-25 03:29:12 +04:00
|
|
|
SemaObj->IdResolver.AddDeclToIdentifierChain(II, D);
|
2009-07-06 22:54:52 +04:00
|
|
|
} else {
|
|
|
|
// Queue this declaration so that it will be added to the
|
|
|
|
// translation unit scope and identifier's declaration chain
|
|
|
|
// once a Sema object is known.
|
|
|
|
PreloadedDecls.push_back(D);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-07-29 00:55:49 +04:00
|
|
|
IdentifierInfo *ASTReader::DecodeIdentifierInfo(IdentifierID ID) {
|
2009-04-11 04:14:32 +04:00
|
|
|
if (ID == 0)
|
|
|
|
return 0;
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2010-07-21 04:46:22 +04:00
|
|
|
if (IdentifiersLoaded.empty()) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("no identifier table in AST file");
|
2009-04-11 04:14:32 +04:00
|
|
|
return 0;
|
|
|
|
}
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2009-06-19 04:03:23 +04:00
|
|
|
assert(PP && "Forgot to set Preprocessor ?");
|
2010-07-21 04:46:22 +04:00
|
|
|
ID -= 1;
|
|
|
|
if (!IdentifiersLoaded[ID]) {
|
2011-07-20 04:59:32 +04:00
|
|
|
GlobalIdentifierMapType::iterator I = GlobalIdentifierMap.find(ID + 1);
|
|
|
|
assert(I != GlobalIdentifierMap.end() && "Corrupted global identifier map");
|
2011-07-29 04:56:45 +04:00
|
|
|
Module *M = I->second;
|
|
|
|
unsigned Index = ID - M->BaseIdentifierID;
|
|
|
|
const char *Str = M->IdentifierTableData + M->IdentifierOffsets[Index];
|
2009-04-26 01:04:17 +04:00
|
|
|
|
2010-08-19 03:57:06 +04:00
|
|
|
// All of the strings in the AST file are preceded by a 16-bit length.
|
|
|
|
// Extract that 16-bit length to avoid having to execute strlen().
|
2009-10-23 08:45:31 +04:00
|
|
|
// NOTE: 'StrLenPtr' is an 'unsigned char*' so that we load bytes as
|
|
|
|
// unsigned integers. This is important to avoid integer overflow when
|
|
|
|
// we cast them to 'unsigned'.
|
2009-10-23 07:57:22 +04:00
|
|
|
const unsigned char *StrLenPtr = (const unsigned char*) Str - 2;
|
2009-04-29 00:01:51 +04:00
|
|
|
unsigned StrLen = (((unsigned) StrLenPtr[0])
|
|
|
|
| (((unsigned) StrLenPtr[1]) << 8)) - 1;
|
2010-07-21 04:46:22 +04:00
|
|
|
IdentifiersLoaded[ID]
|
2011-07-23 14:55:15 +04:00
|
|
|
= &PP->getIdentifierTable().get(StringRef(Str, StrLen));
|
2010-07-24 03:49:55 +04:00
|
|
|
if (DeserializationListener)
|
|
|
|
DeserializationListener->IdentifierRead(ID + 1, IdentifiersLoaded[ID]);
|
2009-04-11 04:14:32 +04:00
|
|
|
}
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2010-07-21 04:46:22 +04:00
|
|
|
return IdentifiersLoaded[ID];
|
2009-04-10 02:27:44 +04:00
|
|
|
}
|
|
|
|
|
2011-07-29 00:55:49 +04:00
|
|
|
IdentifierInfo *ASTReader::getLocalIdentifier(Module &M, unsigned LocalID) {
|
|
|
|
return DecodeIdentifierInfo(getGlobalIdentifierID(M, LocalID));
|
|
|
|
}
|
|
|
|
|
|
|
|
IdentifierID ASTReader::getGlobalIdentifierID(Module &M, unsigned LocalID) {
|
2011-08-04 01:49:18 +04:00
|
|
|
if (LocalID < NUM_PREDEF_IDENT_IDS)
|
|
|
|
return LocalID;
|
|
|
|
|
|
|
|
ContinuousRangeMap<uint32_t, int, 2>::iterator I
|
|
|
|
= M.IdentifierRemap.find(LocalID - NUM_PREDEF_IDENT_IDS);
|
|
|
|
assert(I != M.IdentifierRemap.end()
|
|
|
|
&& "Invalid index into identifier index remap");
|
|
|
|
|
|
|
|
return LocalID + I->second;
|
2011-07-29 00:55:49 +04:00
|
|
|
}
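// Note (added for clarity, not in the original source): local identifier IDs
// below NUM_PREDEF_IDENT_IDS are already global; any other ID is shifted by
// the per-module offset recorded in IdentifierRemap. For example, with a
// hypothetical remap offset of +5000, a local ID of 12 maps to global ID 5012.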
|
|
|
|
|
Revamp the SourceManager to separate the representation of parsed
source locations from source locations loaded from an AST/PCH file.
Previously, loading an AST/PCH file involved carefully pre-allocating
space at the beginning of the source manager for the source locations
and FileIDs that correspond to the prefix, and then appending the
source locations/FileIDs used for parsing the remaining translation
unit. This design forced us into loading PCH files early, as a prefix,
which has become a rather significant limitation.
This patch splits the SourceManager space into two parts: for source
location "addresses", the lower values (growing upward) are used to
describe parsed code, while upper values (growing downward) are used
for source locations loaded from AST/PCH files. Similarly, positive
FileIDs are used to describe parsed code while negative FileIDs are
used to file/macro locations loaded from AST/PCH files. As a result,
we can load PCH/AST files even during parsing, making various
improvemnts in the future possible, e.g., teaching #include <foo.h> to
look for and load <foo.h.gch> if it happens to be already available.
This patch was originally written by Sebastian Redl, then brought
forward to the modern age by Jonathan Turner, and finally
polished/finished by me to be committed.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@135484 91177308-0d34-0410-b5e6-96231b3b80d8
2011-07-19 20:10:42 +04:00
|
|
|
bool ASTReader::ReadSLocEntry(int ID) {
|
2011-04-20 04:21:03 +04:00
|
|
|
return ReadSLocEntryRecord(ID) != Success;
|
2009-04-27 10:38:32 +04:00
|
|
|
}
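// Note (added for clarity, not in the original source): per the SourceManager
// redesign described above, entries loaded from AST/PCH files are addressed
// with negative IDs (growing downward), while parsed entries keep positive
// IDs; ReadSLocEntry is invoked for the loaded side of that address space.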
|
|
|
|
|
2011-07-29 01:16:51 +04:00
|
|
|
Selector ASTReader::getLocalSelector(Module &M, unsigned LocalID) {
|
|
|
|
return DecodeSelector(getGlobalSelectorID(M, LocalID));
|
|
|
|
}
|
|
|
|
|
|
|
|
Selector ASTReader::DecodeSelector(serialization::SelectorID ID) {
|
2009-04-23 14:39:46 +04:00
|
|
|
if (ID == 0)
|
|
|
|
return Selector();
|
2009-09-09 19:08:12 +04:00
|
|
|
|
2010-08-05 00:40:17 +04:00
|
|
|
if (ID > SelectorsLoaded.size()) {
|
2010-08-19 03:57:06 +04:00
|
|
|
Error("selector ID out of range in AST file");
|
2009-04-23 14:39:46 +04:00
|
|
|
return Selector();
|
|
|
|
}
|
2009-04-25 21:48:32 +04:00
|
|
|
|
2010-08-05 00:40:17 +04:00
|
|
|
if (SelectorsLoaded[ID - 1].getAsOpaquePtr() == 0) {
|
2009-04-25 21:48:32 +04:00
|
|
|
// Load this selector from the selector table.
|
2011-07-20 05:10:58 +04:00
|
|
|
GlobalSelectorMapType::iterator I = GlobalSelectorMap.find(ID);
|
|
|
|
assert(I != GlobalSelectorMap.end() && "Corrupted global selector map");
|
2011-07-29 04:56:45 +04:00
|
|
|
Module &M = *I->second;
|
|
|
|
ASTSelectorLookupTrait Trait(*this, M);
|
2011-08-04 03:28:44 +04:00
|
|
|
unsigned Idx = ID - M.BaseSelectorID - NUM_PREDEF_SELECTOR_IDS;
|
2011-07-20 05:10:58 +04:00
|
|
|
SelectorsLoaded[ID - 1] =
|
2011-07-29 04:56:45 +04:00
|
|
|
Trait.ReadKey(M.SelectorLookupTableData + M.SelectorOffsets[Idx], 0);
|
2011-07-20 05:10:58 +04:00
|
|
|
if (DeserializationListener)
|
|
|
|
DeserializationListener->SelectorRead(ID, SelectorsLoaded[ID - 1]);
|
2009-04-25 21:48:32 +04:00
|
|
|
}
|
|
|
|
|
2010-08-05 00:40:17 +04:00
|
|
|
return SelectorsLoaded[ID - 1];
|
2009-04-23 14:39:46 +04:00
|
|
|
}
|
|
|
|
|
2011-07-28 18:41:43 +04:00
|
|
|
Selector ASTReader::GetExternalSelector(serialization::SelectorID ID) {
|
2010-04-06 21:30:22 +04:00
|
|
|
return DecodeSelector(ID);
|
|
|
|
}
|
|
|
|
|
2010-08-19 03:56:43 +04:00
|
|
|
uint32_t ASTReader::GetNumExternalSelectors() {
|
2010-08-05 00:40:17 +04:00
|
|
|
// ID 0 (the null selector) is considered an external selector.
|
|
|
|
return getTotalNumSelectors() + 1;
|
2010-04-06 21:30:22 +04:00
|
|
|
}
|
|
|
|
|
2011-08-04 03:28:44 +04:00
|
|
|
serialization::SelectorID
|
|
|
|
ASTReader::getGlobalSelectorID(Module &M, unsigned LocalID) const {
|
|
|
|
if (LocalID < NUM_PREDEF_SELECTOR_IDS)
|
|
|
|
return LocalID;
|
|
|
|
|
|
|
|
ContinuousRangeMap<uint32_t, int, 2>::iterator I
|
|
|
|
= M.SelectorRemap.find(LocalID - NUM_PREDEF_SELECTOR_IDS);
|
|
|
|
assert(I != M.SelectorRemap.end()
|
|
|
|
&& "Invalid index into identifier index remap");
|
|
|
|
|
|
|
|
return LocalID + I->second;
|
2011-07-28 18:41:43 +04:00
|
|
|
}
|
|
|
|
|
2009-09-09 19:08:12 +04:00
|
|
|
DeclarationName
|
2011-07-22 20:00:58 +04:00
|
|
|
ASTReader::ReadDeclarationName(Module &F,
|
2011-07-22 04:38:23 +04:00
|
|
|
const RecordData &Record, unsigned &Idx) {
|
2009-04-10 02:27:44 +04:00
|
|
|
DeclarationName::NameKind Kind = (DeclarationName::NameKind)Record[Idx++];
|
|
|
|
switch (Kind) {
|
|
|
|
case DeclarationName::Identifier:
|
2011-07-29 00:55:49 +04:00
|
|
|
return DeclarationName(GetIdentifierInfo(F, Record, Idx));
|
2009-04-10 02:27:44 +04:00
|
|
|
|
|
|
|
case DeclarationName::ObjCZeroArgSelector:
|
|
|
|
case DeclarationName::ObjCOneArgSelector:
|
|
|
|
case DeclarationName::ObjCMultiArgSelector:
|
2011-07-29 01:16:51 +04:00
|
|
|
return DeclarationName(ReadSelector(F, Record, Idx));
|
2009-04-10 02:27:44 +04:00
|
|
|
|
|
|
|
case DeclarationName::CXXConstructorName:
|
2009-04-28 01:45:14 +04:00
|
|
|
return Context->DeclarationNames.getCXXConstructorName(
|
2011-07-22 04:38:23 +04:00
|
|
|
Context->getCanonicalType(readType(F, Record, Idx)));
|
2009-04-10 02:27:44 +04:00
|
|
|
|
|
|
|
case DeclarationName::CXXDestructorName:
|
2009-04-28 01:45:14 +04:00
|
|
|
return Context->DeclarationNames.getCXXDestructorName(
|
2011-07-22 04:38:23 +04:00
|
|
|
Context->getCanonicalType(readType(F, Record, Idx)));
|
2009-04-10 02:27:44 +04:00
|
|
|
|
|
|
|
case DeclarationName::CXXConversionFunctionName:
|
2009-04-28 01:45:14 +04:00
|
|
|
return Context->DeclarationNames.getCXXConversionFunctionName(
|
2011-07-22 04:38:23 +04:00
|
|
|
Context->getCanonicalType(readType(F, Record, Idx)));
|
2009-04-10 02:27:44 +04:00
|
|
|
|
|
|
|
case DeclarationName::CXXOperatorName:
|
2009-04-28 01:45:14 +04:00
|
|
|
return Context->DeclarationNames.getCXXOperatorName(
|
2009-04-10 02:27:44 +04:00
|
|
|
(OverloadedOperatorKind)Record[Idx++]);
|
|
|
|
|
2009-11-29 10:34:05 +03:00
|
|
|
case DeclarationName::CXXLiteralOperatorName:
|
|
|
|
return Context->DeclarationNames.getCXXLiteralOperatorName(
|
2011-07-29 00:55:49 +04:00
|
|
|
GetIdentifierInfo(F, Record, Idx));
|
2009-11-29 10:34:05 +03:00
|
|
|
|
2009-04-10 02:27:44 +04:00
|
|
|
case DeclarationName::CXXUsingDirective:
|
|
|
|
return DeclarationName::getUsingDirectiveName();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Required to silence GCC warning
|
|
|
|
return DeclarationName();
|
|
|
|
}
|
2009-04-11 00:39:37 +04:00
|
|
|
|
2011-07-22 20:00:58 +04:00
|
|
|
void ASTReader::ReadDeclarationNameLoc(Module &F,
|
2010-10-15 22:21:24 +04:00
|
|
|
DeclarationNameLoc &DNLoc,
|
|
|
|
DeclarationName Name,
|
|
|
|
const RecordData &Record, unsigned &Idx) {
|
|
|
|
switch (Name.getNameKind()) {
|
|
|
|
case DeclarationName::CXXConstructorName:
|
|
|
|
case DeclarationName::CXXDestructorName:
|
|
|
|
case DeclarationName::CXXConversionFunctionName:
|
|
|
|
DNLoc.NamedType.TInfo = GetTypeSourceInfo(F, Record, Idx);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DeclarationName::CXXOperatorName:
|
|
|
|
DNLoc.CXXOperatorName.BeginOpNameLoc
|
|
|
|
= ReadSourceLocation(F, Record, Idx).getRawEncoding();
|
|
|
|
DNLoc.CXXOperatorName.EndOpNameLoc
|
|
|
|
= ReadSourceLocation(F, Record, Idx).getRawEncoding();
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DeclarationName::CXXLiteralOperatorName:
|
|
|
|
DNLoc.CXXLiteralOperatorName.OpNameLoc
|
|
|
|
= ReadSourceLocation(F, Record, Idx).getRawEncoding();
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DeclarationName::Identifier:
|
|
|
|
case DeclarationName::ObjCZeroArgSelector:
|
|
|
|
case DeclarationName::ObjCOneArgSelector:
|
|
|
|
case DeclarationName::ObjCMultiArgSelector:
|
|
|
|
case DeclarationName::CXXUsingDirective:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-07-22 20:00:58 +04:00
|
|
|
void ASTReader::ReadDeclarationNameInfo(Module &F,
|
2010-10-15 22:21:24 +04:00
|
|
|
DeclarationNameInfo &NameInfo,
|
|
|
|
const RecordData &Record, unsigned &Idx) {
|
2011-07-22 04:38:23 +04:00
|
|
|
NameInfo.setName(ReadDeclarationName(F, Record, Idx));
|
2010-10-15 22:21:24 +04:00
|
|
|
NameInfo.setLoc(ReadSourceLocation(F, Record, Idx));
|
|
|
|
DeclarationNameLoc DNLoc;
|
|
|
|
ReadDeclarationNameLoc(F, DNLoc, NameInfo.getName(), Record, Idx);
|
|
|
|
NameInfo.setInfo(DNLoc);
|
|
|
|
}
|
|
|
|
|
2011-07-22 20:00:58 +04:00
|
|
|
void ASTReader::ReadQualifierInfo(Module &F, QualifierInfo &Info,
|
2010-10-15 22:21:24 +04:00
|
|
|
const RecordData &Record, unsigned &Idx) {
|
2011-02-25 05:25:35 +03:00
|
|
|
Info.QualifierLoc = ReadNestedNameSpecifierLoc(F, Record, Idx);
|
2010-10-15 22:21:24 +04:00
|
|
|
unsigned NumTPLists = Record[Idx++];
|
|
|
|
Info.NumTemplParamLists = NumTPLists;
|
|
|
|
if (NumTPLists) {
|
|
|
|
Info.TemplParamLists = new (*Context) TemplateParameterList*[NumTPLists];
|
|
|
|
for (unsigned i=0; i != NumTPLists; ++i)
|
|
|
|
Info.TemplParamLists[i] = ReadTemplateParameterList(F, Record, Idx);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-06-19 23:29:09 +04:00
|
|
|
TemplateName
|
2011-07-22 20:00:58 +04:00
|
|
|
ASTReader::ReadTemplateName(Module &F, const RecordData &Record,
|
2011-01-15 09:45:20 +03:00
|
|
|
unsigned &Idx) {
|
2010-10-21 07:16:25 +04:00
|
|
|
TemplateName::NameKind Kind = (TemplateName::NameKind)Record[Idx++];
|
2010-06-19 23:29:09 +04:00
|
|
|
switch (Kind) {
|
|
|
|
case TemplateName::Template:
|
2011-07-22 02:35:25 +04:00
|
|
|
return TemplateName(ReadDeclAs<TemplateDecl>(F, Record, Idx));
|
2010-06-19 23:29:09 +04:00
|
|
|
|
|
|
|
case TemplateName::OverloadedTemplate: {
|
|
|
|
unsigned size = Record[Idx++];
|
|
|
|
UnresolvedSet<8> Decls;
|
|
|
|
while (size--)
|
2011-07-22 02:35:25 +04:00
|
|
|
Decls.addDecl(ReadDeclAs<NamedDecl>(F, Record, Idx));
|
2010-06-19 23:29:09 +04:00
|
|
|
|
|
|
|
return Context->getOverloadedTemplateName(Decls.begin(), Decls.end());
|
|
|
|
}
|
2010-10-21 07:16:25 +04:00
|
|
|
|
2010-06-19 23:29:09 +04:00
|
|
|
case TemplateName::QualifiedTemplate: {
|
2011-07-22 02:35:25 +04:00
|
|
|
NestedNameSpecifier *NNS = ReadNestedNameSpecifier(F, Record, Idx);
|
2010-06-19 23:29:09 +04:00
|
|
|
bool hasTemplKeyword = Record[Idx++];
|
2011-07-22 02:35:25 +04:00
|
|
|
TemplateDecl *Template = ReadDeclAs<TemplateDecl>(F, Record, Idx);
|
2010-06-19 23:29:09 +04:00
|
|
|
return Context->getQualifiedTemplateName(NNS, hasTemplKeyword, Template);
|
|
|
|
}
|
2010-10-21 07:16:25 +04:00
|
|
|
|
2010-06-19 23:29:09 +04:00
|
|
|
case TemplateName::DependentTemplate: {
|
2011-07-22 02:35:25 +04:00
|
|
|
NestedNameSpecifier *NNS = ReadNestedNameSpecifier(F, Record, Idx);
|
2010-06-19 23:29:09 +04:00
|
|
|
if (Record[Idx++]) // isIdentifier
|
|
|
|
return Context->getDependentTemplateName(NNS,
|
2011-07-29 00:55:49 +04:00
|
|
|
GetIdentifierInfo(F, Record,
|
|
|
|
Idx));
|
2010-06-19 23:29:09 +04:00
|
|
|
return Context->getDependentTemplateName(NNS,
|
2010-06-28 13:31:42 +04:00
|
|
|
(OverloadedOperatorKind)Record[Idx++]);
|
2010-06-19 23:29:09 +04:00
|
|
|
}
|
2011-06-30 12:33:18 +04:00
|
|
|
|
|
|
|
case TemplateName::SubstTemplateTemplateParm: {
|
|
|
|
TemplateTemplateParmDecl *param
|
2011-07-22 02:35:25 +04:00
|
|
|
= ReadDeclAs<TemplateTemplateParmDecl>(F, Record, Idx);
|
2011-06-30 12:33:18 +04:00
|
|
|
if (!param) return TemplateName();
|
|
|
|
TemplateName replacement = ReadTemplateName(F, Record, Idx);
|
|
|
|
return Context->getSubstTemplateTemplateParm(param, replacement);
|
|
|
|
}
|
2011-01-15 09:45:20 +03:00
|
|
|
|
|
|
|
case TemplateName::SubstTemplateTemplateParmPack: {
|
|
|
|
TemplateTemplateParmDecl *Param
|
2011-07-22 02:35:25 +04:00
|
|
|
= ReadDeclAs<TemplateTemplateParmDecl>(F, Record, Idx);
|
2011-01-15 09:45:20 +03:00
|
|
|
if (!Param)
|
|
|
|
return TemplateName();
|
|
|
|
|
|
|
|
TemplateArgument ArgPack = ReadTemplateArgument(F, Record, Idx);
|
|
|
|
if (ArgPack.getKind() != TemplateArgument::Pack)
|
|
|
|
return TemplateName();
|
|
|
|
|
|
|
|
return Context->getSubstTemplateTemplateParmPack(Param, ArgPack);
|
|
|
|
}
|
2010-06-19 23:29:09 +04:00
|
|
|
}
|
2010-10-21 07:16:25 +04:00
|
|
|
|
2010-06-19 23:29:09 +04:00
|
|
|
assert(0 && "Unhandled template name kind!");
|
|
|
|
return TemplateName();
|
|
|
|
}
|
|
|
|
|
|
|
|
TemplateArgument
|
2011-07-22 20:00:58 +04:00
|
|
|
ASTReader::ReadTemplateArgument(Module &F,
|
2010-07-23 02:43:28 +04:00
|
|
|
const RecordData &Record, unsigned &Idx) {
|
2011-01-05 21:58:31 +03:00
|
|
|
TemplateArgument::ArgKind Kind = (TemplateArgument::ArgKind)Record[Idx++];
|
|
|
|
switch (Kind) {
|
2010-06-19 23:29:09 +04:00
|
|
|
case TemplateArgument::Null:
|
|
|
|
return TemplateArgument();
|
|
|
|
case TemplateArgument::Type:
|
2011-07-22 04:38:23 +04:00
|
|
|
return TemplateArgument(readType(F, Record, Idx));
|
2010-06-19 23:29:09 +04:00
|
|
|
case TemplateArgument::Declaration:
|
2011-07-22 02:35:25 +04:00
|
|
|
return TemplateArgument(ReadDecl(F, Record, Idx));
|
2010-06-28 13:31:34 +04:00
|
|
|
case TemplateArgument::Integral: {
|
|
|
|
llvm::APSInt Value = ReadAPSInt(Record, Idx);
|
2011-07-22 04:38:23 +04:00
|
|
|
QualType T = readType(F, Record, Idx);
|
2010-06-28 13:31:34 +04:00
|
|
|
return TemplateArgument(Value, T);
|
|
|
|
}
|
2011-01-05 21:58:31 +03:00
|
|
|
case TemplateArgument::Template:
|
2011-01-15 09:45:20 +03:00
|
|
|
return TemplateArgument(ReadTemplateName(F, Record, Idx));
|
2011-01-05 21:58:31 +03:00
|
|
|
case TemplateArgument::TemplateExpansion: {
|
2011-01-15 09:45:20 +03:00
|
|
|
TemplateName Name = ReadTemplateName(F, Record, Idx);
|
2011-01-15 02:41:42 +03:00
|
|
|
llvm::Optional<unsigned> NumTemplateExpansions;
|
|
|
|
if (unsigned NumExpansions = Record[Idx++])
|
|
|
|
NumTemplateExpansions = NumExpansions - 1;
|
|
|
|
return TemplateArgument(Name, NumTemplateExpansions);
|
2011-01-05 20:40:24 +03:00
|
|
|
}
|
2010-06-19 23:29:09 +04:00
|
|
|
case TemplateArgument::Expression:
|
2010-10-05 19:59:54 +04:00
|
|
|
return TemplateArgument(ReadExpr(F));
|
2010-06-19 23:29:09 +04:00
|
|
|
case TemplateArgument::Pack: {
|
|
|
|
unsigned NumArgs = Record[Idx++];
|
2010-11-08 02:05:16 +03:00
|
|
|
TemplateArgument *Args = new (*Context) TemplateArgument[NumArgs];
|
|
|
|
for (unsigned I = 0; I != NumArgs; ++I)
|
|
|
|
Args[I] = ReadTemplateArgument(F, Record, Idx);
|
|
|
|
return TemplateArgument(Args, NumArgs);
|
2010-06-19 23:29:09 +04:00
|
|
|
}
|
|
|
|
}
|
2010-10-21 07:16:25 +04:00
|
|
|
|
2010-06-19 23:29:09 +04:00
|
|
|
assert(0 && "Unhandled template argument kind!");
|
|
|
|
return TemplateArgument();
|
|
|
|
}
|
|
|
|
|
2010-06-23 17:48:30 +04:00
|
|
|
TemplateParameterList *
|
2011-07-22 20:00:58 +04:00
|
|
|
ASTReader::ReadTemplateParameterList(Module &F,
|
2010-10-05 19:59:54 +04:00
|
|
|
const RecordData &Record, unsigned &Idx) {
|
|
|
|
SourceLocation TemplateLoc = ReadSourceLocation(F, Record, Idx);
|
|
|
|
SourceLocation LAngleLoc = ReadSourceLocation(F, Record, Idx);
|
|
|
|
SourceLocation RAngleLoc = ReadSourceLocation(F, Record, Idx);
|
2010-06-23 17:48:30 +04:00
|
|
|
|
|
|
|
unsigned NumParams = Record[Idx++];
|
2011-07-23 14:55:15 +04:00
|
|
|
SmallVector<NamedDecl *, 16> Params;
|
2010-06-23 17:48:30 +04:00
|
|
|
Params.reserve(NumParams);
|
|
|
|
while (NumParams--)
|
2011-07-22 02:35:25 +04:00
|
|
|
Params.push_back(ReadDeclAs<NamedDecl>(F, Record, Idx));
|
2010-10-21 07:16:25 +04:00
|
|
|
|
|
|
|
TemplateParameterList* TemplateParams =
|
2010-06-23 17:48:30 +04:00
|
|
|
TemplateParameterList::Create(*Context, TemplateLoc, LAngleLoc,
|
|
|
|
Params.data(), Params.size(), RAngleLoc);
|
|
|
|
return TemplateParams;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2010-08-19 03:56:43 +04:00
|
|
|
ASTReader::
|
2011-07-23 14:55:15 +04:00
|
|
|
ReadTemplateArgumentList(SmallVector<TemplateArgument, 8> &TemplArgs,
|
2011-07-22 20:00:58 +04:00
|
|
|
Module &F, const RecordData &Record,
|
2010-10-05 19:59:54 +04:00
|
|
|
unsigned &Idx) {
|
2010-06-23 17:48:30 +04:00
|
|
|
unsigned NumTemplateArgs = Record[Idx++];
|
|
|
|
TemplArgs.reserve(NumTemplateArgs);
|
|
|
|
while (NumTemplateArgs--)
|
2010-10-05 19:59:54 +04:00
|
|
|
TemplArgs.push_back(ReadTemplateArgument(F, Record, Idx));
|
2010-06-23 17:48:30 +04:00
|
|
|
}
|
|
|
|
|
2010-07-02 15:55:32 +04:00
|
|
|
/// \brief Read an UnresolvedSet structure.
|
2011-07-22 20:00:58 +04:00
|
|
|
void ASTReader::ReadUnresolvedSet(Module &F, UnresolvedSetImpl &Set,
|
2010-07-02 15:55:32 +04:00
|
|
|
const RecordData &Record, unsigned &Idx) {
|
|
|
|
unsigned NumDecls = Record[Idx++];
|
|
|
|
while (NumDecls--) {
|
2011-07-22 02:35:25 +04:00
|
|
|
NamedDecl *D = ReadDeclAs<NamedDecl>(F, Record, Idx);
|
2010-07-02 15:55:32 +04:00
|
|
|
AccessSpecifier AS = (AccessSpecifier)Record[Idx++];
|
|
|
|
Set.addDecl(D, AS);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-07-03 03:30:27 +04:00
|
|
|
CXXBaseSpecifier
|
2011-07-22 20:00:58 +04:00
|
|
|
ASTReader::ReadCXXBaseSpecifier(Module &F,
|
2010-07-26 20:56:01 +04:00
|
|
|
const RecordData &Record, unsigned &Idx) {
|
2010-07-03 03:30:27 +04:00
|
|
|
bool isVirtual = static_cast<bool>(Record[Idx++]);
|
|
|
|
bool isBaseOfClass = static_cast<bool>(Record[Idx++]);
|
|
|
|
AccessSpecifier AS = static_cast<AccessSpecifier>(Record[Idx++]);
|
2011-02-05 22:23:19 +03:00
|
|
|
bool inheritConstructors = static_cast<bool>(Record[Idx++]);
|
2010-10-05 19:59:54 +04:00
|
|
|
TypeSourceInfo *TInfo = GetTypeSourceInfo(F, Record, Idx);
|
|
|
|
SourceRange Range = ReadSourceRange(F, Record, Idx);
|
2011-01-04 01:36:02 +03:00
|
|
|
SourceLocation EllipsisLoc = ReadSourceLocation(F, Record, Idx);
|
2011-02-05 22:23:19 +03:00
|
|
|
CXXBaseSpecifier Result(Range, isVirtual, isBaseOfClass, AS, TInfo,
|
2011-01-04 01:36:02 +03:00
|
|
|
EllipsisLoc);
|
2011-02-05 22:23:19 +03:00
|
|
|
Result.setInheritConstructors(inheritConstructors);
|
|
|
|
return Result;
|
2010-07-03 03:30:27 +04:00
|
|
|
}
|
|
|
|
|
2011-01-08 23:30:50 +03:00
|
|
|
std::pair<CXXCtorInitializer **, unsigned>
|
2011-07-22 20:00:58 +04:00
|
|
|
ASTReader::ReadCXXCtorInitializers(Module &F, const RecordData &Record,
|
2011-01-08 23:30:50 +03:00
|
|
|
unsigned &Idx) {
|
|
|
|
CXXCtorInitializer **CtorInitializers = 0;
|
2010-08-09 14:54:12 +04:00
|
|
|
unsigned NumInitializers = Record[Idx++];
|
|
|
|
if (NumInitializers) {
|
|
|
|
ASTContext &C = *getContext();
|
|
|
|
|
2011-01-08 23:30:50 +03:00
|
|
|
CtorInitializers
|
|
|
|
= new (C) CXXCtorInitializer*[NumInitializers];
|
2010-08-09 14:54:12 +04:00
|
|
|
for (unsigned i=0; i != NumInitializers; ++i) {
|
|
|
|
TypeSourceInfo *BaseClassInfo = 0;
|
|
|
|
bool IsBaseVirtual = false;
|
|
|
|
FieldDecl *Member = 0;
|
2010-12-04 12:14:42 +03:00
|
|
|
IndirectFieldDecl *IndirectMember = 0;
|
2011-05-04 05:19:08 +04:00
|
|
|
CXXConstructorDecl *Target = 0;
|
2010-10-21 07:16:25 +04:00
|
|
|
|
2011-05-04 05:19:08 +04:00
|
|
|
CtorInitializerType Type = (CtorInitializerType)Record[Idx++];
|
|
|
|
switch (Type) {
|
|
|
|
case CTOR_INITIALIZER_BASE:
|
2010-10-05 19:59:54 +04:00
|
|
|
BaseClassInfo = GetTypeSourceInfo(F, Record, Idx);
|
2010-08-09 14:54:12 +04:00
|
|
|
IsBaseVirtual = Record[Idx++];
|
2011-05-04 05:19:08 +04:00
|
|
|
break;
|
|
|
|
|
|
|
|
case CTOR_INITIALIZER_DELEGATING:
|
2011-07-22 02:35:25 +04:00
|
|
|
Target = ReadDeclAs<CXXConstructorDecl>(F, Record, Idx);
|
2011-05-04 05:19:08 +04:00
|
|
|
break;
|
|
|
|
|
|
|
|
case CTOR_INITIALIZER_MEMBER:
|
2011-07-22 02:35:25 +04:00
|
|
|
Member = ReadDeclAs<FieldDecl>(F, Record, Idx);
|
2011-05-04 05:19:08 +04:00
|
|
|
break;
|
|
|
|
|
|
|
|
case CTOR_INITIALIZER_INDIRECT_MEMBER:
|
2011-07-22 02:35:25 +04:00
|
|
|
IndirectMember = ReadDeclAs<IndirectFieldDecl>(F, Record, Idx);
|
2011-05-04 05:19:08 +04:00
|
|
|
break;
|
2010-08-09 14:54:12 +04:00
|
|
|
}
|
2011-05-04 05:19:08 +04:00
|
|
|
|
2011-01-04 03:32:56 +03:00
|
|
|
SourceLocation MemberOrEllipsisLoc = ReadSourceLocation(F, Record, Idx);
|
2010-10-05 19:59:54 +04:00
|
|
|
Expr *Init = ReadExpr(F);
|
|
|
|
SourceLocation LParenLoc = ReadSourceLocation(F, Record, Idx);
|
|
|
|
SourceLocation RParenLoc = ReadSourceLocation(F, Record, Idx);
|
2010-08-09 14:54:12 +04:00
|
|
|
bool IsWritten = Record[Idx++];
|
|
|
|
unsigned SourceOrderOrNumArrayIndices;
|
2011-07-23 14:55:15 +04:00
|
|
|
SmallVector<VarDecl *, 8> Indices;
|
2010-08-09 14:54:12 +04:00
|
|
|
if (IsWritten) {
|
|
|
|
SourceOrderOrNumArrayIndices = Record[Idx++];
|
|
|
|
} else {
|
|
|
|
SourceOrderOrNumArrayIndices = Record[Idx++];
|
|
|
|
Indices.reserve(SourceOrderOrNumArrayIndices);
|
|
|
|
for (unsigned i=0; i != SourceOrderOrNumArrayIndices; ++i)
|
2011-07-22 02:35:25 +04:00
|
|
|
Indices.push_back(ReadDeclAs<VarDecl>(F, Record, Idx));
|
2010-08-09 14:54:12 +04:00
|
|
|
}
|
2010-10-21 07:16:25 +04:00
|
|
|
|
2011-01-08 23:30:50 +03:00
|
|
|
CXXCtorInitializer *BOMInit;
|
2011-05-04 05:19:08 +04:00
|
|
|
if (Type == CTOR_INITIALIZER_BASE) {
|
2011-01-08 23:30:50 +03:00
|
|
|
BOMInit = new (C) CXXCtorInitializer(C, BaseClassInfo, IsBaseVirtual,
|
|
|
|
LParenLoc, Init, RParenLoc,
|
|
|
|
MemberOrEllipsisLoc);
|
2011-05-04 05:19:08 +04:00
|
|
|
} else if (Type == CTOR_INITIALIZER_DELEGATING) {
|
|
|
|
BOMInit = new (C) CXXCtorInitializer(C, MemberOrEllipsisLoc, LParenLoc,
|
|
|
|
Target, Init, RParenLoc);
|
2010-08-09 14:54:12 +04:00
|
|
|
} else if (IsWritten) {
|
2010-12-04 12:14:42 +03:00
|
|
|
if (Member)
|
2011-01-08 23:30:50 +03:00
|
|
|
BOMInit = new (C) CXXCtorInitializer(C, Member, MemberOrEllipsisLoc,
|
|
|
|
LParenLoc, Init, RParenLoc);
|
2010-12-04 12:14:42 +03:00
|
|
|
else
|
2011-01-08 23:30:50 +03:00
|
|
|
BOMInit = new (C) CXXCtorInitializer(C, IndirectMember,
|
|
|
|
MemberOrEllipsisLoc, LParenLoc,
|
|
|
|
Init, RParenLoc);
|
2010-08-09 14:54:12 +04:00
|
|
|
} else {
|
2011-01-08 23:30:50 +03:00
|
|
|
BOMInit = CXXCtorInitializer::Create(C, Member, MemberOrEllipsisLoc,
|
|
|
|
LParenLoc, Init, RParenLoc,
|
|
|
|
Indices.data(), Indices.size());
|
2010-08-09 14:54:12 +04:00
|
|
|
}
|
|
|
|
|
2010-09-06 23:04:27 +04:00
|
|
|
if (IsWritten)
|
|
|
|
BOMInit->setSourceOrder(SourceOrderOrNumArrayIndices);
|
2011-01-08 23:30:50 +03:00
|
|
|
CtorInitializers[i] = BOMInit;
|
2010-08-09 14:54:12 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-01-08 23:30:50 +03:00
|
|
|
return std::make_pair(CtorInitializers, NumInitializers);
|
2010-08-09 14:54:12 +04:00
|
|
|
}
|
|
|
|
|
2010-05-08 01:43:38 +04:00
|
|
|
NestedNameSpecifier *
|
2011-07-22 20:00:58 +04:00
|
|
|
ASTReader::ReadNestedNameSpecifier(Module &F,
|
2011-07-22 02:35:25 +04:00
|
|
|
const RecordData &Record, unsigned &Idx) {
|
2010-05-08 01:43:38 +04:00
|
|
|
unsigned N = Record[Idx++];
|
|
|
|
NestedNameSpecifier *NNS = 0, *Prev = 0;
|
|
|
|
for (unsigned I = 0; I != N; ++I) {
|
|
|
|
NestedNameSpecifier::SpecifierKind Kind
|
|
|
|
= (NestedNameSpecifier::SpecifierKind)Record[Idx++];
|
|
|
|
switch (Kind) {
|
|
|
|
case NestedNameSpecifier::Identifier: {
|
2011-07-29 00:55:49 +04:00
|
|
|
IdentifierInfo *II = GetIdentifierInfo(F, Record, Idx);
|
2010-05-08 01:43:38 +04:00
|
|
|
NNS = NestedNameSpecifier::Create(*Context, Prev, II);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case NestedNameSpecifier::Namespace: {
|
2011-07-22 02:35:25 +04:00
|
|
|
NamespaceDecl *NS = ReadDeclAs<NamespaceDecl>(F, Record, Idx);
|
2010-05-08 01:43:38 +04:00
|
|
|
NNS = NestedNameSpecifier::Create(*Context, Prev, NS);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2011-02-24 05:36:08 +03:00
|
|
|
case NestedNameSpecifier::NamespaceAlias: {
|
2011-07-22 02:35:25 +04:00
|
|
|
NamespaceAliasDecl *Alias = ReadDeclAs<NamespaceAliasDecl>(F, Record, Idx);
|
2011-02-24 05:36:08 +03:00
|
|
|
NNS = NestedNameSpecifier::Create(*Context, Prev, Alias);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2010-05-08 01:43:38 +04:00
|
|
|
case NestedNameSpecifier::TypeSpec:
|
|
|
|
case NestedNameSpecifier::TypeSpecWithTemplate: {
|
2011-07-22 04:38:23 +04:00
|
|
|
const Type *T = readType(F, Record, Idx).getTypePtrOrNull();
|
2010-12-10 20:03:06 +03:00
|
|
|
if (!T)
|
|
|
|
return 0;
|
|
|
|
|
2010-05-08 01:43:38 +04:00
|
|
|
bool Template = Record[Idx++];
|
|
|
|
NNS = NestedNameSpecifier::Create(*Context, Prev, Template, T);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case NestedNameSpecifier::Global: {
|
|
|
|
NNS = NestedNameSpecifier::GlobalSpecifier(*Context);
|
|
|
|
// No associated value, and there can't be a prefix.
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2010-07-07 19:46:30 +04:00
|
|
|
Prev = NNS;
|
2010-05-08 01:43:38 +04:00
|
|
|
}
|
|
|
|
return NNS;
|
|
|
|
}
|
|
|
|
|
2011-02-25 03:36:19 +03:00
|
|
|
NestedNameSpecifierLoc
|
2011-07-22 20:00:58 +04:00
|
|
|
ASTReader::ReadNestedNameSpecifierLoc(Module &F, const RecordData &Record,
|
2011-02-25 03:36:19 +03:00
|
|
|
unsigned &Idx) {
|
|
|
|
unsigned N = Record[Idx++];
|
2011-03-01 02:58:31 +03:00
|
|
|
NestedNameSpecifierLocBuilder Builder;
|
2011-02-25 03:36:19 +03:00
|
|
|
for (unsigned I = 0; I != N; ++I) {
|
|
|
|
NestedNameSpecifier::SpecifierKind Kind
|
|
|
|
= (NestedNameSpecifier::SpecifierKind)Record[Idx++];
|
|
|
|
switch (Kind) {
|
|
|
|
case NestedNameSpecifier::Identifier: {
|
2011-07-29 00:55:49 +04:00
|
|
|
IdentifierInfo *II = GetIdentifierInfo(F, Record, Idx);
|
2011-02-25 03:36:19 +03:00
|
|
|
SourceRange Range = ReadSourceRange(F, Record, Idx);
|
2011-03-01 02:58:31 +03:00
|
|
|
Builder.Extend(*Context, II, Range.getBegin(), Range.getEnd());
|
2011-02-25 03:36:19 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case NestedNameSpecifier::Namespace: {
|
2011-07-22 02:35:25 +04:00
|
|
|
NamespaceDecl *NS = ReadDeclAs<NamespaceDecl>(F, Record, Idx);
|
2011-02-25 03:36:19 +03:00
|
|
|
SourceRange Range = ReadSourceRange(F, Record, Idx);
|
2011-03-01 02:58:31 +03:00
|
|
|
Builder.Extend(*Context, NS, Range.getBegin(), Range.getEnd());
|
2011-02-25 03:36:19 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case NestedNameSpecifier::NamespaceAlias: {
|
2011-07-22 02:35:25 +04:00
|
|
|
NamespaceAliasDecl *Alias = ReadDeclAs<NamespaceAliasDecl>(F, Record, Idx);
|
2011-02-25 03:36:19 +03:00
|
|
|
SourceRange Range = ReadSourceRange(F, Record, Idx);
|
2011-03-01 02:58:31 +03:00
|
|
|
Builder.Extend(*Context, Alias, Range.getBegin(), Range.getEnd());
|
2011-02-25 03:36:19 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case NestedNameSpecifier::TypeSpec:
|
|
|
|
case NestedNameSpecifier::TypeSpecWithTemplate: {
|
|
|
|
bool Template = Record[Idx++];
|
|
|
|
TypeSourceInfo *T = GetTypeSourceInfo(F, Record, Idx);
|
|
|
|
if (!T)
|
|
|
|
return NestedNameSpecifierLoc();
|
|
|
|
SourceLocation ColonColonLoc = ReadSourceLocation(F, Record, Idx);
|
2011-03-01 02:58:31 +03:00
|
|
|
|
|
|
|
// FIXME: 'template' keyword location not saved anywhere, so we fake it.
|
|
|
|
Builder.Extend(*Context,
|
|
|
|
Template? T->getTypeLoc().getBeginLoc() : SourceLocation(),
|
|
|
|
T->getTypeLoc(), ColonColonLoc);
|
2011-02-25 03:36:19 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case NestedNameSpecifier::Global: {
|
|
|
|
SourceLocation ColonColonLoc = ReadSourceLocation(F, Record, Idx);
|
2011-03-01 02:58:31 +03:00
|
|
|
Builder.MakeGlobal(*Context, ColonColonLoc);
|
2011-02-25 03:36:19 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-03-01 02:58:31 +03:00
|
|
|
return Builder.getWithLocInContext(*Context);
|
2011-02-25 03:36:19 +03:00
|
|
|
}
|
|
|
|
|
2010-05-08 01:43:38 +04:00
|
|
|
SourceRange
|
2011-07-22 20:00:58 +04:00
|
|
|
ASTReader::ReadSourceRange(Module &F, const RecordData &Record,
|
2010-10-05 19:59:54 +04:00
|
|
|
unsigned &Idx) {
|
|
|
|
SourceLocation beg = ReadSourceLocation(F, Record, Idx);
|
|
|
|
SourceLocation end = ReadSourceLocation(F, Record, Idx);
|
2010-06-02 19:47:10 +04:00
|
|
|
return SourceRange(beg, end);
|
2010-05-08 01:43:38 +04:00
|
|
|
}
|
|
|
|
|
2009-04-13 22:14:40 +04:00
|
|
|
/// \brief Read an integral value
|
2010-08-19 03:56:43 +04:00
|
|
|
llvm::APInt ASTReader::ReadAPInt(const RecordData &Record, unsigned &Idx) {
|
2009-04-13 22:14:40 +04:00
|
|
|
unsigned BitWidth = Record[Idx++];
|
|
|
|
unsigned NumWords = llvm::APInt::getNumWords(BitWidth);
|
|
|
|
llvm::APInt Result(BitWidth, NumWords, &Record[Idx]);
|
|
|
|
Idx += NumWords;
|
|
|
|
return Result;
|
|
|
|
}
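// Note (added for clarity, not in the original source): an APInt is encoded
// as its bit width followed by llvm::APInt::getNumWords(BitWidth) 64-bit
// words, so a 128-bit value occupies 1 + 2 = 3 record slots.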
|
|
|
|
|
|
|
|
/// \brief Read a signed integral value
|
2010-08-19 03:56:43 +04:00
|
|
|
llvm::APSInt ASTReader::ReadAPSInt(const RecordData &Record, unsigned &Idx) {
|
2009-04-13 22:14:40 +04:00
|
|
|
bool isUnsigned = Record[Idx++];
|
|
|
|
return llvm::APSInt(ReadAPInt(Record, Idx), isUnsigned);
|
|
|
|
}
|
|
|
|
|
2009-04-15 01:55:33 +04:00
|
|
|
/// \brief Read a floating-point value
|
2010-08-19 03:56:43 +04:00
|
|
|
llvm::APFloat ASTReader::ReadAPFloat(const RecordData &Record, unsigned &Idx) {
|
2009-04-15 01:55:33 +04:00
|
|
|
return llvm::APFloat(ReadAPInt(Record, Idx));
|
|
|
|
}
|
|
|
|
|
2009-04-16 01:30:51 +04:00
|
|
|
/// \brief Read a string
|
2010-08-19 03:56:43 +04:00
|
|
|
std::string ASTReader::ReadString(const RecordData &Record, unsigned &Idx) {
|
2009-04-16 01:30:51 +04:00
|
|
|
unsigned Len = Record[Idx++];
|
2009-05-21 13:52:38 +04:00
|
|
|
std::string Result(Record.data() + Idx, Record.data() + Idx + Len);
|
2009-04-16 01:30:51 +04:00
|
|
|
Idx += Len;
|
|
|
|
return Result;
|
|
|
|
}
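// Note (added for clarity, not in the original source): strings are encoded
// as a length followed by one record entry per character, so a string of
// length N occupies 1 + N record slots.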
|
|
|
|
|
Implement a new 'availability' attribute, that allows one to specify
which versions of an OS provide a certain facility. For example,
void foo()
__attribute__((availability(macosx,introduced=10.2,deprecated=10.4,obsoleted=10.6)));
says that the function "foo" was introduced in 10.2, deprecated in
10.4, and completely obsoleted in 10.6. This attribute ties in with
the deployment targets (e.g., -mmacosx-version-min=10.1 specifies that
we want to deploy back to Mac OS X 10.1). There are several concrete
behaviors that this attribute enables, as illustrated with the
function foo() above:
- If we choose a deployment target >= Mac OS X 10.4, uses of "foo"
will result in a deprecation warning, as if we had placed
attribute((deprecated)) on it (but with a better diagnostic)
- If we choose a deployment target >= Mac OS X 10.6, uses of "foo"
will result in an "unavailable" warning (in C)/error (in C++), as
if we had placed attribute((unavailable)) on it
- If we choose a deployment target prior to 10.2, foo() is
weak-imported (if it is a kind of entity that can be weak
imported), as if we had placed the weak_import attribute on it.
Naturally, there can be multiple availability attributes on a
declaration, for different platforms; only the current platform
matters when checking availability attributes.
The only platforms this attribute currently works for are "ios" and
"macosx", since we already have -mxxxx-version-min flags for them and we
have experience there with macro tricks translating down to the
deprecated/unavailable/weak_import attributes. The end goal is to open
this up to other platforms, and even extension to other "platforms"
that are really libraries (say, through a #pragma clang
define_system), but that hasn't yet been designed and we may want to
shake out more issues with this narrower problem first.
Addresses <rdar://problem/6690412>.
As a drive-by bug-fix, if an entity is both deprecated and
unavailable, we only emit the "unavailable" diagnostic.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@128127 91177308-0d34-0410-b5e6-96231b3b80d8
2011-03-23 03:50:03 +03:00
|
|
|
VersionTuple ASTReader::ReadVersionTuple(const RecordData &Record,
|
|
|
|
unsigned &Idx) {
|
|
|
|
unsigned Major = Record[Idx++];
|
|
|
|
unsigned Minor = Record[Idx++];
|
|
|
|
unsigned Subminor = Record[Idx++];
|
|
|
|
if (Minor == 0)
|
|
|
|
return VersionTuple(Major);
|
|
|
|
if (Subminor == 0)
|
|
|
|
return VersionTuple(Major, Minor - 1);
|
|
|
|
return VersionTuple(Major, Minor - 1, Subminor - 1);
|
|
|
|
}
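// Note (added for clarity, not in the original source): Minor and Subminor
// are stored with a +1 bias so that 0 can mean "not present". For example, a
// record of {10, 3, 0} decodes to VersionTuple(10, 2), i.e. version 10.2 with
// no subminor component.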
|
|
|
|
|
2011-07-22 20:00:58 +04:00
|
|
|
CXXTemporary *ASTReader::ReadCXXTemporary(Module &F,
|
2011-07-22 02:35:25 +04:00
|
|
|
const RecordData &Record,
|
2010-05-10 04:25:06 +04:00
|
|
|
unsigned &Idx) {
|
2011-07-22 02:35:25 +04:00
|
|
|
CXXDestructorDecl *Decl = ReadDeclAs<CXXDestructorDecl>(F, Record, Idx);
|
2010-05-10 04:25:06 +04:00
|
|
|
return CXXTemporary::Create(*Context, Decl);
|
|
|
|
}
|
|
|
|
|
2010-08-19 03:56:43 +04:00
|
|
|
DiagnosticBuilder ASTReader::Diag(unsigned DiagID) {
|
2009-04-11 03:10:45 +04:00
|
|
|
return Diag(SourceLocation(), DiagID);
|
|
|
|
}
|
|
|
|
|
2010-08-19 03:56:43 +04:00
|
|
|
DiagnosticBuilder ASTReader::Diag(SourceLocation Loc, unsigned DiagID) {
|
2010-11-18 23:06:41 +03:00
|
|
|
return Diags.Report(Loc, DiagID);
|
2009-04-11 00:39:37 +04:00
|
|
|
}
|
2009-04-17 04:04:06 +04:00
|
|
|
|
Lazy deserialization of the declaration chains associated with
identifiers from a precompiled header.
This patch changes the primary name lookup method for entities within
a precompiled header. Previously, we would load all of the names of
declarations at translation unit scope into a large DenseMap (inside
the TranslationUnitDecl's DeclContext), and then perform a special
"last resort" lookup into this DeclContext when we knew there was a
PCH file (see Sema::LookupName). Now, when we see an identifier named
for the first time, we load all of the declarations with that name
that are visible from the translation unit into the IdentifierInfo's
chain of declarations. Thus, the explicit "look into the translation
unit's DeclContext" code is gone, and Sema effectively uses the same
IdentifierInfo-based name lookup mechanism whether we are using a PCH
file or not.
This approach should help PCH scale with the size of the input program
rather than the size of the PCH file. The "Hello, World!" application
with Carbon.h as a PCH file now loads 20% of the identifiers in the
PCH file rather than 85% of the identifiers.
90% of the 20% of identifiers loaded are actually loaded when we
deserialize the preprocessor state. The next step is to make the
preprocessor load macros lazily, which should drastically reduce the
number of types, declarations, and identifiers loaded for "Hello,
World".
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@69737 91177308-0d34-0410-b5e6-96231b3b80d8
2009-04-22 02:25:48 +04:00
|
|
|
/// \brief Retrieve the identifier table associated with the
|
|
|
|
/// preprocessor.
|
2010-08-19 03:56:43 +04:00
|
|
|
IdentifierTable &ASTReader::getIdentifierTable() {
|
2009-06-19 04:03:23 +04:00
|
|
|
assert(PP && "Forgot to set Preprocessor ?");
|
|
|
|
return PP->getIdentifierTable();
|
2009-04-22 02:25:48 +04:00
|
|
|
}
|
|
|
|
|
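// Illustrative note (not from the original source): per the commit message
// above, the declarations visible under a name are attached to that
// identifier only the first time the identifier is looked up, instead of
// being loaded eagerly for the whole PCH. A conceptual standalone sketch of
// that on-demand pattern, with hypothetical names (LazyIdentifierMap,
// loadDeclIDs) standing in for the real IdentifierInfo machinery:
//
//   #include <functional>
//   #include <string>
//   #include <unordered_map>
//   #include <vector>
//
//   struct LazyIdentifierMap {
//     // Callback that deserializes the decl IDs for a name from the AST file.
//     std::function<std::vector<unsigned>(const std::string &)> loadDeclIDs;
//     std::unordered_map<std::string, std::vector<unsigned> > Chains;
//
//     const std::vector<unsigned> &lookup(const std::string &Name) {
//       auto It = Chains.find(Name);
//       if (It == Chains.end())            // first use: pay the cost now
//         It = Chains.insert(std::make_pair(Name, loadDeclIDs(Name))).first;
//       return It->second;                 // later uses: already cached
//     }
//   };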
2009-04-17 04:04:06 +04:00
|
|
|
/// \brief Record that the given ID maps to the given switch-case
|
|
|
|
/// statement.
|
2010-08-19 03:56:43 +04:00
|
|
|
void ASTReader::RecordSwitchCaseID(SwitchCase *SC, unsigned ID) {
|
2009-04-17 04:04:06 +04:00
|
|
|
assert(SwitchCaseStmts[ID] == 0 && "Already have a SwitchCase with this ID");
|
|
|
|
SwitchCaseStmts[ID] = SC;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// \brief Retrieve the switch-case statement with the given ID.
|
2010-08-19 03:56:43 +04:00
|
|
|
SwitchCase *ASTReader::getSwitchCaseWithID(unsigned ID) {
|
2009-04-17 04:04:06 +04:00
|
|
|
assert(SwitchCaseStmts[ID] != 0 && "No SwitchCase with this ID");
|
|
|
|
return SwitchCaseStmts[ID];
|
|
|
|
}
|
2009-04-17 22:18:49 +04:00
|
|
|
|
2010-10-28 13:29:32 +04:00
|
|
|
void ASTReader::ClearSwitchCaseIDs() {
|
|
|
|
SwitchCaseStmts.clear();
|
|
|
|
}
|
|
|
|
|
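// Illustrative note (not from the original source): while a statement body is
// deserialized, case/default statements are registered under small integer
// IDs so that the enclosing SwitchStmt, read later in the same record stream,
// can be wired back to them; ClearSwitchCaseIDs lets those small IDs be
// reused afterwards. A minimal standalone sketch of that register-then-resolve
// pattern:
//
//   #include <cassert>
//   #include <map>
//
//   struct FakeSwitchCase { int CaseValue; };
//
//   static std::map<unsigned, FakeSwitchCase *> CaseByID;
//
//   static void recordCase(FakeSwitchCase *SC, unsigned ID) {
//     assert(!CaseByID.count(ID) && "Already have a case with this ID");
//     CaseByID[ID] = SC;
//   }
//
//   static FakeSwitchCase *resolveCase(unsigned ID) {
//     assert(CaseByID.count(ID) && "No case with this ID");
//     return CaseByID[ID];
//   }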
2010-08-19 03:56:43 +04:00
|
|
|
void ASTReader::FinishedDeserializing() {
|
2010-07-30 14:03:16 +04:00
|
|
|
assert(NumCurrentElementsDeserializing &&
|
|
|
|
"FinishedDeserializing not paired with StartedDeserializing");
|
|
|
|
if (NumCurrentElementsDeserializing == 1) {
|
2009-07-06 22:54:52 +04:00
|
|
|
// If any identifiers with corresponding top-level declarations have
|
|
|
|
// been loaded, load those declarations now.
|
2010-07-30 14:03:16 +04:00
|
|
|
while (!PendingIdentifierInfos.empty()) {
|
|
|
|
SetGloballyVisibleDecls(PendingIdentifierInfos.front().II,
|
|
|
|
PendingIdentifierInfos.front().DeclIDs, true);
|
|
|
|
PendingIdentifierInfos.pop_front();
|
2009-07-06 22:54:52 +04:00
|
|
|
}
|
2010-07-07 19:46:26 +04:00
|
|
|
|
2011-02-12 10:50:47 +03:00
|
|
|
// Ready to load previous declarations of Decls that were delayed.
|
|
|
|
while (!PendingPreviousDecls.empty()) {
|
|
|
|
loadAndAttachPreviousDecl(PendingPreviousDecls.front().first,
|
|
|
|
PendingPreviousDecls.front().second);
|
|
|
|
PendingPreviousDecls.pop_front();
|
|
|
|
}
|
|
|
|
|
2010-07-07 19:46:26 +04:00
|
|
|
// We are not in recursive loading, so it's safe to pass the "interesting"
|
|
|
|
// decls to the consumer.
|
2010-07-30 14:03:16 +04:00
|
|
|
if (Consumer)
|
|
|
|
PassInterestingDeclsToConsumer();
|
2010-10-24 21:26:31 +04:00
|
|
|
|
|
|
|
assert(PendingForwardRefs.size() == 0 &&
|
|
|
|
"Some forward refs did not get linked to the definition!");
|
2009-07-06 22:54:52 +04:00
|
|
|
}
|
2010-07-30 14:03:16 +04:00
|
|
|
--NumCurrentElementsDeserializing;
|
2009-07-06 22:54:52 +04:00
|
|
|
}
|
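// Illustrative note (not from the original source): FinishedDeserializing
// pairs with StartedDeserializing as a nesting counter, so work queued while
// recursively deserializing one entity (pending identifiers, pending previous
// declarations, "interesting" decls for the consumer) is flushed exactly once,
// when the outermost level unwinds. A standalone sketch of that pattern:
//
//   #include <deque>
//   #include <functional>
//
//   struct DeferredWork {
//     unsigned Depth;
//     std::deque<std::function<void()> > Pending;
//
//     DeferredWork() : Depth(0) {}
//     void start() { ++Depth; }
//     void finish() {
//       if (Depth == 1) {                  // outermost call: safe to flush
//         while (!Pending.empty()) {       // flushing may queue more work
//           Pending.front()();
//           Pending.pop_front();
//         }
//       }
//       --Depth;
//     }
//   };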
2010-08-19 04:28:17 +04:00
|
|
|
|
2010-08-24 04:50:04 +04:00
|
|
|
ASTReader::ASTReader(Preprocessor &PP, ASTContext *Context,
|
2011-07-22 20:35:34 +04:00
|
|
|
StringRef isysroot, bool DisableValidation,
|
2011-02-05 22:42:43 +03:00
|
|
|
bool DisableStatCache)
|
2010-08-24 04:50:04 +04:00
|
|
|
: Listener(new PCHValidator(PP, *this)), DeserializationListener(0),
|
|
|
|
SourceMgr(PP.getSourceManager()), FileMgr(PP.getFileManager()),
|
|
|
|
Diags(PP.getDiagnostics()), SemaObj(0), PP(&PP), Context(Context),
|
2011-07-28 21:20:23 +04:00
|
|
|
Consumer(0), ModuleMgr(FileMgr.getFileSystemOptions()),
|
|
|
|
RelocatablePCH(false), isysroot(isysroot),
|
Revamp the SourceManager to separate the representation of parsed
source locations from source locations loaded from an AST/PCH file.
Previously, loading an AST/PCH file involved carefully pre-allocating
space at the beginning of the source manager for the source locations
and FileIDs that correspond to the prefix, and then appending the
source locations/FileIDs used for parsing the remaining translation
unit. This design forced us into loading PCH files early, as a prefix,
which has become a rather significant limitation.
This patch splits the SourceManager space into two parts: for source
location "addresses", the lower values (growing upward) are used to
describe parsed code, while upper values (growing downward) are used
for source locations loaded from AST/PCH files. Similarly, positive
FileIDs are used to describe parsed code while negative FileIDs are
used to file/macro locations loaded from AST/PCH files. As a result,
we can load PCH/AST files even during parsing, making various
improvements in the future possible, e.g., teaching #include <foo.h> to
look for and load <foo.h.gch> if it happens to be already available.
This patch was originally written by Sebastian Redl, then brought
forward to the modern age by Jonathan Turner, and finally
polished/finished by me to be committed.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@135484 91177308-0d34-0410-b5e6-96231b3b80d8
2011-07-19 20:10:42 +04:00
|
|
|
DisableValidation(DisableValidation),
|
2011-02-05 22:42:43 +03:00
|
|
|
DisableStatCache(DisableStatCache), NumStatHits(0), NumStatMisses(0),
|
2011-07-19 20:10:42 +04:00
|
|
|
NumSLocEntriesRead(0), TotalNumSLocEntries(0),
|
2011-02-05 22:42:43 +03:00
|
|
|
NumStatementsRead(0), TotalNumStatements(0), NumMacrosRead(0),
|
|
|
|
TotalNumMacros(0), NumSelectorsRead(0), NumMethodPoolEntriesRead(0),
|
|
|
|
NumMethodPoolMisses(0), TotalNumMethodPoolEntries(0),
|
|
|
|
NumLexicalDeclContextsRead(0), TotalLexicalDeclContexts(0),
|
2011-07-22 01:15:19 +04:00
|
|
|
NumVisibleDeclContextsRead(0), TotalVisibleDeclContexts(0),
|
|
|
|
TotalModulesSizeInBits(0), NumCurrentElementsDeserializing(0),
|
|
|
|
NumCXXBaseSpecifiersLoaded(0)
|
2011-02-05 22:42:43 +03:00
|
|
|
{
|
2011-07-19 20:10:42 +04:00
|
|
|
SourceMgr.setExternalSLocEntrySource(this);
|
2010-08-24 04:50:04 +04:00
|
|
|
}
|
|
|
|
|
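// Illustrative note (not from the original source): per the "Revamp the
// SourceManager" commit message above, source-location offsets for parsed
// code grow upward from the low end of the address space while offsets for
// entities loaded from AST/PCH files are handed out downward from the high
// end, which is what allows a PCH to be loaded even in the middle of parsing.
// A conceptual standalone sketch of such a two-sided allocator (the real
// bounds and bookkeeping live in SourceManager):
//
//   struct TwoSidedSLocSpace {
//     unsigned NextLocalOffset;      // parsed code: grows upward
//     unsigned NextLoadedOffset;     // loaded AST/PCH entries: grows downward
//
//     TwoSidedSLocSpace() : NextLocalOffset(1), NextLoadedOffset(0x7FFFFFFF) {}
//
//     unsigned allocateLocal(unsigned Size) {
//       unsigned Base = NextLocalOffset;
//       NextLocalOffset += Size;
//       return Base;
//     }
//     unsigned allocateLoaded(unsigned Size) {
//       NextLoadedOffset -= Size;    // block sits just below earlier loads
//       return NextLoadedOffset;
//     }
//   };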
|
|
|
ASTReader::ASTReader(SourceManager &SourceMgr, FileManager &FileMgr,
|
2011-07-22 20:35:34 +04:00
|
|
|
Diagnostic &Diags, StringRef isysroot,
|
2011-02-05 22:42:43 +03:00
|
|
|
bool DisableValidation, bool DisableStatCache)
|
2010-08-24 04:50:04 +04:00
|
|
|
: DeserializationListener(0), SourceMgr(SourceMgr), FileMgr(FileMgr),
|
2011-07-28 21:20:23 +04:00
|
|
|
Diags(Diags), SemaObj(0), PP(0), Context(0),
|
|
|
|
Consumer(0), ModuleMgr(FileMgr.getFileSystemOptions()),
|
2011-07-19 20:10:42 +04:00
|
|
|
RelocatablePCH(false), isysroot(isysroot),
|
|
|
|
DisableValidation(DisableValidation), DisableStatCache(DisableStatCache),
|
|
|
|
NumStatHits(0), NumStatMisses(0), NumSLocEntriesRead(0),
|
|
|
|
TotalNumSLocEntries(0), NumStatementsRead(0),
|
|
|
|
TotalNumStatements(0), NumMacrosRead(0), TotalNumMacros(0),
|
|
|
|
NumSelectorsRead(0), NumMethodPoolEntriesRead(0), NumMethodPoolMisses(0),
|
2010-09-23 00:19:08 +04:00
|
|
|
TotalNumMethodPoolEntries(0), NumLexicalDeclContextsRead(0),
|
|
|
|
TotalLexicalDeclContexts(0), NumVisibleDeclContextsRead(0),
|
2011-07-22 01:15:19 +04:00
|
|
|
TotalVisibleDeclContexts(0), TotalModulesSizeInBits(0),
|
|
|
|
NumCurrentElementsDeserializing(0), NumCXXBaseSpecifiersLoaded(0)
|
2011-07-19 20:10:42 +04:00
|
|
|
{
|
|
|
|
SourceMgr.setExternalSLocEntrySource(this);
|
2010-08-24 04:50:04 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
ASTReader::~ASTReader() {
|
|
|
|
// Delete all visible decl lookup tables
|
|
|
|
for (DeclContextOffsetsMap::iterator I = DeclContextOffsets.begin(),
|
|
|
|
E = DeclContextOffsets.end();
|
|
|
|
I != E; ++I) {
|
|
|
|
for (DeclContextInfos::iterator J = I->second.begin(), F = I->second.end();
|
|
|
|
J != F; ++J) {
|
|
|
|
if (J->NameLookupTableData)
|
|
|
|
delete static_cast<ASTDeclContextNameLookupTable*>(
|
|
|
|
J->NameLookupTableData);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for (DeclContextVisibleUpdatesPending::iterator
|
|
|
|
I = PendingVisibleUpdates.begin(),
|
|
|
|
E = PendingVisibleUpdates.end();
|
|
|
|
I != E; ++I) {
|
|
|
|
for (DeclContextVisibleUpdates::iterator J = I->second.begin(),
|
|
|
|
F = I->second.end();
|
|
|
|
J != F; ++J)
|
2011-08-03 19:48:04 +04:00
|
|
|
delete static_cast<ASTDeclContextNameLookupTable*>(J->first);
|
2010-08-24 04:50:04 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
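// Illustrative note (not from the original source): the name-lookup tables are
// held through void pointers (presumably so the reader's header does not need
// the on-disk hash table types), which is why the destructor above has to
// static_cast back to the concrete table type before deleting. A minimal
// standalone sketch of that type-erased ownership pattern, with a hypothetical
// LookupTable type:
//
//   struct LookupTable { /* on-disk hash table state */ };
//
//   struct TableOwner {
//     void *OpaqueTable;                                 // stored type-erased
//
//     TableOwner() : OpaqueTable(0) {}
//     ~TableOwner() {
//       delete static_cast<LookupTable *>(OpaqueTable);  // cast back to free
//     }
//   };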
2011-07-22 20:00:58 +04:00
|
|
|
Module::Module(ModuleKind Kind)
|
|
|
|
: Kind(Kind), SizeInBits(0), LocalNumSLocEntries(0), SLocEntryBaseID(0),
|
2011-07-19 20:10:42 +04:00
|
|
|
SLocEntryBaseOffset(0), SLocEntryOffsets(0),
|
|
|
|
SLocFileOffsets(0), LocalNumIdentifiers(0),
|
2011-08-02 14:56:51 +04:00
|
|
|
IdentifierOffsets(0), BaseIdentifierID(0), IdentifierTableData(0),
|
|
|
|
IdentifierLookupTable(0), BasePreprocessedEntityID(0),
|
|
|
|
LocalNumMacroDefinitions(0), MacroDefinitionOffsets(0),
|
|
|
|
BaseMacroDefinitionID(0), LocalNumHeaderFileInfos(0),
|
2011-07-19 20:10:42 +04:00
|
|
|
HeaderFileInfoTableData(0), HeaderFileInfoTable(0),
|
2011-07-28 08:50:02 +04:00
|
|
|
HeaderFileFrameworkStrings(0),
|
2011-08-02 14:56:51 +04:00
|
|
|
LocalNumSelectors(0), SelectorOffsets(0), BaseSelectorID(0),
|
2010-09-22 04:42:27 +04:00
|
|
|
SelectorLookupTableData(0), SelectorLookupTable(0), LocalNumDecls(0),
|
2011-08-02 14:56:51 +04:00
|
|
|
DeclOffsets(0), BaseDeclID(0),
|
|
|
|
LocalNumCXXBaseSpecifiers(0), CXXBaseSpecifiersOffsets(0),
|
Change the hashing function for DeclContext lookup within an AST file
by eliminating the type ID from constructor, destructor, and
conversion function names. There are several reasons for this change:
- A given type (say, int*) isn't guaranteed to have a single, unique
type ID within a chain of PCH files. Hence, we could end up hashing
based on the wrong type ID, causing name lookup to fail.
- The mapping from types back to type IDs required one DenseMap
entry for every type that was ever deserialized, which was an
unacceptable cost to support just the name lookup of constructors,
destructors, and conversion functions. Plus, this mapping could
never actually work with chained or multiple PCH, based on the first
bullet.
Once we have eliminated the type from the hash function, these
problems go away, as does my horrible "reverse type remap" hack, which
was doomed from the start (see bullet #1 above) and far too
complicated.
However, note that removing the type from the hash function means that
all constructors, destructors, and conversion functions have the same
hash key, so I've updated the caller to double-check that the
declarations found have the appropriate name.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@136708 91177308-0d34-0410-b5e6-96231b3b80d8
2011-08-02 22:32:54 +04:00
|
|
|
LocalNumTypes(0), TypeOffsets(0), BaseTypeIndex(0), StatCache(0),
|
2011-07-29 03:15:22 +04:00
|
|
|
NumPreallocatedPreprocessingEntities(0)
|
2010-08-19 04:28:17 +04:00
|
|
|
{}
|
|
|
|
|
2011-07-22 20:00:58 +04:00
|
|
|
Module::~Module() {
|
2010-08-19 04:28:17 +04:00
|
|
|
delete static_cast<ASTIdentifierLookupTable *>(IdentifierLookupTable);
|
Implement two related optimizations that make de-serialization of
AST/PCH files more lazy:
- Don't preload all of the file source-location entries when reading
the AST file. Instead, load them lazily, when needed.
- Only look up header-search information (whether a header was already
#import'd, how many times it's been included, etc.) when it's needed
by the preprocessor, rather than pre-populating it.
Previously, we would pre-load all of the file source-location entries,
which also populated the header-search information structure. This was
a relatively minor performance issue, since we would end up stat()'ing
all of the headers stored within a AST/PCH file when the AST/PCH file
was loaded. In the normal PCH use case, the stat()s were cached, so
the cost--of preloading ~860 source-location entries in the Cocoa.h
case---was relatively low.
However, the recent optimization that replaced stat+open with
open+fstat turned this into a major problem, since the preloading of
source-location entries would now end up opening those files. Worse,
those files wouldn't be closed until the file manager was destroyed,
so just opening a Cocoa.h PCH file would hold on to ~860 file
descriptors, and it was easy to blow through the process's limit on
the number of open file descriptors.
By eliminating the preloading of these files, we neither open nor stat
the headers stored in the PCH/AST file until they're actually needed
for something. Concretely, we went from
*** HeaderSearch Stats:
835 files tracked.
364 #import/#pragma once files.
823 included exactly once.
6 max times a file is included.
3 #include/#include_next/#import.
0 #includes skipped due to the multi-include optimization.
1 framework lookups.
0 subframework lookups.
*** Source Manager Stats:
835 files mapped, 3 mem buffers mapped.
37460 SLocEntry's allocated, 11215575B of Sloc address space used.
62 bytes of files mapped, 0 files with line #'s computed.
with a trivial program that uses a chained PCH including a Cocoa PCH
to
*** HeaderSearch Stats:
4 files tracked.
1 #import/#pragma once files.
3 included exactly once.
2 max times a file is included.
3 #include/#include_next/#import.
0 #includes skipped due to the multi-include optimization.
1 framework lookups.
0 subframework lookups.
*** Source Manager Stats:
3 files mapped, 3 mem buffers mapped.
37460 SLocEntry's allocated, 11215575B of Sloc address space used.
62 bytes of files mapped, 0 files with line #'s computed.
for the same program.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@125286 91177308-0d34-0410-b5e6-96231b3b80d8
2011-02-10 20:09:37 +03:00
|
|
|
delete static_cast<HeaderFileInfoLookupTable *>(HeaderFileInfoTable);
|
2010-08-19 04:28:17 +04:00
|
|
|
delete static_cast<ASTSelectorLookupTable *>(SelectorLookupTable);
|
|
|
|
}
|
2011-07-26 22:21:30 +04:00
|
|
|
|
2011-08-02 15:12:41 +04:00
|
|
|
template<typename Key, typename Offset, unsigned InitialCapacity>
|
|
|
|
static void
|
|
|
|
dumpLocalRemap(StringRef Name,
|
|
|
|
const ContinuousRangeMap<Key, Offset, InitialCapacity> &Map) {
|
|
|
|
if (Map.begin() == Map.end())
|
|
|
|
return;
|
|
|
|
|
|
|
|
typedef ContinuousRangeMap<Key, Offset, InitialCapacity> MapType;
|
|
|
|
llvm::errs() << " " << Name << ":\n";
|
|
|
|
for (typename MapType::const_iterator I = Map.begin(), IEnd = Map.end();
|
|
|
|
I != IEnd; ++I) {
|
|
|
|
llvm::errs() << " " << I->first << " -> " << I->second
|
|
|
|
<< "\n";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
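// Illustrative note (not from the original source): each remap dumped below is
// a sorted sequence of (first local ID in a range -> value translating it into
// the global numbering) pairs, so mapping an ID means finding the last entry
// whose key is <= that ID. A standalone sketch of that range-based lookup,
// assuming the second element is an offset to add and using hypothetical data:
//
//   #include <algorithm>
//   #include <utility>
//   #include <vector>
//
//   typedef std::vector<std::pair<unsigned, unsigned> > RangeMap;
//
//   // Map must be sorted and must cover local ID 0.
//   static unsigned remap(const RangeMap &Map, unsigned LocalID) {
//     RangeMap::const_iterator It = std::upper_bound(
//         Map.begin(), Map.end(), std::make_pair(LocalID, ~0u));
//     --It;                        // last range starting at or before LocalID
//     return LocalID + It->second;
//   }
//
//   // With Map = {{0, 1000}, {50, 2000}}, local ID 10 maps to 1010 and
//   // local ID 60 maps to 2060.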
|
|
|
|
void Module::dump() {
|
|
|
|
llvm::errs() << "\nModule: " << FileName << "\n";
|
|
|
|
if (!Imports.empty()) {
|
|
|
|
llvm::errs() << " Imports: ";
|
|
|
|
for (unsigned I = 0, N = Imports.size(); I != N; ++I) {
|
|
|
|
if (I)
|
|
|
|
llvm::errs() << ", ";
|
|
|
|
llvm::errs() << Imports[I]->FileName;
|
|
|
|
}
|
|
|
|
llvm::errs() << "\n";
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remapping tables.
|
|
|
|
llvm::errs() << " Base source location offset: " << SLocEntryBaseOffset
|
|
|
|
<< '\n';
|
2011-08-04 23:00:50 +04:00
|
|
|
dumpLocalRemap("Source location offset local -> global map", SLocRemap);
|
|
|
|
|
2011-08-04 01:49:18 +04:00
|
|
|
llvm::errs() << " Base identifier ID: " << BaseIdentifierID << '\n'
|
2011-08-04 22:56:47 +04:00
|
|
|
<< " Number of identifiers: " << LocalNumIdentifiers << '\n';
|
2011-08-04 23:00:50 +04:00
|
|
|
dumpLocalRemap("Identifier ID local -> global map", IdentifierRemap);
|
|
|
|
|
|
|
|
llvm::errs() << " Base selector ID: " << BaseSelectorID << '\n'
|
|
|
|
<< " Number of selectors: " << LocalNumSelectors << '\n';
|
|
|
|
dumpLocalRemap("Selector ID local -> global map", SelectorRemap);
|
|
|
|
|
2011-08-04 22:56:47 +04:00
|
|
|
llvm::errs() << " Base preprocessed entity ID: " << BasePreprocessedEntityID
|
|
|
|
<< '\n'
|
2011-08-12 04:15:20 +04:00
|
|
|
<< " Number of preprocessed entities: "
|
2011-08-04 22:56:47 +04:00
|
|
|
<< NumPreallocatedPreprocessingEntities << '\n';
|
2011-08-04 23:00:50 +04:00
|
|
|
dumpLocalRemap("Preprocessed entity ID local -> global map",
|
|
|
|
PreprocessedEntityRemap);
|
|
|
|
|
2011-08-04 20:36:56 +04:00
|
|
|
llvm::errs() << " Base macro definition ID: " << BaseMacroDefinitionID
|
|
|
|
<< '\n'
|
|
|
|
<< " Number of macro definitions: " << LocalNumMacroDefinitions
|
|
|
|
<< '\n';
|
2011-08-04 23:00:50 +04:00
|
|
|
dumpLocalRemap("Macro definition ID local -> global map",
|
|
|
|
MacroDefinitionRemap);
|
|
|
|
|
|
|
|
llvm::errs() << " Base type index: " << BaseTypeIndex << '\n'
|
|
|
|
<< " Number of types: " << LocalNumTypes << '\n';
|
|
|
|
dumpLocalRemap("Type index local -> global map", TypeRemap);
|
|
|
|
|
2011-08-03 19:48:04 +04:00
|
|
|
llvm::errs() << " Base decl ID: " << BaseDeclID << '\n'
|
|
|
|
<< " Number of decls: " << LocalNumDecls << '\n';
|
2011-08-04 23:00:50 +04:00
|
|
|
dumpLocalRemap("Decl ID local -> global map", DeclRemap);
|
2011-08-02 15:12:41 +04:00
|
|
|
}
|
|
|
|
|
2011-07-28 21:20:23 +04:00
|
|
|
Module *ModuleManager::lookup(StringRef Name) {
|
|
|
|
const FileEntry *Entry = FileMgr.getFile(Name);
|
|
|
|
return Modules[Entry];
|
|
|
|
}
|
|
|
|
|
2011-08-02 21:40:32 +04:00
|
|
|
llvm::MemoryBuffer *ModuleManager::lookupBuffer(StringRef Name) {
|
|
|
|
const FileEntry *Entry = FileMgr.getFile(Name);
|
|
|
|
return InMemoryBuffers[Entry];
|
|
|
|
}
|
|
|
|
|
2011-07-26 22:21:30 +04:00
|
|
|
/// \brief Creates a new module and adds it to the list of known modules
|
|
|
|
Module &ModuleManager::addModule(StringRef FileName, ModuleKind Type) {
|
|
|
|
Module *Prev = !size() ? 0 : &getLastModule();
|
|
|
|
Module *Current = new Module(Type);
|
|
|
|
|
|
|
|
Current->FileName = FileName.str();
|
|
|
|
|
|
|
|
Chain.push_back(Current);
|
2011-07-28 21:20:23 +04:00
|
|
|
const FileEntry *Entry = FileMgr.getFile(FileName);
|
|
|
|
Modules[Entry] = Current;
|
2011-07-26 22:21:30 +04:00
|
|
|
|
2011-07-29 22:09:09 +04:00
|
|
|
if (Prev) {
|
|
|
|
Current->ImportedBy.insert(Prev);
|
|
|
|
Prev->Imports.insert(Current);
|
|
|
|
}
|
|
|
|
|
2011-07-26 22:21:30 +04:00
|
|
|
return *Current;
|
|
|
|
}
|
|
|
|
|
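// Illustrative note (not from the original source): besides appending to the
// chain, addModule records the edge between the previously loaded module and
// the new one (Imports / ImportedBy), so the chain doubles as a small import
// graph. A standalone sketch of that bookkeeping, with a hypothetical Node
// type in place of Module:
//
//   #include <set>
//   #include <string>
//   #include <vector>
//
//   struct Node {
//     std::string FileName;
//     std::set<Node *> Imports, ImportedBy;
//   };
//
//   static Node *addNode(std::vector<Node *> &Chain, const std::string &Name) {
//     Node *Prev = Chain.empty() ? 0 : Chain.back();
//     Node *Current = new Node;      // freed by Chain's owner, as in
//     Current->FileName = Name;      // ~ModuleManager below
//     Chain.push_back(Current);
//     if (Prev) {                    // mirror the original bookkeeping
//       Current->ImportedBy.insert(Prev);
//       Prev->Imports.insert(Current);
//     }
//     return Current;
//   }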
2011-08-02 21:40:32 +04:00
|
|
|
void ModuleManager::addInMemoryBuffer(StringRef FileName,
|
|
|
|
llvm::MemoryBuffer *Buffer) {
|
|
|
|
|
|
|
|
const FileEntry *Entry = FileMgr.getVirtualFile(FileName,
|
|
|
|
Buffer->getBufferSize(), 0);
|
|
|
|
InMemoryBuffers[Entry] = Buffer;
|
|
|
|
}
|
2011-07-26 22:21:30 +04:00
|
|
|
/// \brief Exports the list of loaded modules with their corresponding names
|
|
|
|
void ModuleManager::exportLookup(SmallVector<ModuleOffset, 16> &Target) {
|
|
|
|
Target.reserve(size());
|
2011-07-28 21:20:23 +04:00
|
|
|
for (ModuleConstIterator I = Chain.begin(), E = Chain.end();
|
2011-07-26 22:21:30 +04:00
|
|
|
I != E; ++I) {
|
2011-07-28 21:20:23 +04:00
|
|
|
Target.push_back(ModuleOffset((*I)->SLocEntryBaseOffset,
|
|
|
|
(*I)->FileName));
|
2011-07-26 22:21:30 +04:00
|
|
|
}
|
|
|
|
std::sort(Target.begin(), Target.end());
|
|
|
|
}
|
|
|
|
|
2011-07-28 21:20:23 +04:00
|
|
|
ModuleManager::ModuleManager(const FileSystemOptions &FSO) : FileMgr(FSO) { }
|
|
|
|
|
2011-07-26 22:21:30 +04:00
|
|
|
ModuleManager::~ModuleManager() {
|
|
|
|
for (unsigned i = 0, e = Chain.size(); i != e; ++i)
|
|
|
|
delete Chain[e - i - 1];
|
|
|
|
}
|